config.name,config.backend.name,config.backend.version,config.backend._target_,config.backend.task,config.backend.library,config.backend.model,config.backend.processor,config.backend.device,config.backend.device_ids,config.backend.seed,config.backend.inter_op_num_threads,config.backend.intra_op_num_threads,config.backend.model_kwargs.trust_remote_code,config.backend.processor_kwargs.trust_remote_code,config.backend.hub_kwargs.trust_remote_code,config.backend.no_weights,config.backend.device_map,config.backend.torch_dtype,config.backend.eval_mode,config.backend.to_bettertransformer,config.backend.low_cpu_mem_usage,config.backend.attn_implementation,config.backend.cache_implementation,config.backend.autocast_enabled,config.backend.autocast_dtype,config.backend.torch_compile,config.backend.torch_compile_target,config.backend.quantization_scheme,config.backend.quantization_config.bits,config.backend.quantization_config.version,config.backend.deepspeed_inference,config.backend.peft_type,config.scenario.name,config.scenario._target_,config.scenario.iterations,config.scenario.duration,config.scenario.warmup_runs,config.scenario.input_shapes.batch_size,config.scenario.input_shapes.num_choices,config.scenario.input_shapes.sequence_length,config.scenario.new_tokens,config.scenario.latency,config.scenario.memory,config.scenario.energy,config.scenario.generate_kwargs.max_new_tokens,config.scenario.generate_kwargs.min_new_tokens,config.launcher.name,config.launcher._target_,config.launcher.device_isolation,config.launcher.device_isolation_action,config.launcher.start_method,config.environment.cpu,config.environment.cpu_count,config.environment.cpu_ram_mb,config.environment.system,config.environment.machine,config.environment.platform,config.environment.processor,config.environment.python_version,config.environment.gpu,config.environment.gpu_count,config.environment.gpu_vram_mb,config.environment.optimum_benchmark_version,config.environment.optimum_benchmark_commit,config.environment.transformers_version,config.environment.transformers_commit,config.environment.accelerate_version,config.environment.accelerate_commit,config.environment.diffusers_version,config.environment.diffusers_commit,config.environment.optimum_version,config.environment.optimum_commit,config.environment.timm_version,config.environment.timm_commit,config.environment.peft_version,config.environment.peft_commit,report.traceback,config.launcher.numactl,report.prefill.memory.unit,report.prefill.memory.max_ram,report.prefill.memory.max_global_vram,report.prefill.memory.max_process_vram,report.prefill.memory.max_reserved,report.prefill.memory.max_allocated,report.prefill.latency.unit,report.prefill.latency.count,report.prefill.latency.total,report.prefill.latency.mean,report.prefill.latency.stdev,report.prefill.latency.p50,report.prefill.latency.p90,report.prefill.latency.p95,report.prefill.latency.p99,report.prefill.latency.values,report.prefill.throughput.unit,report.prefill.throughput.value,report.prefill.energy.unit,report.prefill.energy.cpu,report.prefill.energy.ram,report.prefill.energy.gpu,report.prefill.energy.total,report.prefill.efficiency.unit,report.prefill.efficiency.value,report.decode.memory.unit,report.decode.memory.max_ram,report.decode.memory.max_global_vram,report.decode.memory.max_process_vram,report.decode.memory.max_reserved,report.decode.memory.max_allocated,report.decode.latency.unit,report.decode.latency.count,report.decode.latency.total,report.decode.latency.mean,report.decode.latency.stdev,report.decode.latency.p50,report.decode.latency.p90,report.decode.latency.p95,report.decode.latency.p99,report.decode.latency.values,report.decode.throughput.unit,report.decode.throughput.value,report.decode.energy.unit,report.decode.energy.cpu,report.decode.energy.ram,report.decode.energy.gpu,report.decode.energy.total,report.decode.efficiency.unit,report.decode.efficiency.value,report.per_token.memory,report.per_token.latency.unit,report.per_token.latency.count,report.per_token.latency.total,report.per_token.latency.mean,report.per_token.latency.stdev,report.per_token.latency.p50,report.per_token.latency.p90,report.per_token.latency.p95,report.per_token.latency.p99,report.per_token.latency.values,report.per_token.throughput.unit,report.per_token.throughput.value,report.per_token.energy,report.per_token.efficiency,config.backend.hub_kwargs.revision,config.backend.hub_kwargs.force_download,config.backend.hub_kwargs.local_files_only,config.backend.quantization_config.exllama_config.version,config.backend.quantization_config.exllama_config.max_input_len,config.backend.quantization_config.exllama_config.max_batch_size
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa58-7af50ddb7799966c5ce4db38;be011b48-0af0-4651-b018-b39f726fad17) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in <listcomp> self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa06-6d9562733c7f0a2005faff54;36051902-3fdd-4bea-bcaf-26e4ffbf97f0) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2981.855232,9259.450368,0.0,8613.003264,8211.364864,s,10,10.951241455078126,1.0951241455078125,0.002069112877122291,1.0949886474609376,1.0979957763671875,1.0981948486328124,1.0983541064453124,"[1.0979515380859375, 1.0983939208984375, 1.0932901611328125, 1.0924078369140624, 1.0937889404296874, 1.092544677734375, 1.0941727294921875, 1.0958045654296875, 1.096221435546875, 1.0966656494140625]",tokens/s,233.76345143161097,kWh,1.2900002946456276e-05,7.068370924207557e-06,5.9710658879597037e-05,7.967903275026086e-05,tokens/kWh,3212890.4074724964,MB,2986.262528,9330.753536,0.0,8684.306432,8503.627264,s,10,640.8562343749999,64.0856234375,0.007507663361876726,64.08716796875,64.093946875,64.09397148437499,64.09399117187499,"[64.08627734375, 64.0873125, 64.09369921875, 64.09394140625, 64.0870234375, 64.09399609375, 64.08822265625, 64.078796875, 64.0717890625, 64.07517578125]",tokens/s,0.9830597975135754,kWh,0.000756652800159322,0.0004147102533278666,0.0034761336142379974,0.004647496667725186,tokens/kWh,13555.684813613145,,s,629,649.691616149902,1.0328960511127223,0.130104123840355,1.0171883544921876,1.0177021728515625,1.01789716796875,2.1117173046874997,"[1.0174935302734376, 1.0171514892578124, 1.0171883544921876, 1.017997314453125, 1.0175242309570312, 1.0175477905273438, 1.0175784912109376, 1.017881591796875, 1.0172057495117188, 1.0168115234375, 1.0167367553710938, 1.0173132934570313, 1.0168719482421875, 1.0171760864257813, 1.0166824951171876, 1.0168237915039062, 1.0169927978515625, 1.0175252685546874, 1.0170921020507813, 1.0169978637695312, 1.0169354248046876, 1.0170029907226563, 1.0173900756835939, 1.017280517578125, 1.016859619140625, 1.0172426147460938, 1.016791015625, 1.0168565673828125, 1.0172825317382812, 1.01675927734375, 1.0171392211914063, 1.0170572509765625, 1.0173419799804688, 1.0176777954101563, 1.0177576904296874, 1.0170183715820313, 1.0177105712890624, 1.0174996337890625, 1.0173941650390625, 1.0173890380859374, 1.017080810546875, 1.0169651489257812, 1.0173880615234374, 1.0172467041015625, 1.0171791381835937, 1.016754150390625, 1.0172498168945312, 1.0174290161132813, 1.0169159545898439, 1.0170194091796876, 1.016964111328125, 1.0168411865234375, 1.0170531616210938, 1.0171678466796874, 1.0177003784179688, 1.0177116088867189, 1.0175713500976562, 1.0176112670898438, 1.01741259765625, 1.01737060546875, 1.0172507934570312, 1.0171494140625, 2.116391845703125, 1.01690673828125, 1.0169548950195313, 1.0181068725585938, 1.0178170776367188, 1.0172692260742187, 1.0174985961914063, 1.017291748046875, 1.0175293579101563, 1.0175150146484375, 1.016748046875, 1.0170767211914062, 1.0172333984375, 1.0175641479492188, 1.0175477905273438, 1.0169292602539062, 1.016869873046875, 1.0169712524414063, 1.0170634155273437, 1.0173245239257813, 1.0168094482421874, 1.016943603515625, 1.0171064453125, 1.0171781005859375, 1.017275390625, 1.0166548461914062, 1.017407470703125, 1.0169978637695312, 1.016574951171875, 1.0173306884765625, 1.0168370971679688, 1.016875, 1.0170009765625, 1.0174505004882812, 1.0167992553710938, 1.0171494140625, 1.0171607055664063, 1.0168944702148437, 1.0170101928710937, 1.017059326171875, 1.017080810546875, 1.0172037353515626, 1.0174771118164063, 1.0174095458984376, 1.0170245361328125, 1.019852783203125, 1.0168125610351562, 1.0170029907226563, 1.0175538940429687, 1.0178017578125, 1.0176777954101563, 1.016995849609375, 1.0182564086914063, 1.0176224975585937, 1.0173235473632813, 1.01770751953125, 1.0170552368164063, 1.01711669921875, 1.0171351318359374, 1.0170449829101562, 1.016859619140625, 1.0170890502929688, 1.0175088500976563, 2.11154345703125, 1.0170613403320312, 1.0173931274414063, 1.0170582885742188, 1.01701123046875, 1.01686474609375, 1.0172272338867188, 1.0170921020507813, 1.0172078247070313, 1.0167490844726563, 1.0166835327148438, 1.0169088134765625, 1.0174351196289062, 1.0175641479492188, 1.017354248046875, 1.0168699340820313, 1.0168974609375, 1.0171945190429688, 1.0176737670898437, 1.0177105102539064, 1.017945068359375, 1.0175324096679688, 1.0171945190429688, 1.0176880493164062, 1.0172406005859376, 1.0171146240234374, 1.0182410278320313, 1.0179573974609375, 1.0179983520507812, 1.0176327514648438, 1.0174893798828124, 1.0176010131835938, 1.0182543334960938, 1.0177720336914062, 1.0173716430664062, 1.017248779296875, 1.016859619140625, 1.0170859375, 1.0173767700195313, 1.0172733154296876, 1.0168115234375, 1.0170787963867187, 1.0168923950195312, 1.0172713012695314, 1.0172866821289062, 1.0175477905273438, 1.0167623901367187, 1.0171729736328126, 1.0170921020507813, 1.0174443359375, 1.0174791870117188, 1.017380859375, 1.0176481323242188, 1.0177054443359375, 1.0170101928710937, 1.01709619140625, 1.0171812133789062, 1.0176399536132812, 1.0173388671875, 1.017680908203125, 1.0173245239257813, 1.0175344848632812, 1.016958984375, 2.111784912109375, 1.0173778076171875, 1.0168678588867188, 1.0177362060546875, 1.01760205078125, 1.0175693359375, 1.0170152587890624, 1.0167971801757814, 1.0172047119140626, 1.0175958862304688, 1.016896484375, 1.0167449340820311, 1.017417724609375, 1.0172200927734374, 1.016796142578125, 1.0171340942382812, 1.016943603515625, 1.01682177734375, 1.0168862915039063, 1.0173562622070313, 1.017080810546875, 1.0174678955078125, 1.0173972778320313, 1.0175897827148437, 1.017354248046875, 1.0172293090820312, 1.0175057983398437, 1.0172456665039062, 1.0170859375, 1.0177197875976562, 1.017154541015625, 1.0173880615234374, 1.0175590209960939, 1.01743408203125, 1.016933349609375, 1.0172835693359374, 1.017112548828125, 1.0172262573242188, 1.0170572509765625, 1.017354248046875, 1.01725390625, 1.0173184204101562, 1.018461181640625, 1.0179942626953125, 1.0194544677734374, 1.0179215087890625, 1.0178590698242187, 1.0177402954101562, 1.017133056640625, 1.0170347290039063, 1.0172620849609375, 1.0173992919921875, 1.0176665649414063, 1.01707470703125, 1.0167613525390624, 1.0173092041015626, 1.0173604125976563, 1.0174525146484374, 1.017565185546875, 1.0170890502929688, 1.01747509765625, 1.017607177734375, 1.0175682373046875, 2.112203857421875, 1.017459716796875, 1.0166384887695312, 1.0169661865234374, 1.0174985961914063, 1.0174044189453124, 1.016933349609375, 1.0176819458007813, 1.0176041259765625, 1.0174843139648437, 1.017217041015625, 1.0171586303710938, 1.01719140625, 1.017375732421875, 1.0171525268554686, 1.0169210815429688, 1.017154541015625, 1.0176266479492186, 1.0178191528320313, 1.0170921020507813, 1.0168934326171875, 1.0171627807617187, 1.0176942138671874, 1.017333740234375, 1.0175324096679688, 1.0170572509765625, 1.0169077758789062, 1.017111572265625, 1.0173767700195313, 1.0171566162109376, 1.0170460205078125, 1.0172252197265625, 1.0173870239257812, 1.0172252197265625, 1.0171300048828125, 1.0168084716796875, 1.017064453125, 1.0177720336914062, 1.016975341796875, 1.0169774169921875, 1.0169395141601563, 1.0170787963867187, 1.0171217651367188, 1.0177638549804688, 1.0169712524414063, 1.016958984375, 1.01680126953125, 1.0175764770507814, 1.0173480834960937, 1.0168197021484375, 1.017017333984375, 1.0169978637695312, 1.0169896850585938, 1.0174003295898437, 1.0170214233398438, 1.01707568359375, 1.0175559692382812, 1.01893017578125, 1.0172241821289063, 1.0170316772460937, 1.01722216796875, 1.0172119140625, 1.0173163452148437, 2.1100595703125, 1.0167705688476563, 1.0177894287109375, 1.0171873168945313, 1.0167449340820311, 1.0167142333984376, 1.016875, 1.0168862915039063, 1.0167982177734376, 1.0173235473632813, 1.0169313354492187, 1.0169343872070313, 1.0175621337890626, 1.017955322265625, 1.0172938232421875, 1.0173009643554687, 1.017554931640625, 1.0171617431640625, 1.0173388671875, 1.0177402954101562, 1.0175774536132813, 1.0175682373046875, 1.0177136840820313, 1.0181427001953125, 1.0177576904296874, 1.0176788330078126, 1.017692138671875, 1.017617431640625, 1.0178262939453124, 1.0180515747070313, 1.0169354248046876, 1.0168514404296876, 1.0176296997070313, 1.0172395629882813, 1.0167859497070313, 1.0172784423828125, 1.0170859375, 1.0170224609375, 1.0186465454101563, 1.0175170288085937, 1.0169405517578125, 1.0179154052734376, 1.0179379272460938, 1.01726318359375, 1.0171544799804688, 1.0171340942382812, 1.0169609985351562, 1.0169047241210938, 1.017776123046875, 1.017396240234375, 1.0173921508789063, 1.0171002807617187, 1.0185277709960938, 1.017439208984375, 1.017312255859375, 1.017007080078125, 1.0172160034179687, 1.0172160034179687, 1.017218017578125, 1.0172958984375, 1.0172764282226563, 1.0170203857421876, 1.017691162109375, 2.111909912109375, 1.0176123046875, 1.0175139770507813, 1.0174228515625, 1.0174617309570313, 1.0175221557617187, 1.0169579467773437, 1.0177576904296874, 1.0171986083984375, 1.0168289184570312, 1.0170685424804689, 1.0173532104492187, 1.0175938720703126, 1.017185302734375, 1.01743408203125, 1.0171443481445313, 1.016764404296875, 1.0172661743164062, 1.0177013549804688, 1.0171986083984375, 1.017049072265625, 1.0170828857421874, 1.0174166870117187, 1.0170664672851562, 1.0174453735351563, 1.0168790893554687, 1.0178897705078125, 1.0175170288085937, 1.0171791381835937, 1.0170726928710938, 1.0169456176757812, 1.0167869262695313, 1.0170419311523438, 1.0173788452148438, 1.017333740234375, 1.0175242309570312, 1.018076171875, 1.01758056640625, 1.0170736694335938, 1.0170521850585938, 1.0168186645507813, 1.0173572998046876, 1.0176532592773437, 1.01709619140625, 1.0169476928710937, 1.0168115844726562, 1.0169046630859375, 1.0173613891601563, 1.0177177734375, 1.0168975219726561, 1.016826904296875, 1.0167296142578126, 1.017049072265625, 1.0175293579101563, 1.0174402465820314, 1.0174054565429687, 1.0173552856445311, 1.017469970703125, 1.0172620849609375, 1.0167603149414062, 1.0176635131835938, 1.0175570068359374, 1.0175396118164062, 2.113271728515625, 1.0164520874023437, 1.0175570068359374, 1.0175221557617187, 1.0169467163085937, 1.01684326171875, 1.01673779296875, 1.0166527709960937, 1.016406005859375, 1.01699072265625, 1.0166343383789063, 1.0166343383789063, 1.016573974609375, 1.0167418823242187, 1.0167675170898438, 1.0167920532226562, 1.0167357177734375, 1.0169302978515624, 1.0171954956054687, 1.0174054565429687, 1.0170368041992188, 1.0171791381835937, 1.0176737060546874, 1.0177310791015626, 1.0174033813476562, 1.0174044189453124, 1.017459716796875, 1.0175098876953126, 1.0173675537109375, 1.0176378784179687, 1.017259033203125, 1.0173460693359375, 1.0176511840820313, 1.0174822387695313, 1.0173767700195313, 1.0175150146484375, 1.0174218139648437, 1.0178508911132813, 1.0178406372070312, 1.0176204833984375, 1.0173767700195313, 1.0178017578125, 1.0179000244140626, 1.0172119140625, 1.0165678100585938, 1.0165330200195313, 1.0163292236328125, 1.0167285766601561, 1.0169968872070312, 1.0166988525390626, 1.0169210815429688, 1.0170480346679687, 1.0171238403320313, 1.0167817993164063, 1.0167633666992189, 1.0170337524414061, 1.0168043823242188, 1.0171392211914063, 1.017153564453125, 1.0167838745117188, 1.0169251708984375, 1.0174423217773438, 1.01701220703125, 2.113585205078125, 1.0164449462890626, 1.016585205078125, 1.0164551391601562, 1.01684326171875, 1.016680419921875, 1.016753173828125, 1.0173470458984375, 1.0173030395507812, 1.0169528198242188, 1.0168330078125, 1.0170194091796876, 1.01732763671875, 1.0167736206054687, 1.0168514404296876, 1.0169835815429686, 1.0166835327148438, 1.01719140625, 1.0171043701171876, 1.0166845703125, 1.017260009765625, 1.0176849975585938, 1.0174935302734376, 1.017529296875, 1.0172733154296876, 1.0169231567382813, 1.0173982543945312, 1.0170020141601563, 1.016616943359375, 1.0167767333984374, 1.0169467163085937, 1.0172088623046875, 1.01715966796875, 1.016932373046875, 1.0170357666015626, 1.0171320190429687, 1.01718017578125, 1.0174566650390624, 1.0167255249023437, 1.0167971801757814, 1.0167633666992189, 1.0170337524414061, 1.0170050659179688, 1.0170439453125, 1.01673779296875, 1.0168330078125, 1.0170828857421874, 1.017059326171875, 1.0168739624023437, 1.0171300048828125, 1.0171238403320313, 1.016974365234375, 1.0169026489257813, 1.0171504516601562, 1.0168207397460938, 1.0168319702148438, 1.0172692260742187, 1.0173624877929688, 1.016796142578125, 1.0168391723632813, 1.0171893920898438, 1.017365478515625, 1.016943603515625, 2.1139599609375, 1.0168893432617188, 1.0171945190429688, 1.016859619140625, 1.0165811157226563, 1.0167500610351563, 1.01697021484375, 1.016826904296875, 1.0168453369140624, 1.0170132446289062, 1.0168053588867187, 1.0173511962890625, 1.01734912109375, 1.0167859497070313, 1.0169036865234375, 1.0171238403320313, 1.0169866333007813, 1.0171996459960937, 1.0168934326171875, 1.0169763793945312, 1.0167183227539063, 1.0170234985351563, 1.0178928833007812, 1.0167654418945313, 1.0169620361328124, 1.0170040283203126, 1.0167449340820311, 1.0173562622070313, 1.016680419921875, 1.0172979125976562, 1.0165924072265624, 1.017101318359375, 1.0172999877929687, 1.0170224609375, 1.0172262573242188, 1.0168402099609375, 1.0170101928710937, 1.0167879638671875, 1.01705419921875, 1.0175529174804687, 1.0173265991210938, 1.01718017578125, 1.0174719848632812, 1.0170245361328125, 1.0174248657226563, 1.016958984375, 1.0166087646484374, 1.0166466674804688, 1.0171975708007812, 1.01707470703125, 1.0169210815429688, 1.0175795288085938, 1.0173245239257813, 1.0168084716796875, 1.0169722900390625, 1.0170470581054687, 1.0169948120117187, 1.0169896850585938, 1.0172006225585937, 1.0173716430664062, 1.0176337890625, 1.0176676025390625, 1.0172323608398437]",tokens/s,0.9681516343515072,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1441.677312,1709.703168,0.0,1063.256064,942.605312,s,10,0.8730960388183594,0.08730960388183594,0.002035304872210584,0.08713673782348633,0.08910322875976563,0.09054942092895508,0.09170637466430664,"[0.09199561309814454, 0.0843604507446289, 0.08696444702148437, 0.08719612884521484, 0.08721517181396485, 0.08707734680175781, 0.08465267181396484, 0.0866138916015625, 0.08823846435546875, 0.08878185272216797]",tokens/s,2932.0943930345643,kWh,1.0099359104323139e-06,5.533989598310048e-07,2.5805410008034e-06,4.143875871066718e-06,tokens/kWh,61777912.265045814,MB,1441.677312,1709.703168,0.0,1063.256064,942.607872,s,10,53.98236181640625,5.398236181640624,0.0409841937132303,5.390563720703125,5.449904833984375,5.452959252929687,5.455402788085937,"[5.38043359375, 5.40069384765625, 5.41887353515625, 5.44691552734375, 5.456013671875, 5.44922607421875, 5.37513134765625, 5.35530859375, 5.36842431640625, 5.33134130859375]",tokens/s,11.670478630457612,kWh,6.202919682701882e-05,3.3993588802392576e-05,0.00015045469253839705,0.00024647747816780845,tokens/kWh,255601.4467054386,,s,629,54.67969024658198,0.08693114506610816,0.010438416792585154,0.08622489929199219,0.08683417816162109,0.08736747589111328,0.17189485961914075,"[0.08353689575195312, 0.08669286346435547, 0.08684236907958984, 0.08664883422851563, 0.08732160186767578, 0.0871107177734375, 0.08675321960449218, 0.08646348571777343, 0.08578867340087891, 0.08561151885986328, 0.08533920288085937, 0.08318355560302734, 0.08327986907958984, 0.08331366729736328, 0.08347750091552734, 0.08323891448974609, 0.08337407684326172, 0.08320102691650391, 0.08308223724365234, 0.08320921325683593, 0.08336589050292968, 0.08362290954589843, 0.08334028625488281, 0.08379084777832031, 0.08326246643066407, 0.08351641845703126, 0.08637849426269531, 0.0860948486328125, 0.08636211395263672, 0.0862208023071289, 0.0863825912475586, 0.08643379211425781, 0.08343961334228515, 0.08492339324951172, 0.08729497528076172, 0.08630579376220703, 0.08625049591064453, 0.08637340545654297, 0.08612451171875, 0.08631295776367187, 0.0867215347290039, 0.08637235260009765, 0.08630169677734376, 0.08631193542480468, 0.08447795104980468, 0.08495410919189453, 0.08645938873291016, 0.08668978881835937, 0.08635699462890625, 0.08412057495117188, 0.0861143035888672, 0.08629350280761719, 0.08610099029541016, 0.0859535369873047, 0.086508544921875, 0.08645426940917969, 0.0861286392211914, 0.08638976287841797, 0.08641228485107422, 0.08600883483886719, 0.08566067504882813, 0.08624947357177734, 0.16856166076660156, 0.08411341094970703, 0.0832890853881836, 0.08320819091796874, 0.08320409393310547, 0.08308633422851562, 0.08329011535644532, 0.08373766326904297, 0.08319993591308594, 0.08375193786621093, 
0.08421683502197265, 0.08632524871826172, 0.0862371826171875, 0.08634674835205078, 0.08365670776367187, 0.0833986587524414, 0.08349798583984375, 0.08350003051757812, 0.08335769653320313, 0.08395059204101563, 0.0839208984375, 0.08677375793457032, 0.0867154541015625, 0.08665900421142578, 0.0864194564819336, 0.08657305908203125, 0.08616550445556641, 0.08598429107666015, 0.08726729583740235, 0.08673792266845703, 0.08624230194091796, 0.0869222412109375, 0.08644096374511719, 0.0866344985961914, 0.08689356994628906, 0.08668672180175781, 0.08644608306884766, 0.08659046173095702, 0.08682291412353516, 0.08598323059082032, 0.08748851013183594, 0.08676249694824219, 0.08683213043212891, 0.08647679901123047, 0.08678195190429687, 0.08769843292236328, 0.08739942169189453, 0.08640614318847656, 0.08639897918701171, 0.08644403076171875, 0.08633344268798829, 0.08639488220214844, 0.08622694396972656, 0.0867215347290039, 0.0865638427734375, 0.08646451568603515, 0.08639078521728516, 0.08660889434814453, 0.08628224182128906, 0.08633753967285156, 0.08671437072753906, 0.08621875, 0.08635084533691406, 0.17366220092773438, 0.08666828918457031, 0.08657817840576172, 0.08662118530273437, 0.08642969512939454, 0.08634265899658203, 0.0862525405883789, 0.08606208038330078, 0.08631501007080078, 0.08632012939453125, 0.08620851135253907, 0.08628530883789062, 0.08648499298095703, 0.08680038452148438, 0.08667135620117188, 0.08662732696533203, 0.08640409851074218, 0.08657817840576172, 0.08667033386230469, 0.08634060668945312, 0.08654131317138672, 0.08610816192626954, 0.08561766052246093, 0.08718950653076171, 0.08702668762207032, 0.08669388580322265, 0.08728883361816406, 0.08666214752197265, 0.08715264129638672, 0.08645017242431641, 0.08640409851074218, 0.08617984008789062, 0.08535756683349609, 0.0866324462890625, 0.08659967803955078, 0.08661299133300782, 0.08663346862792969, 0.08641843414306641, 0.08643379211425781, 0.08636006164550782, 0.08653209686279296, 0.0865771484375, 0.08655257415771485, 0.08642047882080078, 0.08629657745361329, 0.08657100677490234, 0.0862955551147461, 0.08327884674072265, 0.08376831817626954, 0.08357273864746094, 0.08349286651611328, 0.08337715148925781, 0.0860549087524414, 0.08430796813964844, 0.08347443389892578, 0.08344064331054687, 0.08356454467773437, 0.08309248352050781, 0.08637235260009765, 0.08680242919921875, 0.0863078384399414, 0.08653311920166015, 0.08646041870117188, 0.1741107177734375, 0.08636214447021484, 0.08619312286376953, 0.08634162902832031, 0.08638976287841797, 0.08647885131835938, 0.08649215698242188, 0.08664064025878906, 0.08642771148681641, 0.08624428558349609, 0.08617369842529297, 0.08573542022705079, 0.08636109161376954, 0.08620134735107422, 0.086329345703125, 0.08637644958496093, 0.08652902221679687, 0.08682086181640625, 0.08738508605957031, 0.0865249252319336, 0.0863815689086914, 0.0862894058227539, 0.08674918365478515, 0.08637337493896484, 0.08735027313232421, 0.08632422637939453, 0.08617984008789062, 0.08628428649902344, 0.08778956604003907, 0.08680754852294922, 0.08660582733154297, 0.08646348571777343, 0.08672358703613281, 0.0874486083984375, 0.08708297729492187, 0.08639078521728516, 0.08637337493896484, 0.08624742126464843, 0.08626278686523438, 0.08637651062011718, 0.08677881622314453, 0.08631501007080078, 0.08682803344726563, 0.0861112289428711, 0.08643583679199218, 0.08626380920410157, 0.08634572601318359, 0.086181884765625, 0.08633036804199219, 0.08633856201171874, 0.08504627227783203, 0.08628121948242187, 0.08638771057128906, 0.08684748840332031, 0.0863825912475586, 
0.08656588745117187, 0.08662528228759765, 0.08654131317138672, 0.08605388641357421, 0.08622694396972656, 0.0862955551147461, 0.08632524871826172, 0.08606412506103515, 0.1738434600830078, 0.0866170883178711, 0.08668978881835937, 0.0868823013305664, 0.08633548736572266, 0.08676761627197266, 0.08665599822998046, 0.08651468658447266, 0.08632012939453125, 0.08640306854248046, 0.08641433715820312, 0.0875315170288086, 0.08692838287353516, 0.0863477783203125, 0.0873861083984375, 0.08702054595947266, 0.08652390289306641, 0.08673382568359375, 0.08648191833496094, 0.0868485107421875, 0.08659967803955078, 0.0865249252319336, 0.08798515319824218, 0.08668672180175781, 0.0869713897705078, 0.08641024017333984, 0.08619417572021484, 0.08626278686523438, 0.08664371490478516, 0.0865577621459961, 0.08645420837402344, 0.08650342559814453, 0.08614502716064452, 0.0860057601928711, 0.08658636474609376, 0.08687513732910156, 0.08634572601318359, 0.08673484802246094, 0.086614013671875, 0.08654745483398438, 0.08647782135009766, 0.08652288055419922, 0.08698777770996094, 0.08646348571777343, 0.08669286346435547, 0.08713728332519531, 0.08633344268798829, 0.08693965148925781, 0.0867000961303711, 0.08665286254882812, 0.08626483154296875, 0.0867430419921875, 0.08634982299804687, 0.08623616027832032, 0.0861685791015625, 0.08641228485107422, 0.08606719970703125, 0.08660889434814453, 0.08628736114501953, 0.08668057250976563, 0.08639794921875, 0.08680242919921875, 0.08641741180419922, 0.1737769012451172, 0.08605696105957031, 0.08600371551513672, 0.08643071746826173, 0.08781107330322266, 0.0866519012451172, 0.08646246337890626, 0.08642668914794922, 0.08613164520263672, 0.08653823852539062, 0.08642047882080078, 0.0866324462890625, 0.08596377563476562, 0.0863815689086914, 0.08631398773193359, 0.08695295715332031, 0.08793907165527344, 0.08786227416992187, 0.08662630462646484, 0.08779571533203125, 0.08724275207519532, 0.08729190063476562, 0.0877844467163086, 0.08636006164550782, 0.0858818588256836, 0.08638771057128906, 0.086949951171875, 0.08719251251220703, 0.08616550445556641, 0.08660377502441406, 0.08642457580566407, 0.08629452514648438, 0.0865054702758789, 0.08606105804443359, 0.08653311920166015, 0.08622796630859375, 0.0860200958251953, 0.08642253112792969, 0.08642355346679688, 0.0863641586303711, 0.08620851135253907, 0.0867583999633789, 0.08634880065917969, 0.08620851135253907, 0.08670003509521484, 0.08732057952880859, 0.08644608306884766, 0.08616960144042969, 0.08388813018798828, 0.08584806060791016, 0.08618905639648437, 0.0861788787841797, 0.0874474868774414, 0.08617574310302735, 0.08607027435302735, 0.0862064666748047, 0.08648703765869141, 0.08633241271972657, 0.08608153533935547, 0.08628838348388672, 0.08599142456054687, 0.08555622100830078, 0.08707379150390625, 0.1688248291015625, 0.08354815673828125, 0.08360345458984375, 0.08642150115966797, 0.08649830627441406, 0.0861808624267578, 0.08627609252929687, 0.08619622039794922, 0.0862033920288086, 0.08743424224853516, 0.08643276977539062, 0.08662425231933593, 0.08616754913330078, 0.08617164611816407, 0.0859135971069336, 0.08686489868164063, 0.08547532653808594, 0.08608972930908203, 0.08635596466064453, 0.08650649261474609, 0.08669593811035156, 0.0856258544921875, 0.08557164764404297, 0.08602413177490234, 0.08610304260253906, 0.08613478088378906, 0.0858818588256836, 0.08467046356201172, 0.08328396606445312, 0.08320614624023437, 0.08348467254638672, 0.0833433609008789, 0.08343142700195312, 0.08517120361328125, 0.08331775665283203, 0.0835225601196289, 0.08325939178466797, 
0.0829675521850586, 0.08333618927001953, 0.0835962905883789, 0.08292864227294922, 0.08303923034667969, 0.0840273895263672, 0.08592588806152343, 0.08553369903564453, 0.08563097381591797, 0.08609180450439453, 0.08550192260742187, 0.08622489929199219, 0.08613069152832031, 0.08599961853027344, 0.08535244750976563, 0.0853544921875, 0.08618495941162109, 0.08482406616210937, 0.08524288177490234, 0.08417894744873047, 0.08835072326660157, 0.08645426940917969, 0.08594847869873047, 0.08594425964355469, 0.08639794921875, 0.0862003173828125, 0.17308876037597656, 0.0861470718383789, 0.08535763549804687, 0.08573228454589844, 0.08777318572998047, 0.0860579833984375, 0.08572621154785157, 0.086181884765625, 0.08619213104248047, 0.08773222351074218, 0.08648397064208985, 0.08551526641845703, 0.08338022613525391, 0.08344371032714844, 0.08378880310058594, 0.08320511627197266, 0.08437350463867188, 0.08477490997314453, 0.08600780487060547, 0.08639385223388672, 0.08647065734863281, 0.08623411560058594, 0.08597503662109375, 0.08415744018554687, 0.08344166564941406, 0.08589516448974609, 0.08594534301757813, 0.08557977294921874, 0.08637545776367188, 0.08613065338134765, 0.08599756622314453, 0.08644300842285156, 0.08653823852539062, 0.08349593353271484, 0.08332083129882813, 0.08336589050292968, 0.08353075408935547, 0.08340172576904296, 0.08323072052001954, 0.08343756866455078, 0.08349286651611328, 0.0832890853881836, 0.0835041275024414, 0.08353897857666015, 0.08367203521728515, 0.08369664001464844, 0.08330963134765625, 0.08333510589599609, 0.08366182708740234, 0.08354617309570313, 0.08324396514892578, 0.08303308868408203, 0.08764422607421875, 0.084233154296875, 0.08548761749267578, 0.08656588745117187, 0.08594841766357422, 0.08611126708984375, 0.08648700714111328, 0.08606719970703125, 0.08635289764404297, 0.08466329956054687, 0.08477286529541016, 0.17457868957519532, 0.08449132537841797, 0.08596371459960937, 0.08606515502929687, 0.08642355346679688, 0.08605696105957031, 0.08621673583984375, 0.08608969879150391, 0.08441241455078125, 0.08336179351806641, 0.08336179351806641, 0.08567501068115234, 0.08582860565185547, 0.083346435546875, 0.08477388763427735, 0.0841707534790039, 0.08664268493652344, 0.08617881774902343, 0.0860057601928711, 0.08586239624023438, 0.08551219177246094, 0.08600678253173828, 0.08574361419677734, 0.08587980651855469, 0.0835389404296875, 0.08575692749023438, 0.08598118591308594, 0.085970947265625, 0.08765235137939453, 0.08452095794677734, 0.08327577972412109, 0.08366387176513672, 0.08551423645019532, 0.08583475494384765, 0.08568831634521484, 0.08532889556884765, 0.0860057601928711, 0.08593408203125, 0.08623308563232422, 0.08616754913330078, 0.08630989074707031, 0.08582454681396484, 0.08615420532226563, 0.08506265258789063, 0.08292249298095702, 0.08326246643066407, 0.08425062561035156, 0.08737894439697266, 0.08324813079833984, 0.08346521759033203, 0.08319385528564453, 0.08310169219970703, 0.08324198150634765, 0.08323072052001954, 0.08317030334472657, 0.08586962890625, 0.08609273529052734, 0.08769741058349609, 0.08657305908203125, 0.08599244689941406, 0.08583679962158203, 0.08583782196044921, 0.08327680206298828, 0.17533644104003906, 0.08587776184082031, 0.08605081939697265, 0.0857548828125, 0.08574873352050781, 0.0859535369873047, 0.08613581085205078, 0.08587161254882812, 0.08592998504638671, 0.08673795318603515, 0.08629654693603515, 0.08595455932617188, 0.08510157012939454, 0.08576204681396485, 0.08615424346923828, 0.08628736114501953, 0.08378880310058594, 0.08452607727050782, 0.08607129669189453, 
0.0862586898803711, 0.08617062377929688, 0.08422502136230468, 0.08376422119140625, 0.0832890853881836, 0.08342630767822265, 0.0846397476196289, 0.08346630096435546, 0.08343545532226562, 0.08319590759277344, 0.08344371032714844, 0.08367001342773438, 0.0833259506225586, 0.0832511978149414, 0.08327372741699218, 0.0835389404296875, 0.0833966064453125, 0.08333004760742188, 0.0835758056640625, 0.08591462707519532, 0.08338739013671875, 0.08385945892333985, 0.08679219055175781, 0.0835594253540039, 0.08360345458984375, 0.08362598419189453, 0.08332288360595703, 0.08334137725830078, 0.08309037017822266, 0.08337209320068359, 0.08316102600097657, 0.08313855743408204, 0.08323993682861328, 0.08327168273925781, 0.08664064025878906, 0.08615526580810547, 0.08606719970703125, 0.08610099029541016, 0.08608665466308593, 0.08620543670654297, 0.0832573471069336, 0.08329011535644532, 0.08356147003173828, 0.08407449340820312]",tokens/s,11.50335704469939,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3984.965632,12732.33408,0.0,12085.886976,11337.364992,s,10,10.92237109375,1.092237109375,0.0021117743883837157,1.0918812255859374,1.0945805541992188,1.0956890197753906,1.0965757922363282,"[1.0967974853515625, 1.094334228515625, 1.0893026123046874, 1.090765625, 1.0907347412109376, 1.09039990234375, 1.0912408447265625, 1.09275048828125, 1.0935235595703126, 1.0925216064453125]",tokens/s,234.38134247836382,kWh,1.2875690162181854e-05,7.054504655134223e-06,6.32269116926043e-05,8.315710650992037e-05,tokens/kWh,3078510.1928655975,MB,3984.965632,12732.33408,0.0,12085.886976,11686.79936,s,10,637.5050507812499,63.75050507812499,0.0049395574759148195,63.749460937500004,63.757564453125,63.759594726562504,63.7612189453125,"[63.74835546875, 63.7478046875, 63.75041796875, 63.7437265625, 63.75711328125, 63.74664453125, 63.761625, 63.75044140625, 63.74853515625, 63.75038671875]",tokens/s,0.9882274645949038,kWh,0.0007525312550531495,0.00041245463154615205,0.0037184816692274027,0.004883467555826704,tokens/kWh,12900.669305119396,,s,629,646.3186791381836,1.027533671125888,0.12973576665422035,1.0118389892578126,1.012325793457031,1.0126516357421875,2.103177431640625,"[1.0117550048828126, 1.0119987182617187, 1.0120816650390625, 1.0120765991210938, 1.0125403442382812, 1.011704833984375, 1.0115768432617187, 1.0117868041992188, 1.01175390625, 1.0114559936523437, 1.0114959106445311, 1.0115798950195312, 1.0117611694335937, 1.01186865234375, 1.0123243408203124, 1.0116085815429687, 1.01121435546875, 1.0115245971679687, 1.0116761474609375, 1.011673095703125, 1.0115952758789062, 1.0115348510742188, 1.011820556640625, 1.0118010864257811, 1.0121104125976563, 1.0117160034179689, 1.0116536254882813, 1.0114908447265625, 1.0122445068359376, 1.0119515991210937, 1.0117140502929687, 1.011957763671875, 1.0116392822265625, 1.0119588012695313, 1.0129981689453125, 1.011984375, 1.0124021606445313, 1.01186865234375, 1.0122096557617188, 1.0120274047851563, 1.0117069091796875, 1.0121103515625, 1.0121615600585938, 1.0125332641601563, 1.0121226196289062, 1.01194140625, 1.0114242553710937, 1.0118287353515625, 1.011631103515625, 1.0119935913085938, 1.0117672729492186, 1.0120878295898437, 1.0116167602539063, 1.0122476196289063, 1.0120057983398438, 1.0121021728515625, 1.011689453125, 1.0124891967773437, 1.0117969970703125, 1.0116424560546875, 1.0114559326171875, 1.0116290283203124, 2.106623046875, 1.0121912841796874, 1.012216796875, 1.0120878295898437, 1.01224755859375, 1.0123724365234374, 1.0121103515625, 1.011904541015625, 1.0120908813476563, 1.0119536743164061, 1.0122240600585937, 1.0117017211914063, 1.0118287353515625, 1.0117017822265626, 1.0116792602539062, 1.0114119873046874, 1.0114088745117187, 1.01127783203125, 
1.011852294921875, 1.01174169921875, 1.011968994140625, 1.0117713623046876, 1.0116188354492188, 1.0120038452148437, 1.011915771484375, 1.0112901000976562, 1.0112747802734374, 1.0113668823242188, 1.01144677734375, 1.0113177490234375, 1.0119198608398436, 1.0120233154296876, 1.0116013793945313, 1.0115082397460937, 1.0116761474609375, 1.0115245971679687, 1.0114263305664062, 1.0112634887695313, 1.011557373046875, 1.0114109497070312, 1.0121984252929688, 1.0121513061523437, 1.01176318359375, 1.0114232177734375, 1.011726318359375, 1.011304443359375, 1.0149703979492188, 1.0115143432617189, 1.012031494140625, 1.0119803466796875, 1.0133125, 1.0118717651367188, 1.0115481567382814, 1.0116792602539062, 1.0120653076171875, 1.0115502319335938, 1.0118072509765625, 1.0117089233398437, 1.0117550048828126, 1.0120355834960937, 1.0122034912109374, 1.011757080078125, 1.0119905395507813, 2.102846435546875, 1.0113546142578125, 1.0115645141601564, 1.011766357421875, 1.0118368530273438, 1.011746826171875, 1.0118656005859374, 1.0115020751953125, 1.0114826049804688, 1.0119618530273438, 1.01157373046875, 1.0117109985351562, 1.0118194580078126, 1.0120345458984374, 1.0119188232421874, 1.0118768920898438, 1.0121174926757812, 1.0112481079101563, 1.011708984375, 1.011843017578125, 1.011999755859375, 1.0120202026367187, 1.0120222778320314, 1.0137774047851562, 1.011525634765625, 1.0120479125976563, 1.011984375, 1.0118092651367188, 1.0117805786132812, 1.0120990600585937, 1.0119854125976562, 1.01212158203125, 1.0122670288085938, 1.0120601806640626, 1.0118174438476562, 1.0118410034179688, 1.0115552978515625, 1.011240966796875, 1.011661865234375, 1.0114876708984375, 1.011441650390625, 1.01127783203125, 1.0116198120117188, 1.0113597412109374, 1.0116414184570313, 1.0120242309570313, 1.0122291259765626, 1.013486572265625, 1.0116536254882813, 1.0121564331054687, 1.0123980712890626, 1.0119321899414062, 1.012010009765625, 1.0122987670898438, 1.0121973876953125, 1.0126295166015624, 1.0126663818359376, 1.0119352416992187, 1.0116690063476563, 1.0120150756835937, 1.0120540771484374, 1.01182666015625, 1.0116351928710938, 2.10330615234375, 1.0118348999023437, 1.0117386474609376, 1.0117161254882812, 1.01167822265625, 1.0121646118164063, 1.0119137573242187, 1.01201611328125, 1.0119761962890625, 1.0122608032226563, 1.0121830444335937, 1.0119669799804687, 1.0134896850585937, 1.0120386352539064, 1.0123622436523438, 1.0113034057617187, 1.0113535766601562, 1.011219482421875, 1.0112798461914063, 1.0111006469726562, 1.0113710327148437, 1.0122311401367188, 1.0115225830078125, 1.0115430297851562, 1.0117457885742187, 1.0114498291015626, 1.0119761962890625, 1.0120970458984375, 1.0119505615234374, 1.0119619140625, 1.0121400146484374, 1.0117672729492186, 1.0114600830078124, 1.0114866943359375, 1.011557373046875, 1.011900390625, 1.0118594360351563, 1.01203662109375, 1.0116956176757812, 1.0115460815429687, 1.0117755126953125, 1.0116608276367187, 1.01161474609375, 1.0111918334960937, 1.0146948852539062, 1.01151953125, 1.011521484375, 1.0112010498046875, 1.01165771484375, 1.0114376220703125, 1.01186865234375, 1.0118800048828125, 1.0115624389648437, 1.0114273071289062, 1.011641357421875, 1.0113966064453126, 1.0116741333007813, 1.011451904296875, 1.0115655517578126, 1.011557373046875, 1.012046875, 1.0120038452148437, 1.011968017578125, 2.10362060546875, 1.0117232666015625, 1.0121318359375, 1.0118696899414064, 1.0120601806640626, 1.0119393310546876, 1.012031494140625, 1.0123212890625, 1.0119556884765626, 1.01214208984375, 1.0124891967773437, 1.0119710693359374, 
1.0119178466796874, 1.0113935546875, 1.0116761474609375, 1.0115359497070313, 1.011611572265625, 1.01361767578125, 1.0116761474609375, 1.0114703369140625, 1.0116137084960937, 1.011589111328125, 1.0115932006835937, 1.0114774780273437, 1.0115348510742188, 1.0114713745117188, 1.0118184814453124, 1.0116055297851563, 1.01148876953125, 1.0116792602539062, 1.0118379516601563, 1.0119649047851562, 1.0117376708984376, 1.011568603515625, 1.0120294189453125, 1.01218505859375, 1.0119823608398437, 1.0122332153320313, 1.011926025390625, 1.0118748168945313, 1.0124400634765625, 1.01216357421875, 1.011631103515625, 1.0115266723632812, 1.0125834350585938, 1.0121605224609376, 1.0119229736328126, 1.0119270629882813, 1.01148876953125, 1.0118287353515625, 1.011999755859375, 1.011862548828125, 1.0116915283203125, 1.0118143920898437, 1.011768310546875, 1.0122393798828124, 1.0125045776367188, 1.0162032470703124, 1.0122127075195313, 1.0123765869140624, 1.0122915649414062, 1.012389892578125, 1.0124400634765625, 2.102681640625, 1.0123212280273437, 1.0120990600585937, 1.0122465209960938, 1.0117007446289064, 1.012094970703125, 1.0112553100585937, 1.0112625122070313, 1.0114201049804687, 1.0113854370117188, 1.01134228515625, 1.0119854125976562, 1.0117590942382813, 1.0113126220703126, 1.0112696533203125, 1.011483642578125, 1.0116823120117187, 1.0114539794921875, 1.0115082397460937, 1.0114959106445311, 1.0115481567382814, 1.0120242919921876, 1.0115635375976562, 1.0115850219726563, 1.0117254028320313, 1.0121840209960937, 1.0118276977539062, 1.0116751098632812, 1.0118184814453124, 1.0114385986328125, 1.0118215942382813, 1.0118092651367188, 1.0117406616210938, 1.011473388671875, 1.0117386474609376, 1.0118164672851562, 1.01148876953125, 1.0114867553710938, 1.0149037475585938, 1.0118359375, 1.0122781982421876, 1.0117642211914062, 1.0119249877929688, 1.011294189453125, 1.0117027587890626, 1.0116690063476563, 1.0123939819335936, 1.0120908813476563, 1.0122567749023437, 1.0120653076171875, 1.0122485961914063, 1.012369384765625, 1.012421630859375, 1.0119987182617187, 1.0126663818359376, 1.0115481567382814, 1.0117089233398437, 1.0119823608398437, 1.0117867431640626, 1.0115614624023437, 1.0122546997070312, 1.0117652587890624, 1.012073486328125, 2.104281005859375, 1.011789794921875, 1.0118133544921875, 1.0122199096679687, 1.01174169921875, 1.0117130126953124, 1.0126079711914062, 1.0127093505859375, 1.0125383911132813, 1.0122393798828124, 1.01216357421875, 1.0123182373046875, 1.0122199096679687, 1.0121246948242189, 1.0119669799804687, 1.0121113891601563, 1.0128097534179688, 1.01222705078125, 1.012335693359375, 1.0115757446289062, 1.0118441162109375, 1.0118328247070312, 1.011631103515625, 1.0136719360351563, 1.0121174926757812, 1.01250048828125, 1.012552734375, 1.0127390747070313, 1.0122342529296875, 1.0118225708007813, 1.0121768798828126, 1.0121298217773438, 1.0121860961914062, 1.0118389892578126, 1.012316162109375, 1.0120714111328124, 1.0119198608398436, 1.0119669799804687, 1.0115543823242188, 1.0116760864257812, 1.0120601806640626, 1.0119342041015624, 1.0120653076171875, 1.0115552978515625, 1.0121380004882812, 1.0123212890625, 1.0120806274414063, 1.0117672729492186, 1.0117805786132812, 1.0118287353515625, 1.0118615112304687, 1.0117703857421876, 1.0114365234375, 1.0116065063476563, 1.0118512573242187, 1.0121676635742187, 1.0124697875976563, 1.0122608642578126, 1.0120376586914062, 1.0119403686523438, 1.0120181884765624, 1.0120386352539064, 1.01167822265625, 2.1052109375, 1.0120621948242188, 1.01188916015625, 1.0118225708007813, 
1.01174169921875, 1.012010009765625, 1.0117171020507814, 1.0117847290039061, 1.0114641723632813, 1.01161474609375, 1.0114928588867187, 1.0118482055664062, 1.0115112915039062, 1.0117078857421875, 1.0117007446289064, 1.011862548828125, 1.0131630249023438, 1.0117345581054686, 1.0117489013671874, 1.0116443481445312, 1.0115972900390624, 1.0121307983398438, 1.01167822265625, 1.0120202026367187, 1.0114754638671875, 1.012262939453125, 1.0119618530273438, 1.0120448608398438, 1.01155224609375, 1.0120181884765624, 1.0120068969726563, 1.0124298095703126, 1.0118533325195314, 1.0121185302734375, 1.0118779296875, 1.0119566650390626, 1.0118379516601563, 1.0119434204101563, 1.0118062133789063, 1.0120775756835938, 1.0119024658203124, 1.0125721435546875, 1.0118563842773438, 1.0118348999023437, 1.0117273559570312, 1.01222705078125, 1.011905517578125, 1.0115379028320313, 1.0121062622070311, 1.0116116333007812, 1.011979248046875, 1.012168701171875, 1.0120171508789062, 1.01224658203125, 1.0122731323242187, 1.0115819091796876, 1.0118994140625, 1.0117642211914062, 1.0116351928710938, 1.011962890625, 1.0119782104492188, 1.01182568359375, 1.012173828125, 2.105998291015625, 1.0114345092773438, 1.0115174560546876, 1.0114590454101562, 1.0117990112304687, 1.011937255859375, 1.0119331665039062, 1.011800048828125, 1.0117058715820313, 1.0113648681640626, 1.0117181396484376, 1.0116608276367187, 1.0114611206054687, 1.0114600830078124, 1.0114508666992188, 1.0114754638671875, 1.0120274047851563, 1.0133237915039062, 1.0123274536132814, 1.0115994262695311, 1.0119392700195313, 1.011694580078125, 1.0119700317382812, 1.0124237060546875, 1.0117488403320312, 1.0116751098632812, 1.0119721069335939, 1.011651611328125, 1.01169970703125, 1.0113812255859376, 1.0121625366210938, 1.0115175170898438, 1.0117620849609374, 1.011493896484375, 1.0118062133789063, 1.0119086303710938, 1.01222607421875, 1.0115870971679688, 1.0119854125976562, 1.0115471801757812, 1.0122720336914062, 1.0117857055664063, 1.0124462280273439, 1.0115880737304688, 1.0122782592773438, 1.0121298217773438, 1.0129663696289062, 1.011736572265625, 1.0120570678710938, 1.0118963623046875, 1.0122628784179688, 1.0121093139648438, 1.0120888061523436, 1.011789794921875, 1.0123253784179687, 1.0119198608398436, 1.0131046142578124, 1.0120990600585937, 1.0116629028320312, 1.0112337646484375, 1.0118236083984375, 1.0113085327148437, 1.0116392822265625, 2.104349609375, 1.0117294311523437, 1.0120775756835938, 1.0116546630859375, 1.01159423828125, 1.0118696899414064, 1.0115491943359376, 1.0113505249023438, 1.0112051391601562, 1.0120068969726563, 1.0118911743164063, 1.011857421875, 1.01180419921875, 1.0114385375976562, 1.011646484375, 1.011788818359375, 1.0118615112304687, 1.0118656005859374, 1.0119014282226562, 1.0118348999023437, 1.011794921875, 1.011900390625, 1.0119556884765626, 1.0122608642578126, 1.0124257202148437, 1.0121984252929688, 1.0144102172851563, 1.011820556640625, 1.0118195190429689, 1.0117386474609376, 1.0117069091796875, 1.0122393798828124, 1.011646484375, 1.0119147338867187, 1.011462158203125, 1.012073486328125, 1.0115911865234375, 1.011684326171875, 1.0119321899414062, 1.0118062133789063, 1.0121994018554688, 1.0123673706054688, 1.0117744750976563, 1.011873779296875, 1.0116044921875, 1.011989501953125, 1.0118911743164063, 1.01174169921875, 1.011662841796875, 1.0115194702148438, 1.0117089233398437, 1.0126878662109375, 1.0129653930664062, 1.0122413940429686, 1.0118389892578126, 1.0119505615234374, 1.0117611694335937, 1.01174169921875, 1.0118338623046874, 1.0117755126953125, 
1.0121144409179688, 1.0123131103515626, 1.0117294311523437]",tokens/s,0.973204117879934,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3934.998528,12732.33408,0.0,12085.886976,11337.370624,s,10,10.913714233398439,1.0913714233398437,0.0019187724556758634,1.0909278564453126,1.0936456420898437,1.0942043518066407,1.0946513195800782,"[1.0947630615234376, 1.093521484375, 1.0894732666015625, 1.0895545654296874, 1.089643798828125, 1.0893114013671874, 1.0899912109375, 1.091864501953125, 1.093216552734375, 1.0923743896484375]",tokens/s,234.56725595451456,kWh,1.2859468128946092e-05,7.046563530275307e-06,6.250710556117145e-05,8.241313722039284e-05,tokens/kWh,3106300.871854854,MB,3934.998528,12732.33408,0.0,12085.886976,11686.804992,s,10,636.61697265625,63.661697265625,0.007269593222884917,63.66015234375,63.672558203125,63.6732771484375,63.6738523046875,"[63.6561171875, 63.66687109375, 63.65788671875, 63.66241796875, 63.65402734375, 63.67399609375, 63.6723984375, 63.656875, 63.65174609375, 63.66463671875]",tokens/s,0.9896060379467405,kWh,0.0007518513685133722,0.0004120810364553653,0.003630345543163038,0.004794277948131776,tokens/kWh,13140.664909624129,,s,629,645.425664550782,1.0261139341029908,0.12959104210993952,1.0104422607421875,1.0109677490234374,1.0113603271484375,2.100010478515625,"[1.0103787231445311, 1.0104750366210937, 1.010914306640625, 1.0098585815429688, 1.0104279174804687, 1.010324462890625, 1.0105446166992187, 1.010070556640625, 1.0100776977539063, 1.0105692138671876, 1.0102251586914062, 1.0103941040039062, 1.0105692138671876, 1.0099476318359375, 1.010234375, 1.0106050415039063, 1.0103367919921875, 1.0100828247070313, 1.0105968627929687, 1.0105446166992187, 1.0105169677734376, 1.0106787719726562, 1.0105886840820313, 1.0103572387695312, 1.0107811889648437, 1.010609130859375, 1.010071533203125, 1.0102784423828124, 1.0099865112304687, 1.010282470703125, 1.0107012939453126, 1.0107606811523437, 1.0109655151367187, 1.0103797607421876, 1.0103644409179688, 1.0103593139648437, 1.0102118530273438, 1.0102118530273438, 1.0101124877929688, 1.0109286499023438, 1.0103910522460937, 1.0106552124023438, 1.011262451171875, 1.0107944946289062, 1.0104873046875, 1.0106204223632813, 1.0099425048828126, 1.0105272216796874, 1.00997119140625, 1.0104494018554688, 1.0101258544921874, 1.010229248046875, 1.0103654174804688, 1.0102947998046874, 1.0099885864257812, 1.0105845947265626, 1.0102702026367187, 1.0102896728515625, 1.0102702026367187, 1.0103797607421876, 1.0103336791992188, 1.0109767456054688, 2.1042431640625, 1.0108385009765626, 1.0106071166992188, 1.0101176147460937, 1.0099619750976563, 1.01039306640625, 1.0101083984375, 1.0104063720703125, 1.0112655639648438, 1.0104002685546876, 1.0102466430664063, 1.0105128784179687, 1.0102671508789063, 1.0101801147460938, 1.0101739501953124, 1.0103828735351563, 1.0103910522460937, 1.0109398803710938, 1.0110330810546875, 1.0103602905273437, 1.0109020385742187, 
1.0112982788085938, 1.010745361328125, 1.0104309692382814, 1.0102783813476564, 1.010271240234375, 1.0101309204101563, 1.0107698974609376, 1.0114027709960938, 1.01064501953125, 1.0102230834960937, 1.0105385131835938, 1.0106644287109374, 1.0104248046875, 1.0108897094726563, 1.010545654296875, 1.0105814819335937, 1.0107955322265625, 1.0110156860351562, 1.0106214599609376, 1.01065625, 1.01066650390625, 1.0106920776367188, 1.0103101196289062, 1.0108272705078125, 1.0104053955078125, 1.0105886840820313, 1.010440185546875, 1.0111314086914063, 1.010808837890625, 1.0105968627929687, 1.010450439453125, 1.0106644287109374, 1.010177001953125, 1.0106286010742187, 1.01064599609375, 1.01049853515625, 1.0107146606445312, 1.0105742797851562, 1.0107597045898438, 1.0104780883789062, 1.01089794921875, 1.0108590087890625, 2.10035302734375, 1.0105681762695313, 1.0103634033203126, 1.0108939819335938, 1.0105936279296874, 1.01033984375, 1.0102159423828125, 1.0105128784179687, 1.0100869140625, 1.0100756225585938, 1.0101299438476563, 1.0103162841796876, 1.0100142211914063, 1.010387939453125, 1.0102691650390625, 1.0100736083984374, 1.0103839111328126, 1.0102691650390625, 1.0101923828125, 1.0100633544921875, 1.0100695190429687, 1.0103623657226561, 1.0100408325195311, 1.0103367919921875, 1.0102200317382812, 1.0101944580078126, 1.0105272216796874, 1.0104760131835937, 1.010466796875, 1.0102435913085936, 1.010566162109375, 1.010461669921875, 1.0103326416015626, 1.0106859741210938, 1.0104954833984374, 1.0103705444335938, 1.0109921264648438, 1.0105057373046875, 1.0102046508789062, 1.01007666015625, 1.0101852416992188, 1.0097950439453125, 1.0098770141601563, 1.0107269287109375, 1.0103705444335938, 1.0104033203125, 1.0112020263671875, 1.0103848876953125, 1.0109419555664063, 1.0105897216796875, 1.0109766845703125, 1.010745361328125, 1.0108344116210937, 1.011294189453125, 1.0108703002929686, 1.0104647827148439, 1.0111211547851562, 1.010250732421875, 1.0104473876953124, 1.0103490600585938, 1.0106961669921875, 1.010408447265625, 1.0106736450195313, 2.09991064453125, 1.0102855834960938, 1.010808837890625, 1.0101883544921875, 1.0098963623046875, 1.0099046630859374, 1.0102015380859375, 1.0112860107421875, 1.0114365234375, 1.0114171142578126, 1.011398681640625, 1.0108375854492186, 1.011353515625, 1.0101831665039063, 1.0099578857421876, 1.0098401489257813, 1.0104351806640626, 1.0098911743164063, 1.01047705078125, 1.0109163818359375, 1.0101514282226562, 1.0102528076171875, 1.0105303344726562, 1.010460693359375, 1.0104515380859376, 1.0105640258789061, 1.0108231811523438, 1.0104279174804687, 1.0102528076171875, 1.0102149047851563, 1.0106552124023438, 1.0108037109375, 1.0104227905273437, 1.010044921875, 1.0103521118164063, 1.0104022827148438, 1.0106019897460938, 1.0105252075195312, 1.0107361450195314, 1.0110341186523437, 1.0109389038085939, 1.0102589721679687, 1.0107658081054687, 1.01032958984375, 1.0109112548828125, 1.0104422607421875, 1.0104935302734375, 1.0104278564453124, 1.0109645385742188, 1.0107493896484374, 1.01064501953125, 1.0110023803710937, 1.0106593017578125, 1.0102763671875, 1.01026611328125, 1.0103255004882812, 1.0103726196289062, 1.010171875, 1.0105169677734376, 1.0103091430664062, 1.0100910034179686, 1.0103613891601562, 1.010420654296875, 2.10003857421875, 1.0099517211914062, 1.010044921875, 1.0100490112304688, 1.010212890625, 1.0100838623046875, 1.0100510864257812, 1.0103336791992188, 1.0101473388671875, 1.0099937133789063, 1.0099507446289062, 1.0100213623046874, 1.0101913452148437, 1.010255859375, 1.0110208129882812, 
1.0101217041015624, 1.0101422119140624, 1.0106214599609376, 1.0106583251953125, 1.0105067749023438, 1.0107003173828124, 1.0106572875976563, 1.0101596069335939, 1.0103726196289062, 1.0107791137695312, 1.0102958374023439, 1.0105692138671876, 1.010567138671875, 1.0101862182617187, 1.0100357055664062, 1.0101422119140624, 1.0100562133789062, 1.0101636962890626, 1.0102886352539062, 1.0103009033203125, 1.0100858764648437, 1.0102108154296876, 1.0106234741210938, 1.0103255004882812, 1.0102271728515626, 1.0103224487304687, 1.0102650756835938, 1.0104852294921876, 1.0101156005859375, 1.0108948364257813, 1.0105333862304688, 1.0106009521484376, 1.010366455078125, 1.0103654174804688, 1.0105374755859375, 1.0105303344726562, 1.010418701171875, 1.0103674926757813, 1.0106214599609376, 1.0106808471679687, 1.0101104736328126, 1.0104002685546876, 1.010713623046875, 1.010361328125, 1.0107811889648437, 1.0112348022460937, 1.0109830322265625, 1.010874267578125, 2.099938232421875, 1.0099097900390626, 1.010356201171875, 1.0107750244140625, 1.0106531982421876, 1.0112010498046875, 1.0109235229492188, 1.0112593994140624, 1.011103759765625, 1.0114088745117187, 1.0117509155273436, 1.0115809326171874, 1.0110750732421876, 1.0116761474609375, 1.0117703857421876, 1.01070849609375, 1.0111211547851562, 1.0108140869140625, 1.010822021484375, 1.0101463012695313, 1.010524169921875, 1.010092041015625, 1.0108528442382811, 1.0102384643554687, 1.0105374755859375, 1.0103961791992186, 1.0102907104492187, 1.0105763549804687, 1.0101422119140624, 1.0106972045898437, 1.010524169921875, 1.0101381225585937, 1.01081396484375, 1.0101923828125, 1.010398193359375, 1.0102046508789062, 1.0100275268554688, 1.0102159423828125, 1.01076171875, 1.0107730102539063, 1.0104595947265624, 1.0105743408203125, 1.0107811889648437, 1.0105466918945312, 1.010503662109375, 1.0107811889648437, 1.0115594482421875, 1.0113648681640626, 1.0109235229492188, 1.0113966064453126, 1.0115973510742187, 1.0105558471679688, 1.0106337280273439, 1.0101053466796874, 1.0102333374023438, 1.0100643920898438, 1.0104063720703125, 1.0102118530273438, 1.0105886840820313, 1.0109163818359375, 1.01098291015625, 1.0103121948242189, 1.0104627075195312, 2.1005556640625, 1.0100828247070313, 1.0103214111328125, 1.0101217041015624, 1.0104524536132813, 1.0106480712890624, 1.009934326171875, 1.0109644775390625, 1.01163427734375, 1.011917724609375, 1.0118707275390626, 1.0117642211914062, 1.0120970458984375, 1.0110791625976563, 1.0113136596679688, 1.0106326904296874, 1.010629638671875, 1.0110320434570312, 1.0109531860351562, 1.0102200317382812, 1.0101647338867188, 1.01035107421875, 1.0101319580078125, 1.0104688720703126, 1.0105006103515626, 1.01138330078125, 1.010892822265625, 1.0108292846679687, 1.0109214477539064, 1.0098134765625, 1.0101145629882813, 1.0102262573242187, 1.0102721557617187, 1.010165771484375, 1.0108426513671875, 1.0108047485351563, 1.010640869140625, 1.0113280029296876, 1.0108528442382811, 1.010176025390625, 1.0101810913085938, 1.0100869140625, 1.0103460083007811, 1.010440185546875, 1.0107904052734376, 1.0101801147460938, 1.0104586181640625, 1.0103050537109375, 1.0111918334960937, 1.0107914428710938, 1.0108416137695313, 1.0108283081054688, 1.0106810302734375, 1.0106591186523437, 1.0115072021484375, 1.0104279174804687, 1.0108436279296875, 1.0108283081054688, 1.01055078125, 1.0102097778320311, 1.010460693359375, 1.0103121948242189, 1.0103951416015624, 2.102578125, 1.0104268798828124, 1.0102036743164062, 1.010355224609375, 1.0103173217773438, 1.0101637573242188, 1.01056201171875, 
1.0103060302734375, 1.0101749877929687, 1.0104330444335938, 1.0104094848632812, 1.0101801147460938, 1.0102282104492188, 1.0102640380859376, 1.0102354125976563, 1.01035107421875, 1.0101268310546876, 1.01020263671875, 1.0103245849609375, 1.010389892578125, 1.0105927734375, 1.0102783813476564, 1.0102159423828125, 1.0109481201171875, 1.0104248046875, 1.0102097778320311, 1.0102999267578125, 1.0106634521484374, 1.01051904296875, 1.0106911010742188, 1.0106603393554687, 1.0103428955078124, 1.010208740234375, 1.0103828735351563, 1.0103501586914063, 1.0101267700195313, 1.010428955078125, 1.0099415283203126, 1.0103070678710937, 1.010746337890625, 1.0105733032226563, 1.0100193481445312, 1.0105620727539062, 1.0108969116210937, 1.010830322265625, 1.010567138671875, 1.0105261840820312, 1.0104903564453125, 1.010502685546875, 1.010763916015625, 1.0106510009765626, 1.01055078125, 1.010597900390625, 1.010534423828125, 1.01037158203125, 1.0100613403320313, 1.0104227905273437, 1.0101196899414062, 1.0103214111328125, 1.0106286010742187, 1.0109337768554687, 1.0104739990234375, 1.0107742309570313, 2.104293212890625, 1.010323486328125, 1.0104320068359376, 1.0102138671875, 1.0099435424804688, 1.0105354614257813, 1.0101022338867187, 1.0104801025390624, 1.0105220947265625, 1.0102742919921874, 1.0100582275390626, 1.0102661743164063, 1.0102158813476563, 1.0104473876953124, 1.0102271728515626, 1.0102630615234376, 1.0100213623046874, 1.0100828247070313, 1.0104155883789063, 1.01005517578125, 1.010208740234375, 1.0101239013671874, 1.0102445068359376, 1.0100910034179686, 1.0104801025390624, 1.0102210693359375, 1.0102210693359375, 1.0105426025390625, 1.0104248046875, 1.0105814819335937, 1.0103746337890624, 1.0100991821289063, 1.0102630615234376, 1.0103203735351562, 1.0105231323242188, 1.0105108642578124, 1.0102191162109375, 1.0101605224609376, 1.0104053955078125, 1.0104586181640625, 1.0102518920898438, 1.0101390380859374, 1.0103285522460939, 1.0101493530273438, 1.0104166259765626, 1.0106941528320312, 1.0107689208984374, 1.0102313232421876, 1.0105620727539062, 1.01051708984375, 1.0104871826171875, 1.01003369140625, 1.0101371459960937, 1.0099342651367187, 1.0106521606445313, 1.0104391479492187, 1.0105620727539062, 1.010513916015625, 1.010798583984375, 1.0104074096679687, 1.0103634033203126, 1.0103224487304687, 1.010681884765625, 2.103435302734375, 1.010229248046875, 1.0106736450195313, 1.0107843017578124, 1.0107432861328125, 1.0110986328125, 1.0106265869140625, 1.010587646484375, 1.0101248168945312, 1.0100542602539062, 1.0107769775390625, 1.0108426513671875, 1.0104627075195312, 1.0106603393554687, 1.0108734741210939, 1.01037255859375, 1.0107083740234375, 1.0104166259765626, 1.0103275756835937, 1.0101319580078125, 1.0107750244140625, 1.0107258911132813, 1.0107606811523437, 1.0102907104492187, 1.0104279174804687, 1.0105446166992187, 1.0105364379882813, 1.0103009643554688, 1.010494384765625, 1.0100807495117188, 1.0106644287109374, 1.0108026733398439, 1.0108375244140626, 1.010492431640625, 1.0103746337890624, 1.0101227416992187, 1.0101484375, 1.0099373168945311, 1.0103756713867187, 1.0104944458007812, 1.0109050903320314, 1.0101473388671875, 1.0104903564453125, 1.0099251098632813, 1.0105374755859375, 1.010134033203125, 1.0101810913085938, 1.0101248168945312, 1.0104063720703125, 1.0102702026367187, 1.0105569458007813, 1.0107965698242187, 1.01083544921875, 1.0107893676757813, 1.01082421875, 1.0101309204101563, 1.0103572387695312, 1.010207763671875, 1.010361328125, 1.0107053833007813, 1.0118082275390625, 1.01166796875, 
1.01172021484375]",tokens/s,0.9745506485828798,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa523-62c6c43d32f792b12bc008c6;20fcdfd6-cd19-4baa-b381-01182c8f2baa) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2013.822976,5480.382464,0.0,4833.93536,4503.282688,s,10,5.711301574707031,0.5711301574707031,0.0015604684158887843,0.5710088500976562,0.5724867248535156,0.573356430053711,0.5740521942138671,"[0.5715228271484375, 0.5742261352539062, 0.5684718017578125, 0.5703966064453125, 0.570494873046875, 0.5693699340820313, 0.5704375610351563, 0.5720609130859375, 0.57229345703125, 0.5720274658203125]",tokens/s,448.2340787846278,kWh,6.721677934681928e-06,3.683185084507083e-06,3.147118258432934e-05,4.187604560351835e-05,tokens/kWh,6113280.189438215,MB,2014.928896,5480.382464,0.0,4833.93536,4688.699392,s,10,334.74296874999993,33.474296875,0.0037315247196552594,33.473017578124995,33.479158984375,33.4801732421875,33.4809846484375,"[33.47309765625, 33.47263671875, 33.4715390625, 33.4729375, 33.47534765625, 33.47058984375, 33.47893359375, 33.4811875, 33.4778203125, 33.46887890625]",tokens/s,1.8820410249468458,kWh,0.00039524016447641235,0.00021662581418956808,0.0018242358853134645,0.002436101863979445,tokens/kWh,25860.987560301615,,s,629,339.3508396606445,0.5395084891266209,0.06790528508400501,0.531294189453125,0.5317521484375,0.5319190551757812,1.1017576318359374,"[0.5316085815429688, 0.5315625, 0.5310873413085937, 0.5312276611328125, 0.5310341186523437, 0.5311795043945312, 0.5312860107421875, 0.53180517578125, 0.5313863525390625, 0.5315205078125, 0.5309071655273437, 0.5309808349609375, 0.5307955322265625, 0.5312429809570313, 0.530924560546875, 0.5313556518554687, 0.5308876953125, 0.531230712890625, 0.5310802001953125, 0.5317345581054688, 0.5312921752929688, 0.5313382568359375, 0.5308221435546875, 0.5310607299804687, 0.5308549194335938, 0.5315635375976563, 0.5311006469726562, 0.5312788696289062, 0.5312388916015625, 0.5312849731445313, 0.5308907470703125, 0.5311416015625, 0.5308528442382813, 0.5311743774414063, 0.531367919921875, 0.5315932006835937, 0.5311682739257813, 0.5317539672851562, 0.5317908325195313, 0.531473388671875, 0.53108837890625, 0.53146728515625, 0.531220458984375, 0.5315338134765625, 0.5310525512695312, 0.531900390625, 0.5317529296875, 0.5317069091796875, 0.53103515625, 0.53153076171875, 0.5311344604492187, 0.5311918334960938, 0.5308630981445313, 0.5311641845703124, 0.5309531860351563, 0.531577880859375, 0.5316065063476563, 0.5314017333984375, 0.5316864013671875, 0.5318963012695312, 0.5315604248046875, 0.53161279296875, 1.1062353515625, 0.5315419921875, 0.5316771850585937, 0.5312737426757812, 0.5311528930664062, 0.5314109497070313, 0.5312112426757812, 0.5310986328125, 0.5311057739257813, 0.53089892578125, 0.5317980346679687, 0.531167236328125, 0.53102490234375, 0.5308078002929687, 0.5314508666992187, 0.5309224853515625, 0.5310689086914062, 0.5308067626953125, 0.5315430297851562, 0.5309972534179688, 0.5317294311523437, 0.5312696533203125, 0.5314498291015625, 0.5310535888671875, 
0.5314406127929687, 0.5309706420898438, 0.5315850219726562, 0.5310156860351563, 0.5313699951171875, 0.5310637817382813, 0.5316474609375, 0.5311948852539062, 0.53110888671875, 0.53115185546875, 0.531330078125, 0.5310965576171875, 0.5311754150390625, 0.5309706420898438, 0.5319178466796874, 0.532337646484375, 0.5315972900390625, 0.531103759765625, 0.5311692504882812, 0.5309296875, 0.5313453979492188, 0.53089794921875, 0.532917236328125, 0.5309666137695312, 0.5310463256835938, 0.5309081420898437, 0.5315809326171875, 0.5311968994140625, 0.5312440185546875, 0.5310320434570313, 0.5312368774414062, 0.5308692626953125, 0.5317509155273438, 0.5314529418945313, 0.5314232177734375, 0.5311610717773437, 0.531356689453125, 0.5314232177734375, 0.5313045043945313, 1.1011102294921875, 0.5311477661132813, 0.5314263305664062, 0.531145751953125, 0.5313720092773437, 0.5311477661132813, 0.5315952758789062, 0.5309235229492187, 0.5311928100585938, 0.531162109375, 0.5311211547851562, 0.53140478515625, 0.5317939453125, 0.53121435546875, 0.5318656005859375, 0.531483642578125, 0.5315000610351562, 0.5312716674804687, 0.5315645141601563, 0.531324951171875, 0.5316557006835938, 0.5313607788085938, 0.5315901489257813, 0.5309573364257812, 0.5312061157226563, 0.5308528442382813, 0.5311078491210938, 0.5310279541015624, 0.5312245483398438, 0.5308385009765625, 0.5313310546875, 0.53131982421875, 0.5318225708007812, 0.5312051391601562, 0.5311702880859375, 0.5310709838867187, 0.5313167114257813, 0.5311600341796875, 0.5312184448242188, 0.531252197265625, 0.5313607788085938, 0.5311314086914063, 0.5312163696289063, 0.5310596923828125, 0.531451904296875, 0.5310115966796874, 0.5313280029296875, 0.53153076171875, 0.5314334716796875, 0.5314508666992187, 0.5315317993164063, 0.5316935424804687, 0.5310279541015624, 0.5312266235351563, 0.5312102661132813, 0.531051513671875, 0.5313505249023438, 0.5309439697265625, 0.53108837890625, 0.5311426391601562, 0.5314908447265625, 0.5311590576171875, 0.5316188354492187, 1.1020093994140625, 0.5310126342773438, 0.5312286987304687, 0.5310289916992188, 0.531694580078125, 0.5315460815429688, 0.5315625, 0.5309337768554687, 0.5313526000976563, 0.5313218383789062, 0.5313914794921875, 0.53136279296875, 0.5317498779296875, 0.5313587036132813, 0.5314078979492187, 0.531162109375, 0.5312286987304687, 0.531251220703125, 0.5314600830078124, 0.5311160278320313, 0.53119384765625, 0.5310310668945313, 0.5316546630859375, 0.531409912109375, 0.531567626953125, 0.5313894653320312, 0.5315020751953125, 0.531135498046875, 0.5310525512695312, 0.5308528442382813, 0.5313935546875, 0.5313812255859375, 0.5312890625, 0.530966552734375, 0.531019775390625, 0.5310494995117188, 0.5313024291992188, 0.5313894653320312, 0.531294189453125, 0.5311016845703125, 0.5310525512695312, 0.5311856689453125, 0.531736572265625, 0.531641357421875, 0.5321881713867187, 0.5313126220703125, 0.5313751220703125, 0.5310965576171875, 0.531794921875, 0.5309716186523438, 0.5311610717773437, 0.5310167236328125, 0.5313290405273438, 0.5308538818359375, 0.5310453491210938, 0.5313873901367188, 0.5312819213867187, 0.5309849853515625, 0.531398681640625, 0.5310084838867187, 0.5321144409179688, 0.531578857421875, 0.5316700439453125, 1.10214453125, 0.530830322265625, 0.5315020751953125, 0.5308692626953125, 0.5310894165039063, 0.5316024169921875, 0.5316566772460938, 0.5311129760742187, 0.5313003540039063, 0.5309224853515625, 0.5312870483398437, 0.5310771484375, 0.5316566772460938, 0.5308528442382813, 0.5312696533203125, 0.5311334228515625, 0.5315010375976562, 
0.5314529418945313, 0.5316433715820312, 0.5310289916992188, 0.5314703369140625, 0.5313914794921875, 0.5314805908203125, 0.5312102661132813, 0.5313157348632812, 0.5312808837890625, 0.5316044921875, 0.5312051391601562, 0.5314078979492187, 0.5316720581054688, 0.5313822631835937, 0.5311006469726562, 0.53110888671875, 0.5308703002929688, 0.5316167602539063, 0.5314221801757812, 0.531800048828125, 0.5311160278320313, 0.5314561157226563, 0.531411865234375, 0.5317734375, 0.5317805786132812, 0.5310750732421875, 0.5310975952148438, 0.53131982421875, 0.53121435546875, 0.531240966796875, 0.530924560546875, 0.5315112915039063, 0.5309634399414063, 0.53119384765625, 0.5311242065429688, 0.5314119873046875, 0.5314744262695312, 0.5316505737304688, 0.5320745239257813, 0.531435546875, 0.53260595703125, 0.5313802490234375, 0.5314990234375, 0.531493896484375, 0.531061767578125, 0.5313065185546875, 1.1010294189453125, 0.5310013427734375, 0.53124609375, 0.5309849853515625, 0.5314959106445313, 0.5311447143554687, 0.531198974609375, 0.531135498046875, 0.5317611694335938, 0.5310453491210938, 0.5312368774414062, 0.5310105590820312, 0.5315205688476563, 0.5312000122070313, 0.531442626953125, 0.5309010009765625, 0.53104638671875, 0.5309081420898437, 0.5313760986328125, 0.5308211059570312, 0.5312214965820312, 0.5310167236328125, 0.5315963134765626, 0.531188720703125, 0.5312471313476562, 0.5308661499023437, 0.5318123779296875, 0.5314027709960938, 0.5315277099609375, 0.5311959228515625, 0.5315491943359375, 0.5311867065429687, 0.5311385498046876, 0.5308538818359375, 0.5313402709960937, 0.5308897094726562, 0.53110888671875, 0.5310873413085937, 0.532853759765625, 0.5313351440429688, 0.531751953125, 0.5309685668945312, 0.5313668823242188, 0.5309788208007813, 0.5314406127929687, 0.5319198608398438, 0.5315972900390625, 0.5316341552734375, 0.531399658203125, 0.5313290405273438, 0.5312604370117188, 0.531052490234375, 0.5311856689453125, 0.5311262817382812, 0.5311273193359375, 0.5309450073242188, 0.5313003540039063, 0.5310310668945313, 0.531162109375, 0.5316484985351563, 0.5313546142578125, 0.531051513671875, 0.5314641723632813, 1.10335595703125, 0.5312788696289062, 0.5317376098632812, 0.5317816162109374, 0.5310443725585937, 0.5310945434570312, 0.531515380859375, 0.5312010498046875, 0.5312890625, 0.5315604248046875, 0.5310709838867187, 0.5312757568359375, 0.5313177490234375, 0.5313668823242188, 0.5315430297851562, 0.5313382568359375, 0.531462158203125, 0.5310822143554688, 0.5318031616210938, 0.5317324829101563, 0.5316137084960938, 0.5309522094726562, 0.5314713745117188, 0.531346435546875, 0.5311826171875, 0.5308856201171875, 0.5316904907226563, 0.5310525512695312, 0.5313648681640625, 0.5309481201171875, 0.5315020751953125, 0.5309798583984375, 0.5310975952148438, 0.5311068115234375, 0.53123583984375, 0.5308887329101563, 0.5311610717773437, 0.5312593994140625, 0.5315113525390625, 0.5320181274414062, 0.53134130859375, 0.5311498413085938, 0.5313812255859375, 0.5318276977539063, 0.5313341674804688, 0.5315061645507813, 0.5313771362304688, 0.5312665405273438, 0.53157373046875, 0.5311734008789063, 0.5318348999023438, 0.5309931640625, 0.5319618530273438, 0.5310545654296875, 0.531431396484375, 0.5314805908203125, 0.5322670288085938, 0.5313526000976563, 0.5318143920898437, 0.5314058227539062, 0.5316392822265625, 0.5314242553710937, 0.5316956176757812, 1.1039549560546875, 0.5310596923828125, 0.531430419921875, 0.5309603881835937, 0.5313013916015625, 0.5310545654296875, 0.5314334716796875, 0.5311129760742187, 0.5314866943359375, 
0.5309327392578125, 0.5313535766601563, 0.5317621459960937, 0.5320017700195312, 0.5313474731445312, 0.5321390380859375, 0.5313034057617188, 0.5315491943359375, 0.5315614624023437, 0.5321195678710937, 0.5317386474609375, 0.531430419921875, 0.53125732421875, 0.5315266723632812, 0.5314437255859376, 0.5318656005859375, 0.5314652099609375, 0.5318246459960938, 0.5309419555664062, 0.5313320922851562, 0.5310873413085937, 0.5319567260742187, 0.5315020751953125, 0.5317171020507813, 0.5314866943359375, 0.531135498046875, 0.5316986694335938, 0.5315419921875, 0.531198974609375, 0.5315286865234375, 0.5312481079101562, 0.53157373046875, 0.5313710327148438, 0.531556396484375, 0.5311170043945312, 0.5320908813476563, 0.5312860107421875, 0.5311273193359375, 0.53108837890625, 0.5317069091796875, 0.5314140014648437, 0.5318666381835937, 0.5313013916015625, 0.5315419921875, 0.5312225341796875, 0.531567626953125, 0.5311692504882812, 0.53146728515625, 0.5312901000976562, 0.5316116333007812, 0.5311057739257813, 0.5314898071289063, 0.5310587158203125, 0.5314539794921875, 1.1042969970703125, 0.5312860107421875, 0.5315072021484375, 0.5316034545898437, 0.5322137451171876, 0.5317847290039063, 0.5316690063476562, 0.5311959228515625, 0.531631103515625, 0.5317171020507813, 0.531820556640625, 0.5314273071289063, 0.5316249389648438, 0.5310904541015625, 0.531863525390625, 0.5312501831054688, 0.53151025390625, 0.531252197265625, 0.5320970458984375, 0.5310699462890625, 0.5312184448242188, 0.5309183959960937, 0.5319987182617187, 0.531577880859375, 0.5319649047851562, 0.5313546142578125, 0.53127783203125, 0.5312000122070313, 0.5314058227539062, 0.5309552612304688, 0.5314765014648437, 0.5311477661132813, 0.5315399780273438, 0.5311395874023438, 0.5313935546875, 0.5317590942382813, 0.531430419921875, 0.5314949340820313, 0.53096240234375, 0.5309450073242188, 0.5312819213867187, 0.5316137084960938, 0.5311641845703124, 0.5311508178710938, 0.5317099609375, 0.531009521484375, 0.5310576782226563, 0.530977783203125, 0.5313966064453125, 0.531272705078125, 0.531178466796875, 0.53085693359375, 0.5311764526367188, 0.5310115966796874, 0.531294189453125, 0.5309757690429687, 0.5316322021484375, 0.5310668334960937, 0.531282958984375, 0.531072021484375, 0.5319813232421875, 0.5315072021484375, 0.5314345092773437, 1.1038515625, 0.5315747680664062, 0.5315563354492188, 0.5314283447265625, 0.5311314086914063, 0.5308887329101563, 0.5313218383789062, 0.5309593505859375, 0.5309398803710937, 0.5308262329101563, 0.5310136108398438, 0.5309296875, 0.5314590454101562, 0.5310699462890625, 0.5316587524414063, 0.5311928100585938, 0.5312696533203125, 0.5310955810546875, 0.5310802001953125, 0.5312481079101562, 0.5316751098632813, 0.5312921752929688, 0.5321625366210937, 0.531294189453125, 0.5315594482421875, 0.5310873413085937, 0.5322465209960937, 0.5310218505859375, 0.5312512817382813, 0.5308609619140625, 0.531178466796875, 0.5309429931640625, 0.5310105590820312, 0.5310392456054688, 0.5313372192382813, 0.5314283447265625, 0.5313710327148438, 0.5309235229492187, 0.5312706298828125, 0.5308815307617187, 0.5310955810546875, 0.5310003051757812, 0.5311764526367188, 0.5315164184570312, 0.53134130859375, 0.5313402709960937, 0.5311385498046876, 0.5311682739257813, 0.53187890625, 0.5310679321289062, 0.5314652099609375, 0.5309286499023438, 0.5317437744140625, 0.5314334716796875, 0.5315911865234375, 0.5309706420898438, 0.5311815795898438, 0.5312235717773437, 0.5312030639648437, 0.5309204711914063, 0.5312102661132813, 0.5308764038085938, 
0.5311703491210937]",tokens/s,1.853538952869569,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3149.217792,5128.060928,0.0,4481.613824,4276.256768,s,10,3.149311706542969,0.3149311706542969,0.0015717060185626956,0.31493319702148437,0.3165461700439453,0.3169147476196289,0.31720960968017575,"[0.3172833251953125, 0.3164642639160156, 0.3132740478515625, 0.3144434814453125, 0.31399490356445314, 0.3119559020996094, 0.3140972900390625, 0.31542291259765626, 0.3162637023925781, 0.31611187744140623]",tokens/s,812.8760308741042,kWh,3.694201881686847e-06,2.024221956560268e-06,1.6665230346062446e-05,2.238365418430956e-05,tokens/kWh,11436917.220578322,MB,3149.217792,5128.060928,0.0,4481.613824,4465.661952,s,10,184.204947265625,18.420494726562502,0.008042192397515377,18.4165078125,18.4298185546875,18.432060644531248,18.433854316406247,"[18.41758203125, 18.4293203125, 18.415013671875, 18.434302734375, 18.41543359375, 18.427755859375, 18.427966796875, 18.41258203125, 18.409763671875, 18.4152265625]",tokens/s,3.420103582188457,kWh,0.0002180770655886995,0.00011952181134027568,0.0009648250270677349,0.00130242390399671,tokens/kWh,48371.3480739057,,s,629,186.7401061401367,0.2968841115105512,0.03741416455160097,0.29225473022460935,0.29295676879882815,0.29340855712890623,0.6063264038085938,"[0.29236837768554685, 0.29334426879882813, 0.29221682739257815, 0.29205914306640623, 0.2920222778320313, 0.2922946472167969, 0.2924492797851562, 0.2926929931640625, 0.29193624877929686, 0.2920509338378906, 0.29204376220703127, 0.2921123962402344, 0.291873779296875, 0.2915205078125, 0.29172222900390626, 0.29199871826171875, 0.2923089904785156, 0.2930401306152344, 0.2921902160644531, 0.29266842651367186, 0.29187994384765625, 0.2924564514160156, 0.2924277648925781, 0.2920263671875, 0.29210009765625, 0.2922608642578125, 0.2919096374511719, 0.29193624877929686, 0.29192294311523437, 0.29198541259765626, 0.29212261962890623, 0.29261724853515625, 0.2921973571777344, 0.29344256591796875, 0.2920785827636719, 0.2922250366210937, 0.29242059326171876, 0.2925332336425781, 0.2925537414550781, 0.29240524291992187, 0.2921902160644531, 0.2919741516113281, 0.29227825927734374, 0.2922506103515625, 0.2918768615722656, 0.29220248413085936, 0.29238067626953124, 0.29187481689453126, 0.29204788208007815, 0.2923376770019531, 0.2920396728515625, 0.29325619506835937, 0.2925700988769531, 0.29266226196289064, 0.29220452880859377, 0.2922823791503906, 0.2919710693359375, 0.29259878540039064, 0.2924472351074219, 0.29214208984375, 0.2923765869140625, 0.2920519714355469, 0.6092022705078125, 0.29223724365234377, 0.2937958374023438, 0.29247589111328126, 0.29319781494140623, 0.2925875244140625, 0.2924288024902344, 0.29230181884765627, 0.29243902587890624, 0.2923919372558594, 0.2927564697265625, 0.29259161376953124, 0.29243084716796874, 
0.29296743774414064, 0.2923427734375, 0.29186456298828123, 0.2922250366210937, 0.29213082885742186, 0.29205810546875, 0.2923417663574219, 0.2929213562011719, 0.2921850891113281, 0.2936012878417969, 0.29257931518554686, 0.29359002685546876, 0.29242059326171876, 0.2921574401855469, 0.2920642700195312, 0.2926080017089844, 0.29214617919921876, 0.2923816833496094, 0.29216973876953123, 0.29204684448242185, 0.2921922607421875, 0.2923284606933594, 0.2922137451171875, 0.2923929748535156, 0.292546630859375, 0.2926417236328125, 0.2925137939453125, 0.2924062805175781, 0.2922659912109375, 0.29193319702148435, 0.29348370361328124, 0.29205792236328126, 0.2921850891113281, 0.2963548278808594, 0.292305908203125, 0.2925137939453125, 0.2923642883300781, 0.2921707458496094, 0.2921430969238281, 0.2926448669433594, 0.2927411193847656, 0.29220761108398435, 0.292232177734375, 0.29215130615234375, 0.2934067077636719, 0.29257626342773435, 0.29231924438476564, 0.2922465209960938, 0.2922557373046875, 0.2923213195800781, 0.6058434448242187, 0.2923243408203125, 0.29236737060546875, 0.2920919189453125, 0.2922966918945312, 0.2922536926269531, 0.2924298095703125, 0.29252813720703125, 0.29234893798828127, 0.2925455322265625, 0.2922403869628906, 0.29219635009765627, 0.2920130615234375, 0.29205810546875, 0.2921932678222656, 0.2922680358886719, 0.2919772033691406, 0.29201919555664063, 0.29235302734375, 0.2921318359375, 0.29199154663085936, 0.2918656005859375, 0.29201611328125, 0.294361083984375, 0.2921062316894531, 0.29225473022460935, 0.2920929260253906, 0.2933616638183594, 0.29293466186523437, 0.29222708129882813, 0.292073486328125, 0.29259878540039064, 0.2922178649902344, 0.29183078002929685, 0.29233355712890624, 0.2924349365234375, 0.291989501953125, 0.29248818969726564, 0.29223526000976563, 0.29176217651367187, 0.2918082580566406, 0.2931097717285156, 0.29253836059570315, 0.29301043701171875, 0.2920130615234375, 0.2920407409667969, 0.2919710388183594, 0.29457406616210935, 0.2923519897460938, 0.2923028564453125, 0.2920929260253906, 0.2918901672363281, 0.29224755859375, 0.2919526672363281, 0.291937255859375, 0.29211032104492185, 0.29193011474609376, 0.29155224609375, 0.2918410339355469, 0.2920980529785156, 0.292168701171875, 0.29260595703125, 0.292453369140625, 0.6075504760742187, 0.2925127563476563, 0.2923765869140625, 0.29210931396484374, 0.29227825927734374, 0.2921656188964844, 0.29187890625, 0.29214105224609377, 0.29254144287109374, 0.29202023315429687, 0.29206732177734374, 0.293718017578125, 0.2936227722167969, 0.29316915893554685, 0.29292236328125, 0.29260595703125, 0.292431884765625, 0.2927585144042969, 0.2929725341796875, 0.29310772705078125, 0.29271142578125, 0.2924615783691406, 0.29305242919921876, 0.29276776123046877, 0.2925915832519531, 0.29245440673828127, 0.29250765991210936, 0.2925107116699219, 0.29247589111328126, 0.29257830810546875, 0.29249331665039063, 0.2926243896484375, 0.2935541687011719, 0.2926243896484375, 0.29202532958984373, 0.2919198608398437, 0.292021240234375, 0.29210418701171875, 0.2919884948730469, 0.2921758728027344, 0.2921983947753906, 0.292052978515625, 0.2918523254394531, 0.2925229797363281, 0.2951219177246094, 0.2927698059082031, 0.2923653259277344, 0.29271551513671873, 0.2925813903808594, 0.29277389526367187, 0.2923816833496094, 0.2925250549316406, 0.2931272277832031, 0.2930205993652344, 0.29253631591796875, 0.2923786315917969, 0.2930882568359375, 0.2928087158203125, 0.29209906005859376, 0.2928845520019531, 0.29307998657226564, 0.2928609313964844, 0.2925823974609375, 0.6065142211914063, 
0.2926080322265625, 0.29234066772460937, 0.29249432373046874, 0.2920980529785156, 0.2918707275390625, 0.2923858032226562, 0.2918133850097656, 0.2919024963378906, 0.2923048706054687, 0.29231924438476564, 0.29207757568359377, 0.29218612670898436, 0.29211032104492185, 0.29206936645507814, 0.29310873413085936, 0.2925721740722656, 0.2921850891113281, 0.292738037109375, 0.2921891784667969, 0.2920550537109375, 0.2918143920898438, 0.2921983947753906, 0.29208370971679687, 0.29210214233398435, 0.29210214233398435, 0.2925189208984375, 0.2919208984375, 0.29203353881835936, 0.2927615966796875, 0.291736572265625, 0.29209701538085936, 0.2920550537109375, 0.2918656005859375, 0.2918370056152344, 0.2924051818847656, 0.2920263671875, 0.2931199951171875, 0.29166488647460936, 0.29208370971679687, 0.2922158203125, 0.2921277465820312, 0.2917294006347656, 0.29210012817382813, 0.29211541748046876, 0.29188607788085935, 0.29205810546875, 0.29206732177734374, 0.29190142822265625, 0.2926776428222656, 0.29242471313476565, 0.2921656188964844, 0.29246054077148437, 0.2923315124511719, 0.29213287353515627, 0.2920325012207031, 0.2924195861816406, 0.29780581665039063, 0.29234072875976563, 0.2921574401855469, 0.29292031860351564, 0.2927329406738281, 0.2922823791503906, 0.6048440551757812, 0.29242266845703124, 0.29194342041015625, 0.2922823791503906, 0.29246875, 0.2921922607421875, 0.2924472351074219, 0.2927779846191406, 0.29216461181640624, 0.29211953735351565, 0.29237759399414065, 0.29223526000976563, 0.2919372863769531, 0.2918973388671875, 0.2923294982910156, 0.2918020935058594, 0.29216152954101565, 0.2923212890625, 0.29266021728515623, 0.2927503356933594, 0.2927216491699219, 0.29210418701171875, 0.2921973876953125, 0.29242471313476565, 0.29217279052734374, 0.29210931396484374, 0.2921912231445313, 0.292021240234375, 0.29214617919921876, 0.29203866577148435, 0.29192190551757813, 0.2918973388671875, 0.29224346923828126, 0.291884033203125, 0.291857421875, 0.29202532958984373, 0.2921379699707031, 0.2925025329589844, 0.2959308776855469, 0.2924974060058594, 0.2935582580566406, 0.2933534851074219, 0.2929541015625, 0.29257931518554686, 0.2927984619140625, 0.29274725341796876, 0.29252197265625, 0.29275750732421874, 0.2934097900390625, 0.29243289184570315, 0.2923284606933594, 0.2925189208984375, 0.2929377136230469, 0.29240524291992187, 0.2938306579589844, 0.29293670654296877, 0.29282302856445314, 0.29310772705078125, 0.2923100280761719, 0.2923991088867188, 0.29276568603515624, 0.2923294677734375, 0.2927698059082031, 0.6069278564453126, 0.2922486267089844, 0.29226181030273435, 0.29200384521484374, 0.29218099975585937, 0.2922465209960938, 0.29204376220703127, 0.2927329406738281, 0.2925977478027344, 0.292210693359375, 0.29327462768554685, 0.2930093994140625, 0.29313638305664064, 0.29303192138671874, 0.292389892578125, 0.29288653564453127, 0.29291519165039065, 0.29289266967773436, 0.2928721923828125, 0.292701171875, 0.29249331665039063, 0.29188607788085935, 0.29253631591796875, 0.29483929443359375, 0.2933084106445312, 0.29292849731445314, 0.2923530883789062, 0.29386029052734375, 0.29232742309570314, 0.2925465698242187, 0.29262130737304687, 0.292274169921875, 0.2924564514160156, 0.292242431640625, 0.2929407958984375, 0.2920704040527344, 0.2924810791015625, 0.292454345703125, 0.2922188720703125, 0.29238885498046874, 0.29204888916015626, 0.2919598083496094, 0.2920232849121094, 0.2920048522949219, 0.29257220458984373, 0.29197512817382815, 0.2930882568359375, 0.29200384521484374, 0.292384765625, 0.2927626342773437, 0.2920519714355469, 
0.2919045104980469, 0.2923561096191406, 0.29254348754882814, 0.2925537414550781, 0.2922148132324219, 0.292211669921875, 0.2919024658203125, 0.29274008178710936, 0.29211032104492185, 0.29219430541992186, 0.2925619201660156, 0.2921574401855469, 0.6078638305664062, 0.2920642700195312, 0.29204171752929686, 0.29206219482421875, 0.2920550537109375, 0.2921185302734375, 0.29250149536132813, 0.29204888916015626, 0.29186456298828123, 0.2920867919921875, 0.29199871826171875, 0.29200689697265625, 0.29195059204101564, 0.2919956359863281, 0.2926612548828125, 0.291999755859375, 0.29500723266601564, 0.2922413940429687, 0.2919342041015625, 0.2922650146484375, 0.2921286926269531, 0.29177548217773436, 0.2920263671875, 0.29223629760742187, 0.2919342041015625, 0.2920151062011719, 0.29193624877929686, 0.29171505737304687, 0.29255679321289063, 0.29226290893554685, 0.29204888916015626, 0.29261932373046873, 0.2921389465332031, 0.2924472351074219, 0.292569091796875, 0.29272988891601565, 0.2924666442871094, 0.29255474853515623, 0.2924288024902344, 0.29235711669921877, 0.29266842651367186, 0.2926305236816406, 0.292279296875, 0.29281585693359374, 0.29269195556640626, 0.2922127380371094, 0.2923765869140625, 0.29226190185546874, 0.29332275390625, 0.29225677490234375, 0.2925455322265625, 0.29204684448242185, 0.2919465026855469, 0.29239501953125, 0.292094970703125, 0.2916546630859375, 0.29185330200195314, 0.2919178161621094, 0.29201202392578124, 0.2919045104980469, 0.2917683715820312, 0.29191879272460936, 0.29268173217773436, 0.609132568359375, 0.2924984436035156, 0.2927001647949219, 0.29203353881835936, 0.2919598083496094, 0.29245440673828127, 0.29231411743164065, 0.2920304565429688, 0.29216973876953123, 0.2918604736328125, 0.2920816650390625, 0.29198541259765626, 0.29182769775390627, 0.29221478271484375, 0.29226290893554685, 0.29183078002929685, 0.2922137451171875, 0.2939207763671875, 0.29266226196289064, 0.292632568359375, 0.2929541015625, 0.29197210693359377, 0.2920867919921875, 0.29224346923828126, 0.2920704040527344, 0.292126708984375, 0.2921769104003906, 0.2920243225097656, 0.292173828125, 0.2919126892089844, 0.2919823303222656, 0.2920867919921875, 0.2925608825683594, 0.29213287353515627, 0.29228546142578127, 0.29201608276367186, 0.2929305725097656, 0.2924963989257813, 0.2920181884765625, 0.2923519897460938, 0.29213082885742186, 0.2921359252929687, 0.29183795166015625, 0.29199258422851565, 0.2920724487304687, 0.29192807006835936, 0.292031494140625, 0.2920294494628906, 0.292173828125, 0.2920765380859375, 0.29200689697265625, 0.2918758544921875, 0.2921349182128906, 0.29268896484375, 0.2934025573730469, 0.292505615234375, 0.2919270324707031, 0.29225164794921876, 0.292105224609375, 0.29204071044921875, 0.2915840148925781, 0.2917908630371094, 0.2920570983886719, 0.6085621948242188, 0.2921451416015625, 0.29239706420898437, 0.29239501953125, 0.29228955078125, 0.2920243225097656, 0.2924892272949219, 0.2920345458984375, 0.2921123962402344, 0.291962890625, 0.2920796203613281, 0.29241549682617185, 0.29221682739257815, 0.2918553466796875, 0.291915771484375, 0.29186663818359376, 0.2920427551269531, 0.2920980529785156, 0.29218817138671876, 0.29189529418945315, 0.29189837646484373, 0.29198028564453127, 0.29211953735351565, 0.29269195556640626, 0.2930841674804687, 0.2925148010253906, 0.29509222412109376, 0.29249127197265623, 0.2924369812011719, 0.29232537841796874, 0.2920162048339844, 0.29203448486328126, 0.29213900756835937, 0.2920243225097656, 0.29217279052734374, 0.2922342529296875, 0.2917130126953125, 0.2923991088867188, 
0.29214718627929687, 0.2926458740234375, 0.2934947814941406, 0.2932357177734375, 0.2925066223144531, 0.2929162292480469, 0.29218817138671876, 0.2923735046386719, 0.2922332153320312, 0.2920325012207031, 0.2919761962890625, 0.29228543090820314, 0.29216461181640624, 0.29214718627929687, 0.29235302734375, 0.292274169921875, 0.2920376281738281, 0.29199154663085936, 0.2923724670410156, 0.2924646301269531, 0.29217279052734374, 0.29284454345703126, 0.29187277221679686, 0.2919321594238281, 0.29226495361328125]",tokens/s,3.368317674233167,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1869.656064,3326.60736,0.0,2682.257408,2578.238464,s,10,1.3870304412841796,0.13870304412841797,0.00244373759901306,0.13811017608642578,0.14131588592529296,0.14323313064575197,0.14476692642211916,"[0.14515037536621095, 0.13752809143066405, 0.1378865966796875, 0.13688485717773438, 0.13833375549316407, 0.13642874145507813, 0.13711347961425782, 0.13835682678222655, 0.1384578857421875, 0.14088983154296875]",tokens/s,1845.6696578554026,kWh,1.6179650182774805e-06,8.864855210497944e-07,6.8487079142743225e-06,9.353158453601597e-06,tokens/kWh,27370433.342912387,MB,1869.656064,3328.704512,0.0,2682.257408,2667.0976,s,10,82.21512695312498,8.2215126953125,0.02726168309176718,8.22408544921875,8.254994140625,8.26155078125,8.26679609375,"[8.218158203125, 8.2300126953125, 8.231037109375, 8.253537109375, 8.2426494140625, 8.18561865234375, 8.1915498046875, 8.18607568359375, 8.268107421875, 8.208380859375]",tokens/s,7.662823416415751,kWh,9.660354430590225e-05,5.294595236465346e-05,0.0003949548913061273,0.000544504387976683,tokens/kWh,115701.54693169857,,s,629,83.32182740783702,0.1324671341936994,0.01648122008836376,0.13012069702148438,0.13181768188476561,0.13225840454101562,0.2677710559082031,"[0.1353492431640625, 0.13449932861328126, 0.13354396057128906, 0.13218812561035156, 0.1321062469482422, 0.132279296875, 0.1320765380859375, 0.1297838134765625, 0.12942950439453124, 0.13008486938476563, 0.1297357482910156, 0.12950111389160157, 0.12954214477539064, 0.12996812438964844, 0.12972236633300782, 0.129586181640625, 0.12967730712890624, 0.12944589233398437, 0.12974386596679688, 0.12966400146484375, 0.12964044189453125, 0.12967730712890624, 0.13006745910644532, 0.13016166687011718, 0.12972647094726564, 0.13020672607421874, 0.1301012420654297, 0.13006541442871095, 0.1300264892578125, 0.13026918029785156, 0.1298155517578125, 0.12965580749511718, 0.12980120849609375, 0.13100137329101563, 0.12973667907714845, 0.12997222900390626, 0.12998655700683595, 0.13009100341796875, 0.13086618041992187, 0.13041253662109376, 0.13003570556640626, 0.13003366088867188, 0.12998655700683595, 0.13012069702148438, 0.12974899291992187, 0.1296711730957031, 0.13009510803222657, 0.13012582397460937, 0.13009408569335937, 0.129723388671875, 0.12967532348632813, 0.13028140258789062, 0.1303726043701172, 0.13008793640136718, 0.13204173278808592, 0.13000090026855468, 
0.1302650909423828, 0.12970188903808594, 0.13084979248046874, 0.12998963928222657, 0.12973773193359375, 0.13070130920410156, 0.26767672729492187, 0.13097874450683594, 0.12999679565429687, 0.1298524169921875, 0.12990463256835938, 0.12967730712890624, 0.130735107421875, 0.12993740844726562, 0.12993843078613282, 0.12966195678710937, 0.13075456237792968, 0.1301739501953125, 0.1298462677001953, 0.12979507446289062, 0.12965171813964843, 0.12960255432128906, 0.12983296203613282, 0.1297029113769531, 0.12987289428710938, 0.13024870300292968, 0.12961074829101563, 0.12961587524414062, 0.1304627227783203, 0.13114883422851562, 0.13138124084472655, 0.13123989868164063, 0.1312419891357422, 0.1315799102783203, 0.13145703125, 0.1315246124267578, 0.13152359008789063, 0.13154917907714844, 0.13147648620605468, 0.13150822448730468, 0.1314273223876953, 0.1317058563232422, 0.1314283447265625, 0.13160345458984374, 0.13148159790039063, 0.13152767944335939, 0.13164851379394532, 0.13085594177246093, 0.13025791931152345, 0.12963226318359375, 0.13061222839355469, 0.12971110534667968, 0.1318850860595703, 0.13038383483886717, 0.12956982421875, 0.13069512939453126, 0.12964761352539061, 0.1297664337158203, 0.1301851806640625, 0.13052313232421875, 0.1300756530761719, 0.131198974609375, 0.1302650909423828, 0.13156045532226562, 0.13132595825195312, 0.13137612915039062, 0.13114163208007812, 0.13150822448730468, 0.13148774719238282, 0.26988442993164063, 0.1297592315673828, 0.1303961639404297, 0.13102079772949218, 0.13180621337890625, 0.1311805419921875, 0.131198974609375, 0.1320120391845703, 0.13153382873535155, 0.13055078125, 0.13116621398925782, 0.13224858093261718, 0.1315379180908203, 0.13149183654785157, 0.13085594177246093, 0.1307740173339844, 0.13012069702148438, 0.13034701538085938, 0.13029273986816406, 0.12967219543457031, 0.12965785217285156, 0.13052621459960936, 0.1309951934814453, 0.13132908630371093, 0.12980528259277344, 0.12976431274414063, 0.130735107421875, 0.1307904052734375, 0.13091941833496093, 0.13070028686523438, 0.13097164916992188, 0.13049139404296875, 0.13055282592773437, 0.13094093322753905, 0.1304883270263672, 0.13196185302734376, 0.13015142822265624, 0.13110272216796875, 0.1304780731201172, 0.13022618103027345, 0.13066342163085937, 0.1300070343017578, 0.1298851776123047, 0.12999679565429687, 0.13136691284179688, 0.13021286010742186, 0.1300305938720703, 0.13127577209472657, 0.12978688049316406, 0.12988313293457032, 0.12990975952148437, 0.13098086547851562, 0.12965785217285156, 0.13045555114746094, 0.1315246124267578, 0.1313638458251953, 0.12958309936523438, 0.12952677917480468, 0.12959642028808593, 0.13055795288085936, 0.13012889099121094, 0.13161677551269532, 0.13049856567382812, 0.2679295959472656, 0.13007772827148437, 0.1300623016357422, 0.1296732177734375, 0.1302405090332031, 0.13101568603515626, 0.13053543090820313, 0.12958309936523438, 0.12989543151855468, 0.13040640258789063, 0.12972032165527345, 0.12959744262695314, 0.12965682983398438, 0.12967628479003906, 0.1296046142578125, 0.12968960571289062, 0.1309102020263672, 0.13147955322265625, 0.13136282348632813, 0.13167820739746094, 0.1325506591796875, 0.1314693145751953, 0.1313454132080078, 0.1313280029296875, 0.13255577087402343, 0.1314273223876953, 0.13156965637207033, 0.13132698059082032, 0.1305374755859375, 0.1306746826171875, 0.1309071350097656, 0.13065113830566405, 0.13108326721191407, 0.1307310333251953, 0.13129315185546875, 0.13122866821289061, 0.13088768005371093, 0.13070541381835937, 0.1310146484375, 0.13133619689941406, 
0.1298104248046875, 0.13102490234375, 0.13102694702148437, 0.13097779846191407, 0.13345689392089843, 0.13114060974121095, 0.13085081481933594, 0.13208985900878906, 0.13153689575195313, 0.13162701416015626, 0.13186355590820312, 0.13157069396972657, 0.13132492065429688, 0.13146829223632814, 0.13144781494140625, 0.13143244934082032, 0.13133209228515624, 0.13199154663085938, 0.13162086486816407, 0.13136589050292968, 0.13134745788574217, 0.13149900817871094, 0.13125018310546874, 0.27049984741210936, 0.13173965454101563, 0.13157376098632811, 0.13129933166503907, 0.13149183654785157, 0.13152255249023437, 0.13144268798828124, 0.13151744079589844, 0.13144781494140625, 0.1312665557861328, 0.1313638458251953, 0.1312542724609375, 0.13116621398925782, 0.1316812744140625, 0.1304279022216797, 0.13107609558105468, 0.13173452758789062, 0.13157273864746094, 0.13156761169433595, 0.131114013671875, 0.13155325317382813, 0.13144883728027343, 0.13135667419433594, 0.1312972869873047, 0.13135154724121093, 0.13139456176757813, 0.13143653869628907, 0.13130137634277345, 0.13196800231933595, 0.1318707275390625, 0.131631103515625, 0.1325455322265625, 0.13174578857421876, 0.13033779907226561, 0.12979814147949217, 0.129902587890625, 0.13098188781738282, 0.13004287719726562, 0.13019442749023438, 0.12989439392089844, 0.13051187133789063, 0.12981350708007813, 0.12995890808105467, 0.13021900939941405, 0.12963839721679687, 0.12967628479003906, 0.13002546691894531, 0.13009510803222657, 0.1296711730957031, 0.13236019897460938, 0.12973362731933594, 0.13032858276367187, 0.13070643615722657, 0.1302425537109375, 0.1299220428466797, 0.12974490356445312, 0.12966812133789063, 0.12980630493164064, 0.129623046875, 0.13014938354492187, 0.13049958801269532, 0.12993023681640625, 0.1298841552734375, 0.26678680419921874, 0.1296680908203125, 0.12964556884765624, 0.12968447875976563, 0.12973260498046876, 0.129765380859375, 0.1300142059326172, 0.1296855010986328, 0.12954725646972656, 0.1295247344970703, 0.12960870361328125, 0.1295667266845703, 0.12963941955566408, 0.12973158264160156, 0.13008895874023438, 0.13011558532714843, 0.12971827697753907, 0.12998963928222657, 0.12966706848144532, 0.12973568725585938, 0.12970803833007813, 0.1298350067138672, 0.1297090606689453, 0.12965274047851563, 0.12967730712890624, 0.13019648742675782, 0.12975820922851564, 0.12972854614257812, 0.12970799255371093, 0.1302794189453125, 0.12972647094726564, 0.12966400146484375, 0.12970086669921874, 0.12959333801269532, 0.1299220428466797, 0.1300633544921875, 0.1296537628173828, 0.12968038940429688, 0.1319403839111328, 0.12973974609375, 0.13016064453125, 0.13014527893066405, 0.1306234893798828, 0.1300377655029297, 0.13088461303710938, 0.13168946838378906, 0.1299251251220703, 0.12965580749511718, 0.12978688049316406, 0.13135565185546874, 0.13036749267578124, 0.12981248474121093, 0.1297510986328125, 0.1297816925048828, 0.12961383056640624, 0.1298667449951172, 0.12978175354003907, 0.12962611389160156, 0.13000294494628906, 0.12986572265625, 0.1298462677001953, 0.12970803833007813, 0.12991897583007814, 0.2678077392578125, 0.13065216064453125, 0.12982272338867187, 0.13018418884277344, 0.13069625854492187, 0.12963424682617186, 0.12971827697753907, 0.12956063842773438, 0.13137094116210937, 0.13036647033691406, 0.12986265563964844, 0.12970803833007813, 0.1298462677001953, 0.12968447875976563, 0.1297029113769531, 0.12973670959472655, 0.12944793701171875, 0.12968345642089843, 0.1307125701904297, 0.12972134399414062, 0.12976742553710938, 0.12971929931640624, 0.12958309936523438, 
0.13087026977539062, 0.12964659118652344, 0.1298032684326172, 0.12998757934570312, 0.13012275695800782, 0.12993536376953124, 0.1304698944091797, 0.1299988555908203, 0.12983807373046874, 0.13000192260742188, 0.13018623352050782, 0.1299199981689453, 0.13019648742675782, 0.13000090026855468, 0.13050265502929687, 0.12989645385742188, 0.12952677917480468, 0.12986778259277343, 0.13002546691894531, 0.13098597717285157, 0.129828857421875, 0.12967730712890624, 0.13003981018066407, 0.1295636444091797, 0.12984115600585938, 0.12994764709472656, 0.13031321716308594, 0.12971315002441405, 0.12961485290527344, 0.12971417236328125, 0.12996202087402345, 0.13037257385253906, 0.12986061096191406, 0.12995584106445313, 0.13010226440429687, 0.1315707550048828, 0.1297571258544922, 0.12957183837890626, 0.12970188903808594, 0.13077810668945314, 0.2692198486328125, 0.13007154846191407, 0.12988313293457032, 0.12971212768554688, 0.12965481567382814, 0.12962608337402343, 0.12967219543457031, 0.12959231567382812, 0.12979200744628908, 0.1296680908203125, 0.13002957153320313, 0.12997938537597656, 0.12960153198242189, 0.13012786865234374, 0.13016986083984375, 0.12967219543457031, 0.1310576629638672, 0.12998348999023437, 0.12976332092285156, 0.1300869140625, 0.1296824951171875, 0.13126751708984374, 0.12993536376953124, 0.13007359313964845, 0.12969984436035156, 0.12999679565429687, 0.1298698272705078, 0.1300858917236328, 0.1298913269042969, 0.1299404754638672, 0.13012275695800782, 0.13052313232421875, 0.1302425537109375, 0.13017805480957031, 0.12996812438964844, 0.12995071411132814, 0.13004083251953125, 0.130302978515625, 0.1300623321533203, 0.13056716918945313, 0.1300695037841797, 0.1299261474609375, 0.12973260498046876, 0.13010330200195314, 0.1296855010986328, 0.13009613037109374, 0.12976742553710938, 0.12981350708007813, 0.12980429077148437, 0.12988723754882814, 0.1295564727783203, 0.12995277404785155, 0.12949913024902343, 0.12956877136230469, 0.129870849609375, 0.13016371154785156, 0.12968754577636718, 0.1295667266845703, 0.12944383239746093, 0.1296282196044922, 0.1295543670654297, 0.12969778442382812, 0.12969062805175782, 0.26870681762695314, 0.1297827911376953, 0.1297592315673828, 0.131557373046875, 0.12989439392089844, 0.13009408569335937, 0.1305999298095703, 0.1311446990966797, 0.13035110473632813, 0.13046885681152343, 0.1303521270751953, 0.13139045715332032, 0.1301749725341797, 0.1304627227783203, 0.13010841369628906, 0.1311856689453125, 0.1305753936767578, 0.1308395233154297, 0.12992515563964843, 0.13041970825195312, 0.13009100341796875, 0.1309265594482422, 0.130155517578125, 0.13011354064941405, 0.12959437561035156, 0.1299148864746094, 0.13058047485351562, 0.13014527893066405, 0.13060198974609374, 0.12966400146484375, 0.12963533020019533, 0.1295380554199219, 0.12953395080566407, 0.1297786865234375, 0.13530213928222656, 0.13242477416992188, 0.13271443176269532, 0.1324779510498047, 0.13233970642089843, 0.13220352172851563, 0.1323294677734375, 0.13220556640625, 0.1319833526611328, 0.1321994171142578, 0.13245542907714844, 0.132347900390625, 0.13226495361328125, 0.13233561706542968, 0.13197415161132814, 0.13208883666992188, 0.13216461181640626, 0.1321881561279297, 0.13220249938964843, 0.13227622985839843, 0.1321246795654297, 0.13223423767089842, 0.13205606079101562, 0.13206629943847656, 0.13218611145019532, 0.13227622985839843, 0.13237759399414062, 0.13250457763671875, 0.13218917846679687, 0.27291647338867187, 0.13057228088378905, 0.12985139465332032, 0.12977766418457032, 0.12981146240234376, 0.1296363525390625, 
0.12980120849609375, 0.12959642028808593, 0.12995993041992188, 0.129691650390625, 0.12974592590332032, 0.12970086669921874, 0.12972647094726564, 0.1295984649658203, 0.12972032165527345, 0.13100953674316407, 0.13002957153320313, 0.13050367736816407, 0.13074021911621095, 0.13157478332519532, 0.12986880493164063, 0.12991693115234376, 0.12960050964355468, 0.12965682983398438, 0.13195468139648436, 0.13054464721679687, 0.1317058563232422, 0.13119078063964842, 0.13114982604980469, 0.1297049560546875, 0.12969573974609375, 0.12971315002441405, 0.12958412170410155, 0.1308958740234375, 0.1309696044921875, 0.12978994750976564, 0.12954112243652344, 0.12962713623046876, 0.12967832946777344, 0.12949913024902343, 0.12971827697753907, 0.12982989501953124, 0.1297622985839844, 0.12973464965820314, 0.12974490356445312, 0.1297244110107422, 0.13159628295898437, 0.13082418823242187, 0.1297407989501953, 0.12985548400878907, 0.1299814453125, 0.13154815673828124, 0.13089791870117187, 0.12983602905273436, 0.1315010528564453, 0.13150003051757814, 0.132068359375, 0.13167718505859374, 0.13150309753417969, 0.13090509033203124, 0.12976332092285156, 0.1298053741455078, 0.1295963592529297]",tokens/s,7.549042304619914,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) 
File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2805.69856,8389.132288,0.0,7742.685184,7007.0144,s,10,5.757144287109376,0.5757144287109376,0.0011485729711129637,0.5757064819335938,0.5769704406738282,0.5771491607666016,0.5772921368408204,"[0.5760953979492187, 0.5769307250976563, 0.574836669921875, 0.5745257568359375, 0.574697998046875, 0.5738778076171875, 0.5753175659179688, 0.5767816162109375, 0.577327880859375, 0.5767528686523438]",tokens/s,444.66490196050984,kWh,6.783310471125591e-06,3.7157116531489013e-06,3.30976653669996e-05,4.3596687491274095e-05,tokens/kWh,5872005.758493431,MB,2805.69856,8389.132288,0.0,7742.685184,7283.984384,s,10,336.2841484375,33.62841484375,0.0041773861216769514,33.62973046875,33.63291328125,33.63334921875,33.63369796875,"[33.62066796875, 33.6314140625, 33.6304453125, 33.62453125, 33.627359375, 33.629015625, 33.63378515625, 33.63111328125, 33.623, 33.63281640625]",tokens/s,1.873415690056198,kWh,0.0003969375698617947,0.00021755745967517217,0.0019112099178555991,0.0025257049473925665,tokens/kWh,24943.531137727943,,s,629,340.93043640136693,0.5420197717032864,0.06847421137969394,0.5337415771484375,0.53422822265625,0.5344366455078124,1.1091660595703123,"[0.5336361083984374, 0.5341531982421875, 0.533359619140625, 0.5337293090820312, 0.53304931640625, 0.5336708984375, 0.5331292114257813, 0.5338419189453125, 0.5331640625, 0.5335398559570312, 0.53309033203125, 0.5335951538085938, 0.5331834716796875, 0.5338798217773437, 0.5333298950195312, 0.5336842041015625, 0.5331845092773437, 0.5336678466796875, 0.533411865234375, 0.53406103515625, 0.5336094970703125, 0.5338388671875, 0.5332828369140625, 0.5337896728515625, 0.5332971801757812, 0.533770263671875, 0.5334599609375, 0.5337487182617188, 0.5331834716796875, 0.5339166870117188, 0.5336627197265625, 0.5344092407226563, 0.5335090942382813, 0.5339432983398438, 0.533738525390625, 0.5342853393554687, 0.5333934326171875, 0.5340252075195312, 0.53340771484375, 0.534096923828125, 0.5334374389648437, 0.5337191162109375, 0.5333012084960937, 0.5337835693359375, 0.5332633666992187, 0.53411328125, 0.533443603515625, 0.5338327026367188, 0.5333759765625, 0.5339893798828125, 0.5333831787109375, 0.5338306274414063, 0.5334395141601562, 0.5340466918945312, 0.5333514404296875, 0.5337937622070312, 0.5338665161132813, 0.5344153442382813, 0.5337047119140625, 0.5338112182617187, 0.5332122192382812, 0.5337702026367187, 1.1116871337890626, 0.5333892822265625, 0.53395556640625, 0.5335818481445312, 0.534329345703125, 0.53378662109375, 0.533939208984375, 0.533485595703125, 0.5340098266601563, 0.533265380859375, 0.5337507934570312, 0.5338316650390625, 0.53416552734375, 0.533770263671875, 0.5342669067382813, 0.5334896850585937, 0.5339904174804687, 0.5334537963867187, 0.5338736572265625, 
0.5339002685546875, 0.5341122436523438, 0.5333739624023438, 0.5337057495117188, 0.5331763305664062, 0.5336084594726562, 0.5334323120117187, 0.5338009643554688, 0.5331425170898437, 0.5337620239257812, 0.5331527709960937, 0.5337323608398438, 0.5339801635742187, 0.5345730590820312, 0.53325927734375, 0.5336207275390625, 0.53326953125, 0.5340211791992188, 0.5333349609375, 0.5338777465820312, 0.53340673828125, 0.5337753295898438, 0.533338134765625, 0.533880859375, 0.533570556640625, 0.5340579833984375, 0.5336616821289063, 0.5373572998046875, 0.5339514770507813, 0.53390234375, 0.5336719360351563, 0.5342648315429688, 0.5339279174804688, 0.5340426025390625, 0.5336504516601562, 0.5338839721679688, 0.53328173828125, 0.534540283203125, 0.5336729736328125, 0.5342125854492188, 0.5336329956054687, 0.53383984375, 0.5335726318359375, 0.5343897705078124, 1.10948046875, 0.5335797729492188, 0.5341194458007813, 0.5339064331054687, 0.5345751342773437, 0.5335787353515625, 0.533949462890625, 0.5334251708984376, 0.5337794799804687, 0.5332059936523438, 0.5337763671875, 0.5334661254882812, 0.533986328125, 0.5332305908203125, 0.5337221069335938, 0.5334609985351563, 0.53401904296875, 0.5335316772460937, 0.5339371337890625, 0.53347021484375, 0.5337108764648437, 0.5331456298828126, 0.5338245239257813, 0.5349140625, 0.5346262817382812, 0.5334579467773437, 0.5338726196289062, 0.533796875, 0.5340897216796875, 0.5336565551757813, 0.5343047485351563, 0.5332715454101562, 0.534076416015625, 0.533580810546875, 0.5337415771484375, 0.5334824829101562, 0.53382861328125, 0.53319677734375, 0.5336668090820312, 0.5332664184570313, 0.5337743530273438, 0.5335418701171875, 0.5340856323242188, 0.5335429077148437, 0.53378662109375, 0.5334508056640626, 0.5341675415039062, 0.5349437255859375, 0.5338644409179687, 0.5339566040039062, 0.5342269287109375, 0.5335900268554687, 0.5341214599609375, 0.53355419921875, 0.5342976684570313, 0.5334230346679687, 0.5340139770507812, 0.5335992431640625, 0.5341634521484375, 0.5334210815429687, 0.5340436401367188, 0.5337569580078125, 0.5342617797851562, 1.1091947021484374, 0.5332183227539062, 0.5337579345703125, 0.533496826171875, 0.5339699096679688, 0.53334326171875, 0.5339320068359374, 0.5333401489257813, 0.533749755859375, 0.533454833984375, 0.533738525390625, 0.5335623779296875, 0.5348157348632813, 0.5332367553710937, 0.5340784301757813, 0.5337159423828125, 0.5338306274414063, 0.5333944091796875, 0.5338828735351563, 0.533201904296875, 0.5338593139648438, 0.53323681640625, 0.5340261840820313, 0.5336555786132813, 0.5336821899414063, 0.5331660766601563, 0.5336535034179688, 0.5331763305664062, 0.5336637573242188, 0.5330974731445313, 0.5336381225585938, 0.5332664184570313, 0.5338818359375, 0.533327880859375, 0.53427099609375, 0.5336790771484375, 0.5338746948242188, 0.533159912109375, 0.5337006225585937, 0.5332623291015625, 0.5337293090820312, 0.5337589721679687, 0.533981201171875, 0.533370849609375, 0.5360977783203125, 0.5334261474609375, 0.5341010131835937, 0.5335623779296875, 0.5340078125, 0.5334896850585937, 0.5340006103515625, 0.5336053466796875, 0.5344645385742187, 0.53421875, 0.5342853393554687, 0.5334138793945312, 0.5337979125976563, 0.53326953125, 0.533792724609375, 0.5334814453125, 0.5338665161132813, 0.5334456176757812, 0.53382861328125, 1.1090924072265624, 0.5336094970703125, 0.5341419677734375, 0.5334814453125, 0.5340355224609376, 0.53357666015625, 0.53401904296875, 0.5336771240234375, 0.5337230834960938, 0.5334425048828125, 0.5336268920898437, 0.5331640625, 0.5335726318359375, 0.533123046875, 
0.5340631103515625, 0.5331763305664062, 0.5340078125, 0.5347676391601562, 0.5339586791992188, 0.5332183227539062, 0.5338541870117187, 0.5333883056640625, 0.53372314453125, 0.5331834716796875, 0.5336309814453125, 0.5334537963867187, 0.534108154296875, 0.5333411865234375, 0.5341143188476563, 0.5333524780273438, 0.5335510864257812, 0.5337293090820312, 0.5341480712890625, 0.5335869140625, 0.5337671508789062, 0.533243896484375, 0.5338193969726562, 0.5337169799804687, 0.5342074584960937, 0.533265380859375, 0.5335838623046875, 0.533375, 0.5341593627929687, 0.5334948120117188, 0.5341030883789063, 0.5336299438476563, 0.5339105224609375, 0.5335091552734375, 0.5336759643554687, 0.5331793823242188, 0.5338460083007812, 0.5332551879882812, 0.5341522216796875, 0.5335869140625, 0.53411328125, 0.5335480346679687, 0.53422802734375, 0.5365042724609375, 0.5340682373046876, 0.533644287109375, 0.5340825805664062, 0.533528564453125, 0.5341306762695313, 1.1086192626953124, 0.5332972412109375, 0.534067138671875, 0.5337210693359375, 0.5342228393554688, 0.53347021484375, 0.53422900390625, 0.5334702758789063, 0.5339790649414062, 0.5337579345703125, 0.5341173706054687, 0.5335818481445312, 0.534245361328125, 0.5336678466796875, 0.5341265869140625, 0.5333944091796875, 0.5336135864257813, 0.5332551879882812, 0.5341911010742187, 0.533359619140625, 0.5341552734375, 0.5331834716796875, 0.53369140625, 0.5332572021484375, 0.53432421875, 0.533712890625, 0.5336708984375, 0.533106689453125, 0.533802001953125, 0.5334364013671875, 0.53359716796875, 0.5331845092773437, 0.53380810546875, 0.5332346801757812, 0.5336627197265625, 0.5332254638671875, 0.5337774047851562, 0.5336309814453125, 0.537302001953125, 0.5335510864257812, 0.5340999755859375, 0.5334763793945313, 0.5337241821289063, 0.5331824340820313, 0.5339197387695312, 0.53344970703125, 0.533875732421875, 0.53338623046875, 0.5339381713867187, 0.5334395141601562, 0.5338777465820312, 0.5334159545898437, 0.534012939453125, 0.5338378295898437, 0.5343323974609375, 0.534024169921875, 0.534361083984375, 0.5335521240234375, 0.5342996826171875, 0.5335305786132812, 0.533734375, 0.5334640502929687, 0.5342750854492188, 1.1099852294921875, 0.533232666015625, 0.5340067749023437, 0.5332183227539062, 0.5340108642578125, 0.5337364501953125, 0.534192138671875, 0.533570556640625, 0.5341460571289063, 0.533876708984375, 0.53406005859375, 0.5334681396484375, 0.5340528564453125, 0.5333984985351562, 0.53374462890625, 0.5331937255859375, 0.5337579345703125, 0.533475341796875, 0.5339535522460938, 0.533396484375, 0.53382861328125, 0.5333401489257813, 0.5337302856445313, 0.535394287109375, 0.5337927856445313, 0.5334507446289063, 0.5339443359375, 0.5337825317382813, 0.5344010009765625, 0.533728271484375, 0.5337845458984375, 0.5336329956054687, 0.5338839111328125, 0.5333759765625, 0.5338009643554688, 0.533201904296875, 0.5340794677734375, 0.5336320190429688, 0.5339146118164062, 0.53344873046875, 0.5339638061523437, 0.5336893310546875, 0.5338931274414063, 0.5337426147460937, 0.5341430053710937, 0.5335347290039063, 0.5341624145507813, 0.5345700073242188, 0.5347133178710938, 0.5338163452148438, 0.53465087890625, 0.5338040161132812, 0.5341941528320312, 0.533674072265625, 0.5342606811523437, 0.5343539428710937, 0.5342125854492188, 0.5335050048828125, 0.5339566040039062, 0.533570556640625, 0.5341276245117188, 0.5338695678710937, 0.5341880493164063, 1.111352294921875, 0.533518310546875, 0.534761474609375, 0.5335090942382813, 0.5338511352539063, 0.5336331176757813, 0.5341572265625, 0.5336279296875, 
0.5341102294921874, 0.5339033813476562, 0.53374462890625, 0.5332838134765625, 0.5336801147460938, 0.5332142333984375, 0.5337333984375, 0.5331947631835937, 0.534908935546875, 0.533190673828125, 0.5336279296875, 0.5330892944335938, 0.5340794677734375, 0.5336063842773437, 0.5338890380859375, 0.5336463623046875, 0.5338880004882812, 0.5336104736328126, 0.533712890625, 0.5332838134765625, 0.5341624145507813, 0.5335418701171875, 0.5341460571289063, 0.53351220703125, 0.5338890380859375, 0.533243896484375, 0.5342177124023437, 0.5337088012695312, 0.5343447265625, 0.533712890625, 0.5340774536132813, 0.5339156494140626, 0.5344481201171875, 0.5336882934570313, 0.5341992797851562, 0.5336084594726562, 0.533676025390625, 0.5333197021484375, 0.5338849487304688, 0.533917724609375, 0.53441943359375, 0.5336135864257813, 0.5341010131835937, 0.5335132446289063, 0.5340897216796875, 0.5336616821289063, 0.5339934692382813, 0.5337794799804687, 0.5340200805664063, 0.5335142211914062, 0.5342648315429688, 0.533865478515625, 0.5339801635742187, 0.5338695678710937, 0.534255615234375, 1.112015869140625, 0.5335879516601563, 0.534097900390625, 0.5332183227539062, 0.5336187133789062, 0.5332059936523438, 0.5337405395507813, 0.5333309326171874, 0.5338091430664063, 0.533423095703125, 0.5338306274414063, 0.533191650390625, 0.5339586791992188, 0.5338961791992187, 0.5337569580078125, 0.5333165893554688, 0.534349853515625, 0.5343836059570313, 0.5336043701171875, 0.5334517822265625, 0.5338685913085938, 0.5332326049804688, 0.5336350708007812, 0.5330892944335938, 0.5336309814453125, 0.5331548461914063, 0.5336053466796875, 0.5331824340820313, 0.5337999267578125, 0.5332408447265625, 0.533622802734375, 0.5338716430664062, 0.5345494995117187, 0.53351220703125, 0.5343057861328125, 0.5334180297851563, 0.5338480224609375, 0.53349169921875, 0.5338357543945312, 0.533622802734375, 0.533875732421875, 0.5334579467773437, 0.5337610473632812, 0.5334579467773437, 0.5340170288085937, 0.5332745971679688, 0.534091796875, 0.5333995361328125, 0.5338532104492187, 0.5333872680664062, 0.5340620727539063, 0.533886962890625, 0.5340108642578125, 0.5335654296875, 0.533928955078125, 0.5333514404296875, 0.533950439453125, 0.5334404907226562, 0.5343201293945312, 0.5335582885742187, 0.5337221069335938, 0.5335408935546875, 0.5341378784179688, 1.111615478515625, 0.5335418701171875, 0.534171630859375, 0.5334824829101562, 0.533771240234375, 0.5340958862304688, 0.5341583251953125, 0.5335234375, 0.5342811889648438, 0.5338716430664062, 0.53395556640625, 0.533296142578125, 0.5340877075195313, 0.53382861328125, 0.5342125854492188, 0.5335961303710938, 0.5341245727539062, 0.5336575927734375, 0.5344921875, 0.5339248657226563, 0.5340242309570312, 0.533560302734375, 0.5341531982421875, 0.5337302856445313, 0.5339627685546875, 0.5335162963867187, 0.535699462890625, 0.5336187133789062, 0.5340529174804688, 0.5334834594726563, 0.5338634033203125, 0.5333811645507812, 0.5340415649414062, 0.5334630126953125, 0.53372314453125, 0.5333606567382813, 0.5338726196289062, 0.5338736572265625, 0.5340108642578125, 0.5332828369140625, 0.5338992919921876, 0.5332766723632812, 0.5340753784179687, 0.5338142700195313, 0.5339904174804687, 0.5335613403320313, 0.5342269287109375, 0.533306396484375, 0.5340200805664063, 0.5333984985351562, 0.5338890380859375, 0.5335675048828125, 0.5339054565429687, 0.5333565063476563, 0.5340200805664063, 0.5336309814453125, 0.5339617309570313, 0.5336258544921875, 0.5341859741210937, 0.5334886474609375, 0.5339893798828125, 0.53378662109375, 
0.534213623046875]",tokens/s,1.8449511479212646,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,,cuda,0,42,,,,,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1721.061376,22129.672192,0.0,21483.225088,20799.036928,s,10,27.921223388671876,2.7921223388671876,0.002873024218980136,2.791891357421875,2.7956688476562497,2.7963155029296876,2.7968328271484375,"[2.793120361328125, 2.796962158203125, 2.790662353515625, 2.790180419921875, 2.7901220703125, 2.787002685546875, 2.79004345703125, 2.793321533203125, 2.795525146484375, 2.794283203125]",tokens/s,91.68652692484228,kWh,3.2928969429598913e-05,1.8046407099463975e-05,0.00015823184880759956,0.00020920722533666246,tokens/kWh,1223667.1060860218,MB,1726.52544,22129.672192,0.0,21483.225088,20902.142976,s,10,1662.0463125,166.20463124999998,0.013890741352160448,166.199171875,166.2239140625,166.22626953125,166.22815390625,"[166.211921875, 166.223390625, 166.191390625, 166.191359375, 166.191640625, 166.203515625, 166.194828125, 166.2176875, 166.191953125, 166.228625]",tokens/s,0.3790508093919315,kWh,0.0019620591764814327,0.001075380349219922,0.009375421333664404,0.012412860859365755,tokens/kWh,5075.381148131151,,s,629,1684.5351452636726,2.678116288177539,0.33164961442268687,2.637888427734375,2.63990244140625,2.640667578125,5.42840712890625,"[2.638138427734375, 2.638636962890625, 2.638865478515625, 2.6390283203125, 2.6389248046875, 2.63697509765625, 2.6374482421875, 2.637588623046875, 2.6383994140625, 2.637781982421875, 2.63811181640625, 2.639296630859375, 2.6380380859375, 2.637068359375, 2.638099365234375, 2.637255615234375, 2.637476806640625, 2.63743798828125, 2.637467529296875, 2.63781787109375, 2.637115478515625, 2.637315185546875, 2.63943896484375, 2.639993896484375, 2.6398291015625, 2.63933349609375, 2.63897802734375, 2.639859619140625, 2.63980126953125, 2.639171630859375, 2.637111328125, 2.63720556640625, 2.636434326171875, 2.63687158203125, 2.63707958984375, 2.639266845703125, 2.63845263671875, 2.637464599609375, 2.638060546875, 2.64020263671875, 2.638950439453125, 2.63784033203125, 2.63769091796875, 2.6381884765625, 2.639057861328125, 2.637621337890625, 2.638141357421875, 2.63823876953125, 2.639244384765625, 2.63790283203125, 2.6387138671875, 2.6390537109375, 2.638361572265625, 2.637244384765625, 2.639206298828125, 2.638740478515625, 2.639088623046875, 2.638509033203125, 2.637013916015625, 2.637980712890625, 2.637846435546875, 2.638864501953125, 5.43535302734375, 2.63853662109375, 2.63802978515625, 2.63697607421875, 2.637854736328125, 2.638664794921875, 2.63739794921875, 2.637233154296875, 2.637539306640625, 2.637358154296875, 2.637158447265625, 2.637360107421875, 2.63921875, 2.63846923828125, 2.638044189453125, 2.639015869140625, 2.636918701171875, 2.637613037109375, 2.637201416015625, 2.63785888671875, 2.6367763671875, 2.637592529296875, 2.63693310546875, 2.6386318359375, 2.63771337890625, 2.638298095703125, 2.637001708984375, 2.6378701171875, 2.6421064453125, 2.6414111328125, 2.641197021484375, 
2.64100146484375, 2.63889111328125, 2.6406533203125, 2.6406552734375, 2.637020263671875, 2.637322265625, 2.63722900390625, 2.63769091796875, 2.6369853515625, 2.63765185546875, 2.63745751953125, 2.638650390625, 2.638825439453125, 2.637306884765625, 2.64330859375, 2.641152099609375, 2.6409931640625, 2.63775244140625, 2.639099853515625, 2.638685302734375, 2.6388388671875, 2.637993896484375, 2.639129638671875, 2.638023681640625, 2.638602294921875, 2.63790283203125, 2.63815380859375, 2.637550537109375, 2.63849169921875, 2.639201171875, 2.6400400390625, 2.638790771484375, 5.4299453125, 2.63796826171875, 2.638594970703125, 2.637592529296875, 2.6385439453125, 2.637388916015625, 2.638464111328125, 2.637737060546875, 2.63819775390625, 2.63681005859375, 2.637154296875, 2.63805224609375, 2.638739501953125, 2.63828173828125, 2.638024658203125, 2.637736083984375, 2.637816650390625, 2.63781884765625, 2.63653466796875, 2.637263916015625, 2.637474853515625, 2.63747998046875, 2.636569580078125, 2.64140478515625, 2.637125732421875, 2.638499755859375, 2.6369833984375, 2.637737060546875, 2.636338134765625, 2.63744921875, 2.637201416015625, 2.638740478515625, 2.638299072265625, 2.63929150390625, 2.637087646484375, 2.637667236328125, 2.6377021484375, 2.636683349609375, 2.637559814453125, 2.637581298828125, 2.6383564453125, 2.636505126953125, 2.63706005859375, 2.636032958984375, 2.6379765625, 2.637094970703125, 2.63819775390625, 2.64007568359375, 2.6376611328125, 2.6375966796875, 2.6389228515625, 2.63778515625, 2.639754150390625, 2.6381884765625, 2.63853466796875, 2.63933642578125, 2.639509521484375, 2.639793212890625, 2.640154541015625, 2.63959033203125, 2.636541015625, 2.6371533203125, 2.63670068359375, 5.43085791015625, 2.64038818359375, 2.640405517578125, 2.63921875, 2.638604248046875, 2.638392333984375, 2.63703857421875, 2.638612548828125, 2.638138427734375, 2.637631591796875, 2.637518798828125, 2.636412841796875, 2.639814697265625, 2.63709912109375, 2.638194580078125, 2.636851318359375, 2.637906982421875, 2.636904541015625, 2.63757421875, 2.637557861328125, 2.637801513671875, 2.637253662109375, 2.638193603515625, 2.63731396484375, 2.63754345703125, 2.637656005859375, 2.638063720703125, 2.6375537109375, 2.63655517578125, 2.636990478515625, 2.6373427734375, 2.638109619140625, 2.637427734375, 2.637330322265625, 2.63708154296875, 2.63813232421875, 2.636632080078125, 2.638185546875, 2.636789794921875, 2.6382294921875, 2.636559326171875, 2.63756494140625, 2.6364814453125, 2.637990966796875, 2.64270947265625, 2.63874755859375, 2.640064453125, 2.638127197265625, 2.637509521484375, 2.637388916015625, 2.637737060546875, 2.63737353515625, 2.639585205078125, 2.637530029296875, 2.638297119140625, 2.63809423828125, 2.637974609375, 2.63773583984375, 2.637834228515625, 2.63741552734375, 2.637762451171875, 2.637958251953125, 2.63785791015625, 5.428443359375, 2.63807275390625, 2.6370693359375, 2.638107666015625, 2.637665283203125, 2.63819677734375, 2.6379326171875, 2.63821923828125, 2.6370078125, 2.63802685546875, 2.638180419921875, 2.63954443359375, 2.6387373046875, 2.638023681640625, 2.636853271484375, 2.637129638671875, 2.637197265625, 2.636950439453125, 2.63758544921875, 2.638212158203125, 2.63828076171875, 2.6365849609375, 2.6370908203125, 2.638017578125, 2.63769384765625, 2.636833740234375, 2.638193603515625, 2.638341064453125, 2.640734130859375, 2.63990576171875, 2.639550537109375, 2.638340087890625, 2.63885107421875, 2.63923095703125, 2.6383955078125, 2.636854248046875, 2.636768310546875, 2.6369443359375, 
2.637570068359375, 2.637836181640625, 2.63730078125, 2.63779541015625, 2.63750244140625, 2.637592529296875, 2.6366474609375, 2.638162841796875, 2.636866455078125, 2.6375146484375, 2.636231689453125, 2.6370693359375, 2.637305908203125, 2.63849169921875, 2.636894287109375, 2.636900390625, 2.637894775390625, 2.637490234375, 2.6377666015625, 2.64785107421875, 2.6376591796875, 2.637158447265625, 2.637832275390625, 2.63720849609375, 2.639467529296875, 5.425724609375, 2.637199462890625, 2.63809130859375, 2.638032958984375, 2.637854736328125, 2.638718017578125, 2.63857861328125, 2.637035400390625, 2.63773583984375, 2.638855224609375, 2.63739892578125, 2.638341064453125, 2.63885205078125, 2.639034423828125, 2.63807177734375, 2.637761474609375, 2.639326171875, 2.639801513671875, 2.639014892578125, 2.639296630859375, 2.637675537109375, 2.63769287109375, 2.637137939453125, 2.637119384765625, 2.6383955078125, 2.63736328125, 2.637094970703125, 2.637177734375, 2.6371953125, 2.637066162109375, 2.637592529296875, 2.639177734375, 2.637533203125, 2.6364169921875, 2.636937255859375, 2.6390283203125, 2.63927294921875, 2.63655126953125, 2.64357373046875, 2.637197265625, 2.637576171875, 2.638668701171875, 2.639214599609375, 2.639289306640625, 2.639532958984375, 2.638011474609375, 2.63690234375, 2.63817626953125, 2.636898193359375, 2.637287353515625, 2.637656982421875, 2.637981689453125, 2.636938232421875, 2.63751171875, 2.6373232421875, 2.63774609375, 2.637768798828125, 2.63883056640625, 2.639134765625, 2.641203125, 2.639278076171875, 2.638201904296875, 2.637488037109375, 5.42831396484375, 2.637304931640625, 2.638138427734375, 2.636865478515625, 2.639958984375, 2.63714404296875, 2.638241943359375, 2.63680810546875, 2.638630859375, 2.638310302734375, 2.640628662109375, 2.639406982421875, 2.63910302734375, 2.6390712890625, 2.640678955078125, 2.640310302734375, 2.63718701171875, 2.637646728515625, 2.637350830078125, 2.638884765625, 2.6375947265625, 2.637400146484375, 2.6368818359375, 2.642212890625, 2.637052978515625, 2.637252685546875, 2.636802001953125, 2.64029296875, 2.6377216796875, 2.6375556640625, 2.6366923828125, 2.63757421875, 2.638635009765625, 2.637696044921875, 2.637308837890625, 2.637477783203125, 2.637402099609375, 2.6365869140625, 2.637488037109375, 2.638253173828125, 2.6386328125, 2.637402099609375, 2.63834716796875, 2.6377451171875, 2.637392822265625, 2.636634033203125, 2.637593505859375, 2.637182861328125, 2.637476806640625, 2.6378896484375, 2.637464599609375, 2.639731689453125, 2.637717529296875, 2.63756591796875, 2.638674072265625, 2.63798974609375, 2.638200927734375, 2.63733251953125, 2.63752197265625, 2.63793359375, 2.636739501953125, 2.63832470703125, 2.637581298828125, 5.43222705078125, 2.638201904296875, 2.63801025390625, 2.639353759765625, 2.63851513671875, 2.637308837890625, 2.637641845703125, 2.6370263671875, 2.636823486328125, 2.638310302734375, 2.637739990234375, 2.636812255859375, 2.637005859375, 2.63739794921875, 2.637341796875, 2.636630126953125, 2.64057861328125, 2.638487548828125, 2.6377880859375, 2.63784033203125, 2.637433837890625, 2.638781494140625, 2.638268310546875, 2.63773486328125, 2.63734375, 2.64085302734375, 2.637772705078125, 2.63678369140625, 2.636781494140625, 2.637518798828125, 2.637106201171875, 2.636708984375, 2.63695166015625, 2.63906103515625, 2.6390087890625, 2.639414306640625, 2.63884912109375, 2.638530517578125, 2.639602783203125, 2.639301513671875, 2.638671875, 2.6394306640625, 2.640407470703125, 2.63750244140625, 2.640359375, 2.640257080078125, 
2.641056884765625, 2.639901611328125, 2.64020263671875, 2.640825439453125, 2.637892578125, 2.63866357421875, 2.63870166015625, 2.638103515625, 2.641052734375, 2.639697998046875, 2.639138916015625, 2.63690234375, 2.63733447265625, 2.637642822265625, 2.63709912109375, 2.6370068359375, 2.638327880859375, 5.4337607421875, 2.63835546875, 2.638138427734375, 2.637888427734375, 2.63785986328125, 2.63816796875, 2.637724609375, 2.63817431640625, 2.63734375, 2.637358154296875, 2.637413330078125, 2.63850390625, 2.63788134765625, 2.638455810546875, 2.63848046875, 2.637498291015625, 2.637077392578125, 2.640005126953125, 2.638171142578125, 2.63721875, 2.638088134765625, 2.637340576171875, 2.637073486328125, 2.637824951171875, 2.63834619140625, 2.63819580078125, 2.63836474609375, 2.63822021484375, 2.637216796875, 2.63757421875, 2.63648046875, 2.6365439453125, 2.63712255859375, 2.63768994140625, 2.636919921875, 2.637600830078125, 2.637790283203125, 2.63785986328125, 2.636936279296875, 2.637821044921875, 2.637937744140625, 2.638752685546875, 2.63861767578125, 2.637978515625, 2.63923291015625, 2.639403076171875, 2.63928515625, 2.63895751953125, 2.63758447265625, 2.637350830078125, 2.63724853515625, 2.6378095703125, 2.638350341796875, 2.637401123046875, 2.636908447265625, 2.63790478515625, 2.638477294921875, 2.638087158203125, 2.639475830078125, 2.639469482421875, 2.640037841796875, 2.637907958984375, 2.636857421875, 5.4322666015625, 2.640575439453125, 2.641816650390625, 2.640384033203125, 2.639365234375, 2.639097900390625, 2.6402724609375, 2.638215087890625, 2.638160888671875, 2.637603759765625, 2.637083740234375, 2.6377421875, 2.637223876953125, 2.6389013671875, 2.638928955078125, 2.637473876953125, 2.637214599609375, 2.636859375, 2.637595703125, 2.63667822265625, 2.6370498046875, 2.638342041015625, 2.63901904296875, 2.637927490234375, 2.63882958984375, 2.640554931640625, 2.64306884765625, 2.639297607421875, 2.6393896484375, 2.64045166015625, 2.63986376953125, 2.639677490234375, 2.64067578125, 2.64060009765625, 2.640331787109375, 2.640183349609375, 2.639626220703125, 2.640194580078125, 2.63760498046875, 2.639280029296875, 2.6397255859375, 2.641039306640625, 2.6397880859375, 2.6377646484375, 2.639326171875, 2.63863916015625, 2.636462158203125, 2.637134765625, 2.6374892578125, 2.637487060546875, 2.63678466796875, 2.63695361328125, 2.636927978515625, 2.636856201171875, 2.63660546875, 2.637761474609375, 2.637675537109375, 2.6367119140625, 2.637622314453125, 2.6384404296875, 2.637370361328125, 2.63602880859375, 2.636900390625]",tokens/s,0.37339678057090686,,,main,False,False,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, 
worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9ab-3155b8f1179cb3026a9ec5ec;4c4213ae-cbb7-4a21-ad9d-a28df34cd1dd) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in 
hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaaab-2ed688ac64f8d5a558c8c046;1a301e85-3907-41e9-9cc1-e2dd23e63ab9) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent 
call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4461.907968,24111.480832,0.0,23465.033728,21690.932224,s,10,26.134937744140625,2.6134937744140623,0.002110944060506743,2.6131138916015626,2.6165289794921875,2.616893395996094,2.617184929199219,"[2.61349365234375, 2.615531005859375, 2.611662353515625, 2.612973388671875, 2.612295654296875, 2.611193115234375, 2.61325439453125, 2.616447998046875, 2.6172578125, 2.610828369140625]",tokens/s,97.95317000798843,kWh,3.0828719950384564e-05,1.6895240036155882e-05,0.0001496905919745939,0.00019741455196113433,tokens/kWh,1296763.5741989252,MB,4461.907968,24111.480832,0.0,23465.033728,21890.217984,s,10,1550.2514375,155.02514375,0.01758597574684522,155.027953125,155.04441406249998,155.04866796875,155.05207109375002,"[155.040484375, 155.052921875, 155.004375, 155.03190625, 155.025, 155.03090625, 155.00465625, 154.996828125, 155.020890625, 
155.04346875]",tokens/s,0.4063856899342497,kWh,0.0018302277258535225,0.001003126846851137,0.008909165071770808,0.01174251964447547,tokens/kWh,5365.117701092351,,s,629,1571.3079760742191,2.498104890420062,0.3103664945461323,2.460632080078125,2.46187294921875,2.4623572265625002,5.0725159374999995,"[2.460918701171875, 2.46082666015625, 2.461980712890625, 2.4610048828125, 2.460906494140625, 2.46040576171875, 2.46205126953125, 2.4613447265625, 2.461484130859375, 2.4613662109375, 2.461151123046875, 2.461115478515625, 2.460440673828125, 2.460030029296875, 2.461190185546875, 2.460876708984375, 2.460451904296875, 2.460170166015625, 2.463140869140625, 2.46112060546875, 2.46163671875, 2.46084814453125, 2.463774658203125, 2.461010986328125, 2.4610498046875, 2.46070263671875, 2.461833251953125, 2.460528564453125, 2.4617880859375, 2.46072021484375, 2.461929443359375, 2.4613662109375, 2.46120849609375, 2.46112060546875, 2.4614892578125, 2.459990966796875, 2.461517822265625, 2.4609638671875, 2.46169189453125, 2.460205078125, 2.460928955078125, 2.461189208984375, 2.459634765625, 2.45965625, 2.45981396484375, 2.459525146484375, 2.459683837890625, 2.46053369140625, 2.46042529296875, 2.459734130859375, 2.461192138671875, 2.46074072265625, 2.46057470703125, 2.46108154296875, 2.46078564453125, 2.46036474609375, 2.461035400390625, 2.4617646484375, 2.460655517578125, 2.461253662109375, 2.461116455078125, 2.46118505859375, 5.0779052734375, 2.461738037109375, 2.461781982421875, 2.461154296875, 2.463372314453125, 2.462037109375, 2.462613525390625, 2.462189453125, 2.46213427734375, 2.46221923828125, 2.462265380859375, 2.462834716796875, 2.461442138671875, 2.460442626953125, 2.460788818359375, 2.460884033203125, 2.46097509765625, 2.461346923828125, 2.46034130859375, 2.460333984375, 2.46076318359375, 2.461075439453125, 2.460369873046875, 2.459978759765625, 2.4613203125, 2.461739013671875, 2.460712890625, 2.461740966796875, 2.46230322265625, 2.462064697265625, 2.4611767578125, 2.4604365234375, 2.46226318359375, 2.46073046875, 2.461908935546875, 2.4604140625, 2.4613857421875, 2.460633056640625, 2.46066796875, 2.459797607421875, 2.460780517578125, 2.46072119140625, 2.46026025390625, 2.4598271484375, 2.46049169921875, 2.46009765625, 2.460492919921875, 2.46042529296875, 2.461200439453125, 2.461295654296875, 2.460458984375, 2.461582275390625, 2.46042822265625, 2.460832763671875, 2.461412353515625, 2.460978271484375, 2.460579833984375, 2.4606044921875, 2.460789794921875, 2.460420166015625, 2.4605654296875, 2.461767578125, 2.46121875, 5.0727412109375, 2.4607958984375, 2.46120751953125, 2.4612392578125, 2.460706787109375, 2.46135595703125, 2.46187109375, 2.461371337890625, 2.4605634765625, 2.46158642578125, 2.46133251953125, 2.46209326171875, 2.46131103515625, 2.461284423828125, 2.46093212890625, 2.46143798828125, 2.460158935546875, 2.460937255859375, 2.459987060546875, 2.4602919921875, 2.460261474609375, 2.460375, 2.460521484375, 2.461273193359375, 2.46073046875, 2.46131396484375, 2.460286865234375, 2.46070263671875, 2.459401123046875, 2.460560302734375, 2.460210205078125, 2.4605859375, 2.459334716796875, 2.46042822265625, 2.460444580078125, 2.460652587890625, 2.4594482421875, 2.459885498046875, 2.459210693359375, 2.459854736328125, 2.45916162109375, 2.459714599609375, 2.45982421875, 2.45997265625, 2.460665771484375, 2.461526123046875, 2.459255859375, 2.4603525390625, 2.460240966796875, 2.460114990234375, 2.459881591796875, 2.46009765625, 2.459779052734375, 2.45985791015625, 2.459809814453125, 2.459896728515625, 
2.459599853515625, 2.459850830078125, 2.459740234375, 2.459671630859375, 2.459470947265625, 2.45933984375, 2.4605419921875, 5.07296044921875, 2.459979736328125, 2.460074951171875, 2.460875732421875, 2.46111328125, 2.461024169921875, 2.4617041015625, 2.45998583984375, 2.461939697265625, 2.4617861328125, 2.46111328125, 2.460988525390625, 2.460284912109375, 2.460103759765625, 2.4607939453125, 2.461890625, 2.46074072265625, 2.4606279296875, 2.46205224609375, 2.46086865234375, 2.461393798828125, 2.461231201171875, 2.462035888671875, 2.4613251953125, 2.462630859375, 2.460718017578125, 2.46099853515625, 2.460675048828125, 2.4612158203125, 2.460883056640625, 2.461149169921875, 2.461590576171875, 2.4607939453125, 2.460654541015625, 2.460872802734375, 2.461231201171875, 2.46080712890625, 2.461241455078125, 2.46087158203125, 2.459632568359375, 2.460600341796875, 2.460632080078125, 2.46099365234375, 2.460303466796875, 2.46082568359375, 2.462000244140625, 2.461948974609375, 2.4596806640625, 2.460571533203125, 2.460125244140625, 2.46019677734375, 2.45987841796875, 2.46080810546875, 2.460212158203125, 2.460813232421875, 2.45957421875, 2.46019384765625, 2.460409912109375, 2.459958251953125, 2.45973095703125, 2.460412841796875, 2.46169384765625, 2.46048046875, 5.07272607421875, 2.461116455078125, 2.460166259765625, 2.461013916015625, 2.460505126953125, 2.46078662109375, 2.461305908203125, 2.4609658203125, 2.460851318359375, 2.461484130859375, 2.4605869140625, 2.4610498046875, 2.4601630859375, 2.46051318359375, 2.4606064453125, 2.461643798828125, 2.461190185546875, 2.461765625, 2.46075390625, 2.4614091796875, 2.460251220703125, 2.4604609375, 2.460229736328125, 2.460904541015625, 2.46035546875, 2.460232666015625, 2.46175439453125, 2.462803955078125, 2.4607958984375, 2.461580322265625, 2.461107177734375, 2.461393798828125, 2.460929931640625, 2.460676025390625, 2.461404052734375, 2.460982177734375, 2.460367919921875, 2.4600791015625, 2.4596552734375, 2.460010498046875, 2.45901318359375, 2.4592802734375, 2.46055224609375, 2.460517333984375, 2.459428955078125, 2.46019384765625, 2.460880859375, 2.4609208984375, 2.459928466796875, 2.46034423828125, 2.460252197265625, 2.460788818359375, 2.460273681640625, 2.45994287109375, 2.460907470703125, 2.4596796875, 2.460264404296875, 2.460949462890625, 2.460282958984375, 2.4613427734375, 2.462763916015625, 2.461613037109375, 2.460632080078125, 5.0719755859375, 2.462064697265625, 2.46147998046875, 2.46153515625, 2.461073486328125, 2.460905517578125, 2.461240234375, 2.460853271484375, 2.461421630859375, 2.462041015625, 2.46237890625, 2.46096484375, 2.461697998046875, 2.4613037109375, 2.461698974609375, 2.460219482421875, 2.462074951171875, 2.460283935546875, 2.459872314453125, 2.46007080078125, 2.45981689453125, 2.460726318359375, 2.459724853515625, 2.45985888671875, 2.460906494140625, 2.46042822265625, 2.46017626953125, 2.460051513671875, 2.46018359375, 2.46103759765625, 2.460602294921875, 2.46068212890625, 2.461318115234375, 2.460810302734375, 2.4603740234375, 2.46188037109375, 2.462437255859375, 2.460948486328125, 2.460538818359375, 2.460599365234375, 2.461042724609375, 2.46105712890625, 2.45985595703125, 2.46139697265625, 2.46086865234375, 2.460706787109375, 2.46046826171875, 2.4603740234375, 2.461024169921875, 2.4599951171875, 2.460008544921875, 2.460411865234375, 2.4604580078125, 2.460127197265625, 2.4596591796875, 2.460818359375, 2.461365234375, 2.460373046875, 2.46135302734375, 2.461212646484375, 2.4607734375, 2.460324951171875, 2.46021728515625, 5.07445458984375, 
2.4605234375, 2.461729736328125, 2.461869140625, 2.461232177734375, 2.461845458984375, 2.463498291015625, 2.460968994140625, 2.46013232421875, 2.460211181640625, 2.45943603515625, 2.460124267578125, 2.46013037109375, 2.460103759765625, 2.461614013671875, 2.46322900390625, 2.461684814453125, 2.4606064453125, 2.4600556640625, 2.45979345703125, 2.459621337890625, 2.460230712890625, 2.459693115234375, 2.460508056640625, 2.460240966796875, 2.460556396484375, 2.459707275390625, 2.459610107421875, 2.459715576171875, 2.460527587890625, 2.459706298828125, 2.45965625, 2.45981591796875, 2.45966845703125, 2.4598037109375, 2.459366455078125, 2.4594267578125, 2.45954052734375, 2.46019580078125, 2.459505615234375, 2.4598701171875, 2.4605830078125, 2.46051953125, 2.4611103515625, 2.460297119140625, 2.45998388671875, 2.459361328125, 2.460695556640625, 2.46107958984375, 2.46091162109375, 2.45956494140625, 2.460255126953125, 2.460651611328125, 2.460200927734375, 2.45922314453125, 2.460324951171875, 2.460303466796875, 2.46060546875, 2.460147705078125, 2.459874267578125, 2.459558837890625, 2.460209228515625, 2.46225927734375, 5.07727880859375, 2.46051953125, 2.459748291015625, 2.460139404296875, 2.459675537109375, 2.460464111328125, 2.45935400390625, 2.460265380859375, 2.460979248046875, 2.46025927734375, 2.460180419921875, 2.45945654296875, 2.45956396484375, 2.4599091796875, 2.459864990234375, 2.459249755859375, 2.459658203125, 2.45960693359375, 2.4602666015625, 2.463476806640625, 2.46247314453125, 2.461254638671875, 2.4606064453125, 2.46013134765625, 2.4603095703125, 2.46048046875, 2.460453857421875, 2.460747802734375, 2.4610087890625, 2.45964501953125, 2.460734375, 2.460478515625, 2.460600341796875, 2.460285888671875, 2.460894287109375, 2.460316650390625, 2.460169189453125, 2.460146728515625, 2.459640869140625, 2.459826171875, 2.459534423828125, 2.46048876953125, 2.459629638671875, 2.459568115234375, 2.459454345703125, 2.46009765625, 2.46054296875, 2.46160693359375, 2.459303955078125, 2.459570068359375, 2.460251220703125, 2.459474853515625, 2.45965625, 2.460379150390625, 2.45972900390625, 2.459229248046875, 2.459660400390625, 2.45932958984375, 2.464058349609375, 2.461569091796875, 2.45967041015625, 2.459884521484375, 2.460506103515625, 5.07673388671875, 2.459622314453125, 2.462644287109375, 2.4620205078125, 2.461388916015625, 2.460607421875, 2.461044677734375, 2.461013916015625, 2.461294677734375, 2.460347412109375, 2.46035546875, 2.4605205078125, 2.460373046875, 2.459558837890625, 2.46158740234375, 2.462236572265625, 2.462750732421875, 2.46039453125, 2.459875244140625, 2.460180419921875, 2.459242431640625, 2.461361083984375, 2.461865966796875, 2.4626064453125, 2.46232470703125, 2.462096435546875, 2.460041259765625, 2.4603125, 2.460031005859375, 2.460012451171875, 2.460483642578125, 2.46249365234375, 2.460971923828125, 2.460064697265625, 2.460525634765625, 2.4618515625, 2.461947998046875, 2.46048046875, 2.460482666015625, 2.459716552734375, 2.46003515625, 2.45914111328125, 2.459892822265625, 2.4607099609375, 2.461216796875, 2.45960693359375, 2.459496337890625, 2.459989013671875, 2.46011181640625, 2.461148193359375, 2.459788330078125, 2.459845703125, 2.45964501953125, 2.45973291015625, 2.459387939453125, 2.460958740234375, 2.4599326171875, 2.459988037109375, 2.459134033203125, 2.4599716796875, 2.459010009765625, 2.4597216796875, 2.466093017578125, 5.07026025390625, 2.459715576171875, 2.459428955078125, 2.460008544921875, 2.45975244140625, 2.460271728515625, 2.4604365234375, 2.46042724609375, 
2.46135595703125, 2.460927001953125, 2.460580810546875, 2.46221826171875, 2.461213623046875, 2.461199462890625, 2.460501953125, 2.461664306640625, 2.46034423828125, 2.460695556640625, 2.460241943359375, 2.46086865234375, 2.4613837890625, 2.4614892578125, 2.460771240234375, 2.461020263671875, 2.462573486328125, 2.461728759765625, 2.461054931640625, 2.46047021484375, 2.46080712890625, 2.46276806640625, 2.46089306640625, 2.461909912109375, 2.46152392578125, 2.4612607421875, 2.460707763671875, 2.461158447265625, 2.46102734375, 2.46084716796875, 2.460749755859375, 2.460251220703125, 2.460735595703125, 2.46051220703125, 2.461053955078125, 2.4607548828125, 2.46175244140625, 2.460388427734375, 2.46027880859375, 2.4614501953125, 2.461758544921875, 2.461529052734375, 2.460019775390625, 2.4599306640625, 2.460873779296875, 2.460814453125, 2.460541015625, 2.460760009765625, 2.46168994140625, 2.460673095703125, 2.46126806640625, 2.460421142578125, 2.462738525390625, 2.46156689453125, 2.461497314453125]",tokens/s,0.4003034475593408,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention 
implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2452.258816,7298.613248,0.0,6652.166144,6323.221504,s,10,7.6798339843750005,0.7679833984375,0.0025974055850476797,0.7679208068847656,0.7711121887207032,0.7721169158935547,0.772920697631836,"[0.7685753173828125, 0.7731216430664063, 0.7647974853515624, 0.7675167846679688, 0.76627783203125, 0.7641251220703125, 0.7666953735351563, 0.7683248291015625, 0.770888916015625, 0.7695106811523438]",tokens/s,333.34053902837564,kWh,9.039419593792114e-06,4.95321018510627e-06,4.32610068310048e-05,5.725363660990319e-05,tokens/kWh,4471331.694513176,MB,2452.258816,7298.613248,0.0,6652.166144,6382.564864,s,10,455.74574609375,45.57457460937499,0.015546779743152406,45.57488671875,45.595179296874996,45.5973923828125,45.5991628515625,"[45.5867109375, 45.5946875, 45.59960546875, 45.58337109375, 45.55990625, 45.5529375, 45.558640625, 45.56011328125, 45.5776015625, 45.572171875]",tokens/s,1.3823497101175461,kWh,0.000537889561981909,0.0002948106017116671,0.0025152069288305857,0.0033479070925241613,tokens/kWh,18817.726495660016,,s,629,461.93301361083974,0.7343927084433065,0.09125959398126374,0.7233239135742188,0.7243169799804687,0.7247333496093751,1.4901356689453125,"[0.7238604736328125, 0.7241830444335937, 0.7225497436523437, 0.7234365234375, 0.7223142700195313, 0.7223203735351562, 0.7245137939453125, 0.7236771850585938, 0.7226992797851562, 0.723535888671875, 0.7234600830078125, 0.7246929931640625, 0.7229706420898437, 0.7237723999023438, 0.7233392944335938, 0.7235133666992187, 0.7225231323242187, 0.7234631958007812, 0.723367919921875, 0.7226572875976562, 0.7243929443359375, 0.7232921752929687, 0.7236546630859375, 0.7240488891601562, 0.7235286865234375, 0.7245383911132812, 0.7233167114257812, 0.7230576782226562, 0.7237959594726563, 0.7243140869140625, 0.722629638671875, 0.7226122436523438, 0.7224381713867187, 0.7233873901367187, 0.7249028930664062, 0.7240294189453125, 0.723904541015625, 0.7241912231445312, 0.7243069458007813, 0.7245864868164062, 0.7234006958007813, 0.723114990234375, 0.7231324462890625, 0.7226593017578125, 0.7241062622070312, 0.7237662963867187, 0.7239597778320312, 0.724094970703125, 0.7240611572265625, 0.7237017822265625, 0.7238123779296874, 0.72437451171875, 0.7245363159179687, 0.7248076782226562, 0.7224637451171875, 0.7232000122070312, 0.7239700317382812, 0.7240325317382813, 0.7229603881835938, 
0.7228866577148437, 0.7229910888671875, 0.7245496215820313, 1.497416748046875, 0.7238062133789063, 0.7237662963867187, 0.7238911743164063, 0.7240550537109375, 0.7237857055664062, 0.7238358764648437, 0.7228784790039062, 0.72378369140625, 0.7233925170898438, 0.7229235229492188, 0.722534423828125, 0.7236341552734376, 0.723399658203125, 0.7241830444335937, 0.7228897094726563, 0.7232071533203125, 0.7245209350585937, 0.7240325317382813, 0.7224688720703125, 0.7228262329101562, 0.7225323486328125, 0.7236700439453125, 0.7239026489257813, 0.72288037109375, 0.7225128784179687, 0.7245783081054687, 0.7235983276367187, 0.7235532836914063, 0.725017578125, 0.724094970703125, 0.7250708618164062, 0.7244400634765625, 0.7243571166992188, 0.7240745239257812, 0.7232041015625, 0.7236218872070312, 0.7237877807617188, 0.7239588012695313, 0.7241809692382812, 0.7240929565429688, 0.7230279541015625, 0.7229634399414062, 0.7229419555664063, 0.72318359375, 0.7240601806640625, 0.7232184448242187, 0.7226746826171875, 0.7235880737304687, 0.724453369140625, 0.7243407592773438, 0.7238184814453125, 0.7240242919921875, 0.7247308959960937, 0.7247349853515626, 0.7235543212890625, 0.7233402709960938, 0.7242977294921875, 0.7243202514648438, 0.723862548828125, 0.724084716796875, 0.723768310546875, 0.7227473754882813, 1.490913330078125, 0.7229655151367187, 0.7226480712890625, 0.7240775756835938, 0.7235686645507813, 0.7231815795898437, 0.72302490234375, 0.7227381591796875, 0.7236761474609374, 0.724653076171875, 0.724200439453125, 0.7239618530273437, 0.7246776123046875, 0.7250585327148438, 0.7243140869140625, 0.7238225708007813, 0.7248977661132813, 0.725359619140625, 0.7238379516601563, 0.7237171020507812, 0.724158447265625, 0.7239188232421875, 0.7248445434570312, 0.7251189575195313, 0.7247544555664063, 0.7250022583007812, 0.7248936767578125, 0.7236198120117188, 0.7234979858398437, 0.723462158203125, 0.7234559936523437, 0.724263916015625, 0.7227955322265625, 0.7225702514648438, 0.7241932983398438, 0.7234805908203125, 0.7234949340820312, 0.72275146484375, 0.7225385131835937, 0.7228804931640626, 0.7242403564453125, 0.7236218872070312, 0.7236986694335937, 0.723472412109375, 0.724126708984375, 0.7239505615234375, 0.7238901977539063, 0.7237283935546875, 0.7242025146484375, 0.724116455078125, 0.7241031494140625, 0.72374169921875, 0.7228671875, 0.7233228759765625, 0.7237857055664062, 0.7240171508789063, 0.72399462890625, 0.7229276123046875, 0.7241410522460937, 0.7236751098632812, 0.7226521606445313, 0.7233648681640625, 0.7235952758789063, 1.49184814453125, 0.7231047973632813, 0.7224780883789063, 0.7224258422851563, 0.72266650390625, 0.7234283447265625, 0.72353076171875, 0.7227269287109375, 0.7243991088867188, 0.7234119873046875, 0.72285595703125, 0.7229951782226562, 0.7234692993164062, 0.7242373046875, 0.7239915771484375, 0.7242465209960938, 0.7243253784179687, 0.7225200805664063, 0.7226972045898438, 0.7227289428710938, 0.7236956176757813, 0.7238881225585938, 0.7240274047851563, 0.7241492309570312, 0.7236741333007812, 0.7231661987304687, 0.723114990234375, 0.7243571166992188, 0.7238656005859375, 0.723240966796875, 0.7226122436523438, 0.7238615112304687, 0.7242168579101562, 0.7235440673828125, 0.7236239624023437, 0.723926025390625, 0.7243919067382812, 0.7229173583984375, 0.7226542358398438, 0.7227238159179687, 0.7229859619140625, 0.7234990234375, 0.7239567260742188, 0.7231416015625, 0.7239454956054687, 0.7230084838867188, 0.7233607788085937, 0.7238359375, 0.7240099487304688, 0.7241431274414063, 0.7246141357421875, 0.7241359252929688, 
0.7245772705078125, 0.7245137939453125, 0.725411865234375, 0.723578857421875, 0.7241543579101563, 0.7230054321289062, 0.7233526000976562, 0.7226992797851562, 0.7232420043945312, 0.7230075073242187, 0.7230187377929688, 1.490323486328125, 0.7229685668945313, 0.723535888671875, 0.72338330078125, 0.723409912109375, 0.7233341674804687, 0.7232112426757813, 0.72279345703125, 0.7230853271484375, 0.7230105590820313, 0.7226060791015625, 0.723346435546875, 0.7229296875, 0.72382568359375, 0.7241666259765625, 0.72303515625, 0.7230812377929687, 0.7226583251953125, 0.7230535888671875, 0.7229890747070312, 0.7228118896484375, 0.7222753295898438, 0.7228641357421876, 0.722740234375, 0.72254052734375, 0.7228671875, 0.7227391967773438, 0.7233505249023438, 0.722619384765625, 0.72300439453125, 0.7234150390625, 0.7230556030273437, 0.7235297241210937, 0.722819091796875, 0.723610595703125, 0.7228836059570313, 0.7235348510742188, 0.7226911010742187, 0.7227914428710938, 0.7233935546875, 0.7231590576171875, 0.7238717651367188, 0.72331982421875, 0.7227811889648438, 0.7233136596679688, 0.7229542236328125, 0.7231754150390625, 0.7233607788085937, 0.7233925170898438, 0.7229081420898438, 0.7231047973632813, 0.7224483642578124, 0.723252197265625, 0.722640869140625, 0.7238778686523437, 0.7232604370117187, 0.7231641845703125, 0.7244615478515625, 0.7236874389648438, 0.72321533203125, 0.7241400146484375, 0.7235686645507813, 0.7226695556640625, 1.48693603515625, 0.7224832153320313, 0.7234774780273437, 0.7231426391601562, 0.724173828125, 0.7240755004882813, 0.7229542236328125, 0.72241357421875, 0.7228876953125, 0.7233106079101562, 0.7236618041992188, 0.7231641845703125, 0.7225897216796875, 0.7224903564453125, 0.7225599975585938, 0.7225702514648438, 0.7227361450195312, 0.7226112060546875, 0.7231498413085937, 0.723304443359375, 0.722703369140625, 0.7227955322265625, 0.7233474731445313, 0.7234703369140625, 0.7267368774414062, 0.7227781982421875, 0.7225128173828125, 0.7224033203125, 0.7225589599609376, 0.7226634521484375, 0.7223306274414063, 0.7225364990234375, 0.7230811767578125, 0.72255078125, 0.7227003173828125, 0.7224832153320313, 0.7223961791992187, 0.7230310668945312, 0.7257907104492187, 0.7230904541015625, 0.7228538818359375, 0.7229788208007812, 0.7225180053710938, 0.7223818359375, 0.7229481201171875, 0.7229378662109375, 0.72367822265625, 0.7230084838867188, 0.7230146484375, 0.7226326904296875, 0.7231416015625, 0.7237335205078125, 0.7241860961914063, 0.7245127563476562, 0.722572265625, 0.7229020385742188, 0.72300341796875, 0.72266650390625, 0.7228159790039063, 0.7224207153320312, 0.7239270629882812, 0.72285693359375, 0.722966552734375, 1.4896527099609376, 0.7232348022460937, 0.7239567260742188, 0.7227739868164063, 0.723078125, 0.7226429443359375, 0.7226798095703125, 0.7228703002929687, 0.7228917846679688, 0.7229112548828125, 0.722935791015625, 0.7229030151367187, 0.7226050415039063, 0.7226798095703125, 0.7230105590820313, 0.7234058227539063, 0.7233239135742188, 0.7225077514648437, 0.7225938110351563, 0.7227412719726563, 0.7225303344726562, 0.72310888671875, 0.7248046264648438, 0.7248180541992187, 0.723907470703125, 0.72346826171875, 0.7232081909179687, 0.7231743774414062, 0.7237877807617188, 0.7234160766601563, 0.7235860595703125, 0.7227647705078125, 0.7225620727539063, 0.7228334350585938, 0.7226992797851562, 0.7227996215820313, 0.7227381591796875, 0.7233106079101562, 0.723267578125, 0.7230556030273437, 0.7226695556640625, 0.723061767578125, 0.7235317993164062, 0.7233689575195312, 0.7238010864257812, 0.7234078979492188, 
0.7232849731445312, 0.7231876831054688, 0.7232327880859375, 0.7230422973632813, 0.7230341186523438, 0.7235563354492187, 0.7233157348632813, 0.7235266723632813, 0.723040283203125, 0.7234692993164062, 0.7238154296875, 0.7239547119140625, 0.7235758056640625, 0.7227924194335937, 0.7224013061523438, 0.7225753784179687, 0.7224832153320313, 1.4909122314453125, 0.7237437744140625, 0.7234396362304687, 0.7226849365234375, 0.7229102172851563, 0.7227125854492188, 0.7236638793945313, 0.7237918701171875, 0.722882568359375, 0.7225845947265624, 0.7231959228515625, 0.7231826171875, 0.722893798828125, 0.7226316528320312, 0.7233382568359376, 0.7234171142578125, 0.7229818725585937, 0.722428955078125, 0.7228836059570313, 0.722682861328125, 0.7232973022460938, 0.7227125854492188, 0.7229685668945313, 0.723040283203125, 0.7242997436523437, 0.7234037475585937, 0.7232604370117187, 0.7234345092773438, 0.72426904296875, 0.7237816162109375, 0.7232747802734375, 0.7224575805664063, 0.7224043579101562, 0.7228845825195312, 0.7226439819335938, 0.7229317016601563, 0.722651123046875, 0.7223142700195313, 0.7236024169921875, 0.7237283935546875, 0.7242454833984375, 0.72339453125, 0.7232337646484375, 0.7232767944335937, 0.7239905395507813, 0.7230320434570312, 0.7228037109375, 0.7227996215820313, 0.722904052734375, 0.7226705932617188, 0.7241994018554687, 0.7242670288085937, 0.7234703369140625, 0.723314697265625, 0.7229102172851563, 0.722967529296875, 0.7240089721679688, 0.723979248046875, 0.7236167602539062, 0.7229450073242187, 0.7226654663085937, 0.7226122436523438, 0.722820068359375, 1.495736328125, 0.724738037109375, 0.7240570678710937, 0.72363623046875, 0.72394140625, 0.7237017822265625, 0.7253534545898438, 0.7251988525390625, 0.7241973876953125, 0.7238829956054688, 0.7238615112304687, 0.7239464721679687, 0.7240714111328125, 0.7241809692382812, 0.7237929077148437, 0.72289892578125, 0.7232706298828125, 0.722914306640625, 0.7235338134765625, 0.7231948852539063, 0.7239393310546876, 0.7230525512695313, 0.7231815795898437, 0.7229603881835938, 0.7230853271484375, 0.72344677734375, 0.7233054809570313, 0.72283544921875, 0.7226583251953125, 0.7230003051757813, 0.72287744140625, 0.722608154296875, 0.7231324462890625, 0.7233526000976562, 0.7234529418945312, 0.72300439453125, 0.7229020385742188, 0.7233484497070313, 0.723610595703125, 0.7233925170898438, 0.72308837890625, 0.7229317016601563, 0.7229389038085937, 0.7231682739257812, 0.7229910888671875, 0.7232286987304688, 0.72271875, 0.723019775390625, 0.723493896484375, 0.72239208984375, 0.722845703125, 0.7230084838867188, 0.7235686645507813, 0.7236188354492188, 0.7237345581054687, 0.72363623046875, 0.7234805908203125, 0.7234385986328125, 0.7238164672851563, 0.724041748046875, 0.723356689453125, 0.723030029296875, 0.7237140502929688, 1.4925987548828126, 0.7230996704101562, 0.72384716796875, 0.7239147338867188, 0.7256944580078125, 0.7247103881835938, 0.7244021606445312, 0.7243776245117187, 0.724316162109375, 0.7242546997070313, 0.7244994506835938, 0.724832275390625, 0.7239239501953125, 0.723041259765625, 0.7240745239257812, 0.7239096069335937, 0.723515380859375, 0.7226603393554687, 0.7232348022460937, 0.7224627075195312, 0.7226132202148438, 0.7228047485351563, 0.72265625, 0.7226470947265625, 0.7226756591796875, 0.7231713256835938, 0.72317236328125, 0.722787353515625, 0.7223971557617187, 0.7228057861328125, 0.7233065185546875, 0.7235245971679688, 0.7238748168945313, 0.72374169921875, 0.7234816284179687, 0.7235297241210937, 0.7231426391601562, 0.7237867431640626, 0.7235635375976562, 
0.7232808837890625, 0.72345703125, 0.7230924682617188, 0.72296142578125, 0.7232747802734375, 0.7239485473632813, 0.724284423828125, 0.722783203125, 0.7224873046875, 0.7223971557617187, 0.7222200927734375, 0.7224954833984375, 0.7242076416015625, 0.722787353515625, 0.7238604736328125, 0.7227545776367188, 0.7224268798828125, 0.7232020263671874, 0.7240253295898438, 0.722998291015625, 0.7229900512695312, 0.72289892578125, 0.7227340698242187, 0.7230996704101562]",tokens/s,1.36166929287697,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1571.749888,5448.925184,0.0,4802.47808,4489.12128,s,10,5.0903373718261715,0.5090337371826171,0.0024963690031113454,0.5090882415771485,0.5114007537841796,0.5133009628295898,0.514821130065918,"[0.515201171875, 0.5058653564453125, 0.5075380249023438, 0.5109784851074218, 0.5074734191894531, 0.5066469421386719, 0.5090344543457032, 0.5092752685546875, 0.5091822204589844, 0.5091420288085937]",tokens/s,502.91362104386286,kWh,5.98198832737075e-06,3.277856253707796e-06,2.697877158300277e-05,3.623861616408132e-05,tokens/kWh,7064287.412104325,MB,1571.749888,5448.925184,0.0,4802.47808,4557.793792,s,10,299.8803828125,29.98803828125,0.03808075694133287,29.9707392578125,30.046216015625,30.058939062500002,30.0691175,"[29.972552734375, 30.043388671875, 30.071662109375, 30.010330078125, 29.96, 29.95865625, 29.96925, 29.972228515625, 29.963451171875, 29.95886328125]",tokens/s,2.1008376543053404,kWh,0.00035374844759702685,0.00019388459414373753,0.001556149911585597,0.0021037829533263617,tokens/kWh,29946.05498651303,,s,629,303.97862612915014,0.48327285553124066,0.06045359810307915,0.47556915283203127,0.47775867309570313,0.47801220703124997,0.9833881201171876,"[0.4750715026855469, 0.4762265625, 0.4773417053222656, 0.47625729370117187, 0.47571148681640624, 0.47557223510742186, 0.47629312133789065, 0.47471615600585937, 0.475789306640625, 0.47461376953125, 0.4746967163085937, 0.474913818359375, 0.47592657470703126, 0.47551071166992187, 0.47595623779296875, 0.47574118041992186, 0.4750817260742187, 0.47586611938476564, 0.4771154479980469, 0.47594082641601565, 0.4757462768554688, 0.47569818115234375, 0.4764293212890625, 0.475978759765625, 0.47632794189453126, 0.47545343017578123, 0.47498138427734377, 0.47522305297851564, 0.47550360107421874, 0.47533575439453124, 0.4750652770996094, 0.4751790161132812, 0.47525888061523436, 0.4754544677734375, 0.4752803955078125, 0.4756971435546875, 0.4763504638671875, 0.4758845520019531, 0.47809332275390626, 0.475462646484375, 0.47741644287109375, 0.47680307006835937, 0.47630642700195314, 0.47653070068359377, 0.47590911865234375, 0.4751380615234375, 0.47556710815429687, 0.475146240234375, 0.47564901733398435, 0.4757647399902344, 0.47530291748046877, 0.4750807189941406, 0.47518719482421873, 0.47629925537109374, 0.4754646911621094, 0.4754155578613281, 0.4753121337890625, 0.4750469055175781, 0.47505612182617185, 0.4759122009277344, 0.47506842041015623, 0.4758763427734375, 0.9829324951171875, 0.4772618103027344, 0.47679385375976563, 0.4752302551269531, 0.47515850830078127, 0.4750141296386719, 0.4753879089355469, 0.47504486083984376, 0.47488204956054686, 0.47477862548828126, 0.4748114013671875, 0.47600946044921877, 0.4750940246582031, 0.4750837707519531, 0.4750745544433594, 0.4750592041015625, 0.47484109497070315, 
0.4761292724609375, 0.4751933288574219, 0.4750182495117187, 0.475357177734375, 0.47510528564453125, 0.4762142639160156, 0.47762432861328125, 0.4771512451171875, 0.478097412109375, 0.47743179321289064, 0.4781097106933594, 0.4774246520996094, 0.47769497680664064, 0.4778270568847656, 0.4778741760253906, 0.47795404052734375, 0.47758233642578124, 0.47754238891601564, 0.47758438110351564, 0.4775854187011719, 0.4776581115722656, 0.4781363220214844, 0.47784140014648435, 0.4785827941894531, 0.4778670043945312, 0.478013427734375, 0.4813271179199219, 0.4787435607910156, 0.4778823547363281, 0.4775577697753906, 0.47766937255859376, 0.4776509399414062, 0.47802163696289063, 0.4775925903320313, 0.4771829833984375, 0.47707955932617185, 0.4779130859375, 0.47752191162109375, 0.4774225769042969, 0.47764581298828124, 0.47809228515625, 0.47725054931640626, 0.47956683349609375, 0.47532647705078124, 0.47634738159179685, 0.4757237854003906, 0.9846497192382813, 0.4779151306152344, 0.47775845336914063, 0.4778516540527344, 0.4773468017578125, 0.47776461791992186, 0.47770932006835937, 0.4769669189453125, 0.4777676696777344, 0.4777635803222656, 0.47618765258789064, 0.4749752197265625, 0.4755538024902344, 0.4747980651855469, 0.4747796630859375, 0.4748114013671875, 0.47680514526367185, 0.4753622741699219, 0.47532440185546876, 0.4750438537597656, 0.4750325622558594, 0.47742669677734373, 0.4790394897460937, 0.47991500854492186, 0.4776028137207031, 0.4770672607421875, 0.47781991577148436, 0.4773304443359375, 0.4778946533203125, 0.4778496398925781, 0.4774788818359375, 0.4779632568359375, 0.47771136474609377, 0.47775640869140623, 0.47768267822265625, 0.47796121215820314, 0.4775454711914062, 0.4780103759765625, 0.4779438171386719, 0.4773253173828125, 0.4780001220703125, 0.47749530029296877, 0.478482421875, 0.4767160339355469, 0.47870156860351565, 0.47761203002929686, 0.47860427856445314, 0.47783221435546874, 0.4779366149902344, 0.4776212463378906, 0.4775301208496094, 0.477328369140625, 0.478376953125, 0.4772362365722656, 0.47751168823242185, 0.47775955200195314, 0.47782699584960936, 0.47732632446289064, 0.47806362915039063, 0.4772454528808594, 0.4775782775878906, 0.47722698974609373, 0.477765625, 0.9892290649414063, 0.47758950805664063, 0.47766937255859376, 0.47711026000976564, 0.4785479736328125, 0.4779674072265625, 0.47848751831054687, 0.4773918762207031, 0.47714407348632815, 0.475968505859375, 0.47733248901367187, 0.4777830505371094, 0.4774912109375, 0.4774410095214844, 0.4776990661621094, 0.47711026000976564, 0.47647540283203127, 0.4762900390625, 0.47571044921875, 0.47531622314453126, 0.47745126342773436, 0.4778526611328125, 0.4754595947265625, 0.4753295593261719, 0.47519024658203124, 0.47728536987304687, 0.4776847229003906, 0.47796734619140624, 0.47679693603515627, 0.4769525756835937, 0.47623678588867185, 0.47598489379882813, 0.47588760375976563, 0.47573504638671876, 0.4752127990722656, 0.47496600341796874, 0.4763709411621094, 0.47549542236328124, 0.4751493225097656, 0.4747591552734375, 0.47531109619140627, 0.47625933837890627, 0.47604122924804687, 0.4752691345214844, 0.478866455078125, 0.4751441650390625, 0.4752322692871094, 0.47499981689453125, 0.47643954467773436, 0.4755630187988281, 0.4757166137695312, 0.47565005493164064, 0.476015625, 0.4754155578613281, 0.4754124755859375, 0.47525070190429686, 0.47504281616210936, 0.4759347229003906, 0.47554763793945315, 0.47518923950195313, 0.47525274658203126, 0.475404296875, 0.47679489135742187, 0.9835653076171875, 0.4750100708007812, 0.47497518920898435, 0.47521792602539065, 
0.47529168701171876, 0.47549435424804687, 0.47515341186523435, 0.47609548950195313, 0.47539712524414063, 0.47541351318359376, 0.47511859130859374, 0.47484622192382814, 0.47537664794921874, 0.475104248046875, 0.47634738159179685, 0.4752896118164063, 0.47548724365234374, 0.4761077880859375, 0.4751452026367188, 0.4771768188476562, 0.4755333251953125, 0.4755230712890625, 0.4750796813964844, 0.4751923217773438, 0.4752496643066406, 0.4748308410644531, 0.47501516723632814, 0.47498751831054686, 0.4759531555175781, 0.4754883117675781, 0.47538067626953123, 0.4753909912109375, 0.47539712524414063, 0.4753735656738281, 0.475494384765625, 0.47589376831054686, 0.4761006164550781, 0.47518206787109374, 0.4750602111816406, 0.47489434814453124, 0.47634228515625, 0.47604940795898437, 0.47567974853515627, 0.47519744873046876, 0.4751749267578125, 0.47551077270507813, 0.4754002685546875, 0.47505914306640623, 0.47493426513671877, 0.475788330078125, 0.4752465515136719, 0.47501516723632814, 0.47556915283203127, 0.4761507873535156, 0.47565823364257814, 0.47601458740234376, 0.47671194458007815, 0.478308349609375, 0.4755906677246094, 0.47542578125, 0.4754708557128906, 0.4771061706542969, 0.4754810791015625, 0.9827368774414063, 0.47548416137695315, 0.4752916564941406, 0.4750274658203125, 0.4750274658203125, 0.47477658081054686, 0.4755199890136719, 0.47510833740234376, 0.4752332763671875, 0.47511962890625, 0.4750837707519531, 0.47506329345703124, 0.4751718444824219, 0.477048828125, 0.4756756591796875, 0.475315185546875, 0.47508685302734377, 0.474925048828125, 0.476084228515625, 0.4754022521972656, 0.47531417846679686, 0.47490765380859373, 0.47505612182617185, 0.4752547912597656, 0.47493939208984376, 0.4751216735839844, 0.4752363586425781, 0.47553741455078125, 0.47550872802734373, 0.4752025451660156, 0.47503768920898437, 0.47521588134765624, 0.47512063598632814, 0.4751523742675781, 0.4767621154785156, 0.4764487609863281, 0.47532235717773436, 0.4752414855957031, 0.47862374877929686, 0.47638223266601565, 0.47562542724609375, 0.47544525146484373, 0.47627877807617186, 0.47506329345703124, 0.47538995361328124, 0.4754227294921875, 0.47551693725585936, 0.4754288635253906, 0.47596337890625, 0.4758896789550781, 0.4756899719238281, 0.4755988464355469, 0.4755916748046875, 0.4752066650390625, 0.4755640258789062, 0.47575653076171875, 0.47548416137695315, 0.4752998962402344, 0.4755332336425781, 0.4752363586425781, 0.4765552673339844, 0.4758056945800781, 0.47643853759765625, 0.9858508911132813, 0.4760975341796875, 0.4763627624511719, 0.47618048095703125, 0.47583026123046873, 0.4758814697265625, 0.47627365112304687, 0.47550360107421874, 0.4748226623535156, 0.4754380798339844, 0.4754176025390625, 0.47634841918945314, 0.47666278076171875, 0.4759449462890625, 0.47538177490234373, 0.475430908203125, 0.4754606018066406, 0.4765736999511719, 0.47562957763671876, 0.47619277954101563, 0.4762623901367187, 0.47586407470703124, 0.4763156433105469, 0.47800421142578126, 0.47563058471679687, 0.47566949462890623, 0.4764661865234375, 0.47568487548828126, 0.4763525085449219, 0.47546881103515626, 0.4754503784179688, 0.47535821533203126, 0.4751155090332031, 0.476685302734375, 0.4755333251953125, 0.4750540771484375, 0.4750325622558594, 0.4753387451171875, 0.47632281494140627, 0.47543603515625, 0.47514727783203126, 0.47536947631835935, 0.4750110778808594, 0.4750960693359375, 0.47505816650390625, 0.47526605224609375, 0.47514215087890627, 0.47597158813476564, 0.47550054931640623, 0.47509503173828127, 0.47519845581054687, 0.4751452026367188, 0.4753704833984375, 
0.474967041015625, 0.47536639404296877, 0.47516876220703125, 0.47490457153320315, 0.4751523742675781, 0.47530905151367187, 0.4764610595703125, 0.47634228515625, 0.47607601928710935, 0.4758845520019531, 0.9857208251953125, 0.4755548095703125, 0.47583026123046873, 0.4755302734375, 0.4761640625, 0.4756316223144531, 0.475904052734375, 0.4751758728027344, 0.47539813232421874, 0.475536376953125, 0.4755210266113281, 0.475514892578125, 0.4755937805175781, 0.47548818969726564, 0.47541656494140627, 0.47544216918945315, 0.477454345703125, 0.4756387939453125, 0.4758138732910156, 0.47603302001953124, 0.47580465698242186, 0.47644158935546876, 0.4751994934082031, 0.47532339477539065, 0.4750745544433594, 0.47546981811523437, 0.4754565124511719, 0.47503768920898437, 0.47527935791015624, 0.4750858154296875, 0.4753018798828125, 0.4747683715820312, 0.4749916076660156, 0.47602789306640625, 0.4755906677246094, 0.4751790161132812, 0.47570123291015626, 0.477876220703125, 0.477338623046875, 0.4760637512207031, 0.4767999877929687, 0.47610983276367186, 0.47703347778320315, 0.47675802612304685, 0.47620712280273436, 0.4752158508300781, 0.47609036254882814, 0.4771328125, 0.4762552185058594, 0.4760504455566406, 0.47554458618164064, 0.475283447265625, 0.47543603515625, 0.4751523742675781, 0.475335693359375, 0.4752138366699219, 0.47528140258789064, 0.475109375, 0.47681332397460935, 0.47562240600585937, 0.47532339477539065, 0.47533465576171874, 0.47510833740234376, 0.984848388671875, 0.4750960693359375, 0.47506024169921873, 0.4759930419921875, 0.47555789184570313, 0.4750254211425781, 0.47533978271484373, 0.4758394775390625, 0.475610107421875, 0.47590911865234375, 0.4757596130371094, 0.4766791687011719, 0.4766371765136719, 0.4757176208496094, 0.4764549255371094, 0.4773990478515625, 0.4758917236328125, 0.476790771484375, 0.47595416259765627, 0.4752404479980469, 0.47515545654296876, 0.475030517578125, 0.4752916564941406, 0.4751278076171875, 0.4755138549804688, 0.47549234008789065, 0.47554150390625, 0.4758425598144531, 0.47556607055664063, 0.4752015380859375, 0.47505612182617185, 0.4750540771484375, 0.47509811401367186, 0.4751697998046875, 0.4750817260742187, 0.4752066650390625, 0.4769587097167969, 0.4757596130371094, 0.47552410888671875, 0.4753387451171875, 0.4751247253417969, 0.47600946044921877, 0.47491787719726564, 0.47531417846679686, 0.47513394165039063, 0.4757739562988281, 0.4750469055175781, 0.475030517578125, 0.4758507385253906, 0.4755599365234375, 0.47600741577148437, 0.4752414855957031, 0.4751247253417969, 0.47610470581054687, 0.4753530883789063, 0.475404296875, 0.4752005004882813, 0.476626953125, 0.4756654052734375, 0.47670578002929687, 0.4761466979980469, 0.4752547912597656, 0.4752209777832031, 0.9850972290039063, 0.47500698852539064, 0.47586099243164065, 0.475536376953125, 0.4751933288574219, 0.4753070068359375, 0.47523434448242186, 0.475157470703125, 0.4753039245605469, 0.4752762756347656, 0.4760606689453125, 0.47565216064453125, 0.4750796203613281, 0.47518106079101563, 0.4763607177734375, 0.47558758544921875, 0.47543499755859375, 0.47558349609375, 0.4752384033203125, 0.47536639404296877, 0.47527423095703125, 0.47547494506835936, 0.4750335998535156, 0.4753049621582031, 0.47516571044921874, 0.4754155578613281, 0.4772812805175781, 0.4751769714355469, 0.4758814697265625, 0.4761343994140625, 0.4757074279785156, 0.4766033630371094, 0.47504281616210936, 0.47508480834960937, 0.4752629699707031, 0.47697714233398436, 0.4764979248046875, 0.4756357421875, 0.4753837890625, 0.4751473083496094, 0.475273193359375, 
0.475177978515625, 0.47527835083007813, 0.4753541259765625, 0.4753623046875, 0.4753930358886719, 0.4750796813964844, 0.47559576416015625, 0.47536334228515625, 0.4764241943359375, 0.47548724365234374, 0.47530087280273436, 0.47603302001953124, 0.4752906188964844, 0.47511962890625, 0.47501516723632814, 0.4768563232421875, 0.4758026123046875, 0.47588864135742187, 0.4753950805664062, 0.4752138366699219, 0.47535000610351563, 0.4750120849609375]",tokens/s,2.069224432025556,,,main,False,False,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 974, in __init__ self.model = InternLM2Model(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in __init__ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in <listcomp> self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 598, in __init__ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa67-3a456d5f69f2dc1d10604993;b290a931-c314-4c73-9a5d-14be5f736551) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa15-551341a36ec2255638579554;94b4bc4d-df28-42dc-a162-9a127ff0a232) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa530-0be33bce01a4e1e87987d70d;13bbd174-f909-4c06-84f7-58966286c385) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9b9-3140fca435e1f16f153b386d;3c6cdadf-785a-4bae-a17b-e298342c1b63) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaab9-28822f101473f11448384b6f;72ccc4be-99b5-4346-adf1-79176bebb380) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile.
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 974, in __init__ self.model = InternLM2Model(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in __init__ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 598, in __init__ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpficmgqtz/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa82-3e32a0ae2496f29275d4bfa2;f3444d97-fdfa-4a98-9cee-0ff83cccadf3) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa30-5b4e2f1260ca513e5d6f53de;fc8a85a9-2778-4c51-ae44-93359d4081dd) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB.
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp9a964gjq/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa54c-4acded0c06b30b6e5a37b45a;53d275f7-fbe2-40c5-b28d-3a53f3890719) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpk_zyud20/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9d9-47f011087cce4ea9787e6bd6;8e8654e4-7767-4efd-81bb-673d1c1a23fb) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaad5-780a7a8c56e27c0379263083;2cd16c37-da9d-46f5-86b4-4c585d348ee7) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpbiaq465j/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet.
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpvqw4a06w/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1591.713792,2250.768384,0.0,1604.32128,1463.693312,s,10,1.2648313903808592,0.12648313903808595,0.0013989604614154947,0.1262478713989258,0.1271006202697754,0.1287280216217041,0.13002994270324708,"[0.1303554229736328, 0.12560326385498047, 0.12551904296875, 0.12673897552490235, 0.12573398590087892, 0.12508892822265624, 0.12599820709228515, 0.1264975357055664, 0.1265692138671875, 0.12672681427001953]",tokens/s,2023.9851884361806,kWh,1.4855278241965507e-06,8.139949754399822e-07,6.45409544104969e-06,8.753618240686224e-06,tokens/kWh,29245049.64245863,MB,1591.713792,2250.768384,0.0,1604.32128,1560.975872,s,10,72.9124365234375,7.291243652343749,0.003929707108860259,7.2908349609375005,7.297245703125,7.2972966796875,7.2973374609375,"[7.297234375, 7.2856533203125, 7.29734765625, 7.29276513671875, 7.29289794921875, 7.29063818359375, 7.29089404296875, 7.29077587890625, 7.28937060546875, 7.284859375]",tokens/s,8.640501264794358,kWh,8.609067341933647e-05,4.718392125277432e-05,0.00037183251274354733,0.0005051071074156582,tokens/kWh,124726.02162011672,,s,629,73.93082885742194,0.11753708880353239,0.015022143769719637,0.11565261077880859,0.11614679260253907,0.11651727142333984,0.24159293273925783,"[0.11554611206054688, 0.11542835235595703, 0.11560959625244141, 0.11652607727050782, 0.11558604431152343, 0.1156341781616211, 0.11573760223388672, 0.1157570571899414, 0.11565670776367187, 0.11562393951416015, 0.11558502197265624, 0.11574476623535156, 0.1157580795288086, 0.11542835235595703, 0.11543961334228516, 0.11608268737792969, 0.1160478744506836, 0.11690393829345704, 0.11582669067382813, 0.11567513275146485, 0.1154303970336914, 0.11542835235595703, 0.1157232666015625, 0.1157694091796875, 0.11561158752441406, 0.11565261077880859, 0.11558297729492187, 0.11557170867919922, 0.11555225372314454, 0.11560857391357422, 0.11558604431152343, 0.11568946838378906, 0.11568537902832031, 0.11553177642822265, 0.1154703369140625, 0.11592601776123047, 0.11566284942626953, 0.11548159790039063, 0.11579289245605469, 0.11569561767578125, 0.11579801940917969, 0.11614924621582032, 0.11590553283691406, 0.1172838363647461, 0.11599565124511718, 0.11589427185058594, 0.11579392242431641, 0.11582054138183594, 0.11560550689697266, 0.11576937866210937, 0.11679228973388672, 0.11623014068603515, 0.115884033203125, 0.11555532836914062, 0.11562290954589843, 0.11581132507324218, 0.11705241394042969, 0.11600486755371094, 0.11588607788085938, 0.115736572265625, 0.11612569427490234, 0.11604176330566406, 0.24190666198730468, 0.11594547271728516, 0.11553177642822265, 0.1155819549560547, 0.1155051498413086, 0.11557990264892579, 0.11673600006103516, 0.11557376098632813, 0.11559117126464843, 0.1155072021484375, 0.11576525115966797, 0.11566079711914062, 0.11590656280517578, 0.115525634765625, 0.11559219360351562, 0.1160847396850586, 0.11556658935546875, 0.1156147232055664, 0.11551641845703126, 
0.11533209228515626, 0.11555328369140624, 0.11560147094726562, 0.11554502105712891, 0.11557376098632813, 0.1158809585571289, 0.11545088195800782, 0.11558604431152343, 0.11544985961914063, 0.11559321594238281, 0.11556761932373047, 0.11569868469238281, 0.11571097564697265, 0.11561676788330078, 0.11552051544189453, 0.11553997039794922, 0.11532393646240234, 0.11541606140136719, 0.11545494079589844, 0.11565055847167968, 0.11606527709960937, 0.11634278106689454, 0.1155440673828125, 0.11550105285644531, 0.11538432312011719, 0.11557785797119141, 0.11540582275390625, 0.11706163024902344, 0.11553382110595703, 0.11559321594238281, 0.11543244934082031, 0.11545804595947265, 0.11543142700195312, 0.11540480041503906, 0.11530547332763671, 0.11539667510986328, 0.11571295928955078, 0.11585740661621094, 0.11560447692871094, 0.11563622283935547, 0.11560550689697266, 0.11555430603027343, 0.11554303741455078, 0.1156147232055664, 0.24147865295410156, 0.11553382110595703, 0.11642678070068359, 0.11596393585205078, 0.11609900665283203, 0.11560550689697266, 0.11545906829833984, 0.11533824157714843, 0.11538944244384766, 0.11619737243652344, 0.11612876892089843, 0.11601612854003907, 0.1155758056640625, 0.11589433288574219, 0.1156719970703125, 0.11552870178222656, 0.11574681854248046, 0.11581337738037109, 0.11625574493408203, 0.11572531127929687, 0.11590656280517578, 0.11556454467773437, 0.11557075500488281, 0.1164062042236328, 0.11572736358642578, 0.11559935760498047, 0.1159710693359375, 0.11614105224609375, 0.11683942413330078, 0.11663980865478515, 0.11592697906494141, 0.11562598419189453, 0.11584307098388671, 0.11552973175048828, 0.11597516632080078, 0.11550310516357422, 0.11585126495361328, 0.11633869171142579, 0.11576121520996094, 0.11590239715576171, 0.11548057556152344, 0.11562700653076172, 0.11580518341064452, 0.11599974060058593, 0.11608370971679688, 0.11642265319824219, 0.11600179290771484, 0.11641446685791015, 0.11563212585449219, 0.11547647857666016, 0.11545088195800782, 0.11568032073974609, 0.11588703918457031, 0.11573350524902344, 0.1157201919555664, 0.11566182708740234, 0.11576627349853516, 0.11582566070556641, 0.11572736358642578, 0.11544780731201172, 0.11562393951416015, 0.11546828460693359, 0.11547853088378907, 0.2430136260986328, 0.1159004135131836, 0.11564236450195313, 0.11563827514648438, 0.11565567779541015, 0.11559526062011719, 0.11573350524902344, 0.11556658935546875, 0.1157396469116211, 0.1160284194946289, 0.11578982543945313, 0.11594035339355468, 0.11562290954589843, 0.11544985961914063, 0.11558502197265624, 0.11559117126464843, 0.11605811309814452, 0.11588301086425781, 0.1157949447631836, 0.11583795166015624, 0.11589734649658204, 0.11614617919921875, 0.11590348815917968, 0.1155440673828125, 0.1166714859008789, 0.11601510620117188, 0.11548365020751954, 0.11576012420654297, 0.11538944244384766, 0.11538944244384766, 0.11550617980957031, 0.1161164779663086, 0.11625676727294922, 0.11571814727783203, 0.11565875244140625, 0.1156485137939453, 0.11559117126464843, 0.11533106994628907, 0.11534438323974609, 0.115957763671875, 0.11574066925048829, 0.11546316528320312, 0.11652095794677735, 0.11555532836914062, 0.11549081420898437, 0.11547443389892578, 0.11542630767822265, 0.11534438323974609, 0.11527986907958984, 0.11548365020751954, 0.11580825805664062, 0.1169459228515625, 0.11576729583740235, 0.11597516632080078, 0.11551849365234375, 0.11617174530029296, 0.1161553955078125, 0.11608678436279297, 0.11566079711914062, 0.11565261077880859, 0.11548365020751954, 0.1155758056640625, 0.11557376098632813, 
0.2416373748779297, 0.11573760223388672, 0.11561881256103515, 0.11579596710205078, 0.11593830108642578, 0.115810302734375, 0.11552870178222656, 0.11581542205810547, 0.11552665710449218, 0.11582771301269532, 0.1157396469116211, 0.11553791809082031, 0.1157027816772461, 0.11587789154052734, 0.11567513275146485, 0.11574169921875, 0.11559833526611328, 0.11615334320068359, 0.11550822448730469, 0.11546316528320312, 0.11542733001708984, 0.11593523406982421, 0.11576217651367188, 0.11546521759033203, 0.11530751800537109, 0.11534848022460938, 0.11562496185302734, 0.11573248291015625, 0.11568946838378906, 0.11568025970458984, 0.1157570571899414, 0.11558809661865234, 0.11546009826660156, 0.11551436614990235, 0.11556864166259766, 0.11568851470947265, 0.11561977386474609, 0.11564441680908204, 0.11560243225097656, 0.11574374389648437, 0.11693260955810547, 0.11600179290771484, 0.11581951904296875, 0.11551337432861328, 0.11621782684326172, 0.11561574554443359, 0.11616153717041015, 0.11619328308105469, 0.11597926330566406, 0.11552153778076171, 0.11582157135009766, 0.11549798583984375, 0.11565773010253906, 0.11573868560791016, 0.11633657836914063, 0.11591986846923828, 0.11580723571777343, 0.11681075286865235, 0.11580210876464844, 0.11566284942626953, 0.11573554992675782, 0.11571405029296875, 0.11579392242431641, 0.24123802185058593, 0.11577037048339844, 0.11569152069091797, 0.11550924682617188, 0.11548262023925782, 0.11541299438476563, 0.11537100982666015, 0.11548979187011718, 0.11538540649414063, 0.11682399749755859, 0.11572531127929687, 0.11558911895751953, 0.11550003051757812, 0.11544576263427735, 0.11544678497314453, 0.11560755157470703, 0.11546828460693359, 0.11556249237060547, 0.1156485137939453, 0.11555023956298828, 0.11552047729492188, 0.11564236450195313, 0.11559014129638671, 0.11573452758789063, 0.11543859100341797, 0.11551641845703126, 0.11565465545654297, 0.11574988555908203, 0.11569971466064453, 0.11559935760498047, 0.11563724517822266, 0.11556044769287109, 0.11579084777832031, 0.11558399963378906, 0.11560345458984375, 0.11583385467529297, 0.11566182708740234, 0.11573350524902344, 0.11707392120361328, 0.11604377746582031, 0.11578166198730469, 0.11570275115966797, 0.11584614562988281, 0.11643289947509766, 0.11766886138916016, 0.11568742370605468, 0.11568131256103516, 0.1155184326171875, 0.11561779022216796, 0.11559219360351562, 0.1157734375, 0.1157580795288086, 0.11578675079345703, 0.11578163146972656, 0.11558399963378906, 0.1155758056640625, 0.11558399963378906, 0.11569766235351563, 0.11568230438232421, 0.11554412841796875, 0.11554502105712891, 0.11572531127929687, 0.11579392242431641, 0.24222003173828124, 0.11544473266601563, 0.11548876953125, 0.11571814727783203, 0.11562905883789062, 0.1165486068725586, 0.11599871826171874, 0.11563724517822266, 0.11574374389648437, 0.11579801940917969, 0.11553382110595703, 0.11554611206054688, 0.11684249877929688, 0.11557273864746094, 0.11547853088378907, 0.1154877471923828, 0.11573350524902344, 0.11547853088378907, 0.11548876953125, 0.11556556701660156, 0.1155973129272461, 0.11554815673828125, 0.1156280288696289, 0.1164400634765625, 0.115525634765625, 0.11554815673828125, 0.11559321594238281, 0.11538022613525391, 0.11541510772705078, 0.11557574462890625, 0.11551538848876954, 0.11658854675292969, 0.11569561767578125, 0.11581132507324218, 0.11544371032714844, 0.1155594253540039, 0.11566284942626953, 0.11575603485107422, 0.1155072021484375, 0.11597618865966797, 0.11624857330322266, 0.11573554992675782, 0.11539968109130859, 0.11556352233886719, 
0.11553997039794922, 0.1154119644165039, 0.11562086486816406, 0.11566899108886719, 0.11578470611572265, 0.11621580505371094, 0.11563520050048828, 0.11619840240478516, 0.11569459533691406, 0.11568230438232421, 0.11591270446777344, 0.11579801940917969, 0.11572531127929687, 0.11583692932128906, 0.11580006408691407, 0.11551436614990235, 0.11591372680664062, 0.11565055847167968, 0.11566796875, 0.2425927734375, 0.11592601776123047, 0.11652812957763672, 0.1160263671875, 0.1156280288696289, 0.1154703369140625, 0.11540684509277344, 0.11567411041259766, 0.11576422119140625, 0.11549388885498046, 0.11578163146972656, 0.11591372680664062, 0.11623423767089844, 0.11559321594238281, 0.11569459533691406, 0.11551744079589844, 0.11644620513916015, 0.1155645751953125, 0.11573654174804687, 0.11594445037841797, 0.11607039642333984, 0.1157232666015625, 0.11550310516357422, 0.11549900817871094, 0.11566694641113281, 0.11567616271972657, 0.11566902160644531, 0.1161082534790039, 0.11594137573242187, 0.11565158081054687, 0.11552153778076171, 0.11549702453613281, 0.11560646057128907, 0.11540275573730469, 0.11600281524658203, 0.11596390533447265, 0.11555123138427735, 0.11542118072509766, 0.11550617980957031, 0.11543654632568359, 0.11562700653076172, 0.11552051544189453, 0.11569356536865234, 0.11555020904541016, 0.11564236450195313, 0.11563622283935547, 0.11566694641113281, 0.11554611206054688, 0.11565567779541015, 0.11542425537109376, 0.11545600128173829, 0.11553587341308594, 0.11576729583740235, 0.11622911834716797, 0.11651174163818359, 0.11568844604492187, 0.11576831817626954, 0.11554713439941407, 0.11568025970458984, 0.11566694641113281, 0.11580620574951171, 0.11560345458984375, 0.11570893096923827, 0.24270541381835936, 0.11573554992675782, 0.1154734115600586, 0.115525634765625, 0.11553279876708984, 0.11555430603027343, 0.11576525115966797, 0.11567922973632813, 0.11552665710449218, 0.11537715148925781, 0.1155041275024414, 0.11540991973876953, 0.11538329315185547, 0.11551641845703126, 0.11542937469482421, 0.11544576263427735, 0.11548365020751954, 0.11621273803710938, 0.11560447692871094, 0.11579801940917969, 0.11566796875, 0.11572531127929687, 0.11551750183105469, 0.1155849609375, 0.11561062622070313, 0.11598028564453125, 0.11561676788330078, 0.11556147003173828, 0.11561676788330078, 0.11551436614990235, 0.11571302032470702, 0.11565875244140625, 0.11572223663330078, 0.11552973175048828, 0.11562290954589843, 0.11551747131347656, 0.11566793823242187, 0.11573760223388672, 0.11573760223388672, 0.11582463836669922, 0.1156147232055664, 0.11548880004882812, 0.1162034912109375, 0.11573350524902344, 0.11611344146728515, 0.11572425842285156, 0.11567616271972657, 0.11576217651367188, 0.11560352325439453, 0.11609388732910156, 0.11659980773925781, 0.11586048126220704, 0.11570381164550782, 0.11573248291015625, 0.11557170867919922, 0.11563827514648438, 0.11605709075927734, 0.1166714859008789, 0.1158123550415039, 0.11569971466064453, 0.11564543914794922, 0.11563629150390625, 0.11554707336425782, 0.24293376159667968, 0.1157570571899414, 0.11543551635742187, 0.11561164855957032, 0.11565161895751953, 0.11541910552978515, 0.11535257720947266, 0.11540790557861329, 0.1153371810913086, 0.11557478332519532, 0.11545394897460938, 0.11548365020751954, 0.11608576202392579, 0.11585740661621094, 0.1155758056640625, 0.11546419525146484, 0.11544882965087891, 0.11567922973632813, 0.11579596710205078, 0.11548467254638672, 0.11567820739746094, 0.11553075408935547, 0.11560447692871094, 0.11557683563232422, 0.11538841247558594, 0.11541709136962891, 
0.11647283172607421, 0.11568946838378906, 0.11550924682617188, 0.11575603485107422, 0.11594035339355468, 0.11566387176513672, 0.11554713439941407, 0.11556761932373047, 0.11553997039794922, 0.11574578857421874, 0.1154959716796875, 0.11552867126464844, 0.11542527770996094, 0.11553689575195313, 0.11548262023925782, 0.11609190368652343, 0.11563314819335938, 0.11550105285644531, 0.11568844604492187, 0.1156341781616211, 0.11574681854248046, 0.11630079650878906, 0.11589119720458985, 0.1157027816772461, 0.11557170867919922, 0.11571609497070312, 0.11571916961669922, 0.11559321594238281, 0.1154549789428711, 0.1155389404296875, 0.11564543914794922, 0.11553587341308594, 0.11558604431152343, 0.11541913604736329, 0.11550822448730469, 0.11574272155761718, 0.11551026916503906]",tokens/s,8.507952767756032,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1400.639488,6094.848,0.0,5448.400896,5215.942144,s,10,5.915999877929687,0.5915999877929687,0.0028828720813889874,0.5905971069335938,0.5921576904296875,0.5961826965332031,0.5994027014160156,"[0.6002077026367187, 0.5907606201171876, 0.5909608764648437, 0.5905933227539063, 0.5902119140625, 0.5906008911132813, 0.5904852294921875, 0.5912632446289062, 0.5904329223632813, 0.590483154296875]",tokens/s,432.7248229923689,kWh,6.983380516370138e-06,3.826209227811299e-06,3.1648620090131334e-05,4.2458209834312766e-05,tokens/kWh,6029458.166017933,MB,1400.967168,6094.848,0.0,5448.400896,5405.644288,s,10,341.33091015625,34.133091015625,0.0089451948085196,34.130179687500004,34.14430546875,34.149687890625,34.153993828125,"[34.13034765625, 34.1351484375, 34.13001171875, 34.128140625, 34.12757421875, 34.12398828125, 34.13193359375, 34.1255859375, 34.143109375, 34.1550703125]",tokens/s,1.8457162280193342,kWh,0.00040300285582741096,0.00022088092240917615,0.0018324814267682385,0.002456365205004826,tokens/kWh,25647.65201511484,,s,629,346.1052627563478,0.5502468406301234,0.07016038395822202,0.5417257080078125,0.5423982788085937,0.54271630859375,1.1323166894531251,"[0.5417594604492187, 0.5414471435546875, 0.5411215209960938, 0.5414297485351562, 0.5414563598632812, 0.541576171875, 0.5412239379882813, 0.5414502563476562, 0.5416427612304687, 0.54096484375, 0.54148095703125, 0.5411840209960938, 0.5414339599609375, 0.5409760131835938, 0.5409024047851563, 0.5412208862304687, 0.5413017578125, 0.5412515869140625, 0.5410549926757813, 0.5412177734375, 0.5411133422851563, 0.54131201171875, 0.5422049560546875, 0.5416530151367187, 0.54127001953125, 0.5410795288085938, 0.5415239868164062, 0.5422069702148438, 0.5421731567382813, 0.5426155395507812, 0.542755859375, 0.541576171875, 0.5415362548828125, 0.5411993408203125, 0.5416642456054688, 0.541591552734375, 0.541427734375, 0.5414830322265625, 0.5413529663085938, 0.5414666137695312, 0.5418700561523437, 0.5421414184570312, 0.5417953491210937, 0.5417728271484376, 0.5417267456054687, 0.5425919799804687, 0.5420779418945313, 
0.5423953857421875, 0.542466064453125, 0.5427732543945313, 0.5427783813476562, 0.5423319091796875, 0.5419212646484375, 0.5422335815429687, 0.5419622192382813, 0.542166015625, 0.542202880859375, 0.5419612426757813, 0.541928466796875, 0.5428971557617187, 0.5427886352539063, 0.542540771484375, 1.132921875, 0.5414379272460937, 0.541675537109375, 0.5412976684570312, 0.5413980102539062, 0.541432861328125, 0.5411819458007813, 0.5411235961914063, 0.5415782470703125, 0.5412136840820313, 0.5412894897460937, 0.5412925415039063, 0.5418301391601562, 0.541549560546875, 0.5421035766601563, 0.5417698974609375, 0.5415985717773437, 0.5415782470703125, 0.5415618286132813, 0.5418792724609375, 0.5416151123046875, 0.5421527099609375, 0.5418721313476562, 0.5414307861328125, 0.5414574584960937, 0.5414255981445313, 0.5418076171875, 0.5416806640625, 0.5416058959960938, 0.5413232421875, 0.5415167846679687, 0.5413140869140625, 0.5417481689453125, 0.5427220458984375, 0.5421270751953124, 0.541823974609375, 0.54205029296875, 0.5420144653320312, 0.5415424194335937, 0.541971435546875, 0.5416202392578126, 0.541264892578125, 0.5415219116210938, 0.541216796875, 0.5416058959960938, 0.541259765625, 0.5414993896484375, 0.5412925415039063, 0.5416744995117188, 0.5412996826171875, 0.5416222534179688, 0.5422120971679687, 0.5429841918945313, 0.542635009765625, 0.5428388061523437, 0.542835693359375, 0.5432268676757812, 0.5427363891601562, 0.5429586181640625, 0.5425602416992188, 0.5428838500976563, 0.5426534423828125, 0.5426380615234375, 1.1323843994140625, 0.5415637817382812, 0.5415362548828125, 0.541212646484375, 0.5413027954101562, 0.54141748046875, 0.54186083984375, 0.542003173828125, 0.5418147583007813, 0.5417257080078125, 0.5426544799804688, 0.5414871215820313, 0.5419520263671875, 0.5414297485351562, 0.5414932250976563, 0.5413765258789063, 0.5414635620117187, 0.5415690307617187, 0.5419008178710938, 0.5413099365234375, 0.5413130493164062, 0.5413457641601562, 0.5415126953125, 0.541191162109375, 0.541591552734375, 0.5423012084960938, 0.5414676513671876, 0.5414563598632812, 0.5417738037109375, 0.541896728515625, 0.5416048583984375, 0.5420687255859375, 0.5419673461914063, 0.5420953369140625, 0.541780029296875, 0.541761474609375, 0.5418792724609375, 0.5419478759765625, 0.54186083984375, 0.5423062744140625, 0.54225, 0.5419407348632812, 0.542171142578125, 0.5424015502929688, 0.5422223510742188, 0.5414348754882813, 0.5414635620117187, 0.54171337890625, 0.5415720825195313, 0.5418065795898438, 0.5415321655273437, 0.54148095703125, 0.5417645874023438, 0.54140625, 0.5415218505859375, 0.5416734619140625, 0.5417164916992188, 0.5417820434570313, 0.5417778930664062, 0.54166015625, 0.5416734619140625, 0.5428797607421875, 0.542571533203125, 1.1325224609375, 0.5419622192382813, 0.541770751953125, 0.5419089965820313, 0.5416908569335938, 0.5413734130859374, 0.5414747924804687, 0.5413980102539062, 0.5412987060546876, 0.5413621826171875, 0.5414256591796875, 0.5414717407226562, 0.5418414306640625, 0.5414390258789062, 0.5416283569335938, 0.5422162475585938, 0.5423297729492188, 0.5422459106445312, 0.5416038208007813, 0.5418384399414062, 0.5420675659179688, 0.5416325073242187, 0.5418936157226563, 0.54164892578125, 0.5416837158203125, 0.5414542846679687, 0.5416089477539062, 0.541623291015625, 0.541696044921875, 0.5414962768554688, 0.5420676879882812, 0.5414850463867188, 0.5416734619140625, 0.5414481811523437, 0.5424260864257813, 0.5415341796875, 0.5418352661132813, 0.5416427612304687, 0.5419017944335938, 0.5418506469726563, 0.5419898681640625, 
0.541454345703125, 0.5417994384765625, 0.5414666137695312, 0.541697021484375, 0.5416581420898438, 0.5419612426757813, 0.5414061889648437, 0.5416857299804687, 0.5413396606445312, 0.5420892333984375, 0.5414430541992188, 0.5420545654296876, 0.5417859497070312, 0.5416704711914062, 0.5413395385742188, 0.5417564086914063, 0.5420462036132813, 0.5418311767578124, 0.5419110107421875, 0.5419161376953125, 0.5414850463867188, 0.541528076171875, 1.131968505859375, 0.5414400024414062, 0.5414932250976563, 0.5411287231445312, 0.541296630859375, 0.5412792358398437, 0.5417031860351562, 0.5415956420898438, 0.5414348754882813, 0.5413161010742188, 0.5413847045898438, 0.541760498046875, 0.5419417724609376, 0.5416417236328125, 0.5418035278320312, 0.54144921875, 0.54145947265625, 0.5413785400390625, 0.541380615234375, 0.5414000854492188, 0.5414727783203125, 0.5413887939453125, 0.5414727783203125, 0.5413324584960938, 0.5414451293945313, 0.54200830078125, 0.541663330078125, 0.5414224853515625, 0.5416151123046875, 0.5414993896484375, 0.54185986328125, 0.5417636108398437, 0.5419540405273438, 0.5416038208007813, 0.5418005981445313, 0.541447021484375, 0.541686767578125, 0.5417062377929688, 0.5416693725585937, 0.5417902221679688, 0.5416990966796875, 0.5416304931640625, 0.5417277221679687, 0.5417277221679687, 0.5418322143554688, 0.5419100341796875, 0.5417410888671875, 0.5418536987304687, 0.5424578247070313, 0.5422274780273437, 0.5421045532226563, 0.5419489135742187, 0.5420206298828125, 0.5418895263671875, 0.541897705078125, 0.5418690795898438, 0.5419857788085938, 0.5421117553710938, 0.54192333984375, 0.5418588256835938, 0.5421434936523437, 0.5418905639648437, 0.5424384155273437, 1.1324405517578124, 0.5413406982421874, 0.5414522705078125, 0.5422376708984376, 0.5428193359375, 0.541686767578125, 0.5414686889648438, 0.5414215698242187, 0.541285400390625, 0.5416571044921875, 0.5415997314453125, 0.5419386596679687, 0.5416673583984375, 0.541638671875, 0.5414850463867188, 0.5416683349609375, 0.5423175659179688, 0.5418322143554688, 0.5414451904296875, 0.5412515258789062, 0.5415239868164062, 0.542023681640625, 0.541918212890625, 0.5417359619140625, 0.541549560546875, 0.5413662719726563, 0.5415792846679688, 0.5414010620117188, 0.5414441528320313, 0.5412525634765625, 0.54166015625, 0.5412556762695313, 0.541591552734375, 0.5411932373046875, 0.541432861328125, 0.5416161499023437, 0.5414912109375, 0.5413294067382812, 0.541365234375, 0.5413304443359375, 0.5423472900390625, 0.541759521484375, 0.5416038208007813, 0.5411983642578125, 0.5415557250976563, 0.5414194946289063, 0.5415966796875, 0.54135498046875, 0.5417625732421875, 0.5416345825195312, 0.5418465576171875, 0.541486083984375, 0.5419673461914063, 0.541765625, 0.542076904296875, 0.5416171264648437, 0.5418035278320312, 0.5415731201171875, 0.541892578125, 0.54164892578125, 0.5422459106445312, 0.5417850952148437, 0.5420390625, 1.132142578125, 0.5416161499023437, 0.541454345703125, 0.5413867797851563, 0.5415997314453125, 0.541613037109375, 0.5415372924804688, 0.5415844116210937, 0.5418772583007813, 0.5416714477539063, 0.5416253662109375, 0.5415557250976563, 0.5419458618164062, 0.5417195434570312, 0.5419008178710938, 0.541591552734375, 0.5416642456054688, 0.5415946044921875, 0.5415925903320312, 0.5416468505859375, 0.541644775390625, 0.5415782470703125, 0.5415966796875, 0.541538330078125, 0.541675537109375, 0.5414788818359375, 0.5420431518554687, 0.5420267333984375, 0.5419776000976563, 0.5416171264648437, 0.541822998046875, 0.54211376953125, 0.5417615356445312, 0.5421096801757812, 
0.5417891845703126, 0.5419161376953125, 0.541697021484375, 0.5416591186523437, 0.5419724731445312, 0.5416560668945313, 0.54164892578125, 0.5418291015625, 0.5418055419921874, 0.541601806640625, 0.5418721313476562, 0.54198681640625, 0.5417297973632812, 0.5416796264648438, 0.5417349243164062, 0.5415823364257812, 0.5425029296875, 0.542382080078125, 0.5421414184570312, 0.5416663208007813, 0.5417011108398437, 0.54160302734375, 0.5418053588867188, 0.5416949462890625, 0.5418045654296875, 0.5421281127929688, 0.5423175659179688, 0.5419632568359375, 0.542244873046875, 1.1331278076171876, 0.5414716186523437, 0.54160791015625, 0.54152294921875, 0.5418578491210938, 0.5414696655273438, 0.5420236206054687, 0.5413458862304688, 0.5417337646484375, 0.5414185180664063, 0.5415403442382812, 0.5418291015625, 0.5415823974609375, 0.5414583740234375, 0.541802490234375, 0.541475830078125, 0.5415629272460938, 0.5419693603515625, 0.5415751953125, 0.5413673095703125, 0.5415239868164062, 0.5413990478515625, 0.5414297485351562, 0.5419171752929688, 0.542044189453125, 0.5415536499023438, 0.5416273803710937, 0.5416406860351562, 0.5414400024414062, 0.541470703125, 0.5419960327148438, 0.5414901733398437, 0.5415792846679688, 0.5415465087890625, 0.5416714477539063, 0.5413161010742188, 0.5425889282226563, 0.5413949584960938, 0.5415833740234375, 0.541106201171875, 0.5417615356445312, 0.5417666625976563, 0.541760498046875, 0.5414307861328125, 0.5415894775390625, 0.5413898315429687, 0.5415833740234375, 0.5412874145507812, 0.5417349243164062, 0.5415741577148437, 0.5416611938476562, 0.5413416748046875, 0.5422161865234375, 0.5416673583984375, 0.5423308715820313, 0.5420431518554687, 0.5420534057617188, 0.5417984008789063, 0.5419888916015625, 0.5413898315429687, 0.5425121459960938, 0.541970458984375, 0.5419970092773437, 1.13246826171875, 0.5414359130859375, 0.5415659790039062, 0.54150244140625, 0.5414912719726562, 0.5414020385742188, 0.541581298828125, 0.5415567626953125, 0.5416243286132812, 0.5418076171875, 0.5416089477539062, 0.5414696655273438, 0.5415587768554687, 0.541931640625, 0.542152587890625, 0.541929443359375, 0.541939697265625, 0.54158642578125, 0.5415321655273437, 0.5415823364257812, 0.5417267456054687, 0.5416222534179688, 0.54167041015625, 0.54148095703125, 0.5416376342773438, 0.5415116577148438, 0.5416591186523437, 0.5424230346679687, 0.5425469360351562, 0.5417778930664062, 0.5417523193359375, 0.5418987426757812, 0.5419951171875, 0.5423696899414062, 0.5424568481445312, 0.5426206665039063, 0.542086181640625, 0.541823974609375, 0.54186083984375, 0.5420349731445312, 0.5424475708007812, 0.5419776000976563, 0.5422203369140625, 0.5420625, 0.542244873046875, 0.5426073608398437, 0.5429688110351563, 0.5421936645507812, 0.542075927734375, 0.5420676879882812, 0.5423175659179688, 0.5427466430664063, 0.5428531494140625, 0.542118896484375, 0.5421475830078125, 0.542002197265625, 0.5421066284179688, 0.5419857788085938, 0.5419765625, 0.5419990844726562, 0.5420123901367188, 0.5418936157226563, 0.5418700561523437, 1.1330999755859374, 0.5421752319335937, 0.5420472412109375, 0.5415352172851563, 0.541517822265625, 0.5416509399414062, 0.54187109375, 0.5422120971679687, 0.5417625732421875, 0.5419806518554687, 0.5415413818359375, 0.54137548828125, 0.5424609375, 0.5421240234375, 0.54167041015625, 0.5416581420898438, 0.5418803100585937, 0.5415997314453125, 0.5424701538085938, 0.5428551635742187, 0.5424916381835938, 0.542624755859375, 0.542044189453125, 0.54175537109375, 0.541865966796875, 0.54215576171875, 0.5419192504882813, 0.5422418212890625, 
0.54240869140625, 0.5427630004882813, 0.543088623046875, 0.5421854858398437, 0.5420318603515625, 0.5419612426757813, 0.5423749389648438, 0.541897705078125, 0.5418147583007813, 0.5428131713867187, 0.542508056640625, 0.5420390625, 0.5422202758789062, 0.542045166015625, 0.542202880859375, 0.5423974609375, 0.5422069702148438, 0.541749267578125, 0.5420185546875, 0.5419468994140625, 0.5423267822265625, 0.5417062377929688, 0.5420277709960938, 0.5420349731445312, 0.5422673950195313, 0.5421270751953124, 0.5421915893554687, 0.5427077026367187, 0.5426472778320313, 0.542382080078125, 0.54236572265625, 0.542023681640625, 0.5423565063476562, 0.542160888671875, 0.5429483642578125]",tokens/s,1.8173661821571478,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1298.415616,872.93952,0.0,226.492416,184.397824,s,11,0.17165715312957763,0.015605195739052512,0.0007607833862353127,0.01534540843963623,0.01575004768371582,0.016863200187683107,0.017753722190856934,"[0.01797635269165039, 0.015421407699584961, 0.015347392082214355, 0.015315903663635253, 0.015256128311157226, 0.01575004768371582, 0.01534540843963623, 0.015287008285522461, 0.015340383529663087, 0.01527171230316162, 0.01534540843963623]",tokens/s,16404.792626814135,kWh,1.8500959462776245e-07,1.0137606156642736e-07,3.4862617005003736e-07,6.350118262442272e-07,tokens/kWh,403142098.1780924,MB,1298.743296,872.93952,0.0,226.492416,197.933568,s,11,10.171151123046876,0.9246501020951705,0.010660233315434522,0.920117919921875,0.9293056640625,0.9430220336914062,0.9539951293945312,"[0.9567384033203125, 0.9293056640625, 0.9245167846679687, 0.9185679931640625, 0.920117919921875, 0.9228417358398437, 0.919580810546875, 0.9193172607421874, 0.9187877807617187, 0.923688232421875, 0.9176885375976562]",tokens/s,68.13388097535262,kWh,1.1024352834572037e-05,6.040874968975529e-06,1.993414258140665e-05,3.6999370384954216e-05,tokens/kWh,1702731.6774454878,,s,692,10.307773435592651,0.014895626352012501,0.0018980452913926306,0.014577664375305176,0.01503354845046997,0.015412480163574216,0.030279741020202637,"[0.016365568161010743, 0.016268287658691406, 0.01589350414276123, 0.01510707187652588, 0.015166463851928711, 0.015711232185363768, 0.015400959968566894, 0.015467519760131837, 0.015654911994934084, 0.015433728218078613, 0.015542304039001465, 0.015136799812316894, 0.015172543525695801, 0.015097855567932129, 0.015466496467590332, 0.016034816741943358, 0.015383551597595215, 0.015619071960449218, 0.015451135635375977, 0.015178751945495606, 0.015625215530395507, 0.015426560401916504, 0.015222847938537598, 0.015156160354614258, 0.015565823554992676, 0.016327680587768553, 0.015833087921142578, 0.01530675220489502, 0.015086591720581055, 0.01507737636566162, 0.01501088047027588, 0.01481721591949463, 0.015222784042358398, 0.015755264282226563, 0.016021503448486327, 0.015339520454406739, 0.015148032188415527, 0.01509171199798584, 0.015104000091552734, 0.015050751686096191, 
0.015105024337768554, 0.014737407684326171, 0.014621696472167968, 0.014722047805786133, 0.014584896087646484, 0.014627776145935058, 0.014577664375305176, 0.014598143577575684, 0.014589952468872071, 0.014524415969848633, 0.014585856437683106, 0.01456332778930664, 0.014573568344116212, 0.014537728309631348, 0.014552063941955566, 0.014576640129089356, 0.014576640129089356, 0.014548992156982422, 0.01457049560546875, 0.014616576194763184, 0.014621696472167968, 0.014593024253845215, 0.030418943405151368, 0.014628864288330079, 0.014632960319519044, 0.0145797119140625, 0.01460531234741211, 0.014616576194763184, 0.01458073616027832, 0.014573568344116212, 0.014584832191467285, 0.014578687667846679, 0.014615551948547363, 0.014621696472167968, 0.01458790397644043, 0.014673919677734374, 0.014622719764709472, 0.01457151985168457, 0.014611455917358398, 0.0145797119140625, 0.014619647979736328, 0.014697471618652343, 0.014582783699035644, 0.014603263854980468, 0.01460223960876465, 0.01459404754638672, 0.014568448066711426, 0.014610431671142577, 0.014583807945251465, 0.014577664375305176, 0.014595071792602539, 0.014558208465576173, 0.01457254409790039, 0.014645248413085938, 0.014598143577575684, 0.015670271873474122, 0.015376383781433106, 0.015114239692687988, 0.015088640213012695, 0.015019007682800293, 0.015113216400146484, 0.015021056175231933, 0.014995455741882324, 0.014962688446044922, 0.014998527526855468, 0.014962688446044922, 0.014985216140747071, 0.014946304321289062, 0.01500876808166504, 0.014965760231018066, 0.015024191856384277, 0.014966719627380371, 0.014658559799194336, 0.014590975761413574, 0.014565376281738282, 0.01461350440979004, 0.014582783699035644, 0.015023103713989258, 0.01509887981414795, 0.014726143836975097, 0.014541824340820313, 0.0145797119140625, 0.014606335639953612, 0.014665727615356445, 0.01459609603881836, 0.030462976455688476, 0.014574591636657714, 0.014746623992919922, 0.015026176452636719, 0.015056960105895996, 0.015804351806640624, 0.015242239952087403, 0.015034367561340332, 0.015006719589233398, 0.014931967735290527, 0.01477734375, 0.0145797119140625, 0.014584832191467285, 0.014592000007629394, 0.014552063941955566, 0.014543871879577636, 0.014574591636657714, 0.014581760406494141, 0.014575615882873535, 0.014657535552978516, 0.014608384132385254, 0.014558208465576173, 0.014607359886169433, 0.014606335639953612, 0.01459609603881836, 0.014619647979736328, 0.014576640129089356, 0.014553088188171387, 0.014573568344116212, 0.014575615882873535, 0.014550016403198243, 0.01458892822265625, 0.014589952468872071, 0.014590975761413574, 0.014647295951843262, 0.01455513572692871, 0.014576640129089356, 0.014583807945251465, 0.014652416229248047, 0.01460223960876465, 0.014617600440979005, 0.014598143577575684, 0.0145797119140625, 0.01457254409790039, 0.014576640129089356, 0.014589952468872071, 0.01458790397644043, 0.014652416229248047, 0.014550016403198243, 0.01457049560546875, 0.01458892822265625, 0.014541824340820313, 0.0145797119140625, 0.0145633602142334, 0.014960607528686524, 0.014947327613830566, 0.014850111961364746, 0.014465984344482421, 0.014386176109313965, 0.014425087928771972, 0.014609408378601075, 0.014697471618652343, 0.014664704322814942, 0.030324735641479493, 0.014525440216064453, 0.014568448066711426, 0.014598143577575684, 0.01457049560546875, 0.014592000007629394, 0.014541824340820313, 0.014660639762878418, 0.014574560165405273, 0.014515199661254884, 0.014586879730224609, 0.014577664375305176, 0.01461350440979004, 0.014574591636657714, 0.01455513572692871, 
0.014520319938659668, 0.014518272399902344, 0.01457151985168457, 0.014554112434387208, 0.01456332778930664, 0.014540800094604492, 0.01457049560546875, 0.01456332778930664, 0.014504960060119629, 0.014553088188171387, 0.014619647979736328, 0.014583807945251465, 0.014557184219360352, 0.014566399574279786, 0.014529536247253418, 0.014600192070007324, 0.014612480163574219, 0.01456332778930664, 0.014575615882873535, 0.014627840042114258, 0.01467084789276123, 0.014575615882873535, 0.014560256004333496, 0.014663680076599121, 0.014564352035522461, 0.014524415969848633, 0.01454694366455078, 0.01458073616027832, 0.014548992156982422, 0.01452444839477539, 0.014602208137512207, 0.01455513572692871, 0.01456332778930664, 0.014532608032226562, 0.01461350440979004, 0.014622719764709472, 0.014517248153686524, 0.01459404754638672, 0.014558208465576173, 0.014556159973144531, 0.014548992156982422, 0.01457049560546875, 0.014508031845092773, 0.014590975761413574, 0.014540800094604492, 0.014600192070007324, 0.01469644832611084, 0.014692352294921876, 0.030276607513427735, 0.01461350440979004, 0.014553088188171387, 0.014508031845092773, 0.014564352035522461, 0.014553088188171387, 0.014565376281738282, 0.014566399574279786, 0.014537728309631348, 0.014530559539794922, 0.01459609603881836, 0.014569472312927247, 0.014567423820495605, 0.014508031845092773, 0.01455513572692871, 0.014628864288330079, 0.014535679817199706, 0.01458790397644043, 0.014557184219360352, 0.014535712242126466, 0.014557151794433594, 0.014535679817199706, 0.014534655570983887, 0.014531583786010742, 0.014545920372009278, 0.014566399574279786, 0.014538751602172852, 0.014553088188171387, 0.01456332778930664, 0.014638079643249511, 0.014538751602172852, 0.014615551948547363, 0.014623744010925293, 0.014533663749694824, 0.014525407791137696, 0.014531583786010742, 0.0145797119140625, 0.01457049560546875, 0.014547967910766601, 0.014557184219360352, 0.014577664375305176, 0.014550016403198243, 0.014560256004333496, 0.014547967910766601, 0.01447935962677002, 0.014482432365417481, 0.014364671707153321, 0.014386176109313965, 0.014722047805786133, 0.015221759796142579, 0.01508351993560791, 0.015017984390258789, 0.014990336418151855, 0.014946304321289062, 0.014996479988098145, 0.014521344184875488, 0.014540800094604492, 0.014592000007629394, 0.014558208465576173, 0.014535679817199706, 0.01457049560546875, 0.014556159973144531, 0.014526464462280274, 0.031153152465820313, 0.014898176193237305, 0.01458790397644043, 0.014568448066711426, 0.014693375587463378, 0.014625791549682618, 0.015431679725646973, 0.014708736419677734, 0.014601216316223145, 0.01455513572692871, 0.014576640129089356, 0.014567423820495605, 0.014589952468872071, 0.014566399574279786, 0.014551039695739745, 0.01480191993713379, 0.014784511566162109, 0.015605759620666505, 0.015138815879821778, 0.014937088012695313, 0.014920703887939453, 0.014586879730224609, 0.014550047874450684, 0.014574624061584473, 0.01459712028503418, 0.014562239646911621, 0.014526464462280274, 0.014525440216064453, 0.014536704063415527, 0.014520319938659668, 0.014515199661254884, 0.014625791549682618, 0.014533632278442383, 0.014556159973144531, 0.014573568344116212, 0.014589952468872071, 0.01458073616027832, 0.014565376281738282, 0.014610431671142577, 0.01459609603881836, 0.01466982364654541, 0.014590975761413574, 0.014558208465576173, 0.014566399574279786, 0.014538751602172852, 0.014564352035522461, 0.014567423820495605, 0.014530559539794922, 0.014538751602172852, 0.014535679817199706, 0.014551039695739745, 0.01457151985168457, 
0.014675968170166016, 0.014567423820495605, 0.014585856437683106, 0.014559231758117675, 0.014527487754821777, 0.014577664375305176, 0.014561280250549317, 0.014562303543090821, 0.014578687667846679, 0.014515199661254884, 0.01454086399078369, 0.03033900833129883, 0.014519295692443847, 0.014643199920654297, 0.014578687667846679, 0.014610431671142577, 0.014585856437683106, 0.014574591636657714, 0.014586879730224609, 0.01457254409790039, 0.014628864288330079, 0.01458790397644043, 0.01529856014251709, 0.014918656349182128, 0.014584832191467285, 0.014532608032226562, 0.014585920333862305, 0.014574527740478516, 0.014573568344116212, 0.014543871879577636, 0.014592063903808595, 0.01458886432647705, 0.014574591636657714, 0.01458892822265625, 0.014633983612060546, 0.014540800094604492, 0.014567423820495605, 0.014564352035522461, 0.014526464462280274, 0.014562303543090821, 0.01458073616027832, 0.014523391723632812, 0.014614527702331542, 0.014553088188171387, 0.014541824340820313, 0.014567423820495605, 0.014635007858276367, 0.014566399574279786, 0.01459404754638672, 0.014535679817199706, 0.014558208465576173, 0.014565376281738282, 0.01458073616027832, 0.014585856437683106, 0.014607359886169433, 0.014540800094604492, 0.014544896125793457, 0.014617600440979005, 0.014566399574279786, 0.01457254409790039, 0.014559231758117675, 0.014561280250549317, 0.01458073616027832, 0.014527551651000977, 0.014563263893127442, 0.014543871879577636, 0.014548992156982422, 0.014582783699035644, 0.014598143577575684, 0.014575615882873535, 0.014552063941955566, 0.014589952468872071, 0.01457254409790039, 0.014573568344116212, 0.030249984741210937, 0.014523391723632812, 0.014586943626403809, 0.014542783737182617, 0.014595104217529296, 0.014566368103027343, 0.014517248153686524, 0.014551039695739745, 0.014527487754821777, 0.014636032104492188, 0.014616576194763184, 0.014551072120666505, 0.014565343856811524, 0.014598143577575684, 0.0146626558303833, 0.014781439781188965, 0.015035391807556153, 0.014568448066711426, 0.01457254409790039, 0.014618623733520507, 0.014560256004333496, 0.014574591636657714, 0.014547967910766601, 0.014540800094604492, 0.014562303543090821, 0.014603263854980468, 0.01456332778930664, 0.014577664375305176, 0.014547967910766601, 0.01459404754638672, 0.01458790397644043, 0.014542847633361817, 0.014595071792602539, 0.01458790397644043, 0.014665792465209961, 0.014555071830749512, 0.014537728309631348, 0.014530559539794922, 0.014581760406494141, 0.014584832191467285, 0.014564352035522461, 0.014573568344116212, 0.014548992156982422, 0.014700544357299805, 0.014585856437683106, 0.014574591636657714, 0.014560256004333496, 0.014617600440979005, 0.014556159973144531, 0.014578687667846679, 0.01455513572692871, 0.01466982364654541, 0.014557184219360352, 0.014561280250549317, 0.014525440216064453, 0.014565376281738282, 0.014586879730224609, 0.0145664644241333, 0.014582719802856445, 0.014539775848388671, 0.014560256004333496, 0.01460428810119629, 0.014592000007629394, 0.030311424255371092, 0.014531583786010742, 0.014569472312927247, 0.014636032104492188, 0.014564352035522461, 0.014538751602172852, 0.01458892822265625, 0.014531583786010742, 0.014553088188171387, 0.014525440216064453, 0.014530624389648437, 0.014523327827453613, 0.014575615882873535, 0.014558208465576173, 0.014516223907470703, 0.014637056350708008, 0.014518272399902344, 0.014577664375305176, 0.01495961570739746, 0.014617600440979005, 0.014619711875915528, 0.014569408416748047, 0.014639103889465332, 0.014576640129089356, 0.014560256004333496, 
0.014568448066711426, 0.0145797119140625, 0.014550016403198243, 0.014577664375305176, 0.014582783699035644, 0.014611488342285156, 0.014558176040649413, 0.014578687667846679, 0.014569472312927247, 0.014663680076599121, 0.014584832191467285, 0.014565376281738282, 0.01456332778930664, 0.014550016403198243, 0.014538751602172852, 0.014530559539794922, 0.014534655570983887, 0.014559231758117675, 0.01455513572692871, 0.014628864288330079, 0.014585856437683106, 0.014578687667846679, 0.014625791549682618, 0.014540800094604492, 0.014582783699035644, 0.014582783699035644, 0.014595071792602539, 0.014568448066711426, 0.014524415969848633, 0.014545920372009278, 0.014603263854980468, 0.01457049560546875, 0.014561280250549317, 0.014623744010925293, 0.014557184219360352, 0.014583807945251465, 0.014567487716674805, 0.014582719802856445, 0.03019980812072754, 0.014551039695739745, 0.01457151985168457, 0.014541824340820313, 0.014585856437683106, 0.01539891242980957, 0.015851519584655763, 0.014816255569458007, 0.014586879730224609, 0.014638079643249511, 0.014639103889465332, 0.014531583786010742, 0.014663680076599121, 0.014520319938659668, 0.014543871879577636, 0.014617600440979005, 0.014495743751525878, 0.01458073616027832, 0.014540800094604492, 0.014554112434387208, 0.014567423820495605, 0.01455513572692871, 0.014504960060119629, 0.01479475212097168, 0.015308799743652344, 0.014611455917358398, 0.014626815795898437, 0.014567423820495605, 0.014535679817199706, 0.01460223960876465, 0.01458892822265625, 0.014566399574279786, 0.014566399574279786, 0.014557184219360352, 0.014724096298217774, 0.014551039695739745, 0.014535712242126466, 0.014597087860107423, 0.014573599815368652, 0.014573535919189452, 0.014564352035522461, 0.01457049560546875, 0.014740480422973632, 0.014905344009399414, 0.014684160232543946, 0.014547967910766601, 0.014768128395080566, 0.015746047973632812, 0.014673919677734374, 0.014562303543090821, 0.014558208465576173, 0.014558208465576173, 0.01455513572692871, 0.014536704063415527, 0.01458790397644043, 0.014529536247253418, 0.014589952468872071, 0.014554112434387208, 0.014553088188171387, 0.014586879730224609, 0.014547967910766601, 0.014573599815368652, 0.014557151794433594, 0.03037593650817871, 0.014526464462280274, 0.014798848152160645, 0.014523391723632812, 0.01459404754638672, 0.014567423820495605, 0.014531583786010742, 0.014575615882873535, 0.014536704063415527, 0.014520319938659668, 0.014532608032226562, 0.014419967651367188, 0.014387200355529785, 0.01439027214050293, 0.014443519592285157, 0.014552063941955566, 0.014498815536499024, 0.014529536247253418, 0.014651391983032227, 0.014534655570983887, 0.014548992156982422, 0.014628864288330079, 0.014568448066711426, 0.01458790397644043, 0.01457151985168457, 0.014544896125793457, 0.014608384132385254, 0.014728192329406739, 0.014638079643249511, 0.014592000007629394, 0.014573568344116212, 0.014636032104492188, 0.014647295951843262, 0.014457856178283691, 0.014402560234069824, 0.014632960319519044, 0.014524415969848633, 0.01457049560546875, 0.01459712028503418, 0.014574591636657714, 0.014561280250549317, 0.01458790397644043, 0.014598143577575684, 0.014618623733520507, 0.014533632278442383, 0.014523391723632812, 0.014573568344116212, 0.01456332778930664, 0.01457151985168457, 0.01458073616027832, 0.014542847633361817, 0.014547967910766601, 0.014568448066711426, 0.014557184219360352, 0.014553088188171387, 0.014541824340820313, 0.014518272399902344, 0.014573568344116212, 0.014523455619812012, 0.014740415573120117, 0.014707776069641113, 
0.014453696250915528, 0.014403583526611329]",tokens/s,67.13379997376835,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward 
hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa3e-125f61b1450fdd331a65c1d8;e0614d6c-abd0-498c-b212-f8d285a40174) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. 
If you are trying to create or update content, make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4294.987776,14621.868032,0.0,13975.420928,13366.068224,s,10,16.495098876953126,1.6495098876953125,0.0005626635207808348,1.6493680419921875,1.6498059448242186,1.6504339782714843,1.650936405029297,"[1.65106201171875, 1.6493150634765625, 1.6490108642578125, 1.64951513671875, 1.6496663818359374, 1.6489195556640626, 1.649384521484375, 1.6492862548828124, 1.6495875244140625, 1.6493515625]",tokens/s,155.19761470340868,kWh,1.9482200029823513e-05,1.067632958434842e-05,9.151657321320173e-05,0.00012167510282737365,tokens/kWh,2103963.7037595077,MB,4294.987776,14621.868032,0.0,13975.420928,13814.414848,s,10,977.7317578124998,97.77317578125,0.006501026483324052,97.774078125,97.7795859375,97.78208203125,97.78407890625,"[97.7740859375, 97.76746875, 97.7754296875, 97.784578125, 97.7782421875, 97.7740703125, 97.77903125, 97.76209375, 97.764875, 97.7718828125]",tokens/s,0.6443485086436308,kWh,0.0011542207027475041,0.0006326151578875942,0.005356944729996799,0.007143780590631898,tokens/kWh,8818.859874086276,,s,629,991.0235808105483,1.5755541825286914,0.19600214423351972,1.55186181640625,1.5529105224609374,1.5533291259765625,3.2017358984375,"[1.5526031494140624, 1.5511746826171875, 1.5512442626953125, 1.5510794677734374, 1.55169482421875, 1.5518966064453126, 1.5526973876953125, 1.5532349853515626, 1.55080810546875, 1.55123095703125, 1.5509954833984374, 1.5513375244140626, 1.551034423828125, 1.552995361328125, 1.5513057861328126, 1.55249462890625, 1.5510445556640624, 1.551331298828125, 1.5506575927734374, 1.55245263671875, 1.5513251953125, 1.5512381591796875, 1.551388671875, 1.5524095458984375, 1.5524761962890625, 1.5533260498046875, 1.553095703125, 1.5521700439453125, 1.551240234375, 1.551688720703125, 1.5509698486328125, 1.55129443359375, 1.5527669677734375, 1.5515802001953125, 1.551595458984375, 1.5514736328125, 1.552204833984375, 1.5519498291015625, 1.55194677734375, 1.55318994140625, 1.55307421875, 1.5523338623046874, 1.5521546630859375, 1.55177978515625, 1.5524197998046876, 1.5533404541015625, 1.5529237060546874, 1.5524351806640626, 1.55150439453125, 1.55194775390625, 1.552548828125, 1.5523931884765625, 1.55225390625, 1.5514603271484375, 1.5512484130859374, 1.5517244873046876, 1.5518822021484375, 1.552468017578125, 1.55346630859375, 1.5518555908203124, 1.5518289794921876, 1.5518668212890625, 3.202376708984375, 1.55222216796875, 1.5525550537109376, 1.551751220703125, 1.5510302734375, 1.55141943359375, 1.5515596923828125, 1.5518863525390625, 1.5530260009765624, 1.5516702880859374, 1.5516968994140625, 1.55188330078125, 1.55222119140625, 1.5524822998046874, 1.5535472412109375, 1.552794677734375, 1.550878662109375, 1.551520751953125, 1.55161083984375, 1.5515064697265626, 1.5530260009765624, 1.5537049560546874, 1.5521280517578124, 1.5518392333984374, 1.5516507568359375, 1.5517174072265625, 
1.5512647705078124, 1.5524556884765626, 1.5519549560546875, 1.552384033203125, 1.5521689453125, 1.5524669189453124, 1.5522928466796875, 1.5529041748046875, 1.552691162109375, 1.55226318359375, 1.552175048828125, 1.5517655029296875, 1.5517972412109375, 1.55106201171875, 1.5525765380859375, 1.551730712890625, 1.5510968017578124, 1.550551025390625, 1.5507015380859375, 1.5512001953125, 1.5515802001953125, 1.5522396240234375, 1.5507763671875, 1.5507752685546874, 1.551515625, 1.551489990234375, 1.5510538330078125, 1.552689208984375, 1.5505909423828126, 1.55152587890625, 1.551647705078125, 1.5511357421875, 1.5508541259765625, 1.5514869384765626, 1.5528365478515624, 1.5521771240234374, 1.5512657470703124, 3.201815673828125, 1.5507783203125, 1.5535196533203126, 1.55132421875, 1.551177734375, 1.55173681640625, 1.5521044921875, 1.5513375244140626, 1.5526204833984374, 1.5516661376953125, 1.5514337158203124, 1.551310791015625, 1.5512391357421875, 1.5521761474609375, 1.5526307373046875, 1.553349609375, 1.551321044921875, 1.5509493408203125, 1.5514971923828125, 1.5512432861328125, 1.551467529296875, 1.552627685546875, 1.552321533203125, 1.5515975341796875, 1.5512330322265624, 1.55135595703125, 1.5512811279296874, 1.55175732421875, 1.5532247314453125, 1.5510487060546876, 1.55163232421875, 1.551236083984375, 1.55205224609375, 1.551705078125, 1.5527781982421875, 1.55230517578125, 1.5521064453125, 1.5526748046875, 1.551935546875, 1.5518392333984374, 1.553038330078125, 1.552990234375, 1.5518115234375, 1.551340576171875, 1.5512269287109375, 1.5518863525390625, 1.551740966796875, 1.553649658203125, 1.551879150390625, 1.5528212890625, 1.552669677734375, 1.5528980712890625, 1.5524495849609374, 1.5538206787109374, 1.55340185546875, 1.5514490966796874, 1.55186181640625, 1.5515064697265626, 1.5515709228515624, 1.5511982421875, 1.5528519287109375, 1.5516334228515625, 1.5513446044921875, 3.2030166015625, 1.55180029296875, 1.552606201171875, 1.5531837158203126, 1.551551513671875, 1.55184130859375, 1.5515115966796875, 1.551283203125, 1.551794189453125, 1.5518525390625, 1.5510743408203125, 1.551730712890625, 1.551551513671875, 1.5510927734375, 1.550951416015625, 1.5540325927734375, 1.55168359375, 1.5516273193359376, 1.5512708740234376, 1.5510076904296874, 1.55110302734375, 1.552822265625, 1.5531990966796876, 1.551572021484375, 1.552153564453125, 1.5514736328125, 1.5518802490234376, 1.551657958984375, 1.5528406982421874, 1.5522344970703126, 1.5521812744140624, 1.5515064697265626, 1.55281103515625, 1.55241064453125, 1.552889892578125, 1.5524730224609375, 1.5517911376953124, 1.5517276611328126, 1.5515074462890626, 1.550802978515625, 1.552126953125, 1.55321240234375, 1.5522139892578124, 1.5518084716796876, 1.5521444091796874, 1.5523614501953125, 1.55158837890625, 1.5536046142578126, 1.5520296630859376, 1.5530731201171875, 1.5527608642578126, 1.552880615234375, 1.5533311767578124, 1.5541422119140624, 1.55335888671875, 1.551869873046875, 1.55161083984375, 1.5517900390625, 1.5514500732421874, 1.551742919921875, 1.553448974609375, 1.551916015625, 1.552142333984375, 3.20272802734375, 1.5516005859375, 1.55409814453125, 1.552426025390625, 1.5515802001953125, 1.5510958251953124, 1.5515125732421875, 1.55104052734375, 1.5517767333984376, 1.5526820068359375, 1.5530455322265626, 1.5519539794921875, 1.5517962646484376, 1.552195556640625, 1.5515853271484374, 1.5515821533203125, 1.5523768310546875, 1.5513426513671875, 1.551593505859375, 1.5516016845703124, 1.5524136962890625, 1.551958984375, 1.5531632080078126, 1.5515289306640625, 
1.55102099609375, 1.5516641845703125, 1.5517286376953126, 1.5518515625, 1.5519652099609376, 1.5515596923828125, 1.55163134765625, 1.5519027099609375, 1.5522764892578125, 1.5519969482421876, 1.5524884033203126, 1.5527935791015626, 1.5509124755859376, 1.55156884765625, 1.551499267578125, 1.5518935546875, 1.551709228515625, 1.552568359375, 1.552501708984375, 1.5518760986328124, 1.5515699462890624, 1.551488037109375, 1.5521546630859375, 1.5521934814453124, 1.5524454345703125, 1.553112060546875, 1.5525970458984375, 1.551457275390625, 1.551424560546875, 1.5510076904296874, 1.5529554443359375, 1.5528099365234376, 1.552541748046875, 1.5528201904296874, 1.5534459228515625, 1.5517440185546876, 1.5520296630859376, 1.5527730712890624, 1.55129345703125, 3.201416259765625, 1.5519610595703126, 1.551810546875, 1.5517440185546876, 1.551605712890625, 1.5520235595703125, 1.5515657958984375, 1.552027587890625, 1.551488037109375, 1.5524935302734375, 1.5531314697265626, 1.5514276123046875, 1.5520655517578126, 1.5519825439453125, 1.551853515625, 1.5516712646484374, 1.5525252685546875, 1.551730712890625, 1.5519703369140625, 1.55103125, 1.55213720703125, 1.552764892578125, 1.553755126953125, 1.5528775634765626, 1.5516845703125, 1.5517542724609374, 1.5515330810546875, 1.5523287353515625, 1.552110595703125, 1.5518494873046875, 1.5517244873046876, 1.55184228515625, 1.551310791015625, 1.5519027099609375, 1.5507896728515624, 1.552606201171875, 1.5512637939453124, 1.5521474609375, 1.5516856689453125, 1.551458251953125, 1.5516610107421875, 1.552607177734375, 1.5517041015625, 1.5525919189453126, 1.55161083984375, 1.552396240234375, 1.550496826171875, 1.551563720703125, 1.5521248779296875, 1.5515289306640625, 1.5516282958984375, 1.5522191162109376, 1.5513795166015625, 1.551456298828125, 1.5522652587890624, 1.5525396728515626, 1.5528642578125, 1.5518760986328124, 1.5516190185546874, 1.5520552978515625, 1.5518668212890625, 1.5532369384765625, 1.552636962890625, 3.202765869140625, 1.5513466796875, 1.553580078125, 1.5520194091796875, 1.5519456787109376, 1.5510753173828125, 1.5518084716796876, 1.55133544921875, 1.5519498291015625, 1.5519241943359374, 1.5514061279296876, 1.551531005859375, 1.5519334716796875, 1.5514398193359376, 1.5528131103515626, 1.55154638671875, 1.55287451171875, 1.552290771484375, 1.5517265625, 1.550793701171875, 1.5515279541015625, 1.5525499267578124, 1.5527659912109375, 1.5533721923828125, 1.5519078369140624, 1.55173583984375, 1.5521668701171876, 1.5522508544921876, 1.5519713134765625, 1.5531519775390625, 1.5524608154296875, 1.551698974609375, 1.552278564453125, 1.55198974609375, 1.5515361328125, 1.55272705078125, 1.551920166015625, 1.552759765625, 1.5514500732421874, 1.5516590576171876, 1.551510498046875, 1.5519405517578124, 1.552752685546875, 1.55184130859375, 1.55072509765625, 1.5517808837890625, 1.5512965087890624, 1.552079833984375, 1.552720947265625, 1.5514542236328126, 1.553217529296875, 1.5522928466796875, 1.5522979736328124, 1.551899658203125, 1.551836181640625, 1.5530526123046875, 1.55264306640625, 1.552206787109375, 1.55278955078125, 1.5506513671875, 1.5514705810546876, 1.552828369140625, 1.5512606201171875, 3.20153076171875, 1.5501854248046876, 1.5515657958984375, 1.5514951171875, 1.552109619140625, 1.552099365234375, 1.552288818359375, 1.5523583984375, 1.5526461181640625, 1.5514920654296875, 1.5523031005859376, 1.5516446533203125, 1.55148291015625, 1.5517808837890625, 1.5515330810546875, 1.5520235595703125, 1.5527147216796875, 1.5511695556640626, 1.552215087890625, 1.5506851806640625, 
1.5511387939453125, 1.5506053466796874, 1.55132421875, 1.5518238525390624, 1.551515625, 1.5510947265625, 1.5512073974609375, 1.5515657958984375, 1.5510599365234374, 1.5514132080078125, 1.551810546875, 1.55152490234375, 1.55114697265625, 1.5516077880859376, 1.5506268310546876, 1.5511910400390625, 1.5526318359375, 1.552217041015625, 1.551873046875, 1.5515443115234375, 1.551283203125, 1.551500244140625, 1.55306494140625, 1.5515596923828125, 1.5516978759765625, 1.5517235107421874, 1.551678466796875, 1.553006591796875, 1.5524617919921875, 1.5526624755859375, 1.5517911376953124, 1.5515576171875, 1.5515587158203126, 1.551556640625, 1.5509442138671874, 1.5528365478515624, 1.5521904296875, 1.55300244140625, 1.5519825439453125, 1.5525980224609375, 1.551983642578125, 1.5516201171875, 1.5528509521484375, 3.202553955078125, 1.551009765625, 1.5523553466796876, 1.5506207275390624, 1.5519232177734374, 1.551784912109375, 1.551542236328125, 1.551203369140625, 1.5513343505859376, 1.55032373046875, 1.5522550048828125, 1.5514869384765626, 1.551784912109375, 1.5508438720703126, 1.5520716552734375, 1.5515863037109374, 1.55217919921875, 1.552343017578125, 1.5512073974609375, 1.5511490478515626, 1.5520225830078125, 1.551941650390625, 1.5513641357421875, 1.5520562744140625, 1.5527935791015626, 1.553005615234375, 1.5519703369140625, 1.5519405517578124, 1.55236962890625, 1.5524075927734375, 1.552400390625, 1.5521392822265625, 1.5526983642578125, 1.5521812744140624, 1.5518033447265625, 1.5511234130859375, 1.5530147705078126, 1.552041015625, 1.5515872802734374, 1.5513333740234374, 1.5513599853515625, 1.5520286865234374, 1.5513743896484375, 1.55210546875, 1.5510108642578124, 1.551393798828125, 1.55116845703125, 1.551873046875, 1.5514449462890625, 1.5518966064453126, 1.55174609375, 1.5518658447265625, 1.5518095703125, 1.5519150390625, 1.5517449951171876, 1.5523460693359374, 1.552300048828125, 1.552015380859375, 1.551193115234375, 1.5512535400390626, 1.5524495849609374, 1.55131494140625, 1.553142822265625, 3.20293701171875, 1.5507025146484374, 1.5517491455078125, 1.5505745849609376, 1.551394775390625, 1.5516416015625, 1.5522529296875, 1.5509012451171875, 1.551172607421875, 1.5507313232421875, 1.551066162109375, 1.5524403076171875, 1.552574462890625, 1.5519908447265625, 1.5519549560546875, 1.5521126708984374, 1.5522078857421875, 1.553606689453125, 1.552162841796875, 1.5526041259765626, 1.552289794921875, 1.5522764892578125, 1.55186376953125, 1.5506749267578126, 1.551815673828125, 1.5521044921875, 1.550876708984375, 1.5515504150390624, 1.550856201171875, 1.5514798583984375, 1.552759765625, 1.551753173828125, 1.5519580078125, 1.5519119873046876, 1.551141845703125, 1.551688720703125, 1.5519180908203125, 1.5516416015625, 1.5518433837890624, 1.5516488037109375, 1.55226416015625, 1.5521085205078125, 1.5519918212890624, 1.5524515380859376, 1.55057666015625, 1.5518023681640625, 1.5514920654296875, 1.5522559814453125, 1.551362060546875, 1.5525518798828124, 1.5522672119140626, 1.551783935546875, 1.551810546875, 1.5519447021484376, 1.5518751220703124, 1.5525919189453126, 1.5539844970703125, 1.5537889404296874, 1.5529072265625, 1.55158935546875, 1.55196826171875, 1.5523502197265624, 1.552722900390625]",tokens/s,0.6346973091049438,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2119.368704,2816.999424,0.0,2170.55232,1927.744512,s,10,2.4524682617187503,0.245246826171875,0.001090999616645648,0.2451712341308594,0.24625067138671874,0.24669122314453126,0.24704366455078125,"[0.24615277099609376, 0.24713177490234375, 0.24369766235351562, 0.24442691040039063, 0.244733154296875, 0.243721435546875, 0.24478985595703126, 0.2455526123046875, 0.24614064025878907, 0.24612144470214845]",tokens/s,1043.8463322684916,kWh,2.8755634550064334e-06,1.5756288392757208e-06,1.2476319504855795e-05,1.6927511799137947e-05,tokens/kWh,15123309.499807118,MB,2119.368704,2816.999424,0.0,2170.55232,2031.983104,s,10,142.79050195312502,14.279050195312502,0.0031195370707275648,14.27847802734375,14.28149794921875,14.284002392578124,14.286005947265625,"[14.2787724609375, 14.2797041015625, 14.28094140625, 14.27818359375, 14.2772841796875, 14.2808662109375, 14.2747421875, 14.275935546875, 14.2775654296875, 14.2865068359375]",tokens/s,4.412058164812777,kWh,0.00016860610004691852,9.240989681349414e-05,0.0007205406280193562,0.000981556624879769,tokens/kWh,64183.76525930624,,s,629,144.7668543243408,0.23015398143774374,0.029166744118704197,0.22656614685058593,0.22702059936523436,0.22731673583984374,0.47147623168945313,"[0.228210693359375, 0.2264954833984375, 0.2263234558105469, 0.2265917510986328, 0.22652210998535155, 0.2266122283935547, 0.22653952026367188, 0.22631117248535157, 0.22642892456054686, 0.22647091674804687, 0.22641664123535157, 0.22648934936523438, 0.2270392303466797, 0.22645555114746094, 0.22661325073242186, 0.22698086547851562, 0.22665011596679688, 0.22659788513183593, 0.2262640686035156, 0.22631219482421874, 0.22640025329589844, 0.22692658996582032, 0.22699212646484376, 0.22696038818359374, 0.22650674438476562, 0.22637158203125, 0.2266552276611328, 0.22633882141113282, 0.22646173095703126, 0.2265814666748047, 0.22644224548339845, 0.22677912902832031, 0.22628044128417968, 0.22633573913574218, 0.2265681915283203, 0.226693115234375, 0.22634597778320312, 0.2265016326904297, 0.2263531494140625, 0.22655078125, 0.22668800354003907, 0.2267904052734375, 0.22690406799316407, 0.2266306610107422, 0.2264575958251953, 0.22657638549804687, 0.22662553405761718, 0.22665113830566405, 0.22651187133789064, 0.22652723693847657, 0.22627635192871093, 0.22631219482421874, 0.22654464721679687, 0.22648934936523438, 0.2266378173828125, 0.22687333679199218, 0.22802943420410157, 0.2267176971435547, 0.2265917510986328, 0.22732595825195312, 0.2264842224121094, 0.22659481811523438, 0.47421746826171873, 0.22658047485351562, 0.22655282592773437, 0.22653030395507812, 0.22670950317382813, 0.22652517700195313, 0.22650469970703124, 0.22647705078125, 0.22634803771972656, 0.2266787872314453, 0.22657125854492188, 0.22652621459960937, 0.22686515808105467, 0.2263900146484375, 0.22639718627929686, 0.22662553405761718, 0.22654975891113283, 0.22652928161621094, 0.2263838653564453, 
0.22636647033691407, 0.2263582763671875, 0.22654975891113283, 0.22638899230957032, 0.2263961639404297, 0.22621388244628907, 0.22636749267578124, 0.22646170043945313, 0.22642381286621094, 0.22654156494140626, 0.2262988739013672, 0.22632652282714844, 0.2261411895751953, 0.2261012420654297, 0.22648838806152344, 0.22639097595214844, 0.2262917175292969, 0.22649856567382812, 0.2264145965576172, 0.22670130920410156, 0.2264453125, 0.227240966796875, 0.22686003112792968, 0.22684364318847655, 0.22670335388183593, 0.2264524841308594, 0.2268170166015625, 0.22824140930175782, 0.2270392303466797, 0.22767205810546876, 0.22691123962402343, 0.22711602783203125, 0.22689286804199219, 0.22666029357910156, 0.2267361297607422, 0.2266439666748047, 0.22683135986328126, 0.22709965515136718, 0.22691226196289063, 0.22731365966796874, 0.22672998046875, 0.22711911010742186, 0.22663475036621095, 0.22664601135253906, 0.47127346801757813, 0.2265681915283203, 0.226840576171875, 0.22668389892578125, 0.22649650573730468, 0.22704742431640626, 0.22672998046875, 0.22683135986328126, 0.22644326782226562, 0.2265743408203125, 0.226555908203125, 0.22665933227539062, 0.22649754333496094, 0.22686720275878905, 0.22685696411132814, 0.2270627899169922, 0.22701158142089845, 0.22654360961914063, 0.227631103515625, 0.22625177001953126, 0.2269306945800781, 0.226946044921875, 0.2268231658935547, 0.2272542724609375, 0.2273054656982422, 0.22650982666015626, 0.22637158203125, 0.22674432373046874, 0.22774681091308593, 0.22656512451171876, 0.22653132629394532, 0.22656101989746094, 0.22630706787109375, 0.22627430725097655, 0.22650367736816407, 0.2263654327392578, 0.2271293487548828, 0.2264596405029297, 0.2263173065185547, 0.2265753631591797, 0.22677912902832031, 0.22655282592773437, 0.22645350646972656, 0.2264954833984375, 0.22634597778320312, 0.22660403442382812, 0.226482177734375, 0.22736895751953126, 0.22658149719238282, 0.22649037170410155, 0.22647296142578124, 0.2265016326904297, 0.22649650573730468, 0.22651187133789064, 0.22654464721679687, 0.22644224548339845, 0.22651084899902343, 0.22658662414550781, 0.22652210998535155, 0.22644940185546875, 0.2268712921142578, 0.226334716796875, 0.22724812316894533, 0.4715550842285156, 0.22658047485351562, 0.22659788513183593, 0.22659075927734376, 0.2265425567626953, 0.22664601135253906, 0.22673306274414062, 0.22659481811523438, 0.2266234893798828, 0.22643096923828124, 0.2263951416015625, 0.22653439331054687, 0.22656716918945313, 0.22648626708984376, 0.2264842224121094, 0.2265323486328125, 0.22739251708984376, 0.22662757873535155, 0.22650778198242189, 0.22635621643066406, 0.22640025329589844, 0.22646783447265625, 0.2263951416015625, 0.2265374755859375, 0.22659071350097656, 0.22637158203125, 0.22640229797363282, 0.22639820861816407, 0.2262640686035156, 0.22646578979492188, 0.22665728759765624, 0.22639411926269531, 0.2265692138671875, 0.22644940185546875, 0.22637055969238282, 0.22653132629394532, 0.22651084899902343, 0.22662042236328125, 0.226440185546875, 0.22653952026367188, 0.2264248352050781, 0.22653132629394532, 0.22682112121582032, 0.22637464904785157, 0.22807859802246094, 0.22658047485351562, 0.22653543090820313, 0.22662144470214843, 0.2265364532470703, 0.22637158203125, 0.2266941375732422, 0.2265016326904297, 0.22764134216308593, 0.2268037109375, 0.22737408447265625, 0.2267689666748047, 0.22669818115234375, 0.22641868591308595, 0.2270064697265625, 0.226808837890625, 0.22760447692871094, 0.2266941375732422, 0.22663475036621095, 0.4716912536621094, 0.226555908203125, 0.22654669189453125, 
0.22630400085449218, 0.22651904296875, 0.22646885681152343, 0.2263726043701172, 0.2264627227783203, 0.226376708984375, 0.22640956115722657, 0.22651280212402344, 0.2264944610595703, 0.22653030395507812, 0.22637055969238282, 0.22640538024902343, 0.22671359252929688, 0.22652825927734374, 0.22724607849121095, 0.2267904052734375, 0.22637464904785157, 0.2265006103515625, 0.2266173400878906, 0.2275594177246094, 0.22663168334960937, 0.22695526123046875, 0.22644326782226562, 0.2264453125, 0.22670541381835937, 0.22647193908691407, 0.2264842224121094, 0.22652928161621094, 0.22634495544433594, 0.22692658996582032, 0.2264575958251953, 0.22655078125, 0.22647705078125, 0.22659584045410155, 0.22641151428222656, 0.22638490295410157, 0.2264320068359375, 0.22686924743652342, 0.22668185424804688, 0.226555908203125, 0.22660096740722657, 0.2265364532470703, 0.22648013305664064, 0.22652006530761717, 0.22684774780273437, 0.22667674255371092, 0.2264524841308594, 0.2264145965576172, 0.22648320007324219, 0.2265518035888672, 0.22661631774902344, 0.22665216064453125, 0.22664909362792968, 0.22723379516601563, 0.22827008056640624, 0.22659890747070313, 0.22667776489257813, 0.22666752624511718, 0.22652517700195313, 0.22685696411132814, 0.47040716552734374, 0.2265364532470703, 0.22670335388183593, 0.22675152587890626, 0.22674327087402343, 0.22674124145507812, 0.22666854858398439, 0.2265016326904297, 0.22641664123535157, 0.226408447265625, 0.22644837951660157, 0.22657331848144532, 0.22669926452636718, 0.22673817443847658, 0.22666648864746095, 0.22652006530761717, 0.22712115478515624, 0.2267709503173828, 0.22682829284667969, 0.2264514617919922, 0.22717543029785156, 0.22672691345214843, 0.22699417114257814, 0.22659379577636718, 0.22670541381835937, 0.22654464721679687, 0.22634701538085938, 0.22648320007324219, 0.226376708984375, 0.22674330139160156, 0.2265518035888672, 0.22656716918945313, 0.22633882141113282, 0.22662144470214843, 0.22681292724609375, 0.22643609619140626, 0.22689791870117187, 0.22637055969238282, 0.22802841186523437, 0.22646885681152343, 0.22660301208496095, 0.22652006530761717, 0.22687744140625, 0.226482177734375, 0.22646476745605468, 0.22653132629394532, 0.2266787872314453, 0.22718771362304688, 0.22725325012207032, 0.22660914611816407, 0.22652517700195313, 0.2265364532470703, 0.2266623992919922, 0.2268590087890625, 0.2265999298095703, 0.2266439666748047, 0.22733311462402345, 0.2267484130859375, 0.22656101989746094, 0.22658047485351562, 0.22690713500976561, 0.22647296142578124, 0.2265333709716797, 0.4723138427734375, 0.22668698120117187, 0.22651699829101563, 0.22642994689941406, 0.22652006530761717, 0.22658355712890624, 0.22648832702636718, 0.22646067810058593, 0.22636134338378905, 0.2262917175292969, 0.22641766357421875, 0.22637362670898437, 0.226440185546875, 0.22636851501464844, 0.2264944610595703, 0.2264596405029297, 0.22644122314453125, 0.22647398376464845, 0.22640127563476561, 0.22637977600097656, 0.22642892456054686, 0.22646067810058593, 0.22642994689941406, 0.22729216003417968, 0.2265856018066406, 0.22656614685058593, 0.22651187133789064, 0.22686924743652342, 0.22699212646484376, 0.22667776489257813, 0.22668083190917968, 0.22647091674804687, 0.226440185546875, 0.2262794189453125, 0.22660198974609375, 0.22650778198242189, 0.227240966796875, 0.22670130920410156, 0.2265927734375, 0.22639820861816407, 0.22648524475097656, 0.22650265502929687, 0.22666444396972657, 0.2264637451171875, 0.22673306274414062, 0.2266306610107422, 0.2264524841308594, 0.22670541381835937, 0.22680677795410156, 0.22637773132324218, 
0.22640333557128905, 0.22649754333496094, 0.22653030395507812, 0.22647602844238282, 0.2265180206298828, 0.2267105255126953, 0.2264811553955078, 0.2263408660888672, 0.22684159851074218, 0.226840576171875, 0.227162109375, 0.22650572204589844, 0.22680064392089844, 0.47229953002929687, 0.22655386352539061, 0.22669209289550782, 0.22669107055664062, 0.22653849792480468, 0.226914306640625, 0.22678323364257813, 0.22689485168457033, 0.2268078155517578, 0.22660812377929687, 0.22647296142578124, 0.22665318298339843, 0.22659584045410155, 0.2267166748046875, 0.22655897521972657, 0.22654566955566408, 0.2271068115234375, 0.22682009887695312, 0.22680575561523436, 0.22656204223632812, 0.2264217529296875, 0.22652108764648438, 0.22662553405761718, 0.22673408508300782, 0.22695730590820312, 0.22662757873535155, 0.2265364532470703, 0.22667570495605469, 0.2265743408203125, 0.22657125854492188, 0.22663679504394532, 0.2263592987060547, 0.22638490295410157, 0.22639411926269531, 0.2264514617919922, 0.22640640258789063, 0.22657125854492188, 0.22636134338378905, 0.22635008239746093, 0.22646681213378905, 0.2263756866455078, 0.22643507385253905, 0.226661376953125, 0.22654054260253906, 0.2264842224121094, 0.2266480712890625, 0.22652621459960937, 0.22654054260253906, 0.22649958801269532, 0.22644326782226562, 0.22654156494140626, 0.2265886688232422, 0.22666546630859374, 0.22688870239257813, 0.2266972198486328, 0.22649856567382812, 0.22649958801269532, 0.22640847778320314, 0.22657020568847655, 0.22655078125, 0.2271825866699219, 0.22659686279296876, 0.22644224548339845, 0.4732682189941406, 0.2267166748046875, 0.22669004821777344, 0.2266112060546875, 0.22709043884277344, 0.2265323486328125, 0.2266306610107422, 0.2265927734375, 0.22674636840820311, 0.22685285949707032, 0.22652723693847657, 0.2264954833984375, 0.22642381286621094, 0.22641253662109376, 0.22645452880859376, 0.2264248352050781, 0.22657125854492188, 0.22701670837402343, 0.22656716918945313, 0.2264268798828125, 0.22636441040039063, 0.22659379577636718, 0.22651596069335939, 0.22646476745605468, 0.22653543090820313, 0.22654873657226562, 0.22635110473632813, 0.2264453125, 0.22657740783691407, 0.22701568603515626, 0.22657331848144532, 0.226808837890625, 0.22729318237304688, 0.226840576171875, 0.22669004821777344, 0.22642994689941406, 0.22665216064453125, 0.226408447265625, 0.2264637451171875, 0.22659071350097656, 0.22760140991210936, 0.2266787872314453, 0.22653952026367188, 0.2264842224121094, 0.22630911254882813, 0.22652825927734374, 0.22639820861816407, 0.22657023620605468, 0.22674330139160156, 0.2271262664794922, 0.22665728759765624, 0.2265333709716797, 0.22666444396972657, 0.22674227905273436, 0.22675149536132813, 0.2265364532470703, 0.22639820861816407, 0.22652210998535155, 0.2266112060546875, 0.22665113830566405, 0.22656614685058593, 0.2264627227783203, 0.22642073059082032, 0.4729661560058594, 0.22653132629394532, 0.22665420532226563, 0.22658566284179688, 0.22666950988769533, 0.22666035461425782, 0.2266480712890625, 0.2267525177001953, 0.22648934936523438, 0.2266399688720703, 0.2264431610107422, 0.22651187133789064, 0.22709657287597657, 0.22754713439941407, 0.22637055969238282, 0.22647091674804687, 0.22653439331054687, 0.22654975891113283, 0.2265999298095703, 0.22654360961914063, 0.22677197265625, 0.22703718566894532, 0.2268784637451172, 0.22653543090820313, 0.22652210998535155, 0.22646681213378905, 0.2276433868408203, 0.2268784637451172, 0.2265886688232422, 0.22731878662109375, 0.2269306945800781, 0.22686822509765625, 0.22654464721679687, 0.22657945251464845, 
0.2267484130859375, 0.22667263793945314, 0.22669209289550782, 0.22645350646972656, 0.22654566955566408, 0.22655282592773437, 0.22689996337890625, 0.2264698944091797, 0.22683544921875, 0.22653952026367188, 0.22666035461425782, 0.22665420532226563, 0.22662655639648438, 0.2269420166015625, 0.2271200714111328, 0.2266972198486328, 0.22657331848144532, 0.22654368591308593, 0.22663877868652343, 0.22703616333007812, 0.22663372802734374, 0.22663270568847657, 0.22767308044433593, 0.22783692932128907, 0.22670130920410156, 0.2265927734375, 0.2266378173828125, 0.22650469970703124, 0.22659584045410155]",tokens/s,4.344917232163973,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1231.42144,879.230976,0.0,232.783872,169.719808,s,10,0.35891270065307623,0.035891270065307616,0.001166922312453486,0.03606051254272461,0.03654228057861328,0.037586180877685546,0.03842130111694336,"[0.03863008117675781, 0.03461849594116211, 0.03453500747680664, 0.034383518218994144, 0.03608425521850586, 0.03613267135620117, 0.036015392303466794, 0.036310302734375, 0.036166206359863284, 0.03603676986694336]",tokens/s,7132.653693619183,kWh,4.2057203039815654e-07,2.3039066429929688e-07,8.883686426833978e-07,1.5393313373808513e-06,tokens/kWh,166305975.7073354,MB,1231.42144,879.230976,0.0,232.783872,199.793152,s,10,21.796639404296876,2.179663940429687,0.03379298018028798,2.1997751464843747,2.2025270751953125,2.2029048950195316,2.2032071508789066,"[2.15886328125, 2.11872216796875, 2.1151240234375, 2.1993291015625, 2.20328271484375, 2.20231494140625, 2.20022119140625, 2.201608642578125, 2.194730224609375, 2.202443115234375]",tokens/s,28.903538215886858,kWh,2.6054930089725813e-05,1.4278879354016576e-05,5.1935423616310185e-05,9.22692330600526e-05,tokens/kWh,682784.4765870875,,s,629,22.081034248352047,0.035104982906760014,0.004313579122612795,0.03483135986328125,0.03513838195800781,0.035482418823242184,0.06834089904785158,"[0.035579902648925785, 0.0354150390625, 0.035542015075683595, 0.0358737907409668, 0.03517337417602539, 0.035922943115234376, 0.03607551956176758, 0.03511808013916016, 0.03547443389892578, 0.035765247344970705, 0.035937278747558594, 0.03575091171264649, 0.03546214294433594, 0.04169113540649414, 0.03495731353759766, 0.03391795349121094, 0.033565696716308595, 0.03336908721923828, 0.03351347351074219, 0.03344179153442383, 0.03361177444458008, 0.03342745590209961, 0.03343564987182617, 0.03342745590209961, 0.03340185546875, 0.03341926574707031, 0.03338649749755859, 0.03336294555664063, 0.033552383422851564, 0.03352883148193359, 0.03450368118286133, 0.03436032104492188, 0.034111488342285154, 0.03450265502929688, 0.03355136108398438, 0.034164737701416016, 0.03474431991577148, 0.034351104736328124, 0.03521331024169922, 0.035111934661865234, 0.03381760025024414, 0.034427902221679685, 0.033756160736083986, 0.0334510383605957, 0.0331734733581543, 0.03338854217529297, 0.03346944046020508, 0.03350425720214844, 0.03346636962890625, 
0.03356671905517578, 0.03343769454956055, 0.03313971328735352, 0.03326259231567383, 0.03314790344238281, 0.03320729446411133, 0.03347967910766601, 0.03352678298950195, 0.033538047790527346, 0.03310182571411133, 0.03322163009643555, 0.033478656768798826, 0.03340800094604492, 0.06847795104980468, 0.03334348678588867, 0.03356576156616211, 0.03346118545532226, 0.033667072296142575, 0.03376025772094727, 0.033555454254150394, 0.03353497695922852, 0.03349913787841797, 0.03347455978393555, 0.03362713623046875, 0.033650688171386715, 0.03344998550415039, 0.03351244735717773, 0.033532928466796875, 0.033718273162841796, 0.033614849090576174, 0.03364659118652344, 0.033478656768798826, 0.03355136108398438, 0.03344998550415039, 0.034282497406005856, 0.03367833709716797, 0.033516544342041016, 0.03338854217529297, 0.033584129333496096, 0.033539070129394534, 0.033326080322265625, 0.03335987091064453, 0.03366604614257813, 0.03334143829345703, 0.033258495330810545, 0.03334041595458984, 0.03343155288696289, 0.03324620819091797, 0.03349401473999023, 0.03338547134399414, 0.03338137435913086, 0.0335175666809082, 0.03442892837524414, 0.03411558532714844, 0.034249729156494144, 0.03337011337280273, 0.03331174468994141, 0.033413120269775394, 0.033301502227783206, 0.03349606323242187, 0.03334143829345703, 0.03332403182983398, 0.033527809143066405, 0.0335206413269043, 0.03582156753540039, 0.035253246307373046, 0.03405209732055664, 0.03366604614257813, 0.033827838897705076, 0.03394559860229492, 0.03346636962890625, 0.03348582458496094, 0.03372544097900391, 0.03336703872680664, 0.03363942337036133, 0.033410049438476565, 0.06798847961425782, 0.033957889556884766, 0.03349401473999023, 0.03372236633300781, 0.033659934997558594, 0.033262561798095704, 0.033339393615722655, 0.03347148895263672, 0.03481190490722656, 0.03399270248413086, 0.033448993682861326, 0.033379295349121096, 0.03344179153442383, 0.03343155288696289, 0.0330618896484375, 0.03346124649047852, 0.033410049438476565, 0.033430526733398434, 0.033495040893554685, 0.03334656143188477, 0.03343564987182617, 0.033446910858154294, 0.033513534545898435, 0.03330553436279297, 0.03338547134399414, 0.033484798431396484, 0.03334147262573242, 0.03339465713500977, 0.03335782241821289, 0.03341516876220703, 0.033355777740478515, 0.03362713623046875, 0.03338854217529297, 0.033360897064208986, 0.03363020706176758, 0.03340800094604492, 0.03336505508422852, 0.03361990356445312, 0.03336191940307617, 0.035092479705810545, 0.034958335876464845, 0.03446783828735352, 0.033719329833984374, 0.0333260498046875, 0.03360768127441406, 0.03371417617797851, 0.03365785598754883, 0.033653759002685545, 0.03359027099609375, 0.03361177444458008, 0.033465343475341795, 0.033562625885009766, 0.03357183837890625, 0.03344076919555664, 0.0335093765258789, 0.03359231948852539, 0.033463294982910154, 0.03351551818847656, 0.03339059066772461, 0.0335022087097168, 0.03354009628295898, 0.033432575225830076, 0.033476608276367184, 0.06785228729248047, 0.038763519287109374, 0.03499212646484375, 0.03476172637939453, 0.03466035079956055, 0.03442892837524414, 0.03445862579345703, 0.0349194221496582, 0.03481087875366211, 0.03482009506225586, 0.03474227142333984, 0.03470336151123047, 0.03492768096923828, 0.0348221435546875, 0.034755519866943356, 0.03479654312133789, 0.035053569793701174, 0.03477503967285156, 0.03513651275634765, 0.03483647918701172, 0.03467161560058594, 0.03479244613647461, 0.034738174438476564, 0.03485184097290039, 0.03481292724609375, 0.03513651275634765, 0.03499728012084961, 0.034848735809326174, 
0.03482009506225586, 0.03604684829711914, 0.03580108642578125, 0.034900993347167966, 0.03499622344970703, 0.03482009506225586, 0.03486617660522461, 0.034716670989990234, 0.034871295928955076, 0.03480883026123047, 0.03496857452392578, 0.034961406707763674, 0.03484262466430664, 0.03478732681274414, 0.0348671989440918, 0.03487539291381836, 0.03496038436889649, 0.03478121566772461, 0.03482006454467773, 0.03547750473022461, 0.03485184097290039, 0.034953216552734374, 0.034854911804199216, 0.03493478393554687, 0.03474943923950195, 0.03476889419555664, 0.03471974563598633, 0.034825214385986326, 0.035095550537109374, 0.0350300178527832, 0.034802688598632815, 0.034781185150146485, 0.034677761077880856, 0.034490367889404294, 0.034372608184814454, 0.07120281219482422, 0.03474227142333984, 0.03492659378051758, 0.03486515045166016, 0.035027999877929684, 0.035214336395263675, 0.035108863830566404, 0.03492655944824219, 0.03491635131835937, 0.03499212646484375, 0.0349409294128418, 0.03500236892700195, 0.03487641525268555, 0.03486105728149414, 0.03492659378051758, 0.03494911956787109, 0.03502489471435547, 0.034909183502197266, 0.03505152130126953, 0.03499724960327148, 0.03479142379760742, 0.03497983932495117, 0.03498291015625, 0.03495116806030273, 0.03492454528808594, 0.03508633422851563, 0.034934814453125, 0.03494499206542969, 0.034936832427978515, 0.034841598510742186, 0.03491839981079101, 0.035023872375488284, 0.03488665771484375, 0.03486617660522461, 0.03508428955078125, 0.03497881698608398, 0.03496448135375976, 0.03487334442138672, 0.034991104125976565, 0.035092479705810545, 0.03487641525268555, 0.0350013427734375, 0.035125247955322264, 0.03501567840576172, 0.035350528717041016, 0.03521843338012695, 0.03506687927246094, 0.03503104019165039, 0.034985984802246094, 0.03507308959960938, 0.034981822967529295, 0.03523174285888672, 0.0352174072265625, 0.034923519134521484, 0.034361343383789066, 0.03486822509765625, 0.03495731353759766, 0.03501875305175781, 0.034956287384033204, 0.03489484786987305, 0.034835456848144535, 0.035156993865966796, 0.03473408126831055, 0.07153561401367188, 0.034855934143066404, 0.03496345520019531, 0.034987071990966796, 0.035133377075195316, 0.03502592086791992, 0.035043327331542966, 0.035209217071533204, 0.03504844665527344, 0.03502592086791992, 0.034936832427978515, 0.03493478393554687, 0.0358205451965332, 0.035659774780273434, 0.03496038436889649, 0.03518668746948242, 0.03511296081542969, 0.03490304183959961, 0.03482624053955078, 0.03487539291381836, 0.03492761611938477, 0.03479142379760742, 0.035125247955322264, 0.03506585693359375, 0.03505254364013672, 0.034825214385986326, 0.03476995086669922, 0.03494089508056641, 0.03478732681274414, 0.03488256072998047, 0.035244033813476565, 0.035007488250732424, 0.03475046539306641, 0.03497062301635742, 0.0347770881652832, 0.03480166244506836, 0.03479964828491211, 0.03495734405517578, 0.03496441650390625, 0.03494911956787109, 0.034885631561279294, 0.034953216552734374, 0.034909183502197266, 0.03480575942993164, 0.03474943923950195, 0.034948097229003904, 0.03509862518310547, 0.03496857452392578, 0.03502284622192383, 0.0344719352722168, 0.03450271987915039, 0.03489888000488281, 0.034909183502197266, 0.03487846374511719, 0.03488460922241211, 0.03492147064208984, 0.03486515045166016, 0.03475251388549805, 0.03486310577392578, 0.03487027359008789, 0.03497369766235352, 0.03508230209350586, 0.034826175689697265, 0.07138098907470704, 0.035064830780029296, 0.035089408874511716, 0.03489894485473633, 0.03474537658691406, 0.034952159881591796, 
0.034969600677490234, 0.03484985733032227, 0.0348732795715332, 0.03523481750488281, 0.034993152618408206, 0.03489593505859375, 0.03493983840942383, 0.03493580627441406, 0.034854911804199216, 0.03482316970825195, 0.03493273544311523, 0.034797569274902344, 0.03540787124633789, 0.035068992614746095, 0.03575494384765625, 0.03548672103881836, 0.03483955383300781, 0.03533107376098633, 0.03497062301635742, 0.034800640106201174, 0.034786304473876956, 0.03497574234008789, 0.0349306869506836, 0.03490508651733398, 0.034917377471923826, 0.03463065719604492, 0.034928638458251955, 0.03481292724609375, 0.03505254364013672, 0.03483955383300781, 0.03466854476928711, 0.03482112121582031, 0.03525734329223633, 0.03482726287841797, 0.034948097229003904, 0.034776065826416014, 0.03475558471679688, 0.034283519744873044, 0.03471974563598633, 0.034735103607177735, 0.03482931137084961, 0.03508224105834961, 0.034678783416748044, 0.03488870239257812, 0.034802688598632815, 0.03478015899658203, 0.03476070404052734, 0.03488051223754883, 0.034976768493652347, 0.0347586555480957, 0.03504435348510742, 0.034929695129394533, 0.03516617584228516, 0.034799617767333986, 0.034723838806152346, 0.03487334442138672, 0.03482931137084961, 0.07187558746337891, 0.03538739013671875, 0.03526348876953125, 0.03513241577148438, 0.034925567626953126, 0.03487744140625, 0.0354856948852539, 0.035422206878662106, 0.0351723518371582, 0.034976768493652347, 0.03488460922241211, 0.034890750885009765, 0.0349409294128418, 0.03503206253051758, 0.035076095581054685, 0.03489279937744141, 0.035119102478027346, 0.034729984283447264, 0.03479244613647461, 0.034864158630371095, 0.034802654266357425, 0.0348221435546875, 0.034678783416748044, 0.03501055908203125, 0.03484467315673828, 0.0348487663269043, 0.03479859161376953, 0.03482931137084961, 0.03487539291381836, 0.034783233642578126, 0.03495116806030273, 0.03474534225463867, 0.03493580627441406, 0.03485388946533203, 0.03487948989868164, 0.034926624298095704, 0.034881504058837894, 0.03505254364013672, 0.03485081481933594, 0.03487846374511719, 0.03444224166870117, 0.03474431991577148, 0.03492761611938477, 0.034948097229003904, 0.03510067367553711, 0.0347770881652832, 0.03488972854614258, 0.03482112121582031, 0.034977790832519534, 0.034405376434326174, 0.03482422256469726, 0.034968544006347656, 0.03485081481933594, 0.0349409294128418, 0.034840576171875, 0.034907135009765625, 0.035122177124023435, 0.03496755218505859, 0.03485388946533203, 0.035350528717041016, 0.03549593734741211, 0.03502182388305664, 0.03480473709106445, 0.07150796508789062, 0.0350013427734375, 0.03506995010375977, 0.034994174957275394, 0.03483647918701172, 0.03483135986328125, 0.034991104125976565, 0.03488153457641602, 0.03486310577392578, 0.034756607055664065, 0.034781185150146485, 0.03496038436889649, 0.034710529327392575, 0.03470848083496094, 0.034977790832519534, 0.03480780792236328, 0.03479040145874023, 0.034776065826416014, 0.03490304183959961, 0.03465219116210937, 0.0350074577331543, 0.03491123199462891, 0.03498905563354492, 0.03513139343261719, 0.03480883026123047, 0.03476582336425781, 0.03472281646728516, 0.034713600158691404, 0.034830337524414064, 0.03484364700317383, 0.03500646209716797, 0.034683902740478514, 0.03486310577392578, 0.03489894485473633, 0.034664447784423826, 0.03480575942993164, 0.034835456848144535, 0.03486822509765625, 0.0347770881652832, 0.03489382553100586, 0.03503615951538086, 0.03473715209960938, 0.03480473709106445, 0.034802688598632815, 0.034677761077880856, 0.03475251388549805, 0.03466652679443359, 
0.03473097610473633, 0.0347883529663086, 0.035141632080078124, 0.03476070404052734, 0.03492966461181641, 0.03492147064208984, 0.034854911804199216, 0.034776065826416014, 0.03474431991577148, 0.0348037109375, 0.034800640106201174, 0.0348037109375, 0.03454054260253906, 0.03448934555053711, 0.03489791870117188, 0.03465830230712891, 0.07124582672119141, 0.03520102310180664, 0.03487744140625, 0.035156993865966796, 0.03470848083496094, 0.03489996719360351, 0.03482726287841797, 0.03494297790527344, 0.03487744140625, 0.034769920349121096, 0.034885631561279294, 0.03489996719360351, 0.03513756942749023, 0.03485385513305664, 0.03532185745239258, 0.03501875305175781, 0.0368721923828125, 0.03486310577392578, 0.03483443069458008, 0.03476582336425781, 0.03483647918701172, 0.0348221435546875, 0.03477196884155274, 0.034669567108154296, 0.03483647918701172, 0.034947071075439456, 0.0348671989440918, 0.03482316970825195, 0.03477814483642578, 0.03476988983154297, 0.03473612976074219, 0.03470745468139649, 0.03570380783081055, 0.03584921646118164, 0.03489484786987305, 0.03489689636230469, 0.03458969497680664, 0.034830337524414064, 0.03489689636230469, 0.03477811050415039, 0.034705406188964845, 0.03480985641479492, 0.03458662414550781, 0.03588614273071289, 0.03546003341674805, 0.035043327331542966, 0.034993152618408206, 0.03488051223754883, 0.03490611267089844, 0.035004417419433595, 0.03475763320922851, 0.034797569274902344, 0.03473715209960938, 0.03501772689819336, 0.034772991180419925, 0.034885631561279294, 0.03487539291381836, 0.03483443069458008, 0.03482316970825195, 0.0350013427734375, 0.03475763320922851, 0.03494604873657227, 0.0350300178527832]",tokens/s,28.485984529775525,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, 
proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9e8-1f032d6052fa589103a25bc9;d692eb93-6125-4421-8698-bce84340ff7d) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3015.59808,9299.296256,0.0,8652.849152,8210.185216,s,10,11.00354541015625,1.100354541015625,0.0021995629105345756,1.10037060546875,1.1026337646484374,1.103404345703125,1.104020810546875,"[1.1041749267578125, 1.1024625244140625, 1.0971700439453125, 1.0986126708984374, 1.0993076171875, 1.097432861328125, 1.099380126953125, 1.101361083984375, 1.1021866455078124, 1.1014569091796875]",tokens/s,232.65228656548507,kWh,1.2965490321318309e-05,7.10465502763327e-06,5.986921456200278e-05,7.993935991095436e-05,tokens/kWh,3202427.443566751,MB,3016.822784,9299.296256,0.0,8652.849152,8503.104,s,10,641.11232421875,64.11123242187502,0.005190298699693196,64.11128124999999,64.116784765625,64.1185056640625,64.11988238281249,"[64.1162265625, 64.11091796875, 64.10715234375, 64.11164453125, 64.1140703125, 64.10447265625, 64.1202265625, 64.106796875, 64.1044140625, 64.11640234375]",tokens/s,0.9826671180712498,kWh,0.0007569437074826825,0.00041487128047605436,0.0034860815944185997,0.004657896582377336,tokens/kWh,13525.418369818235,,s,629,649.9943001708987,1.0333772657724936,0.13070199113792613,1.0176091918945311,1.0182221801757814,1.0184349731445312,2.1165633984375,"[1.0176849975585938, 1.0179225463867188, 1.0177464599609376, 1.0181263427734375, 1.01766552734375, 1.0177433471679687, 1.0174985961914063, 1.0180587768554688, 1.0172999877929687, 1.0173552856445311, 1.017459716796875, 1.0184939575195313, 1.017143310546875, 1.0182799072265625, 1.0176931762695312, 1.0179122924804687, 1.0178191528320313, 1.0179092407226562, 1.0176112670898438, 1.0177853393554688, 1.0181171264648436, 1.0180086059570312, 1.0171443481445313, 1.0179840087890626, 1.0173839111328125, 1.0177720336914062, 1.0175538940429687, 1.0174218139648437, 1.0173317260742187, 1.0178887939453125, 1.0179092407226562, 1.0181570434570313, 1.0174976196289063, 1.0177576904296874, 1.017449462890625, 1.017692138671875, 1.0180843505859376, 1.0179747924804687, 1.0176112670898438, 1.0182471923828125, 1.0175887451171874, 1.017427978515625, 1.0171924438476563, 1.0172886962890626, 1.0171688842773436, 1.0173245239257813, 1.0170091552734375, 1.0176368408203125, 1.0171146240234374, 1.0184765625, 1.0177085571289062, 1.017554931640625, 1.0172211303710939, 1.0182522583007811, 1.017290771484375, 1.0179625244140624, 1.0176573486328124, 1.0178734130859375, 1.0176276245117188, 1.0185001220703125, 1.0183004150390624, 1.0180914916992188, 2.120390625, 1.0169886474609375, 1.0174678955078125, 1.0169763793945312, 1.017365478515625, 1.0172262573242188, 1.0176307373046876, 1.017291748046875, 1.0174207763671874, 1.017438232421875, 1.0176419677734374, 1.01682275390625, 1.0177566528320312, 1.0169026489257813, 1.0171187133789064, 1.0169517822265626, 1.0179747924804687, 1.0175006713867187, 1.0174310302734375, 1.0172119140625, 1.018013671875, 1.0172262573242188, 
1.0173501586914062, 1.0170921020507813, 1.0172692260742187, 1.0172661743164062, 1.01758154296875, 1.0180413208007812, 1.017764892578125, 1.0175068359375, 1.0178314208984376, 1.0169129028320312, 1.0174238891601564, 1.01768701171875, 1.0177402954101562, 1.0183987426757812, 1.01777099609375, 1.0177843017578125, 1.0177607421875, 1.0173163452148437, 1.0173081665039063, 1.017702392578125, 1.0177484741210938, 1.0175764770507814, 1.01732763671875, 1.018228759765625, 1.018945556640625, 1.0175508422851562, 1.0177587280273437, 1.0177536010742188, 1.0183505859375, 1.0178427124023437, 1.01798095703125, 1.0178457641601562, 1.0182246704101563, 1.0184437866210938, 1.0184017944335937, 1.0174044189453124, 1.01778125, 1.0175907592773437, 1.0179645385742186, 1.0172713012695314, 1.0177116088867189, 2.116588623046875, 1.0172682495117187, 1.017439208984375, 1.0171781005859375, 1.0176378784179687, 1.0172579956054688, 1.0172876586914064, 1.0171351318359374, 1.017439208984375, 1.0174095458984376, 1.0178488159179688, 1.017796630859375, 1.0178846435546876, 1.0169200439453125, 1.0174443359375, 1.0173378295898436, 1.0173532104492187, 1.0172938232421875, 1.0175784912109376, 1.0172467041015625, 1.0177372436523437, 1.01789697265625, 1.0177136840820313, 1.0170101928710937, 1.01732763671875, 1.0172815551757812, 1.0176215209960937, 1.0174166870117187, 1.0176153564453125, 1.017275390625, 1.0179368896484375, 1.0172160034179687, 1.0177505493164063, 1.0171678466796874, 1.01737060546875, 1.01718017578125, 1.0172098388671875, 1.0171494140625, 1.0173306884765625, 1.0171054077148438, 1.0177413330078124, 1.017786376953125, 1.0182041625976563, 1.0170777587890625, 1.0175405883789062, 1.0175836181640625, 1.0175068359375, 1.017169921875, 1.0173778076171875, 1.0175529174804687, 1.0178037719726563, 1.0177454223632814, 1.0187315063476563, 1.01823486328125, 1.0180054931640625, 1.0178140258789063, 1.0181683349609374, 1.017670654296875, 1.0181611328125, 1.0182564086914063, 1.0183587646484376, 1.0175396118164062, 1.0181222534179688, 2.11649853515625, 1.0170214233398438, 1.0172620849609375, 1.0174719848632812, 1.0172548828125, 1.0174484252929688, 1.017628662109375, 1.0173849487304687, 1.0173992919921875, 1.0177689819335938, 1.0176215209960937, 1.01728564453125, 1.0174402465820314, 1.0171627807617187, 1.0172057495117188, 1.0171904296875, 1.0177679443359375, 1.0173214721679686, 1.0170664672851562, 1.0171207885742188, 1.0171893920898438, 1.0169364624023438, 1.0172098388671875, 1.0170009765625, 1.0171842651367187, 1.01760205078125, 1.0179368896484375, 1.0174985961914063, 1.0172743530273438, 1.0170787963867187, 1.0172272338867188, 1.0170316772460937, 1.0173040771484374, 1.0175529174804687, 1.0179287109375, 1.0178242797851562, 1.0183321533203125, 1.01816015625, 1.0178682861328125, 1.0174054565429687, 1.0174771118164063, 1.0178682861328125, 1.0177188110351563, 1.017955322265625, 1.0186710815429687, 1.0180361938476563, 1.018166259765625, 1.017849853515625, 1.0181478271484374, 1.0177095947265624, 1.017891845703125, 1.0177259521484374, 1.0180966186523437, 1.0177996826171876, 1.0185390014648437, 1.0180700073242188, 1.0183905029296876, 1.0182389526367188, 1.0185400390625, 1.0179686279296876, 1.0179194946289063, 1.0175518798828125, 1.0181058349609375, 2.11787255859375, 1.017354248046875, 1.0174566650390624, 1.017512939453125, 1.0176153564453125, 1.0173112182617188, 1.0177638549804688, 1.0170715942382813, 1.017206787109375, 1.0171371459960938, 1.0176399536132812, 1.0181683349609374, 1.0179246215820312, 1.017238525390625, 1.0177177734375, 1.01699072265625, 
1.01747509765625, 1.0175672607421875, 1.0181283569335937, 1.01778125, 1.01810791015625, 1.018156005859375, 1.018429443359375, 1.0175344848632812, 1.0176460571289063, 1.0172333984375, 1.0177321166992188, 1.01758154296875, 1.0172559204101563, 1.0175887451171874, 1.0181652221679687, 1.0181017456054688, 1.0174576416015626, 1.0177464599609376, 1.017849853515625, 1.0172507934570312, 1.0176378784179687, 1.0174627685546875, 1.0175211791992187, 1.017702392578125, 1.0177321166992188, 1.01766552734375, 1.0176245727539062, 1.017238525390625, 1.0175139770507813, 1.0173101806640625, 1.0175518798828125, 1.01707568359375, 1.0175723266601562, 1.0176614379882813, 1.018265625, 1.0177587280273437, 1.0177955932617186, 1.017280517578125, 1.0177669067382813, 1.01758056640625, 1.0177003784179688, 1.0202439575195312, 1.0180403442382813, 1.0180946044921875, 1.0176676025390625, 1.0172211303710939, 1.0179778442382812, 2.115203125, 1.0167890014648437, 1.0172354736328124, 1.0170439453125, 1.0171873168945313, 1.0175703125, 1.0176470947265626, 1.0175744018554687, 1.0177515258789063, 1.0173716430664062, 1.0174197998046874, 1.0172938232421875, 1.0174003295898437, 1.0167725830078125, 1.0175344848632812, 1.0169354248046876, 1.017470947265625, 1.017322509765625, 1.017417724609375, 1.0173368530273437, 1.0175313720703125, 1.0170009765625, 1.0170460205078125, 1.0174044189453124, 1.017523193359375, 1.0177791748046876, 1.0174197998046874, 1.0170818481445312, 1.0176419677734374, 1.0173101806640625, 1.0176409301757812, 1.0174095458984376, 1.0175293579101563, 1.0173173828125, 1.0182144165039062, 1.0182215576171876, 1.0176747436523437, 1.01743310546875, 1.0186895141601562, 1.0174781494140626, 1.0176266479492186, 1.0174013671875, 1.0175191040039062, 1.0173880615234374, 1.017828369140625, 1.0177105712890624, 1.0176378784179687, 1.0176849975585938, 1.017650146484375, 1.0173193969726562, 1.0175958862304688, 1.017544677734375, 1.01745458984375, 1.0172119140625, 1.017970703125, 1.0179348754882813, 1.017543701171875, 1.0179287109375, 1.0183065795898438, 1.017802734375, 1.0182492065429687, 1.0176123046875, 1.0174044189453124, 2.1172890625, 1.0174003295898437, 1.0177330932617188, 1.0177269897460937, 1.0172897338867188, 1.0172108764648438, 1.0175191040039062, 1.0172661743164062, 1.0171361083984376, 1.0175949096679688, 1.0182318115234374, 1.0174924926757813, 1.0179317626953126, 1.0172047119140626, 1.017650146484375, 1.017218017578125, 1.0178754272460937, 1.017364501953125, 1.017511962890625, 1.0177156982421875, 1.0180003662109376, 1.0175949096679688, 1.01768603515625, 1.0176327514648438, 1.0180106201171875, 1.0180044555664063, 1.0175570068359374, 1.0175354614257812, 1.0178191528320313, 1.0182748413085938, 1.0180044555664063, 1.0177034301757812, 1.0174884033203124, 1.0172713012695314, 1.0173092041015626, 1.01715966796875, 1.0175641479492188, 1.0176747436523437, 1.0178734130859375, 1.0186076049804687, 1.0178191528320313, 1.017248779296875, 1.0178734130859375, 1.0178088989257812, 1.0177566528320312, 1.0173737182617189, 1.0184867553710937, 1.0182072143554688, 1.018197998046875, 1.0183782348632813, 1.0181672973632812, 1.0175877075195312, 1.0179911499023437, 1.0176522216796875, 1.0181212158203126, 1.017575439453125, 1.0183485717773437, 1.01770751953125, 1.0183301391601562, 1.0184693603515624, 1.0186875, 1.0184386596679686, 1.0182625122070312, 2.120072265625, 1.017439208984375, 1.0174781494140626, 1.0172333984375, 1.0177269897460937, 1.0175928344726564, 1.0170654907226562, 1.0174505004882812, 1.0172559204101563, 1.0168995971679688, 1.0167019653320313, 
1.0168186645507813, 1.0172682495117187, 1.0168565673828125, 1.0178058471679687, 1.0173900756835939, 1.0174361572265624, 1.0177945556640624, 1.0173880615234374, 1.0177044677734375, 1.0172129516601562, 1.0170511474609376, 1.0175641479492188, 1.0171637573242187, 1.0182041625976563, 1.0174515380859375, 1.0173992919921875, 1.017302001953125, 1.01718017578125, 1.0170521850585938, 1.017697265625, 1.0175518798828125, 1.0177699584960938, 1.0171688842773436, 1.0180577392578125, 1.0177750854492187, 1.0180413208007812, 1.0171555786132813, 1.0174617309570313, 1.0173552856445311, 1.0172088623046875, 1.0174453735351563, 1.0177587280273437, 1.0179358520507813, 1.017807861328125, 1.0180464477539062, 1.01760205078125, 1.01793994140625, 1.01802392578125, 1.0177402954101562, 1.01783349609375, 1.01783447265625, 1.0175006713867187, 1.017565185546875, 1.0181806030273437, 1.01722314453125, 1.0179891357421875, 1.0179143676757811, 1.0177699584960938, 1.018265625, 1.018239990234375, 1.0177269897460937, 1.01764404296875, 2.12052685546875, 1.0178191528320313, 1.0170664672851562, 1.0168914184570312, 1.016933349609375, 1.0168248291015625, 1.0170194091796876, 1.0169968872070312, 1.0170706176757813, 1.01739111328125, 1.0174085083007813, 1.0172446899414063, 1.01743408203125, 1.0172498168945312, 1.0173173828125, 1.0169630737304687, 1.0172764282226563, 1.017670654296875, 1.0172395629882813, 1.0175570068359374, 1.01774951171875, 1.0171514892578124, 1.0176829223632813, 1.0178232421875, 1.0177474365234376, 1.0177638549804688, 1.0182195434570311, 1.0177802124023438, 1.01798193359375, 1.0181058349609375, 1.0178447265625, 1.0170623779296875, 1.017565185546875, 1.01713818359375, 1.017654296875, 1.0177515258789063, 1.0174115600585938, 1.0176266479492186, 1.01768603515625, 1.0180618286132812, 1.0176409301757812, 1.01743408203125, 1.0178948974609374, 1.0173562622070313, 1.0175979614257813, 1.0175078125, 1.0172713012695314, 1.0172344360351562, 1.0179164428710938, 1.0178744506835937, 1.0179573974609375, 1.01726513671875, 1.017871337890625, 1.0172272338867188, 1.017660400390625, 1.01722314453125, 1.0177474365234376, 1.0172640991210937, 1.0182471923828125, 1.0183259887695313, 1.0176481323242188, 1.0172948608398438, 1.0174535522460937, 2.119248779296875, 1.0171259155273438, 1.0173378295898436, 1.0171361083984376, 1.0176768188476562, 1.0173480834960937, 1.0173060913085938, 1.0175313720703125, 1.0172876586914064, 1.0169313354492187, 1.0169517822265626, 1.0170767211914062, 1.016911865234375, 1.0168883056640625, 1.0176091918945311, 1.0169405517578125, 1.0172006225585937, 1.0173839111328125, 1.0174832763671875, 1.0169149169921874, 1.0174115600585938, 1.017101318359375, 1.0171627807617187, 1.0175191040039062, 1.0177177734375, 1.0175150146484375, 1.0177362060546875, 1.0174299926757813, 1.0178037719726563, 1.0174238891601564, 1.0178887939453125, 1.0173532104492187, 1.0172507934570312, 1.01740234375, 1.0190120849609374, 1.0181201782226563, 1.018503173828125, 1.0183167724609374, 1.0181693725585939, 1.0181580810546875, 1.0182164306640624, 1.0177146606445313, 1.0178099365234374, 1.0184202270507812, 1.0194851684570312, 1.019615234375, 1.0187745361328124, 1.018102783203125, 1.0178682861328125, 1.01852978515625, 1.018186767578125, 1.017744384765625, 1.0179573974609375, 1.0179799194335937, 1.0177484741210938, 1.01732861328125, 1.0178109741210937, 1.0174668579101562, 1.017786376953125, 1.017491455078125, 1.0179041137695313, 1.0180321044921874, 1.0186588134765624]",tokens/s,0.9677007934294523,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1674.596352,5516.034048,0.0,4869.586944,4743.593472,s,10,6.137113647460938,0.6137113647460938,0.0032895717697753717,0.6129547424316406,0.6153997436523437,0.6190885131835938,0.6220395288085938,"[0.6227772827148438, 0.6144583129882812, 0.61261669921875, 0.61060546875, 0.612577392578125, 0.6105811157226563, 0.6121093139648438, 0.6145800170898438, 0.6132927856445313, 0.6135152587890625]",tokens/s,417.13420136176387,kWh,7.2172448039054885e-06,3.954746732982612e-06,3.355963469082604e-05,4.473162622771414e-05,tokens/kWh,5723020.189268045,MB,1674.596352,5516.034048,0.0,4869.586944,4769.651712,s,10,361.12618359375006,36.112618359375006,0.009999043540991457,36.112980468749996,36.125964453125,36.127720507812505,36.1291253515625,"[36.1294765625, 36.0964296875, 36.12557421875, 36.1174140625, 36.11404296875, 36.1114296875, 36.11191796875, 36.09769140625, 36.10714453125, 36.1150625]",tokens/s,1.7445425688343892,kWh,0.00042614284929302004,0.0002335616249413365,0.0019425026683799653,0.0026022071426143214,tokens/kWh,24210.217153084406,,s,629,366.0647033081055,0.5819788605852233,0.07282065333686359,0.5730969848632812,0.5741784912109374,0.5745491943359375,1.1854891552734375,"[0.5736980590820312, 0.573138916015625, 0.5735465087890625, 0.574129150390625, 0.5737092895507813, 0.57447216796875, 0.5745264892578125, 0.5731635131835937, 0.5737205810546875, 0.5735956420898437, 0.5749248046875, 0.5725880126953125, 0.5736376342773437, 0.57371337890625, 0.573749267578125, 0.5733990478515625, 0.5727344360351563, 0.57394482421875, 0.57497705078125, 0.5740851440429687, 0.5741229858398438, 0.5742008056640625, 0.5734738159179688, 0.574509033203125, 0.5738157958984375, 0.573897705078125, 0.5732402954101562, 0.5745704956054688, 0.5727620849609375, 0.5734061889648437, 0.5746319580078125, 0.5745643310546875, 0.5739735107421875, 0.5731901245117188, 0.5725511474609375, 0.5742970581054687, 0.5729157104492187, 0.572837890625, 0.57303857421875, 0.5725552368164063, 0.5740175170898437, 0.5737943115234375, 0.57314404296875, 0.5743441772460938, 0.5747333374023438, 0.5747230834960938, 0.5726239013671875, 0.5742520141601563, 0.5729638671875, 0.5725716552734375, 0.572348388671875, 0.572031005859375, 0.5730672607421875, 0.5724190673828125, 0.572590087890625, 0.5723648071289062, 0.5724487915039063, 0.572526611328125, 0.5727549438476562, 0.57301708984375, 0.5722869873046875, 0.5721016235351563, 1.18763623046875, 0.5728788452148438, 0.5735679931640625, 0.5728031005859375, 0.5727078247070313, 0.5732781982421875, 0.5730969848632812, 0.5728460693359375, 0.57259521484375, 0.5722409057617187, 0.57280615234375, 0.57242724609375, 0.57225830078125, 0.5726760864257813, 0.572142578125, 0.5723013305664062, 0.5727958984375, 0.5730846557617187, 0.572316650390625, 0.5732341918945313, 0.57354443359375, 0.5724467163085938, 
0.5723648071289062, 0.5720924072265625, 0.5729249267578125, 0.5723299560546875, 0.5724815063476563, 0.572590087890625, 0.5723525390625, 0.5726453857421875, 0.572458984375, 0.5725296630859374, 0.5720729370117188, 0.572706787109375, 0.5740257568359375, 0.572758056640625, 0.5724251708984375, 0.5737236328125, 0.57303857421875, 0.5752218017578125, 0.5730856323242187, 0.57284814453125, 0.5734194946289063, 0.5729658813476562, 0.5726095581054688, 0.5726730346679687, 0.5732310791015625, 0.5731287231445312, 0.5729985961914063, 0.572822509765625, 0.5728573608398437, 0.5731840209960938, 0.5744937133789062, 0.5729187622070312, 0.5740841064453125, 0.5739561157226563, 0.5734850463867187, 0.5729812622070313, 0.5727211303710937, 0.5733560180664062, 0.5732577514648437, 0.5737369384765625, 0.5736038208007812, 1.18655078125, 0.5729157104492187, 0.5731143798828126, 0.5728522338867188, 0.5734451293945313, 0.5739530029296875, 0.572958740234375, 0.572759033203125, 0.5748234252929687, 0.57375537109375, 0.573359130859375, 0.5733130493164063, 0.5738352661132813, 0.57318603515625, 0.5733314819335937, 0.5734747924804687, 0.5750180053710937, 0.5730693359375, 0.5736345825195313, 0.573718505859375, 0.57335498046875, 0.5729044189453125, 0.5738772583007813, 0.5738352661132813, 0.5730682983398437, 0.5731051635742187, 0.5734297485351563, 0.5730928344726562, 0.5731317749023438, 0.5730283813476562, 0.573849609375, 0.57297509765625, 0.5731461181640625, 0.5727354736328125, 0.5730130004882813, 0.5731461181640625, 0.5728123168945313, 0.5729003295898437, 0.5728778076171875, 0.5738741455078125, 0.5740830688476563, 0.572737548828125, 0.5726536254882812, 0.5742734985351563, 0.5730897827148438, 0.5729822998046875, 0.5732136840820312, 0.5756242065429688, 0.5729976196289063, 0.5738700561523438, 0.573106201171875, 0.573149169921875, 0.5729013671875, 0.573991943359375, 0.573201416015625, 0.5733877563476563, 0.5732608032226563, 0.5743902587890625, 0.5742141723632812, 0.5728829345703125, 0.5743646850585937, 0.5734010620117187, 0.573259765625, 1.1840142822265625, 0.5737728271484375, 0.5731215209960937, 0.5732106323242188, 0.5732505493164063, 0.572969970703125, 0.573033447265625, 0.57314306640625, 0.5731942138671875, 0.5728460693359375, 0.5744957275390625, 0.5742643432617187, 0.5740144653320313, 0.5731768798828125, 0.5741864624023437, 0.5734174194335937, 0.573322265625, 0.5733375854492188, 0.5739192504882813, 0.5734788818359375, 0.5737677001953125, 0.5734850463867187, 0.5734430541992187, 0.5732044677734375, 0.5726986083984374, 0.572821533203125, 0.5730499267578125, 0.5735310668945313, 0.574656494140625, 0.5733201904296875, 0.5725225219726563, 0.5739570922851562, 0.5737728271484375, 0.57303857421875, 0.5733232421875, 0.5734522705078124, 0.5738311767578125, 0.575151123046875, 0.5733201904296875, 0.5735167846679687, 0.5731133422851562, 0.5729003295898437, 0.5728235473632812, 0.573318115234375, 0.5730816040039063, 0.5742418212890625, 0.573048828125, 0.5728880615234375, 0.572990478515625, 0.5729679565429687, 0.5725675659179688, 0.5726566162109376, 0.5728235473632812, 0.5745910034179688, 0.5722357788085938, 0.5722327270507812, 0.572821533203125, 0.5726668701171875, 0.5724385375976563, 0.5728890991210938, 0.5728051147460937, 0.5724405517578125, 0.5733775634765625, 1.1864791259765626, 0.5730631713867187, 0.5741793212890625, 0.574244873046875, 0.5729197998046875, 0.5730785522460937, 0.5742643432617187, 0.5736028442382812, 0.5729208374023438, 0.5731768188476563, 0.573612060546875, 0.5728307495117188, 0.5726996459960938, 0.57314306640625, 
0.5741906127929688, 0.572669921875, 0.5738731689453125, 0.5743124389648437, 0.5728778076171875, 0.5729352416992187, 0.574119873046875, 0.5733079223632812, 0.5724866333007812, 0.572231689453125, 0.5732946166992188, 0.5724682006835937, 0.5730529174804687, 0.5733519287109375, 0.573106201171875, 0.5727999877929687, 0.57236376953125, 0.5724620971679687, 0.5725665893554688, 0.5737697143554688, 0.5733406372070312, 0.573254638671875, 0.5726945190429688, 0.5743114013671875, 0.5735679931640625, 0.5726617431640625, 0.5724610595703125, 0.57396533203125, 0.5732372436523437, 0.5741332397460938, 0.5733570556640625, 0.5733375854492188, 0.572717041015625, 0.572859375, 0.5731901245117188, 0.572558349609375, 0.5723750610351562, 0.5745213623046875, 0.573633544921875, 0.5729638671875, 0.573412353515625, 0.5735505981445312, 0.5726494750976563, 0.5753558959960937, 0.5728399658203125, 0.573432861328125, 0.5723391723632812, 0.5729290161132813, 0.572626953125, 1.1842232666015624, 0.5728092041015626, 0.572779541015625, 0.572632080078125, 0.5737963256835937, 0.5736775512695312, 0.5726033935546875, 0.572564453125, 0.5733416748046875, 0.5725654907226563, 0.5723832397460937, 0.5723812255859375, 0.5731624755859375, 0.5723832397460937, 0.5729197998046875, 0.572788818359375, 0.5723237915039062, 0.5723801879882813, 0.5726771850585938, 0.5727251586914063, 0.5726986083984374, 0.57333349609375, 0.5739939575195312, 0.5731481323242188, 0.5727344360351563, 0.5737195434570312, 0.5727918090820312, 0.5725234985351563, 0.5729740600585937, 0.5731522827148438, 0.5736734619140625, 0.5729310913085938, 0.5734471435546875, 0.573127685546875, 0.5729720458984375, 0.5730723876953125, 0.5730529174804687, 0.5730682983398437, 0.5760235595703125, 0.5742151489257813, 0.573750244140625, 0.5731143798828126, 0.573844482421875, 0.5748480224609375, 0.5746339721679687, 0.5739223022460938, 0.5732044677734375, 0.5739735107421875, 0.5730263061523437, 0.5728583984375, 0.5729924926757812, 0.5730785522460937, 0.5734502563476562, 0.5732157592773437, 0.573191162109375, 0.5728983154296875, 0.5738905639648437, 0.5733457641601563, 0.57269873046875, 0.572650390625, 0.5734747924804687, 0.5728338012695312, 0.5733191528320313, 1.1859814453125, 0.57253271484375, 0.5722654418945312, 0.5727242431640625, 0.5725839233398438, 0.5722265625, 0.572601318359375, 0.5729863891601562, 0.572706787109375, 0.5726239013671875, 0.5739888916015625, 0.5729249267578125, 0.573317138671875, 0.5733673095703125, 0.5743882446289063, 0.5732106323242188, 0.5734819946289063, 0.5727047729492187, 0.5730140380859375, 0.572675048828125, 0.5729136352539063, 0.5729924926757812, 0.5723709716796875, 0.5744793701171875, 0.57339599609375, 0.5728604125976563, 0.5722327270507812, 0.5737615356445313, 0.5753026733398438, 0.5738475341796875, 0.5734512939453125, 0.5752913818359375, 0.5733673095703125, 0.5729085693359375, 0.5732177734375, 0.573886474609375, 0.5727651977539062, 0.57282763671875, 0.57284814453125, 0.5731399536132813, 0.5727467651367187, 0.5733314819335937, 0.5736110229492187, 0.5734830322265625, 0.5729003295898437, 0.574867431640625, 0.5740697631835937, 0.572969970703125, 0.5728942260742188, 0.5735751342773437, 0.5731563720703124, 0.5731113891601562, 0.5729085083007812, 0.5745828247070313, 0.5727620849609375, 0.5729269409179687, 0.5724876708984376, 0.573665283203125, 0.5732260131835938, 0.5726268920898437, 0.57246923828125, 0.572621826171875, 0.5729013671875, 1.1888896484375, 0.57364990234375, 0.5738916015625, 0.5726515502929688, 0.5726239013671875, 0.5722838745117188, 0.5730723876953125, 
0.573032470703125, 0.5727662353515625, 0.5732413330078125, 0.5728880615234375, 0.5727396240234375, 0.5731368408203125, 0.573233154296875, 0.5731963500976562, 0.5735249633789062, 0.5744219970703125, 0.5727999877929687, 0.5726064453125, 0.5729290771484375, 0.5737103271484375, 0.572811279296875, 0.5732679443359375, 0.5726730346679687, 0.5734287109375, 0.5722675170898438, 0.5725828857421875, 0.5725849609375, 0.5723627319335938, 0.5727416381835938, 0.5728348388671874, 0.5723678588867187, 0.5728604125976563, 0.573675537109375, 0.57333349609375, 0.5725634765625, 0.5726546020507812, 0.5731399536132813, 0.5728031005859375, 0.5724630737304688, 0.57310205078125, 0.5734573974609375, 0.5732689819335938, 0.5729290161132813, 0.5729782104492187, 0.5726064453125, 0.572527587890625, 0.5728818969726562, 0.5733007202148438, 0.5730426635742187, 0.5736365966796875, 0.5730549926757813, 0.573111328125, 0.5725552368164063, 0.5726505126953125, 0.5732802734375, 0.5726494750976563, 0.5724876708984376, 0.5733058471679687, 0.5727211303710937, 0.5725931396484375, 0.5726648559570312, 0.5728655395507812, 1.1863818359375, 0.5727938842773438, 0.5733508911132813, 0.5740676879882812, 0.574234619140625, 0.5727047729492187, 0.5733447875976563, 0.5732515869140625, 0.5743012084960938, 0.5728123168945313, 0.5730969848632812, 0.5729976196289063, 0.5739694213867188, 0.5728798828125, 0.5730140380859375, 0.5724334106445312, 0.5727744140625, 0.5740390625, 0.5736099853515625, 0.5726300048828125, 0.572969970703125, 0.5736683349609375, 0.572948486328125, 0.5725542602539062, 0.5725962524414062, 0.5727139892578125, 0.573559814453125, 0.572568603515625, 0.5725419311523438, 0.5730426635742187, 0.5727252197265625, 0.5726781616210938, 0.5734676513671875, 0.573179931640625, 0.5726351318359375, 0.573233154296875, 0.5737062377929687, 0.5729423217773437, 0.5729710083007813, 0.5735885009765626, 0.5732741088867187, 0.5730344848632812, 0.5727262573242188, 0.5732567138671875, 0.5732689819335938, 0.5727232055664062, 0.5727559814453125, 0.5730703125, 0.5724334716796875, 0.5731378784179687, 0.5730191650390625, 0.572896240234375, 0.5732177734375, 0.5728727416992188, 0.5729361572265625, 0.5730979614257813, 0.574118896484375, 0.573497314453125, 0.5732976684570312, 0.5737840576171875, 0.5730130004882813, 0.5730549926757813, 0.572958740234375, 1.187092529296875, 0.5724129028320313, 0.5726473999023437, 0.57280615234375, 0.572416015625, 0.5723361206054688, 0.572821533203125, 0.5732761840820313, 0.5727733764648437, 0.573095947265625, 0.5734052124023438, 0.573053955078125, 0.572674072265625, 0.5729341430664062, 0.5734830322265625, 0.573849609375, 0.5724999389648437, 0.5727334594726563, 0.5741107177734375, 0.5736663208007813, 0.57402978515625, 0.5733611450195313, 0.572958740234375, 0.5730099487304687, 0.5733345336914063, 0.5739038696289063, 0.575910888671875, 0.574392333984375, 0.5739898681640625, 0.5732730712890625, 0.5733109741210938, 0.5730928344726562, 0.5741782836914062, 0.5731993408203125, 0.57302734375, 0.5732771606445313, 0.572506103515625, 0.5729556274414063, 0.5733088989257813, 0.572968994140625, 0.5736283569335937, 0.5738291015625, 0.5733499145507812, 0.5729085693359375, 0.573539306640625, 0.5743226928710937, 0.5736959838867187, 0.5733365478515625, 0.57354443359375, 0.5733949584960938, 0.5731727294921874, 0.5731215209960937, 0.5734061889648437, 0.5725133056640626, 0.5726607055664062, 0.5729464111328125, 0.5725101928710937, 0.57212109375, 0.5734563598632813, 0.5726597290039063, 0.5727313842773437, 0.572416015625, 
0.5735731201171875]",tokens/s,1.7182754696526694,,,main,False,False,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,2082.578432,5566.365696,0.0,4919.918592,4635.53792,s,10,5.0974686889648435,0.5097468688964844,0.002523113409812242,0.5091204681396484,0.5118887878417968,0.5140431793212891,0.5157666925048828,"[0.5161975708007812, 0.508827880859375, 0.5094269104003907, 0.508350830078125, 0.5081865539550782, 0.5073575439453125, 0.5094130554199219, 0.5110750427246094, 0.5072232666015625, 0.5114100341796874]",tokens/s,502.2100489880333,kWh,5.994651640454928e-06,3.28480666631549e-06,2.7987258500894452e-05,3.726671680766487e-05,tokens/kWh,6869400.417569034,MB,2082.578432,5566.365696,0.0,4919.918592,4794.464768,s,10,295.3985,29.53985,0.004666201106872227,29.538681640625,29.5469361328125,29.54772978515625,29.548364707031247,"[29.534837890625, 29.546759765625, 29.542654296875, 29.540826171875, 29.5363359375, 29.535224609375, 29.54083984375, 29.5359609375, 29.5485234375, 29.536537109375]",tokens/s,2.132712251416307,kWh,0.0003485870901164082,0.0001910528620806053,0.0016025825737317144,0.002142222525928728,tokens/kWh,29408.709523623045,,s,629,299.5096441345215,0.47616795569876236,0.06052059742525611,0.46878207397460936,0.46948126220703124,0.4697759826660156,0.977483603515625,"[0.4688783264160156, 0.46847384643554685, 0.46878515625, 0.4694343566894531, 0.4695541687011719, 0.46879034423828125, 0.4688465270996094, 0.46848306274414064, 0.46828851318359377, 0.4686806945800781, 0.46836428833007815, 0.4685475769042969, 0.4686540832519531, 0.46948455810546874, 0.4689162292480469, 0.46845745849609377, 0.4684011535644531, 0.4683714599609375, 0.4684134521484375, 0.46870220947265623, 0.468790283203125, 0.4682076110839844, 0.4684062805175781, 0.46837042236328125, 0.469607421875, 0.46908721923828123, 0.4688885803222656, 0.4687656860351562, 0.4693012390136719, 0.4686929931640625, 0.46864382934570314, 0.468430908203125, 0.4682413330078125, 0.4685291442871094, 0.46904525756835935, 0.4706498413085938, 0.46837042236328125, 0.4685875244140625, 0.4687718505859375, 0.4685537414550781, 0.46882406616210937, 0.46856805419921876, 0.4686806945800781, 0.46866943359375, 0.4686581726074219, 0.4682403869628906, 0.4695777893066406, 0.4685884704589844, 0.4686663818359375, 0.4685537414550781, 0.46855987548828126, 0.4685066223144531, 0.4687831115722656, 0.46864794921875, 0.4684666748046875, 0.4691046447753906, 0.46919476318359377, 0.4684984436035156, 0.4691128234863281, 0.4691221008300781, 0.46927557373046874, 0.46959002685546875, 0.977623046875, 0.46814208984375, 0.4683407287597656, 0.46863873291015623, 0.46877490234375, 0.46880459594726565, 0.4687513732910156, 0.46857113647460935, 0.468421630859375, 0.46894284057617186, 0.4688281555175781, 0.46850765991210935, 0.4687431640625, 0.4691404724121094, 0.4699504699707031, 0.46978561401367186, 0.46937701416015626, 0.4690975341796875, 0.4691311950683594, 0.47030169677734374, 
0.4690933837890625, 0.4689039306640625, 0.4690298767089844, 0.46860595703125, 0.4687359924316406, 0.4692275085449219, 0.46936166381835936, 0.46901248168945314, 0.46843902587890623, 0.46878106689453125, 0.46938113403320314, 0.46973849487304686, 0.46935861206054685, 0.4687390441894531, 0.4685845031738281, 0.46890591430664064, 0.46899813842773436, 0.4688281555175781, 0.4687083435058594, 0.4687564697265625, 0.4687308654785156, 0.4687615966796875, 0.4689858703613281, 0.4688762817382813, 0.46893264770507814, 0.46929302978515625, 0.46915994262695315, 0.4696995849609375, 0.4688701477050781, 0.4687912902832031, 0.4691885986328125, 0.46969558715820314, 0.4696114196777344, 0.4692490234375, 0.46899301147460937, 0.46896435546875, 0.4694558715820312, 0.46925927734375, 0.4685393981933594, 0.4686530456542969, 0.46877902221679685, 0.4687155151367188, 0.46934527587890623, 0.9787330322265625, 0.468917236328125, 0.46886502075195313, 0.46889984130859375, 0.4691875915527344, 0.4684267578125, 0.46861004638671877, 0.46856500244140625, 0.46833560180664063, 0.4684933166503906, 0.4683735046386719, 0.4683263854980469, 0.4683735046386719, 0.468236328125, 0.46872265625, 0.4684236755371094, 0.46842572021484374, 0.46854452514648437, 0.46859982299804687, 0.4687155151367188, 0.46844732666015626, 0.4685188293457031, 0.4686991577148438, 0.46980194091796873, 0.46855987548828126, 0.4690032653808594, 0.46884454345703125, 0.4687575073242187, 0.4688609313964844, 0.4688558044433594, 0.4689459228515625, 0.46866329956054686, 0.4685557861328125, 0.46875238037109374, 0.4686827392578125, 0.46909747314453126, 0.46889984130859375, 0.4689756164550781, 0.46924798583984373, 0.46901144409179685, 0.4689469299316406, 0.46897048950195314, 0.46984909057617186, 0.4700190734863281, 0.4696781005859375, 0.469550048828125, 0.4697763977050781, 0.47159500122070314, 0.46917837524414063, 0.4684482421875, 0.4692193298339844, 0.46906060791015625, 0.4689141845703125, 0.46894284057617186, 0.468885498046875, 0.469064697265625, 0.46948043823242186, 0.4687861633300781, 0.4690513916015625, 0.46873907470703124, 0.46875341796875, 0.4690708618164062, 0.46917938232421874, 0.9775615844726563, 0.46874624633789064, 0.46858853149414065, 0.4686796875, 0.46987161254882814, 0.46889370727539065, 0.4687656860351562, 0.468790283203125, 0.4687278137207031, 0.46867864990234376, 0.4685322265625, 0.46837454223632813, 0.4686530456542969, 0.46875955200195313, 0.46893466186523436, 0.468706298828125, 0.4689858703613281, 0.46853018188476564, 0.46881585693359373, 0.46894796752929685, 0.4691353454589844, 0.46871038818359373, 0.46851071166992186, 0.468632568359375, 0.46843902587890623, 0.46874725341796875, 0.468890625, 0.4688670654296875, 0.46844723510742187, 0.468490234375, 0.4689090576171875, 0.46941900634765626, 0.46909030151367187, 0.4689100952148437, 0.4687175598144531, 0.46938113403320314, 0.4690616455078125, 0.4687718505859375, 0.46904730224609376, 0.4689336242675781, 0.4687933349609375, 0.46920501708984375, 0.46855471801757814, 0.4687718505859375, 0.47131646728515625, 0.4688424987792969, 0.46912005615234376, 0.4692090148925781, 0.4688189392089844, 0.4685926513671875, 0.4688619384765625, 0.46930743408203124, 0.4693247680664063, 0.4690636901855469, 0.46892135620117187, 0.4687145690917969, 0.4691373291015625, 0.4687145080566406, 0.4686673889160156, 0.46867352294921877, 0.46872677612304686, 0.4689254455566406, 0.46927871704101565, 0.9772830810546875, 0.46864077758789063, 0.4684922790527344, 0.46857421875, 0.46906878662109375, 0.4690033264160156, 0.46841543579101563, 
0.4685895690917969, 0.46849639892578127, 0.468279296875, 0.46869912719726564, 0.4685619201660156, 0.4687575073242187, 0.46905242919921875, 0.46849432373046873, 0.46846875, 0.46872576904296875, 0.46877490234375, 0.4686592102050781, 0.4684892272949219, 0.4687974548339844, 0.46863565063476564, 0.4685844421386719, 0.46846157836914065, 0.46825778198242185, 0.4684994506835938, 0.4686448669433594, 0.46830081176757815, 0.46856805419921876, 0.4685209655761719, 0.46874929809570315, 0.4689264526367187, 0.46845541381835937, 0.4686520385742188, 0.46867770385742186, 0.46937490844726565, 0.46933709716796873, 0.4687554626464844, 0.4690616455078125, 0.46915789794921875, 0.46891213989257813, 0.46888754272460936, 0.4687278137207031, 0.46915277099609376, 0.4688332824707031, 0.4693790588378906, 0.46926437377929686, 0.46878515625, 0.4690831298828125, 0.46895718383789065, 0.4689776611328125, 0.4690616455078125, 0.46907391357421874, 0.46851788330078126, 0.46869195556640625, 0.4686376953125, 0.46900634765625, 0.4722012023925781, 0.4687718505859375, 0.4688670654296875, 0.46880459594726565, 0.468642822265625, 0.4693155822753906, 0.9763594360351563, 0.46846771240234375, 0.46886502075195313, 0.4687083740234375, 0.4689837646484375, 0.4684646301269531, 0.4687247314453125, 0.46836224365234375, 0.4683929748535156, 0.4685619201660156, 0.46836737060546874, 0.4687503356933594, 0.46857830810546874, 0.46878411865234376, 0.4683345947265625, 0.468358154296875, 0.46845849609375, 0.4687032470703125, 0.4686520385742188, 0.4686458740234375, 0.46885784912109374, 0.46872677612304686, 0.4685823974609375, 0.46873095703125, 0.4685475158691406, 0.468790283203125, 0.46927053833007815, 0.46861822509765627, 0.46847589111328125, 0.46836224365234375, 0.470181884765625, 0.4690575256347656, 0.4688353271484375, 0.4692449340820313, 0.4685137939453125, 0.46857217407226565, 0.46875238037109374, 0.46873190307617185, 0.4689664001464844, 0.4691271667480469, 0.4686090087890625, 0.4692101135253906, 0.4687974548339844, 0.469317626953125, 0.4686315612792969, 0.46899917602539065, 0.4692490234375, 0.4690309143066406, 0.4687646789550781, 0.46850253295898436, 0.4687083435058594, 0.46883941650390626, 0.46927871704101565, 0.4686152038574219, 0.4694292297363281, 0.4691435546875, 0.46974566650390626, 0.4686940307617187, 0.468969482421875, 0.46890188598632815, 0.46895001220703125, 0.4687564697265625, 0.46904730224609376, 0.9786060791015625, 0.46841650390625, 0.4683786315917969, 0.46915994262695315, 0.46917938232421874, 0.4692777099609375, 0.46883636474609375, 0.468716552734375, 0.46857318115234375, 0.4691128234863281, 0.468600830078125, 0.4694783935546875, 0.46952960205078126, 0.4696739807128906, 0.4687196044921875, 0.46852505493164065, 0.46841854858398435, 0.4683591613769531, 0.4683100280761719, 0.4684431457519531, 0.4690411376953125, 0.4687923278808594, 0.4687216491699219, 0.46836224365234375, 0.4686090087890625, 0.4691363830566406, 0.46857113647460935, 0.46862335205078126, 0.46869094848632814, 0.4686581726074219, 0.4694435729980469, 0.4694640502929687, 0.46918450927734373, 0.46924288940429687, 0.4694343566894531, 0.4690462646484375, 0.469317626953125, 0.46890188598632815, 0.46938323974609375, 0.4693267822265625, 0.46866329956054686, 0.4690370483398438, 0.468864013671875, 0.4687575073242187, 0.4686438598632813, 0.46876776123046876, 0.46984698486328125, 0.4690380859375, 0.46935552978515627, 0.46853836059570314, 0.46839910888671876, 0.46874008178710935, 0.46875955200195313, 0.4687667236328125, 0.4688209838867187, 0.4691302490234375, 0.46921624755859376, 
0.4689674377441406, 0.46906777954101564, 0.46867864990234376, 0.46859161376953123, 0.46864794921875, 0.4688332824707031, 0.980326416015625, 0.468701171875, 0.46863360595703124, 0.468738037109375, 0.46885989379882814, 0.4684267578125, 0.468389892578125, 0.46853839111328127, 0.46828131103515624, 0.4683601989746094, 0.46820352172851565, 0.467962890625, 0.4681553955078125, 0.4686499938964844, 0.4686612548828125, 0.4687503356933594, 0.46850253295898436, 0.4680570983886719, 0.4683458557128906, 0.46834994506835936, 0.46889471435546876, 0.46862130737304686, 0.4687196044921875, 0.46831512451171875, 0.4685035400390625, 0.4694077453613281, 0.4687421569824219, 0.4688087158203125, 0.4701829528808594, 0.4684830322265625, 0.4688619384765625, 0.4692244567871094, 0.46869094848632814, 0.4689715270996094, 0.46905548095703126, 0.4686315612792969, 0.468864013671875, 0.4689029235839844, 0.46895001220703125, 0.46892340087890627, 0.46898687744140627, 0.469212158203125, 0.46885272216796875, 0.4688230285644531, 0.46921624755859376, 0.46866943359375, 0.46926849365234374, 0.46906777954101564, 0.469248046875, 0.4691763000488281, 0.46952960205078126, 0.4694057006835938, 0.46893157958984377, 0.4688035888671875, 0.4690667419433594, 0.4687615966796875, 0.4690288696289063, 0.46915890502929686, 0.46848818969726563, 0.4687585144042969, 0.4686315612792969, 0.4686049194335937, 0.46867352294921877, 0.9793843383789063, 0.4697733154296875, 0.46955825805664064, 0.46915890502929686, 0.46897869873046877, 0.4683345947265625, 0.46859982299804687, 0.468316162109375, 0.46864694213867186, 0.4686110534667969, 0.46845745849609377, 0.46816357421875, 0.4684892272949219, 0.46949169921875, 0.46866842651367185, 0.46861312866210936, 0.4686090087890625, 0.4685424499511719, 0.46886605834960937, 0.4688752746582031, 0.46871353149414063, 0.4691127624511719, 0.46852615356445315, 0.46842361450195313, 0.46897665405273437, 0.4689141845703125, 0.4685926513671875, 0.46885989379882814, 0.468421630859375, 0.4683888549804687, 0.46856600952148436, 0.4689182739257812, 0.46858547973632814, 0.46870220947265623, 0.4691896362304688, 0.46975283813476565, 0.46981631469726565, 0.4697753601074219, 0.470096923828125, 0.469834716796875, 0.46959820556640625, 0.4698542175292969, 0.46952243041992187, 0.4697272338867188, 0.4697907104492188, 0.4687656860351562, 0.4691937255859375, 0.46966680908203123, 0.46944461059570314, 0.46914764404296877, 0.469760009765625, 0.4693544921875, 0.46949993896484377, 0.4696063537597656, 0.46877694702148437, 0.4688281555175781, 0.4692244567871094, 0.468706298828125, 0.4684646301269531, 0.46868582153320315, 0.4686725158691406, 0.46858547973632814, 0.4686612548828125, 0.9803509521484375, 0.46829159545898436, 0.4681697387695313, 0.4690083923339844, 0.46880972290039064, 0.46866329956054686, 0.4685547485351563, 0.46837042236328125, 0.468105224609375, 0.4683243713378906, 0.46842364501953127, 0.4681779174804688, 0.4683816833496094, 0.4685599365234375, 0.4682454528808594, 0.46819635009765626, 0.46841036987304685, 0.46834994506835936, 0.468537353515625, 0.46845849609375, 0.46887115478515623, 0.46867352294921877, 0.46852197265625, 0.46868377685546875, 0.46900018310546876, 0.46878207397460936, 0.4686499938964844, 0.4685537414550781, 0.46879437255859374, 0.4684031982421875, 0.4687779846191406, 0.4690032653808594, 0.4684912719726563, 0.46856298828125, 0.46899917602539065, 0.4696033020019531, 0.4696708984375, 0.469855224609375, 0.4690411376953125, 0.468674560546875, 0.46875442504882814, 0.469359619140625, 0.4701552734375, 0.469064697265625, 
0.46861312866210936, 0.4686315612792969, 0.4697047119140625, 0.4690083923339844, 0.46888754272460936, 0.46868582153320315, 0.469073974609375, 0.4693411254882813, 0.4695132141113281, 0.46920089721679686, 0.46941696166992186, 0.46919271850585936, 0.46985626220703125, 0.46913946533203127, 0.46915789794921875, 0.4686253967285156, 0.46875955200195313, 0.468642822265625, 0.4691517333984375]",tokens/s,2.1000993200656053,,,main,False,False,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1460.260864,1709.703168,0.0,1063.256064,942.605312,s,10,0.8792122802734375,0.08792122802734374,0.0017793873831683257,0.08834756851196289,0.08902708129882812,0.09042998733520508,0.09155231216430663,"[0.09183289337158203, 0.08871532440185546, 0.08646198272705079, 0.08839430236816406, 0.08854621124267578, 0.08836022186279296, 0.08833491516113282, 0.08542150115966797, 0.08774205017089844, 0.08540287780761718]",tokens/s,2911.6972742962976,kWh,1.0152025072210082e-06,5.56282725791299e-07,2.6184398915211728e-06,4.1899251245334795e-06,tokens/kWh,61098943.869194776,MB,1460.588544,1709.703168,0.0,1063.256064,942.607872,s,10,54.0859736328125,5.40859736328125,0.07705683066534309,5.45104150390625,5.477568017578125,5.481690747070313,5.484988930664063,"[5.4858134765625, 5.457708984375, 5.47665185546875, 5.46595458984375, 5.472109375, 5.4443740234375, 5.39297412109375, 5.29275341796875, 5.29816357421875, 5.29947021484375]",tokens/s,11.648121641981426,kWh,6.466018401433034e-05,3.543631727276219e-05,0.00015379821301947998,0.0002538947143065725,tokens/kWh,248134.35038245353,,s,629,54.78888442230225,0.08710474470954252,0.010581359575427846,0.08675225830078125,0.08761917724609375,0.0884242431640625,0.17260260864257815,"[0.08388607788085937, 0.08398233795166016, 0.08383897399902343, 0.08367718505859376, 0.08453632354736328, 0.0890890884399414, 0.08862201690673828, 0.08834764862060547, 0.08846745300292969, 0.08839679718017578, 0.08905522918701173, 0.08837939453125, 0.08853196716308594, 0.08820326232910156, 0.08852684783935547, 0.08853094482421875, 0.08833126068115234, 0.0883599395751953, 0.08833229064941406, 0.08829747009277343, 0.08771071624755859, 0.08838553619384766, 0.08845721435546874, 0.08875212860107422, 0.08825856018066407, 0.08850534057617188, 0.08816230773925782, 0.08822886657714844, 0.08858009338378907, 0.0882339859008789, 0.08867430114746094, 0.08864665222167968, 0.08844902038574219, 0.08828108978271484, 0.0884295654296875, 0.08401407623291016, 0.08382259368896484, 0.0838635482788086, 0.08400077056884765, 0.08771788787841797, 0.0872069091796875, 0.08849203491210937, 0.08865280151367187, 0.08827187347412109, 0.08736563110351563, 0.08729804992675781, 0.08718950653076171, 0.08671231842041016, 0.08691506958007812, 0.08681779479980468, 0.0866529312133789, 0.086940673828125, 0.08705535888671875, 0.08424960327148437, 0.08400383758544921, 0.08397414398193359, 0.08412364959716796, 0.08525926208496094, 0.08683622741699219, 
0.08680038452148438, 0.08671949005126953, 0.08692940521240235, 0.1763768310546875, 0.08659661102294922, 0.08731238555908204, 0.0868884506225586, 0.08696832275390624, 0.08851148986816407, 0.08714854431152344, 0.08598323059082032, 0.08636518096923829, 0.08685465240478515, 0.08704512023925781, 0.08686284637451172, 0.08745779418945313, 0.08453529357910156, 0.0869775390625, 0.08696627044677735, 0.08664575958251954, 0.0876072998046875, 0.08689766693115235, 0.08705535888671875, 0.08679936218261719, 0.0866550064086914, 0.08538006591796875, 0.08647270202636718, 0.08609996795654297, 0.08713215637207031, 0.08740147399902344, 0.08702365112304687, 0.08689250946044921, 0.08694169616699218, 0.08711577606201172, 0.08716902160644531, 0.08723149108886719, 0.0867747802734375, 0.08696012878417969, 0.08699494171142579, 0.0842239990234375, 0.08377855682373046, 0.08404684448242188, 0.08385945892333985, 0.08593510437011719, 0.08742301177978516, 0.08683209228515625, 0.08660889434814453, 0.0867952651977539, 0.08660377502441406, 0.08784178924560547, 0.08544153594970703, 0.08774246215820312, 0.08707891082763672, 0.08705331420898438, 0.08723865509033203, 0.08705843353271485, 0.08711577606201172, 0.08695193481445312, 0.08490188598632813, 0.08705741119384766, 0.08451789093017578, 0.08685260772705078, 0.08724787139892579, 0.08672563171386719, 0.08705638122558594, 0.08634572601318359, 0.1737533416748047, 0.08698675537109375, 0.08697344207763671, 0.08704000091552734, 0.08702982330322266, 0.08781715393066407, 0.08694579315185547, 0.08745369720458984, 0.08681785583496093, 0.08706249237060547, 0.08565961456298828, 0.0864000015258789, 0.0841502685546875, 0.08501248168945312, 0.08692018890380859, 0.08674201965332032, 0.0869222412109375, 0.08693862152099609, 0.0867041244506836, 0.08703385925292968, 0.08675225830078125, 0.086761474609375, 0.08619315338134766, 0.08686489868164063, 0.08680652618408204, 0.08746598052978516, 0.08694783782958984, 0.08730828857421875, 0.08752537536621094, 0.08693350219726563, 0.08705023956298828, 0.08706867218017578, 0.08747212982177735, 0.0880005111694336, 0.08732262420654296, 0.08604876708984376, 0.08707180786132812, 0.08670201873779297, 0.0873164825439453, 0.08682189178466797, 0.08709529876708984, 0.08716492462158203, 0.08606719970703125, 0.08672358703613281, 0.08690790557861328, 0.0867962875366211, 0.08702361297607422, 0.08684031677246094, 0.08695507049560547, 0.08671123504638673, 0.08706867218017578, 0.08863744354248047, 0.08740863800048829, 0.08729503631591796, 0.08682080078125, 0.08690380859375, 0.08818892669677734, 0.08729702758789062, 0.08700415802001953, 0.08678809356689453, 0.0868106231689453, 0.08693657684326171, 0.08690995025634765, 0.17623654174804687, 0.08711167907714844, 0.08703897857666015, 0.08722227478027343, 0.08726322937011718, 0.08703590393066406, 0.08675942230224609, 0.08699088287353515, 0.08680445098876953, 0.08698777770996094, 0.08604876708984376, 0.08687308502197266, 0.08715878295898437, 0.08707379150390625, 0.087108642578125, 0.08752022552490234, 0.08703385925292968, 0.08690995025634765, 0.0869939193725586, 0.08686182403564453, 0.08672665405273437, 0.08708710479736329, 0.086866943359375, 0.08725094604492188, 0.08706047821044922, 0.08675945281982422, 0.08477897644042968, 0.08724479675292969, 0.08646656036376953, 0.086866943359375, 0.08677273559570313, 0.08715058898925782, 0.08593714904785156, 0.08755712127685547, 0.08769945526123046, 0.08737586975097657, 0.0873512954711914, 0.08723763275146484, 0.08681779479980468, 0.08708710479736329, 0.08671129608154297, 0.08673894500732422, 
0.08689766693115235, 0.0878828125, 0.08707987213134766, 0.08682710266113282, 0.08714435577392578, 0.08673996734619141, 0.08677581024169922, 0.08577126312255859, 0.08668160247802735, 0.08681574249267578, 0.0868136978149414, 0.08693555450439452, 0.08668364715576173, 0.0859504623413086, 0.08378470611572265, 0.08481996917724609, 0.08730931091308594, 0.08659455871582031, 0.08389631652832032, 0.08560025787353516, 0.08680038452148438, 0.17275801086425782, 0.08369971466064453, 0.08384204864501953, 0.08377139282226563, 0.08545689392089843, 0.08574976348876953, 0.08727859497070313, 0.08688025665283203, 0.0867799072265625, 0.08705741119384766, 0.08711580657958984, 0.08679933166503906, 0.0867962875366211, 0.08704307556152344, 0.08686386871337891, 0.08696320343017579, 0.08698880004882813, 0.08742403411865235, 0.0870973129272461, 0.08734207916259766, 0.08837529754638672, 0.08715980529785156, 0.08705023956298828, 0.086793212890625, 0.0869744644165039, 0.08696217346191407, 0.08680550384521485, 0.08711167907714844, 0.08749362945556641, 0.08701235198974609, 0.08706150054931641, 0.08685772705078125, 0.08689766693115235, 0.08696115112304688, 0.0865955810546875, 0.08719564819335937, 0.08694989013671875, 0.08730931091308594, 0.08871324920654297, 0.08721711730957031, 0.08766668701171874, 0.08723865509033203, 0.08547942352294922, 0.088416259765625, 0.08738614654541016, 0.08706454467773438, 0.08716083526611328, 0.08708815765380859, 0.08697238159179688, 0.08681676483154296, 0.08706559753417968, 0.0872499237060547, 0.08688639831542969, 0.08803123474121094, 0.0873512954711914, 0.08714649963378907, 0.08679936218261719, 0.08706047821044922, 0.08689254760742188, 0.08732466888427734, 0.0865843505859375, 0.08680239868164062, 0.08697548675537109, 0.1747763214111328, 0.08670310211181641, 0.08710451507568359, 0.08699903869628907, 0.08693657684326171, 0.08607334136962891, 0.0869969940185547, 0.08681574249267578, 0.08696627044677735, 0.08683827209472657, 0.08736153411865234, 0.08679219055175781, 0.08696012878417969, 0.086830078125, 0.0871352310180664, 0.08736870574951172, 0.08679424285888672, 0.08719462585449218, 0.08674508666992188, 0.08693862152099609, 0.08799231719970703, 0.08773529815673828, 0.08741171264648437, 0.08712397003173829, 0.08704512023925781, 0.08727654266357422, 0.08699903869628907, 0.08696729278564454, 0.0868853759765625, 0.08697344207763671, 0.08712499237060548, 0.08712499237060548, 0.0865771484375, 0.08749158477783203, 0.08688127899169922, 0.08696524810791016, 0.08687923431396484, 0.08711475372314453, 0.08697856140136719, 0.08686489868164063, 0.08696832275390624, 0.0843724822998047, 0.08361881256103515, 0.08333516693115234, 0.08410316467285156, 0.08653314971923828, 0.0870778579711914, 0.08714342498779297, 0.08421171569824219, 0.08394239807128906, 0.08393215942382813, 0.08404377746582031, 0.08362393951416015, 0.08348060607910156, 0.08413180541992188, 0.08405094146728516, 0.08691302490234375, 0.08714854431152344, 0.08712191772460938, 0.08708812713623047, 0.08714649963378907, 0.08697241973876953, 0.08708812713623047, 0.175857666015625, 0.08723865509033203, 0.08733695983886719, 0.08694886779785156, 0.08750080108642579, 0.08700927734375, 0.0869959716796875, 0.08687718200683593, 0.08966963195800781, 0.0872069091796875, 0.08702873229980469, 0.0869713897705078, 0.08688333129882812, 0.08693247985839844, 0.08705228424072266, 0.08724275207519532, 0.0872847671508789, 0.08704819488525391, 0.08684848022460938, 0.08682803344726563, 0.08715570831298829, 0.08688742065429687, 0.08504934692382812, 0.08777011108398437, 
0.08640819549560547, 0.08897232055664063, 0.08616751861572265, 0.0871905288696289, 0.0890224609375, 0.0864686050415039, 0.08654438018798828, 0.08633036804199219, 0.08477593231201172, 0.08412876892089843, 0.08437452697753907, 0.08408985900878906, 0.08387686157226562, 0.08403353881835937, 0.08393318176269532, 0.08396185302734376, 0.08435302734375, 0.08403558349609375, 0.08378470611572265, 0.0841338882446289, 0.08378470611572265, 0.08367820739746094, 0.08367922973632813, 0.0835563507080078, 0.08376831817626954, 0.08383078765869141, 0.08380723571777343, 0.0837734375, 0.08563814544677735, 0.08487833404541016, 0.08407552337646484, 0.0837580795288086, 0.0836485137939453, 0.08376012420654297, 0.08358809661865234, 0.08362290954589843, 0.08371814727783203, 0.08357478332519531, 0.08497869110107421, 0.1696030731201172, 0.08372121429443359, 0.08371916961669922, 0.0837918701171875, 0.08386457824707032, 0.08376319885253906, 0.08395263671875, 0.08374784088134765, 0.08342527770996094, 0.08687206268310547, 0.08412262725830078, 0.08380416107177735, 0.08415436553955079, 0.0838440933227539, 0.08380316925048828, 0.08431715393066407, 0.08392499542236329, 0.08376319885253906, 0.08381747436523437, 0.08540467071533203, 0.08532991790771484, 0.08402022552490235, 0.084421630859375, 0.0841195526123047, 0.08383897399902343, 0.08380623626708984, 0.08381231689453125, 0.08393830108642578, 0.08379705810546875, 0.08394233703613281, 0.08370175933837891, 0.08405811309814454, 0.08380210876464844, 0.0837734375, 0.08362290954589843, 0.08380006408691407, 0.08357478332519531, 0.08359935760498047, 0.08378470611572265, 0.0834549789428711, 0.08362188720703125, 0.08369152069091797, 0.08387686157226562, 0.08376627349853516, 0.08377961730957031, 0.0838430404663086, 0.08387686157226562, 0.08553778839111328, 0.0837232666015625, 0.08371916961669922, 0.08374476623535156, 0.08370073699951172, 0.08382259368896484, 0.08384921264648437, 0.08401203155517578, 0.08357990264892579, 0.08365055847167968, 0.08378575897216797, 0.08367919921875, 0.08369055938720703, 0.0851127700805664, 0.08426700592041016, 0.08618905639648437, 0.1722030029296875, 0.08384819030761718, 0.08397721862792969, 0.0837570571899414, 0.08369664001464844, 0.0834549789428711, 0.08403865814208984, 0.08388505554199219, 0.08392908477783204, 0.08388198089599609, 0.08392704010009766, 0.08387379455566406, 0.0837027816772461, 0.08390860748291015, 0.08369254302978515, 0.08369766235351563, 0.08587161254882812, 0.0840478744506836, 0.08396288299560548, 0.08394649505615234, 0.08418099212646485, 0.08524288177490234, 0.08649318695068359, 0.0840816650390625, 0.08387174224853515, 0.08375603485107422, 0.08405811309814454, 0.08390758514404296, 0.0839393310546875, 0.08409497833251953, 0.08389836883544922, 0.08448000335693359, 0.08434893035888671, 0.08451583862304687, 0.0842639389038086, 0.08398745727539063, 0.08408370971679688, 0.08378880310058594, 0.08385740661621094, 0.08418406677246094, 0.08394035339355468, 0.08402124786376954, 0.08522137451171875, 0.08547840118408204, 0.08503705596923829, 0.08401510620117188, 0.08397516632080078, 0.08380723571777343, 0.08466022491455077, 0.0840079345703125, 0.08379193878173828, 0.08372525024414063, 0.08389631652832032, 0.0839393310546875, 0.08357071685791016, 0.08390652465820313, 0.08391577911376953, 0.0837550048828125, 0.08391474914550781, 0.08372633361816406, 0.08374476623535156, 0.08390758514404296, 0.0837550048828125, 0.1732474822998047, 0.08425472259521484, 0.0840273895263672, 0.08414924621582032, 0.08400179290771484, 0.08393830108642578, 0.08408268737792969, 
0.08393727874755859, 0.08398745727539063, 0.08395468902587891, 0.08501862335205078, 0.08602214050292968, 0.08572313690185547, 0.08382463836669922, 0.08390962982177734, 0.08375296020507812, 0.08380928039550781, 0.0840263671875, 0.08383385467529297, 0.08391986846923828, 0.08388505554199219, 0.083666015625, 0.08356137847900391, 0.08396697235107421, 0.08387481689453125, 0.08371916961669922, 0.08389119720458985, 0.08397926330566406, 0.0838656005859375, 0.08394751739501953, 0.08407244873046875, 0.0837232666015625, 0.08388813018798828, 0.08374578857421874, 0.08396185302734376, 0.08385330963134766, 0.08368434906005859, 0.08367206573486329, 0.08419328308105468, 0.08365875244140625, 0.08380825805664062, 0.08445235443115234, 0.0841533432006836, 0.08358707427978515, 0.0837027816772461, 0.08361472320556641, 0.08509645080566407, 0.08371302032470704, 0.08371097564697266, 0.08373248291015625, 0.08388198089599609, 0.08400691223144531, 0.08375910186767578, 0.08609996795654297, 0.08662732696533203, 0.08376422119140625, 0.08392396545410157, 0.08380006408691407, 0.08389119720458985, 0.0837754898071289, 0.0837734375, 0.0838175048828125, 0.08397615814208985]",tokens/s,11.480430869002337,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1303.977984,1030.22592,0.0,383.778816,312.280064,s,10,0.30782016181945804,0.030782016181945804,0.001617235684294077,0.030563695907592774,0.031164259910583494,0.03323989057540893,0.03490039510726929,"[0.03531552124023438, 0.03054502487182617, 0.030670495986938478, 0.029116479873657227, 0.030544319152832032, 0.02917731285095215, 0.030620607376098632, 0.030559423446655274, 0.030567968368530273, 0.030703008651733397]",tokens/s,8316.544260351227,kWh,3.557098667864181e-07,1.9491237936146868e-07,8.339715507108577e-07,1.3845937968587447e-06,tokens/kWh,184891771.5656334,MB,1303.977984,1030.22592,0.0,383.778816,347.090432,s,10,18.765849975585937,1.8765849975585938,0.01629298714952931,1.8788928833007814,1.8852921142578125,1.8978253173828123,1.9078518798828124,"[1.9103585205078124, 1.8825069580078124, 1.879499267578125, 1.8580986328125, 1.8786011962890625, 1.8434554443359374, 1.8807445068359374, 1.8754886474609376, 1.8791845703125, 1.8779122314453125]",tokens/s,33.5716208335684,kWh,2.201072483317281e-05,1.2059537590589184e-05,4.825437327548859e-05,8.232463569925058e-05,tokens/kWh,765263.0280703872,,s,629,19.00581683158875,0.030215925010474952,0.003670631507539437,0.02977996826171875,0.03002470397949219,0.03029995498657227,0.05998026794433601,"[0.03094118309020996, 0.03138150405883789, 0.03138355255126953, 0.031524864196777344, 0.030980096817016602, 0.03207884979248047, 0.032363521575927735, 0.03149004745483398, 0.03120844841003418, 0.031268863677978515, 0.03084492874145508, 0.032194561004638675, 0.031160320281982422, 0.03057868766784668, 0.030236671447753907, 0.03517747116088867, 0.030717952728271485, 0.030108671188354492, 0.03002470397949219, 0.029833215713500977, 
0.029899776458740233, 0.02977177619934082, 0.02977791976928711, 0.02977382469177246, 0.030176256179809572, 0.0299683837890625, 0.029740032196044923, 0.02977894401550293, 0.029844480514526366, 0.029732864379882814, 0.02974412727355957, 0.029797376632690428, 0.029791231155395507, 0.029726720809936522, 0.029648895263671874, 0.029749248504638674, 0.02977791976928711, 0.029949951171875, 0.02977996826171875, 0.02976255989074707, 0.02981990432739258, 0.029797376632690428, 0.0297728328704834, 0.02997039985656738, 0.029839359283447265, 0.029833215713500977, 0.029870080947875976, 0.02978508758544922, 0.02968780708312988, 0.029784063339233398, 0.02979327964782715, 0.029755392074584962, 0.02972774314880371, 0.029976640701293945, 0.02982700729370117, 0.029852672576904295, 0.029896703720092774, 0.029731840133666993, 0.029747200012207032, 0.02983628845214844, 0.029772800445556642, 0.029739007949829102, 0.06072115325927734, 0.029829120635986327, 0.02977689552307129, 0.029834239959716798, 0.030591999053955078, 0.03021414375305176, 0.029755392074584962, 0.02983628845214844, 0.02980352020263672, 0.029842432022094727, 0.029707263946533204, 0.029665279388427734, 0.029698047637939453, 0.02979430389404297, 0.029593599319458007, 0.02993152046203613, 0.0301527042388916, 0.029792255401611328, 0.029764608383178712, 0.029861888885498046, 0.03102822494506836, 0.030027776718139648, 0.02980147171020508, 0.030087167739868165, 0.02975129508972168, 0.030086143493652344, 0.029826047897338868, 0.02974617576599121, 0.030042112350463866, 0.029730815887451172, 0.029899776458740233, 0.029838336944580077, 0.02994790458679199, 0.02980659294128418, 0.02976972770690918, 0.029682687759399414, 0.02978816032409668, 0.029817855834960938, 0.029734912872314452, 0.029831167221069335, 0.02974208068847656, 0.02978508758544922, 0.029848575592041016, 0.029800447463989257, 0.029785120010375976, 0.029689823150634766, 0.03014553642272949, 0.029886463165283202, 0.03018035125732422, 0.029938688278198244, 0.02973695945739746, 0.030068735122680663, 0.02973695945739746, 0.029897727966308595, 0.0297891845703125, 0.0298024959564209, 0.02995814323425293, 0.02976051139831543, 0.02976870346069336, 0.02997555160522461, 0.029975584030151367, 0.029692895889282228, 0.029708288192749024, 0.061895679473876954, 0.030073856353759764, 0.02974412727355957, 0.029680639266967773, 0.029848575592041016, 0.029882368087768556, 0.029543424606323244, 0.02978713607788086, 0.029755392074584962, 0.029875200271606447, 0.029856767654418945, 0.029831167221069335, 0.029770751953125, 0.02977791976928711, 0.02973695945739746, 0.029897727966308595, 0.030079999923706056, 0.029894655227661132, 0.029703168869018554, 0.0297574405670166, 0.029837312698364257, 0.029813760757446288, 0.029814783096313476, 0.029743167877197267, 0.029764543533325194, 0.029707263946533204, 0.02977791976928711, 0.029730815887451172, 0.030044160842895507, 0.029833215713500977, 0.02974617576599121, 0.029815807342529296, 0.029740032196044923, 0.029815807342529296, 0.029816831588745117, 0.029799423217773437, 0.029713407516479492, 0.0297891845703125, 0.02983628845214844, 0.029896703720092774, 0.029834239959716798, 0.0297574405670166, 0.029868032455444334, 0.029748224258422853, 0.029849599838256836, 0.02977382469177246, 0.02986604881286621, 0.029807552337646485, 0.030026752471923827, 0.02976255989074707, 0.02976972770690918, 0.029892608642578124, 0.029701120376586915, 0.02976870346069336, 0.02976255989074707, 0.029797376632690428, 0.02977177619934082, 0.029867008209228517, 0.029808639526367187, 0.02974515151977539, 
0.029740032196044923, 0.029821952819824218, 0.030018560409545897, 0.05799935913085937, 0.028490751266479493, 0.028519424438476562, 0.028368896484375, 0.028404735565185548, 0.02834022331237793, 0.028285951614379884, 0.02834739112854004, 0.028308479309082032, 0.028387327194213868, 0.028321792602539062, 0.028298240661621094, 0.028923904418945313, 0.031113216400146484, 0.030136320114135744, 0.02981990432739258, 0.029868032455444334, 0.029728832244873046, 0.02970515251159668, 0.029386751174926756, 0.0297523193359375, 0.029822975158691405, 0.02971238327026367, 0.02978508758544922, 0.02980352020263672, 0.02977484893798828, 0.029677600860595704, 0.029713375091552734, 0.029651968002319336, 0.02973798370361328, 0.029881343841552735, 0.029772800445556642, 0.02979532814025879, 0.029672447204589843, 0.029861888885498046, 0.029646848678588866, 0.029551616668701174, 0.029764608383178712, 0.02973695945739746, 0.029662208557128908, 0.02970419120788574, 0.029772800445556642, 0.02975436782836914, 0.02972876739501953, 0.02979430389404297, 0.029699071884155274, 0.02981888008117676, 0.029753376007080078, 0.02983011245727539, 0.029718528747558592, 0.029489152908325194, 0.029640703201293944, 0.029736991882324218, 0.02938057518005371, 0.029748224258422853, 0.029775871276855468, 0.02980659294128418, 0.029814783096313476, 0.029716512680053712, 0.029680608749389648, 0.02979635238647461, 0.029783039093017577, 0.02978816032409668, 0.06087680053710937, 0.029916160583496092, 0.030342144012451173, 0.029867008209228517, 0.029744159698486327, 0.029726688385009765, 0.02975846481323242, 0.029857791900634766, 0.0297891845703125, 0.029784063339233398, 0.02979020881652832, 0.029849599838256836, 0.029449216842651366, 0.02939084815979004, 0.029541376113891602, 0.029874176025390626, 0.029816831588745117, 0.029724672317504884, 0.029915136337280275, 0.029921279907226563, 0.029829120635986327, 0.029734912872314452, 0.02981888008117676, 0.02980147171020508, 0.029860864639282225, 0.029874176025390626, 0.029817855834960938, 0.029855743408203125, 0.029718528747558592, 0.029854719161987304, 0.029833215713500977, 0.02995199966430664, 0.02972979164123535, 0.02980147171020508, 0.029864959716796875, 0.030071807861328126, 0.029874208450317383, 0.02984239959716797, 0.02975027275085449, 0.02978508758544922, 0.029832191467285156, 0.029860864639282225, 0.02976255989074707, 0.029702144622802733, 0.029920255661010742, 0.02975948715209961, 0.02994790458679199, 0.02977382469177246, 0.02973388862609863, 0.029643775939941407, 0.029703168869018554, 0.029852672576904295, 0.029839359283447265, 0.02983628845214844, 0.029784063339233398, 0.029769792556762695, 0.029781951904296874, 0.02998784065246582, 0.02981068801879883, 0.02998784065246582, 0.0297574405670166, 0.029809696197509766, 0.029711328506469726, 0.05807513427734375, 0.02834329605102539, 0.028271615982055662, 0.02834329605102539, 0.028402687072753906, 0.028421119689941408, 0.028279808044433592, 0.028318719863891603, 0.028318719863891603, 0.02834022331237793, 0.02835148811340332, 0.02838118362426758, 0.02836172866821289, 0.028310527801513673, 0.02832793617248535, 0.02815795135498047, 0.028197887420654297, 0.02831667137145996, 0.028222463607788087, 0.028233728408813476, 0.028296192169189452, 0.028251136779785156, 0.028318719863891603, 0.02973798370361328, 0.030136320114135744, 0.029817855834960938, 0.029731840133666993, 0.02978099250793457, 0.02972979164123535, 0.029930496215820314, 0.029886463165283202, 0.029709312438964845, 0.029739007949829102, 0.029848575592041016, 0.02972774314880371, 
0.02976870346069336, 0.029648895263671874, 0.029732864379882814, 0.029813760757446288, 0.029820928573608397, 0.029755392074584962, 0.029713407516479492, 0.029643775939941407, 0.02977996826171875, 0.029856767654418945, 0.029802528381347657, 0.029723615646362306, 0.029940736770629882, 0.029823007583618163, 0.029761503219604492, 0.02976972770690918, 0.029691904067993165, 0.02968783950805664, 0.02980246353149414, 0.029711360931396483, 0.029895679473876953, 0.029684736251831056, 0.029894655227661132, 0.029791231155395507, 0.029692928314208986, 0.029740032196044923, 0.029820928573608397, 0.030217216491699218, 0.06100377655029297, 0.029852672576904295, 0.02978508758544922, 0.029661184310913087, 0.029660160064697266, 0.02972368049621582, 0.029649887084960937, 0.02981171226501465, 0.02997555160522461, 0.02975846481323242, 0.02975846481323242, 0.029682687759399414, 0.029800447463989257, 0.02975129508972168, 0.030038015365600586, 0.02983526420593262, 0.02981888008117676, 0.029861888885498046, 0.029864959716796875, 0.029840383529663086, 0.030003200531005858, 0.0307906551361084, 0.03020697593688965, 0.029899776458740233, 0.02975846481323242, 0.02978508758544922, 0.029917184829711913, 0.02975129508972168, 0.029702144622802733, 0.02978713607788086, 0.02976972770690918, 0.02976665687561035, 0.029708288192749024, 0.029823999404907226, 0.029826047897338868, 0.02983628845214844, 0.03013222312927246, 0.03099545669555664, 0.030038015365600586, 0.029878271102905272, 0.029697023391723632, 0.02977791976928711, 0.029929471969604493, 0.029862911224365234, 0.029932544708251952, 0.029714431762695313, 0.02996940803527832, 0.02980659294128418, 0.02976051139831543, 0.02951372718811035, 0.029684736251831056, 0.029820928573608397, 0.02977996826171875, 0.029778976440429688, 0.029754335403442383, 0.029816831588745117, 0.02976563262939453, 0.029755456924438477, 0.02987615966796875, 0.029709312438964845, 0.029723743438720703, 0.029749151229858398, 0.02977382469177246, 0.06091059112548828, 0.03012403106689453, 0.0297574405670166, 0.029799423217773437, 0.02974515151977539, 0.02982809638977051, 0.029861888885498046, 0.029743104934692382, 0.02978201675415039, 0.029713407516479492, 0.029759519577026366, 0.029883359909057616, 0.02977484893798828, 0.02979532814025879, 0.029874176025390626, 0.029875200271606447, 0.029850624084472657, 0.02984351921081543, 0.02972358322143555, 0.029660160064697266, 0.02992742347717285, 0.02974515151977539, 0.02968780708312988, 0.029831167221069335, 0.029741056442260744, 0.029732864379882814, 0.029642751693725586, 0.029642751693725586, 0.029895679473876953, 0.029628416061401368, 0.029723648071289063, 0.029618175506591796, 0.029715456008911133, 0.029864959716796875, 0.029732864379882814, 0.02978201675415039, 0.029748224258422853, 0.029813760757446288, 0.02982707214355469, 0.0297574405670166, 0.029854719161987304, 0.02972876739501953, 0.029657087326049804, 0.029668352127075196, 0.029611007690429687, 0.029412351608276367, 0.02972876739501953, 0.029885440826416015, 0.029694976806640624, 0.02976870346069336, 0.029775871276855468, 0.02993152046203613, 0.02977894401550293, 0.029700096130371095, 0.02977382469177246, 0.029710336685180663, 0.029688831329345702, 0.029684736251831056, 0.029700096130371095, 0.02970419120788574, 0.029667327880859375, 0.029730815887451172, 0.029863935470581054, 0.060827648162841794, 0.02976563262939453, 0.029716480255126954, 0.029770751953125, 0.02973388862609863, 0.02977689552307129, 0.02975846481323242, 0.029849599838256836, 0.0298024959564209, 0.029850624084472657, 
0.029688831329345702, 0.029731840133666993, 0.029740032196044923, 0.029731840133666993, 0.029859840393066408, 0.02975027275085449, 0.029707263946533204, 0.029718528747558592, 0.02973388862609863, 0.029772800445556642, 0.029853696823120116, 0.02976563262939453, 0.029611007690429687, 0.02977996826171875, 0.02979430389404297, 0.029807615280151366, 0.029343807220458984, 0.029495231628417967, 0.029894655227661132, 0.03171123123168945, 0.030046207427978516, 0.03002470397949219, 0.029706239700317383, 0.02974412727355957, 0.029679616928100585, 0.029911039352416992, 0.02976051139831543, 0.029823999404907226, 0.029944831848144532, 0.02991926383972168, 0.029790176391601562, 0.02977382469177246, 0.029692928314208986, 0.029726720809936522, 0.029773855209350587, 0.029737951278686524, 0.02981990432739258, 0.029906944274902345, 0.029869056701660155, 0.029907968521118163, 0.029767679214477538, 0.02978201675415039, 0.02968780708312988, 0.02976563262939453, 0.029872127532958984, 0.02996428871154785, 0.02977689552307129, 0.02996326446533203, 0.029944831848144532, 0.029889568328857422, 0.029706207275390625, 0.029854719161987304, 0.029902847290039062, 0.06100787353515625, 0.029770816802978516, 0.029812671661376952, 0.02975027275085449, 0.029656063079833983, 0.029666303634643554, 0.02972979164123535, 0.0297256965637207, 0.029929471969604493, 0.029775871276855468, 0.029690879821777344, 0.029693952560424806, 0.02958028793334961, 0.029640703201293944, 0.02977996826171875, 0.029739007949829102, 0.03000115203857422, 0.029897727966308595, 0.029840383529663086, 0.029814783096313476, 0.0297523193359375, 0.029708288192749024, 0.02975027275085449, 0.02981068801879883, 0.029732864379882814, 0.029838336944580077, 0.029718528747558592, 0.02970419120788574, 0.0297205753326416, 0.029740032196044923, 0.02974617576599121, 0.02969599914550781, 0.029757503509521485, 0.0298853759765625, 0.02979840087890625, 0.029700096130371095, 0.029752351760864257, 0.02980656051635742, 0.02973695945739746, 0.029663232803344725, 0.02972159957885742, 0.030027776718139648, 0.029971519470214845, 0.029904832839965822, 0.029829120635986327, 0.02974412727355957, 0.030027776718139648, 0.029853696823120116, 0.029837312698364257, 0.029840383529663086, 0.029850624084472657, 0.02986911964416504, 0.030047168731689455, 0.030050304412841795, 0.029815807342529296, 0.029775871276855468, 0.02998784065246582, 0.029890560150146486, 0.02975334358215332, 0.029883392333984377, 0.02983526420593262, 0.029847551345825195, 0.02976972770690918]",tokens/s,33.09513111557333,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1736.183808,12352.749568,0.0,11706.302464,11035.465216,s,10,13.024343627929689,1.3024343627929689,0.002338564901261307,1.30185107421875,1.3038882202148436,1.3064520446777343,1.3085031042480468,"[1.309015869140625, 1.3033184814453125, 1.3003494873046875, 1.3011973876953125, 1.3011253662109374, 1.300955810546875, 1.301638427734375, 
1.302063720703125, 1.3022261962890624, 1.302452880859375]",tokens/s,196.55501061184225,kWh,1.5360654360718197e-05,8.415124671664671e-06,7.215477994602094e-05,9.593055897840379e-05,tokens/kWh,2668596.9802139024,MB,1736.183808,12352.749568,0.0,11706.302464,11329.172992,s,10,763.3166562499999,76.331665625,0.01404827179512581,76.33404687500001,76.34705,76.34801328125,76.34878390625,"[76.3116171875, 76.3329609375, 76.3489765625, 76.3411171875, 76.3468359375, 76.331546875, 76.3441953125, 76.3351328125, 76.31521875, 76.3090546875]",tokens/s,0.8253455428249735,kWh,0.0009011750282347202,0.0004939259724664952,0.0042084087556131695,0.005603509756314386,tokens/kWh,11242.953566558468,,s,629,773.821041748047,1.2302401299650985,0.15462762199250651,1.21159375,1.2122087890625,1.2124819091796875,2.513270859375,"[1.2114217529296876, 1.2109813232421875, 1.2112762451171875, 1.2113695068359376, 1.2109967041015626, 1.21078369140625, 1.210820556640625, 1.2114083251953125, 1.211473876953125, 1.2109915771484374, 1.2110264892578124, 1.211177001953125, 1.2111329345703126, 1.2108482666015625, 1.210894287109375, 1.2108328857421875, 1.2114595947265625, 1.21109912109375, 1.2110377197265625, 1.2114605712890625, 1.21109814453125, 1.2109107666015626, 1.2113837890625, 1.211439208984375, 1.2113089599609375, 1.2114852294921874, 1.2108953857421876, 1.211968505859375, 1.2109854736328125, 1.2114114990234375, 1.211503662109375, 1.2115701904296876, 1.2111954345703124, 1.212129150390625, 1.211504638671875, 1.2116378173828124, 1.21143701171875, 1.211441162109375, 1.2110633544921876, 1.21100390625, 1.210900390625, 1.2119766845703126, 1.21135302734375, 1.211661376953125, 1.21140625, 1.211376708984375, 1.21117578125, 1.211125732421875, 1.2112117919921874, 1.2114166259765624, 1.2111329345703126, 1.2116644287109375, 1.2111021728515625, 1.211261962890625, 1.2113489990234374, 1.2112978515625, 1.211167724609375, 1.211284423828125, 1.2117083740234376, 1.2115958251953125, 1.2114329833984374, 1.2118804931640625, 2.514330810546875, 1.210652587890625, 1.2109178466796875, 1.21115234375, 1.210639404296875, 1.2111728515625, 1.2111964111328124, 1.2109495849609375, 1.210982421875, 1.210829833984375, 1.211577392578125, 1.2116376953125, 1.2123822021484374, 1.2119183349609375, 1.2111278076171874, 1.2120401611328124, 1.2116695556640624, 1.2114871826171876, 1.2117001953125, 1.2118865966796875, 1.211429931640625, 1.2113817138671874, 1.21173095703125, 1.2116612548828125, 1.211536376953125, 1.211513916015625, 1.2119930419921876, 1.2118671875, 1.2117279052734375, 1.211217041015625, 1.2116253662109375, 1.211358154296875, 1.211610107421875, 1.21132958984375, 1.2115968017578125, 1.2115753173828125, 1.212295166015625, 1.211431884765625, 1.211931640625, 1.21170947265625, 1.21185791015625, 1.2116807861328125, 1.211569091796875, 1.2115169677734374, 1.2117945556640626, 1.211475830078125, 1.2115762939453125, 1.2120863037109375, 1.211937744140625, 1.2119654541015625, 1.2116817626953125, 1.2118035888671874, 1.212337158203125, 1.2117073974609376, 1.2118385009765624, 1.2118773193359376, 1.21191015625, 1.2120863037109375, 1.21194091796875, 1.212105712890625, 1.2121497802734376, 1.2118917236328124, 1.211799560546875, 2.513431640625, 1.2114862060546876, 1.2116961669921875, 1.2118785400390626, 1.2121865234375, 1.2122286376953124, 1.2120013427734375, 1.2121025390625, 1.21226953125, 1.211442138671875, 1.2113643798828124, 1.2111483154296876, 1.2115557861328126, 1.2120247802734374, 1.2112076416015625, 1.2115435791015625, 1.2116663818359374, 1.2110797119140626, 1.2113746337890625, 
1.2114166259765624, 1.2112281494140624, 1.2118333740234375, 1.211620361328125, 1.2116275634765625, 1.211684814453125, 1.2115281982421875, 1.212507080078125, 1.2123299560546874, 1.211989990234375, 1.2121068115234375, 1.212232666015625, 1.2115784912109375, 1.21185888671875, 1.211282470703125, 1.211768798828125, 1.2113284912109374, 1.2114544677734376, 1.2116142578125, 1.2119736328125, 1.21160400390625, 1.2118660888671875, 1.211783203125, 1.211763671875, 1.21185888671875, 1.211821044921875, 1.211894775390625, 1.212494873046875, 1.2119080810546874, 1.2119388427734374, 1.2123709716796875, 1.211740234375, 1.2130374755859374, 1.212662841796875, 1.212389404296875, 1.21267919921875, 1.2124569091796875, 1.212505126953125, 1.212078125, 1.2119132080078125, 1.2119111328125, 1.2119111328125, 1.2122039794921875, 1.212859375, 2.513799072265625, 1.2114248046875, 1.211298828125, 1.2112598876953125, 1.2111002197265626, 1.211157470703125, 1.2119388427734374, 1.2117012939453125, 1.211177001953125, 1.21225927734375, 1.2121456298828126, 1.212099609375, 1.2122501220703126, 1.212095458984375, 1.2122705078125, 1.212464111328125, 1.21236474609375, 1.21200439453125, 1.211461669921875, 1.21135107421875, 1.2123013916015626, 1.2120933837890624, 1.2120391845703125, 1.2125921630859375, 1.2116397705078126, 1.21157421875, 1.2111861572265625, 1.21097216796875, 1.2113848876953126, 1.2114390869140625, 1.21153125, 1.211658203125, 1.2115947265625, 1.2115711669921876, 1.2116275634765625, 1.211242431640625, 1.21172998046875, 1.2113121337890624, 1.2114923095703125, 1.211609130859375, 1.2118292236328125, 1.2115927734375, 1.21164794921875, 1.2114503173828124, 1.2116920166015626, 1.2116182861328124, 1.2114801025390625, 1.211564208984375, 1.21261865234375, 1.2118896484375, 1.211937744140625, 1.2124937744140625, 1.2117872314453124, 1.2115753173828125, 1.2119429931640624, 1.211613037109375, 1.212316650390625, 1.211663330078125, 1.2121661376953126, 1.2119859619140625, 1.2116695556640624, 1.2118455810546875, 1.2117862548828124, 2.5132001953125, 1.21116162109375, 1.2113070068359375, 1.2113653564453124, 1.211356201171875, 1.2111585693359375, 1.21143505859375, 1.211683837890625, 1.2116121826171875, 1.2118189697265624, 1.2116448974609375, 1.21178515625, 1.2120279541015626, 1.21154052734375, 1.211658203125, 1.211451416015625, 1.21213134765625, 1.2125296630859375, 1.2122440185546874, 1.212705810546875, 1.2123677978515626, 1.2122071533203125, 1.2123084716796875, 1.2123751220703125, 1.2121220703125, 1.2126556396484376, 1.2121026611328125, 1.21187841796875, 1.212506103515625, 1.21231982421875, 1.212422119140625, 1.2116009521484374, 1.2117115478515625, 1.2120238037109374, 1.2117667236328125, 1.2115660400390624, 1.2122071533203125, 1.211494384765625, 1.211684814453125, 1.21158251953125, 1.211788330078125, 1.2116644287109375, 1.211957275390625, 1.2114073486328125, 1.211895751953125, 1.2115875244140626, 1.2115179443359374, 1.211845703125, 1.2115126953125, 1.2115343017578124, 1.2121968994140624, 1.2115262451171875, 1.211950927734375, 1.2120555419921875, 1.211916259765625, 1.2119879150390624, 1.2116162109375, 1.2115435791015625, 1.212336181640625, 1.2118609619140626, 1.21176171875, 1.211826171875, 1.2119696044921875, 2.513016845703125, 1.2115302734375, 1.21221533203125, 1.211273193359375, 1.211552734375, 1.21154248046875, 1.211378662109375, 1.2118077392578126, 1.2109425048828124, 1.2108125, 1.211199462890625, 1.2112393798828125, 1.2109864501953125, 1.2110008544921875, 1.2113499755859376, 1.21132958984375, 1.211335693359375, 1.2113797607421875, 
1.211218994140625, 1.21196240234375, 1.211410400390625, 1.2120770263671874, 1.2116243896484375, 1.2119141845703125, 1.2118671875, 1.211895751953125, 1.2114534912109376, 1.2118670654296875, 1.211398193359375, 1.2112353515625, 1.21159375, 1.2112230224609375, 1.211525146484375, 1.211252685546875, 1.2116796875, 1.21173095703125, 1.2115252685546876, 1.21129150390625, 1.211832275390625, 1.211515869140625, 1.21154052734375, 1.21152001953125, 1.2113541259765626, 1.211916259765625, 1.211999267578125, 1.211557861328125, 1.2116612548828125, 1.2116644287109375, 1.211683837890625, 1.2118814697265625, 1.2116859130859374, 1.21154248046875, 1.212590087890625, 1.211631591796875, 1.21187841796875, 1.2120863037109375, 1.211619384765625, 1.2120166015625, 1.2119869384765625, 1.2120177001953125, 1.2121640625, 1.211704345703125, 1.211842529296875, 2.51329833984375, 1.2113704833984376, 1.2111278076171874, 1.21133056640625, 1.2119141845703125, 1.2111728515625, 1.2112230224609375, 1.2113807373046874, 1.2113182373046876, 1.211325439453125, 1.2114288330078125, 1.211557861328125, 1.211810791015625, 1.2117626953125, 1.21128759765625, 1.2117484130859375, 1.21141650390625, 1.2113756103515625, 1.2116080322265625, 1.211427978515625, 1.2113477783203126, 1.2119552001953124, 1.211189208984375, 1.2114248046875, 1.211610107421875, 1.21135205078125, 1.2119481201171876, 1.2117523193359374, 1.211484130859375, 1.2121180419921875, 1.21152099609375, 1.2116326904296875, 1.211673583984375, 1.2113121337890624, 1.2119869384765625, 1.2122685546875, 1.2123853759765626, 1.2128603515625, 1.2124027099609376, 1.2125235595703125, 1.2127314453125, 1.2127191162109374, 1.212885986328125, 1.2127242431640626, 1.2118333740234375, 1.211953125, 1.2121129150390626, 1.2117801513671875, 1.212015625, 1.211763671875, 1.2117012939453125, 1.2120667724609375, 1.21187939453125, 1.2117197265625, 1.2119981689453125, 1.21179443359375, 1.2120340576171875, 1.212147705078125, 1.21187841796875, 1.2120791015625, 1.21209033203125, 1.211905029296875, 1.2124200439453126, 2.514125732421875, 1.2114227294921875, 1.211292724609375, 1.211658203125, 1.2115589599609375, 1.2113983154296875, 1.2119888916015624, 1.2113704833984376, 1.211610107421875, 1.211758544921875, 1.2114852294921874, 1.21145654296875, 1.211821044921875, 1.2112169189453126, 1.2116602783203125, 1.211989990234375, 1.2113223876953125, 1.2115517578125, 1.2113489990234374, 1.2112762451171875, 1.2118035888671874, 1.211737060546875, 1.2115977783203125, 1.2129249267578126, 1.21261767578125, 1.2122420654296875, 1.211623291015625, 1.2114923095703125, 1.212148681640625, 1.2119019775390625, 1.21175146484375, 1.2118385009765624, 1.2116920166015626, 1.2118294677734376, 1.21165185546875, 1.2111922607421874, 1.2113212890625, 1.2114442138671875, 1.211694091796875, 1.21236474609375, 1.212018798828125, 1.211768798828125, 1.211747314453125, 1.2114288330078125, 1.2113223876953125, 1.2116746826171876, 1.21150048828125, 1.21159375, 1.21177294921875, 1.21147802734375, 1.212080078125, 1.21192138671875, 1.2113746337890625, 1.2114554443359375, 1.2115548095703126, 1.21162646484375, 1.211575439453125, 1.2113038330078125, 1.211806640625, 1.2116695556640624, 1.211400146484375, 1.2115343017578124, 1.2114677734375, 2.5139150390625, 1.2104947509765625, 1.2104796142578125, 1.2110601806640624, 1.210818603515625, 1.21080419921875, 1.2113817138671874, 1.2107110595703126, 1.2115599365234375, 1.210962890625, 1.21103466796875, 1.2109844970703125, 1.2109957275390626, 1.210639404296875, 1.210967041015625, 1.210735595703125, 1.211298828125, 
1.2114892578125, 1.210882080078125, 1.2117001953125, 1.211292724609375, 1.2112998046875, 1.211736083984375, 1.2114698486328126, 1.2116868896484374, 1.212020751953125, 1.2116572265625, 1.2118538818359375, 1.211252685546875, 1.2111072998046875, 1.211619384765625, 1.2111688232421876, 1.211368408203125, 1.2118763427734376, 1.2112978515625, 1.211556884765625, 1.211440185546875, 1.2111011962890625, 1.21163671875, 1.2113223876953125, 1.2111298828125, 1.2116162109375, 1.2110653076171876, 1.2111217041015625, 1.211494384765625, 1.211451416015625, 1.2114759521484375, 1.21181689453125, 1.2115538330078126, 1.2116080322265625, 1.211831298828125, 1.211515869140625, 1.21173193359375, 1.211282470703125, 1.2113018798828126, 1.21145654296875, 1.2112281494140624, 1.2112547607421875, 1.2116612548828125, 1.21142578125, 1.21210986328125, 1.2116920166015626, 1.2119910888671874, 2.51358935546875, 1.21063427734375, 1.211430908203125, 1.210544189453125, 1.2111871337890625, 1.21103564453125, 1.2112589111328125, 1.2109700927734375, 1.2114503173828124, 1.2111072998046875, 1.21147802734375, 1.210945556640625, 1.2111922607421874, 1.211684814453125, 1.2114554443359375, 1.2112486572265626, 1.2119346923828125, 1.2112281494140624, 1.2114483642578124, 1.2110079345703124, 1.2108524169921875, 1.2109935302734376, 1.2109833984375, 1.2113858642578126, 1.2111492919921876, 1.210892333984375, 1.2111124267578126, 1.2112025146484375, 1.2109700927734375, 1.211509765625, 1.21159375, 1.2114063720703125, 1.211040771484375, 1.21109814453125, 1.210799072265625, 1.210892333984375, 1.210966064453125, 1.210841064453125, 1.2113079833984375, 1.21124560546875, 1.2112608642578124, 1.2112496337890626, 1.2110306396484376, 1.2115035400390626, 1.211241455078125, 1.2117279052734375, 1.2114945068359375, 1.2111419677734374, 1.2113212890625, 1.211431884765625, 1.2111728515625, 1.2113961181640625, 1.211167724609375, 1.210883056640625, 1.21160498046875, 1.211494384765625, 1.2115814208984375, 1.2117698974609374, 1.2115025634765626, 1.2116275634765625, 1.2118814697265625, 1.211230224609375, 1.21170947265625]",tokens/s,0.8128494394247809,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1688.977408,2250.768384,0.0,1604.32128,1463.693312,s,10,1.2614907455444335,0.12614907455444335,0.001156305474696835,0.12596041870117186,0.12672372131347656,0.12802366485595704,0.12906361968994143,"[0.1293236083984375, 0.12533795166015624, 0.12527750396728515, 0.1264024658203125, 0.12550204467773438, 0.12507218933105468, 0.1257669143676758, 0.12615392303466796, 0.12621929931640624, 0.12643484497070312]",tokens/s,2029.3450499275416,kWh,1.4795928945144019e-06,8.107423014348568e-07,6.482848936275459e-06,8.773184132224718e-06,tokens/kWh,29179827.545131337,MB,1688.977408,2250.768384,0.0,1604.32128,1560.975872,s,10,72.8006865234375,7.280068652343751,0.00707557452467299,7.2794865722656255,7.288760400390625,7.290637524414063,7.292139223632812,"[7.2925146484375, 7.2867998046875, 7.28834326171875, 7.272900390625, 7.28120458984375, 7.28177783203125, 
7.2702080078125, 7.2777685546875, 7.277201171875, 7.27196826171875]",tokens/s,8.653764546535935,kWh,8.591771873335045e-05,4.70891264273655e-05,0.0003703246191761246,0.0005033314643368406,tokens/kWh,125166.02768516574,,s,629,73.8150656280517,0.1173530455135958,0.015000349665490152,0.11545600128173829,0.11595755310058595,0.11627356262207031,0.24130564147949218,"[0.1175910415649414, 0.11764224243164062, 0.11673804473876953, 0.11585126495361328, 0.115281982421875, 0.11545388793945313, 0.11590860748291015, 0.11528498840332031, 0.11539968109130859, 0.11538841247558594, 0.11548159790039063, 0.11523072052001954, 0.11521842956542969, 0.11532492828369141, 0.11577958679199218, 0.11560755157470703, 0.1152696304321289, 0.11566079711914062, 0.1159925765991211, 0.11540889739990234, 0.11546623992919922, 0.11522150421142578, 0.11538944244384766, 0.1153617935180664, 0.11544371032714844, 0.1166714859008789, 0.11574886322021484, 0.1154672622680664, 0.11605299377441407, 0.11584102630615234, 0.1156485137939453, 0.11544882965087891, 0.11554099273681641, 0.115557373046875, 0.11551026916503906, 0.1155788803100586, 0.115725341796875, 0.11555939483642579, 0.11551129913330078, 0.11544371032714844, 0.11543244934082031, 0.11539968109130859, 0.11540684509277344, 0.11523072052001954, 0.11638988494873047, 0.11598438262939453, 0.11537510681152344, 0.11538534545898438, 0.11558604431152343, 0.11559219360351562, 0.11591474914550781, 0.11588198089599609, 0.1159362564086914, 0.11570175933837891, 0.11595673370361329, 0.11578368377685547, 0.11646463775634766, 0.11617485046386719, 0.11598336029052735, 0.11553279876708984, 0.11578470611572265, 0.11579801940917969, 0.24151040649414063, 0.11573862457275391, 0.11605094146728516, 0.11689881896972656, 0.1161523208618164, 0.1156648941040039, 0.11554099273681641, 0.11535564422607422, 0.11528498840332031, 0.11527065277099609, 0.11566387176513672, 0.11533516693115234, 0.11576012420654297, 0.11564749145507812, 0.1155051498413086, 0.11542221069335938, 0.11543961334228516, 0.11525529479980469, 0.11523993682861328, 0.11541913604736329, 0.1155225601196289, 0.1157734375, 0.11563929748535157, 0.11553587341308594, 0.11531571197509766, 0.115378173828125, 0.11524201965332032, 0.11551340484619141, 0.11532073974609375, 0.11577961730957032, 0.11592704010009766, 0.11556451416015626, 0.1154549789428711, 0.11527680206298828, 0.11526656341552734, 0.11538739013671875, 0.11530854034423828, 0.11538432312011719, 0.11546214294433593, 0.11541401672363281, 0.11528192138671875, 0.11616563415527344, 0.11571199798583984, 0.11578880310058594, 0.11551747131347656, 0.11540067291259766, 0.11685273742675781, 0.11557068634033203, 0.11726950073242187, 0.1159393310546875, 0.11543148803710937, 0.11552044677734374, 0.1161891860961914, 0.11548876953125, 0.11572940826416016, 0.1157396469116211, 0.11657421112060547, 0.11594854736328125, 0.11598851013183593, 0.11593417358398438, 0.11551846313476563, 0.11548365020751954, 0.11548569488525391, 0.2411530303955078, 0.11556454467773437, 0.11577139282226563, 0.11626496124267578, 0.11560652923583985, 0.11565161895751953, 0.11566384124755859, 0.11581747436523437, 0.1154672622680664, 0.11527474975585937, 0.11536998748779297, 0.11562598419189453, 0.11528089904785156, 0.11566902160644531, 0.11589116668701172, 0.11617894744873047, 0.1157201919555664, 0.11589734649658204, 0.11571302032470702, 0.11588607788085938, 0.11542323303222657, 0.11556966400146484, 0.1156833267211914, 0.11643392181396485, 0.11568434906005859, 0.11659468841552735, 0.11596083068847657, 0.11616665649414062, 
0.11567922973632813, 0.11553794860839844, 0.11543036651611328, 0.11538636779785157, 0.11580620574951171, 0.11554918670654298, 0.11579296112060547, 0.1156412811279297, 0.11582361602783203, 0.11581132507324218, 0.11555328369140624, 0.11571507263183593, 0.11545600128173829, 0.11533926391601562, 0.1156147232055664, 0.1153966064453125, 0.1154119644165039, 0.11549286651611328, 0.11582566070556641, 0.11649638366699219, 0.1159516830444336, 0.11595462036132813, 0.11600281524658203, 0.11591474914550781, 0.11610931396484375, 0.11565261077880859, 0.1153290252685547, 0.11547238159179687, 0.1153290252685547, 0.115346435546875, 0.11534438323974609, 0.11529523468017579, 0.115378173828125, 0.11543551635742187, 0.11540377807617187, 0.24315084838867188, 0.11575193786621094, 0.11506790161132813, 0.11524915313720703, 0.1155962905883789, 0.11531366729736328, 0.11553177642822265, 0.11558297729492187, 0.11519385528564453, 0.11537305450439453, 0.11545811462402343, 0.11599967956542968, 0.11572838592529297, 0.1153966064453125, 0.11536077117919921, 0.11558092498779297, 0.1153617935180664, 0.1152911376953125, 0.11529011535644532, 0.11535564422607422, 0.11525939178466797, 0.1152143325805664, 0.11515494537353516, 0.11549696350097656, 0.11525017547607422, 0.11560550689697266, 0.11576217651367188, 0.11544268798828125, 0.11537203216552734, 0.11532288360595704, 0.11530239868164062, 0.11551334381103516, 0.115162109375, 0.11524813079833984, 0.1151436767578125, 0.11530035400390624, 0.11520614624023437, 0.11535871887207032, 0.11520716857910156, 0.11560345458984375, 0.11539968109130859, 0.11531676483154298, 0.11529417419433594, 0.1153966064453125, 0.11623017883300782, 0.11542422485351563, 0.11526451110839844, 0.11549388885498046, 0.11539762878417968, 0.11577855682373046, 0.1153955841064453, 0.11553279876708984, 0.11524403381347656, 0.11579084777832031, 0.11539968109130859, 0.11569152069091797, 0.11539353942871093, 0.11553997039794922, 0.11545600128173829, 0.11532492828369141, 0.1152696304321289, 0.11538841247558594, 0.1153433609008789, 0.241364990234375, 0.11547853088378907, 0.11533721923828125, 0.11539049530029297, 0.11531874847412109, 0.11551129913330078, 0.1152542724609375, 0.11560758209228515, 0.11542422485351563, 0.1155389404296875, 0.11542118072509766, 0.11545804595947265, 0.1156178207397461, 0.115463134765625, 0.11520716857910156, 0.11564339447021485, 0.11541913604736329, 0.11587276458740234, 0.1153955841064453, 0.11544882965087891, 0.11543654632568359, 0.11536793518066406, 0.11598540496826172, 0.11562393951416015, 0.11578470611572265, 0.11597004699707031, 0.11561062622070313, 0.11569459533691406, 0.11571302032470702, 0.11590144348144531, 0.11557068634033203, 0.11549900817871094, 0.11544678497314453, 0.11544268798828125, 0.11540889739990234, 0.11547443389892578, 0.11561574554443359, 0.11543449401855468, 0.11550624084472656, 0.11566381072998047, 0.11555225372314454, 0.11549286651611328, 0.11550822448730469, 0.11547955322265625, 0.1153064956665039, 0.11546623992919922, 0.11551744079589844, 0.11544371032714844, 0.11622195434570312, 0.11589017486572266, 0.11552665710449218, 0.1153986587524414, 0.11532390594482422, 0.11578470611572265, 0.11576934051513672, 0.11560550689697266, 0.11554713439941407, 0.11664281463623047, 0.11574784088134765, 0.11553997039794922, 0.11533824157714843, 0.11548569488525391, 0.11580518341064452, 0.2409891815185547, 0.11542425537109376, 0.11530342102050781, 0.11544166564941406, 0.115346435546875, 0.11540684509277344, 0.11648614501953125, 0.1165475845336914, 0.11569561767578125, 0.11536281585693359, 
0.11529318237304688, 0.11541401672363281, 0.11542221069335938, 0.11558809661865234, 0.11539456176757812, 0.11545702362060548, 0.11533209228515626, 0.11550822448730469, 0.11541913604736329, 0.11535667419433594, 0.11532185363769532, 0.11544985961914063, 0.11528300476074219, 0.11542829132080078, 0.11535564422607422, 0.11552665710449218, 0.11533004760742188, 0.11546419525146484, 0.11538022613525391, 0.11521539306640625, 0.1153545913696289, 0.11576934051513672, 0.11584614562988281, 0.11546214294433593, 0.11551232147216797, 0.11547647857666016, 0.11558092498779297, 0.1154796142578125, 0.11671033477783203, 0.11560243225097656, 0.11540480041503906, 0.11559120178222657, 0.11558499145507813, 0.11560243225097656, 0.11611443328857422, 0.11554815673828125, 0.11593113708496093, 0.11560857391357422, 0.11550003051757812, 0.1153433609008789, 0.11553488159179688, 0.11636220550537109, 0.11614924621582032, 0.11589631652832032, 0.11544371032714844, 0.11543961334228516, 0.11590144348144531, 0.11554303741455078, 0.11543961334228516, 0.11548467254638672, 0.1157918701171875, 0.11545804595947265, 0.11548159790039063, 0.24169778442382814, 0.11536077117919921, 0.11515801239013672, 0.1152573471069336, 0.11527884674072265, 0.11527986907958984, 0.115346435546875, 0.11558502197265624, 0.1152471694946289, 0.11550918579101563, 0.11519078063964844, 0.11538329315185547, 0.11518669128417969, 0.11536281585693359, 0.11518163299560547, 0.11539347076416015, 0.11518678283691407, 0.11522755432128906, 0.11527577972412109, 0.11537920379638672, 0.11528710174560547, 0.11573241424560547, 0.11531775665283203, 0.11599462127685548, 0.11536895751953125, 0.11559219360351562, 0.1152174072265625, 0.11544473266601563, 0.11515187072753906, 0.11589734649658204, 0.11575091552734375, 0.11540377807617187, 0.11524508666992188, 0.11525116729736327, 0.1152573471069336, 0.11555532836914062, 0.1153259506225586, 0.1153617935180664, 0.1152542724609375, 0.11547135925292969, 0.11532083129882813, 0.11537305450439453, 0.11539968109130859, 0.11537612915039062, 0.11517235565185546, 0.11529216003417969, 0.11535257720947266, 0.11539046478271485, 0.11532390594482422, 0.11542940521240234, 0.11545801544189453, 0.115557373046875, 0.11530239868164062, 0.11545193481445312, 0.11529827117919922, 0.11565363311767578, 0.11541299438476563, 0.11608678436279297, 0.11551641845703126, 0.11544064331054688, 0.11531980895996094, 0.11546214294433593, 0.11526348876953126, 0.2421370849609375, 0.11553485107421875, 0.11565363311767578, 0.11698073577880859, 0.11526451110839844, 0.11523788452148437, 0.11507615661621094, 0.11531769561767578, 0.11525939178466797, 0.11527884674072265, 0.11663155364990234, 0.11554815673828125, 0.11534745788574219, 0.11536281585693359, 0.11533106994628907, 0.11543449401855468, 0.11581439971923828, 0.11606221008300781, 0.11575091552734375, 0.11566182708740234, 0.1154867172241211, 0.1154119644165039, 0.11538432312011719, 0.11553075408935547, 0.11544985961914063, 0.1154119644165039, 0.11534540557861328, 0.11543142700195312, 0.11537612915039062, 0.11558502197265624, 0.11556352233886719, 0.1155072021484375, 0.11545394897460938, 0.11533824157714843, 0.11553485107421875, 0.11541094207763672, 0.1153597412109375, 0.11537407684326172, 0.11529216003417969, 0.11547955322265625, 0.11540787506103516, 0.11534950256347656, 0.11540275573730469, 0.11537305450439453, 0.11569664001464844, 0.11560038757324219, 0.11557785797119141, 0.11525635528564453, 0.115210205078125, 0.11607859039306641, 0.11538022613525391, 0.11546419525146484, 0.11593727874755859, 0.11551436614990235, 
0.11531263732910156, 0.11544576263427735, 0.11548569488525391, 0.11550924682617188, 0.11583999633789062, 0.11551641845703126, 0.11535871887207032, 0.11538739013671875, 0.11543142700195312, 0.24210124206542968, 0.11537920379638672, 0.11549593353271484, 0.11537612915039062, 0.11512525177001953, 0.11541709136962891, 0.1153280029296875, 0.11576729583740235, 0.115378173828125, 0.11518669128417969, 0.11519593811035156, 0.115463134765625, 0.11528498840332031, 0.11549491119384765, 0.11540172576904296, 0.11594445037841797, 0.11559526062011719, 0.116279296875, 0.11579698944091797, 0.11580210876464844, 0.11537203216552734, 0.11540991973876953, 0.11529318237304688, 0.11539456176757812, 0.1153597412109375, 0.11614412689208985, 0.11539968109130859, 0.11535155487060547, 0.11532492828369141, 0.11529523468017579, 0.11584108734130859, 0.1154241943359375, 0.11634585571289062, 0.11548159790039063, 0.11571302032470702, 0.1153219223022461, 0.1154579849243164, 0.11549388885498046, 0.1154303970336914, 0.11558297729492187, 0.11536383819580077, 0.11582975769042969, 0.11551436614990235, 0.11530035400390624, 0.11534233856201172, 0.11510886383056641, 0.11527168273925781, 0.11539968109130859, 0.11526860809326171, 0.1152573471069336, 0.11550822448730469, 0.11615952301025391, 0.1159669418334961, 0.11542835235595703, 0.11572223663330078, 0.11554918670654298, 0.11545906829833984, 0.11568742370605468, 0.1153812484741211, 0.11539762878417968, 0.11535155487060547, 0.1154303970336914, 0.11551129913330078, 0.24231832885742188, 0.11538432312011719, 0.1153259506225586, 0.11540480041503906, 0.11546419525146484, 0.11534745788574219, 0.11531878662109375, 0.11547138977050782, 0.115283935546875, 0.11556147003173828, 0.11574169921875, 0.11635916900634766, 0.11543142700195312, 0.11534540557861328, 0.11524813079833984, 0.1152911376953125, 0.11551436614990235, 0.11536589050292968, 0.11532083129882813, 0.11543247985839844, 0.11533615875244141, 0.11537407684326172, 0.11525631713867188, 0.11534438323974609, 0.11512217712402344, 0.11535769653320313, 0.11603763580322266, 0.11543449401855468, 0.1153986587524414, 0.11541506958007812, 0.11541094207763672, 0.11551331329345703, 0.11517440032958984, 0.11537715148925781, 0.11538534545898438, 0.11531263732910156, 0.11526758575439452, 0.11542835235595703, 0.11535052490234375, 0.11555430603027343, 0.11530035400390624, 0.11539046478271485, 0.11545398712158203, 0.1154815673828125, 0.11535667419433594, 0.11539250946044922, 0.1153812484741211, 0.1153986587524414, 0.1155051498413086, 0.11556665802001953, 0.11569554901123047, 0.11560447692871094, 0.11522866821289063, 0.11546419525146484, 0.11527577972412109, 0.11578880310058594, 0.11543142700195312, 0.1154119644165039, 0.115346435546875, 0.11526553344726563, 0.11522560119628907, 0.11540480041503906, 0.11531673431396484]",tokens/s,8.5212956819612,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2265.919488,3330.801664,0.0,2684.35456,2447.59552,s,10,2.4030111236572265,0.24030111236572266,0.0012440053970201484,0.24010930633544922,0.24119976043701172,0.24230707931518555,0.24319293441772463,"[0.2434143981933594, 0.2409536895751953, 0.23907139587402343, 0.23946762084960938, 0.2396122589111328, 0.23890797424316407, 0.23970700073242188, 0.2405301055908203, 0.24051161193847656, 0.24083506774902344]",tokens/s,1065.330066430923,kWh,2.8237744456245787e-06,1.5472998550649208e-06,1.2583984934904673e-05,1.6955059235594172e-05,tokens/kWh,15098738.166751605,MB,2265.919488,3330.801664,0.0,2684.35456,2597.681664,s,10,139.56764257812497,13.9567642578125,0.0037662168729373563,13.95505322265625,13.962588671875,13.96323818359375,13.963757792968751,"[13.95483984375, 13.95481640625, 13.9549306640625, 13.95517578125, 13.9599765625, 13.9638876953125, 13.9624443359375, 13.953060546875, 13.952154296875, 13.9563564453125]",tokens/s,4.513940254076789,kWh,0.00016469605360239274,9.026671937368518e-05,0.0007221266028330957,0.0009770893758091737,tokens/kWh,64477.21320050864,,s,629,141.50236668396,0.22496401698562793,0.028578568615475336,0.22146662902832032,0.22197882690429688,0.222200830078125,0.46145660278320316,"[0.222501953125, 0.22133958435058593, 0.22126080322265626, 0.22115533447265626, 0.22122291564941407, 0.22115737915039063, 0.22111334228515625, 0.2211881561279297, 0.22123001098632813, 0.22108671569824218, 0.22202674865722657, 0.2213621826171875, 0.22130892944335936, 0.221412353515625, 0.22178816223144532, 0.22125669860839844, 0.22154550170898438, 0.22125155639648436, 0.22184141540527344, 0.22194586181640624, 0.22131817626953126, 0.22113481140136718, 0.22139187622070314, 0.22203596496582031, 0.22120550537109376, 0.22182707214355468, 0.22148197937011718, 0.22121574401855468, 0.22116249084472656, 0.22113999938964843, 0.22153315734863283, 0.22137344360351563, 0.22128536987304687, 0.22147686767578126, 0.22139497375488282, 0.2215167694091797, 0.22115020751953124, 0.22119526672363282, 0.2216222686767578, 0.22131507873535156, 0.2215731201171875, 0.22197042846679688, 0.2214871063232422, 0.22137344360351563, 0.22149530029296874, 0.2211829833984375, 0.22195916748046876, 0.22144102478027344, 0.22115122985839844, 0.22134988403320313, 0.2216417236328125, 0.22153318786621093, 0.2213396453857422, 0.22140824890136718, 0.2213949432373047, 0.22160794067382814, 0.22140109252929688, 0.22149017333984375, 0.22158233642578126, 0.2214686737060547, 0.2215290832519531, 0.2215116729736328, 0.4632596435546875, 0.22171238708496094, 0.221233154296875, 0.22143283081054688, 0.2215679931640625, 0.22207183837890626, 0.22199087524414063, 0.2218137664794922, 0.22159461975097655, 0.2211604766845703, 0.22112969970703125, 0.22134375, 0.22146156311035156, 0.2211706237792969, 0.2218250274658203, 0.2215004119873047, 0.2212454376220703, 0.22122291564941407, 0.22121881103515625, 0.22129049682617188, 0.22144000244140624, 0.22122496032714845, 0.22121574401855468, 0.22197247314453125, 0.2214799346923828, 0.22136012268066407, 0.22134066772460936, 0.22136422729492186, 0.22131097412109374, 0.22127923583984374, 0.22128128051757812, 0.22142156982421876, 0.22127308654785155, 0.22122598266601562, 0.22121676635742188, 0.22201344299316406, 0.2224384002685547, 0.22226841735839845, 0.22168576049804686, 0.22131507873535156, 0.2211666259765625, 0.22145021057128905, 0.2213509063720703, 0.2210908203125, 0.2211420135498047, 0.2215116729736328, 0.22274458312988282, 0.22148300170898438, 
0.2212454376220703, 0.22115327453613282, 0.22183013916015626, 0.22141746520996095, 0.22183628845214845, 0.2215557098388672, 0.22127104187011717, 0.2220482635498047, 0.2213693389892578, 0.22153421020507813, 0.22129356384277343, 0.22213938903808594, 0.22136114501953125, 0.22106112670898437, 0.22137957763671876, 0.4608409729003906, 0.22114405822753908, 0.2210918426513672, 0.22119833374023437, 0.22196018981933593, 0.2215413818359375, 0.22137139892578125, 0.2214246368408203, 0.22122802734375, 0.22138983154296876, 0.2213478698730469, 0.2211327667236328, 0.22113591003417968, 0.2211645050048828, 0.22161613464355467, 0.22142771911621092, 0.22118502807617188, 0.22131715393066406, 0.22171746826171876, 0.22127410888671875, 0.22118911743164063, 0.22119740295410156, 0.22118185424804687, 0.22219775390625, 0.22114816284179686, 0.22126797485351563, 0.22138265991210937, 0.22202163696289062, 0.221623291015625, 0.22125567626953124, 0.2214297637939453, 0.22170930480957032, 0.22169293212890626, 0.22206874084472655, 0.22159257507324218, 0.2214799346923828, 0.22199090576171876, 0.22157720947265624, 0.22174208068847656, 0.22161613464355467, 0.2212843475341797, 0.22151271057128907, 0.22158233642578126, 0.22143283081054688, 0.22131404113769532, 0.22166323852539063, 0.22163046264648437, 0.22240870666503906, 0.22161407470703126, 0.22181785583496094, 0.22200831604003907, 0.22142874145507813, 0.22144102478027344, 0.22163661193847656, 0.22149530029296874, 0.22142668151855469, 0.22134169006347656, 0.2213683166503906, 0.2215076141357422, 0.2218208923339844, 0.221412353515625, 0.2214072265625, 0.22158233642578126, 0.46169601440429686, 0.2212136993408203, 0.22105702209472655, 0.2211266632080078, 0.2211031036376953, 0.22123622131347656, 0.22111846923828124, 0.22110105895996093, 0.22219468688964844, 0.22128536987304687, 0.2216058807373047, 0.22124237060546875, 0.22226022338867188, 0.22136323547363282, 0.2218741455078125, 0.22145741271972658, 0.22111538696289063, 0.22111231994628905, 0.22129766845703125, 0.22105702209472655, 0.22113792419433595, 0.22142874145507813, 0.2212351989746094, 0.22122496032714845, 0.22117068481445312, 0.22113279724121093, 0.22115122985839844, 0.22112973022460938, 0.2212024383544922, 0.2211102752685547, 0.2212956085205078, 0.22164582824707033, 0.2216058807373047, 0.22137548828125, 0.22140524291992186, 0.22150752258300782, 0.22146560668945312, 0.22119526672363282, 0.22161715698242188, 0.22124032592773438, 0.22137651062011718, 0.221770751953125, 0.22136524963378906, 0.22113690185546875, 0.22289407348632811, 0.22195814514160156, 0.22175027465820313, 0.22173286437988282, 0.2216048583984375, 0.221559814453125, 0.22163661193847656, 0.22160383605957032, 0.22145741271972658, 0.22292991638183593, 0.22169804382324218, 0.22141644287109374, 0.22166732788085938, 0.22217625427246093, 0.2222335968017578, 0.22209843444824218, 0.22188134765625, 0.2213939208984375, 0.22153727722167968, 0.46185061645507813, 0.2211788787841797, 0.22111436462402342, 0.22129049682617188, 0.22150553894042968, 0.22128128051757812, 0.2212024383544922, 0.22144000244140624, 0.22146969604492187, 0.22136114501953125, 0.22112562561035157, 0.2214481964111328, 0.2222335968017578, 0.22209536743164063, 0.22137548828125, 0.2215034942626953, 0.22125567626953124, 0.22210150146484375, 0.22135398864746095, 0.22115122985839844, 0.22146560668945312, 0.22135501098632812, 0.2211973114013672, 0.2217144317626953, 0.22152809143066407, 0.22151266479492188, 0.2218260498046875, 0.22138777160644532, 0.22115327453613282, 0.22133042907714845, 0.2219438018798828, 
0.22181068420410155, 0.22151679992675782, 0.22163967895507813, 0.22198374938964843, 0.222202880859375, 0.2216089630126953, 0.22158848571777343, 0.22197964477539062, 0.2219304962158203, 0.2216058807373047, 0.22188236999511718, 0.2217144317626953, 0.2216990966796875, 0.22185162353515625, 0.22161509704589843, 0.22144825744628907, 0.2215761260986328, 0.22152294921875, 0.22147789001464843, 0.22122189331054687, 0.22149017333984375, 0.22142054748535156, 0.2215905303955078, 0.2212833251953125, 0.22145535278320314, 0.2214256591796875, 0.22370611572265625, 0.22159666442871093, 0.22157209777832032, 0.22164889526367187, 0.22127622985839843, 0.2215331268310547, 0.4607232055664062, 0.2214246368408203, 0.22150758361816406, 0.22157212829589842, 0.22139695739746093, 0.22143283081054688, 0.22193766784667968, 0.22149017333984375, 0.22146456909179688, 0.22140824890136718, 0.22149533081054687, 0.22149014282226562, 0.22156288146972655, 0.22154342651367187, 0.22170623779296875, 0.2220062713623047, 0.22157107543945312, 0.22163865661621093, 0.22165196228027345, 0.22187519836425781, 0.22198477172851563, 0.22197862243652344, 0.22181272888183592, 0.22163456726074218, 0.22144921875, 0.2217902069091797, 0.22170623779296875, 0.22131404113769532, 0.22122291564941407, 0.22189158630371095, 0.22152499389648436, 0.2215362548828125, 0.22123417663574219, 0.2213365783691406, 0.2216407012939453, 0.22166015625, 0.22149221801757812, 0.22126182556152343, 0.22392626953125, 0.2219438018798828, 0.22167347717285157, 0.22165606689453124, 0.22170930480957032, 0.22157626342773437, 0.2213846435546875, 0.22152499389648436, 0.2214307861328125, 0.2218014678955078, 0.22188134765625, 0.22133042907714845, 0.22165298461914062, 0.22161407470703126, 0.22176870727539064, 0.22166835021972656, 0.22149017333984375, 0.22150656127929688, 0.22176768493652343, 0.22180863952636717, 0.22148101806640624, 0.221297607421875, 0.22151373291015625, 0.222308349609375, 0.22171034240722656, 0.46220184326171876, 0.22253363037109375, 0.22171034240722656, 0.22214041137695312, 0.22114816284179686, 0.2212351989746094, 0.22153932189941405, 0.22129664611816408, 0.2211778564453125, 0.22154853820800782, 0.22134176635742187, 0.22164781188964844, 0.22147378540039062, 0.22127615356445313, 0.2212843475341797, 0.22165913391113282, 0.2215885467529297, 0.22137234497070313, 0.22157516479492187, 0.22207180786132813, 0.22170623779296875, 0.22145126342773438, 0.22118502807617188, 0.22266777038574218, 0.22152601623535156, 0.2213744659423828, 0.22128947448730468, 0.22164991760253908, 0.2218076171875, 0.22161920166015625, 0.22146969604492187, 0.22127513122558592, 0.22119424438476562, 0.22168269348144531, 0.22124441528320313, 0.22097100830078126, 0.221085693359375, 0.22113381958007813, 0.22170008850097656, 0.22122700500488282, 0.22127923583984374, 0.22145330810546876, 0.22255923461914062, 0.22140007019042968, 0.22144613647460937, 0.22161509704589843, 0.222023681640625, 0.22214041137695312, 0.22231552124023438, 0.2217574462890625, 0.22175949096679687, 0.22195610046386718, 0.22176666259765626, 0.221739013671875, 0.22195001220703126, 0.22145529174804687, 0.2223953857421875, 0.22163661193847656, 0.22160281372070312, 0.22152294921875, 0.22183322143554687, 0.2217840576171875, 0.2217410583496094, 0.46331597900390625, 0.22162431335449218, 0.22123930358886718, 0.2212843475341797, 0.2211409912109375, 0.22146662902832032, 0.22136729431152344, 0.22115020751953124, 0.22153216552734376, 0.22155059814453126, 0.22213427734375, 0.2212034606933594, 0.221412353515625, 0.22128640747070313, 0.22130995178222657, 
0.22144102478027344, 0.22194688415527344, 0.22158131408691406, 0.22149632263183594, 0.22149427795410156, 0.2216222686767578, 0.22129664611816408, 0.22110617065429689, 0.22211993408203126, 0.2216058807373047, 0.22146456909179688, 0.22136012268066407, 0.22197760009765624, 0.2215905303955078, 0.22179327392578124, 0.2214993896484375, 0.22128230285644532, 0.2213017578125, 0.22127410888671875, 0.22146456909179688, 0.22121778869628905, 0.22168576049804686, 0.22120037841796875, 0.22118406677246094, 0.22135084533691407, 0.22131301879882812, 0.22129869079589845, 0.22116659545898437, 0.2210508728027344, 0.2215045166015625, 0.22192332458496095, 0.2215854034423828, 0.22149325561523436, 0.22160076904296874, 0.2215669708251953, 0.22166323852539063, 0.22128536987304687, 0.22132333374023438, 0.22139077758789064, 0.22124549865722656, 0.22128941345214845, 0.221665283203125, 0.22161613464355467, 0.22151577758789062, 0.22136524963378906, 0.2216407012939453, 0.22126693725585939, 0.22150860595703126, 0.4624425048828125, 0.22111744689941407, 0.22104473876953126, 0.2211829833984375, 0.22187826538085936, 0.22165196228027345, 0.2212290496826172, 0.22156083679199218, 0.22200729370117187, 0.22158950805664063, 0.22172671508789063, 0.221380615234375, 0.221306884765625, 0.22107449340820312, 0.22128428649902343, 0.22132121276855468, 0.221306884765625, 0.22197042846679688, 0.22143589782714843, 0.2213027801513672, 0.22142771911621092, 0.22136524963378906, 0.22115225219726561, 0.22165811157226561, 0.22134988403320313, 0.22114303588867187, 0.22162124633789063, 0.22177484130859376, 0.22148812866210937, 0.2213570556640625, 0.22117170715332032, 0.22172569274902343, 0.221444091796875, 0.22128640747070313, 0.22114816284179686, 0.22123423767089845, 0.22163040161132813, 0.22124134826660155, 0.22113900756835939, 0.22141433715820313, 0.22133351135253906, 0.22119833374023437, 0.22116659545898437, 0.2211727294921875, 0.2214256591796875, 0.22142771911621092, 0.22107034301757814, 0.2211973114013672, 0.22134579467773438, 0.22134988403320313, 0.2222950439453125, 0.22165298461914062, 0.22142361450195314, 0.22213938903808594, 0.22171749877929686, 0.2223206329345703, 0.2216816711425781, 0.22124032592773438, 0.22122700500488282, 0.22170623779296875, 0.22179840087890626, 0.22177587890625, 0.2214297637939453, 0.4629862365722656, 0.22168269348144531, 0.22135910034179687, 0.22179840087890626, 0.22143487548828125, 0.2214686737060547, 0.22133042907714845, 0.22189260864257812, 0.22169088745117188, 0.22153421020507813, 0.22115327453613282, 0.22121165466308593, 0.22176153564453124, 0.221380615234375, 0.22127308654785155, 0.22127206420898438, 0.2213959655761719, 0.22139085388183594, 0.22160592651367186, 0.2212945556640625, 0.22128128051757812, 0.2216611785888672, 0.22154751586914062, 0.22151373291015625, 0.2215004119873047, 0.22142361450195314, 0.22282957458496094, 0.22140313720703125, 0.22110617065429689, 0.22118502807617188, 0.22146456909179688, 0.2219325408935547, 0.22170828247070312, 0.22170008850097656, 0.22203187561035156, 0.22157005310058595, 0.22241897583007814, 0.22136521911621093, 0.22132940673828125, 0.22154444885253907, 0.22126284790039064, 0.22154444885253907, 0.22126080322265626, 0.22116966247558595, 0.22158335876464844, 0.22131199645996094, 0.2215188446044922, 0.22126797485351563, 0.22128128051757812, 0.22140518188476563, 0.22145126342773438, 0.221559814453125, 0.22204620361328126, 0.22181068420410155, 0.22146456909179688, 0.22160794067382814, 0.22161715698242188, 0.22150553894042968, 0.22128640747070313, 0.22156185913085938, 
0.22152806091308594, 0.22130995178222657, 0.2214297637939453]",tokens/s,4.445155333725598,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3951.190016,12732.33408,0.0,12085.886976,11337.496064,s,10,10.983638916015623,1.0983638916015628,0.002145812483257245,1.0982380981445312,1.1009110717773436,1.1018160705566407,1.1025400695800782,"[1.1027210693359375, 1.1007099609375, 1.0960345458984375, 1.0957255859375, 1.0965640869140625, 1.096655029296875, 1.0976143798828124, 1.09886181640625, 1.0996507568359375, 1.0991016845703125]",tokens/s,233.07394021003134,kWh,1.2942184060811996e-05,7.0910633564744784e-06,6.339727294000231e-05,8.343052035728877e-05,tokens/kWh,3068421.4709879244,MB,3951.190016,12732.33408,0.0,12085.886976,11686.800384,s,10,637.80165234375,63.780165234375,0.009100497531913991,63.777451171875,63.784855859375,63.7953341796875,63.8037168359375,"[63.775953125, 63.7747265625, 63.78252734375, 63.77445703125, 63.77937109375, 63.7818203125, 63.8058125, 63.7783125, 63.77208203125, 63.77658984375]",tokens/s,0.9877679019565393,kWh,0.0007529117455250688,0.00041266306092793453,0.003710265523765599,0.004875840330218602,tokens/kWh,12920.849686063342,,s,629,646.6700090332033,1.028092224218129,0.13044395391192767,1.0122936401367189,1.0129029296874998,1.0132424438476564,2.1090928125,"[1.0118082275390625, 1.0120591430664063, 1.0121963500976563, 1.0122721557617187, 1.0119987182617187, 1.0119086303710938, 1.0127564697265625, 1.0119772338867188, 1.013127197265625, 1.0117877807617188, 1.0121246948242189, 1.0118850708007812, 1.0127083740234375, 1.0122250366210936, 1.0122311401367188, 1.01222705078125, 1.0121298217773438, 1.01207958984375, 1.0122537231445312, 1.0121769409179688, 1.0120580444335938, 1.0120304565429687, 1.0126674194335938, 1.0122168579101563, 1.0119823608398437, 1.0118645629882812, 1.0125404052734375, 1.0122362670898437, 1.01228955078125, 1.012083740234375, 1.0118092651367188, 1.0122362670898437, 1.0126837768554688, 1.0127236938476563, 1.0127257690429687, 1.0131476440429688, 1.0123878784179687, 1.0122587280273438, 1.0124882202148437, 1.0123243408203124, 1.0121123657226563, 1.0124574584960937, 1.012696044921875, 1.0123038940429687, 1.012063232421875, 1.01218408203125, 1.0126653442382811, 1.012890625, 1.0124677124023438, 1.0129090576171875, 1.0120939331054688, 1.01304931640625, 1.0123591918945312, 1.0120345458984374, 1.0119608154296875, 1.0121820068359375, 1.0124882202148437, 1.0120724487304686, 1.01193115234375, 1.0123868408203125, 1.0123858642578125, 1.0128014526367188, 2.113512451171875, 1.0119444580078125, 1.0120970458984375, 1.0116249389648437, 1.0117980346679687, 1.0118410034179688, 1.0118922119140625, 1.0117406616210938, 1.012063232421875, 1.0121236572265624, 1.0118225708007813, 1.0117457885742187, 1.012200439453125, 1.0118594360351563, 1.0123018188476562, 1.0119075927734376, 1.0119393310546876, 1.0118450927734375, 1.0121697387695312, 1.0119772338867188, 1.0117940063476563, 
1.0117733764648438, 1.0121502685546875, 1.0117877807617188, 1.0121226196289062, 1.0117345581054686, 1.011778564453125, 1.0116874389648438, 1.0122762451171874, 1.0118328247070312, 1.0120519409179687, 1.0118645629882812, 1.0123099975585939, 1.0122506103515625, 1.012801513671875, 1.0120089721679688, 1.0126571655273438, 1.0122014770507812, 1.0129817504882812, 1.0126458740234374, 1.012490234375, 1.0121615600585938, 1.0127646484375, 1.0119393310546876, 1.0128711547851563, 1.011999755859375, 1.0165934448242187, 1.0120037841796874, 1.0129735717773438, 1.0124287719726563, 1.0122291259765626, 1.0124595336914062, 1.01273291015625, 1.0122619018554688, 1.0127390747070313, 1.0128394165039063, 1.0125383911132813, 1.01269091796875, 1.013369873046875, 1.0124830932617188, 1.0126510009765626, 1.0123509521484375, 1.0127933349609375, 2.1089033203125, 1.0117857055664063, 1.0120253295898438, 1.012158447265625, 1.0124718017578125, 1.0120816650390625, 1.0120447998046875, 1.0125680541992188, 1.0123571166992187, 1.0120724487304686, 1.0127575073242188, 1.01252001953125, 1.0122987060546875, 1.0124451904296874, 1.0127728881835938, 1.011794921875, 1.011989501953125, 1.0122485961914063, 1.0122066040039062, 1.0119823608398437, 1.012210693359375, 1.0119772338867188, 1.01199462890625, 1.0123724975585937, 1.0123673706054688, 1.0120867919921874, 1.0119280395507813, 1.0121994018554688, 1.0121431274414063, 1.0121226196289062, 1.0124666748046875, 1.0119382934570313, 1.012031494140625, 1.0129346313476562, 1.012621337890625, 1.0127984619140624, 1.0124257202148437, 1.012552734375, 1.0124932861328124, 1.0126151733398439, 1.0124769287109374, 1.0124185791015625, 1.0123202514648437, 1.0127390747070313, 1.0145853271484375, 1.0121594848632813, 1.0121994018554688, 1.016111083984375, 1.01210009765625, 1.012337646484375, 1.0126919555664062, 1.0125444946289062, 1.0131015625, 1.0132469482421875, 1.0124186401367188, 1.0119146728515624, 1.0125578002929687, 1.0123294677734376, 1.0121595458984376, 1.0121768798828126, 1.0121963500976563, 1.0120345458984374, 1.0120714111328124, 2.108083251953125, 1.0117611694335937, 1.0117723999023438, 1.0123253784179687, 1.0121656494140625, 1.011751953125, 1.011820556640625, 1.0120601806640626, 1.0119454956054688, 1.01188916015625, 1.0119761962890625, 1.0117877807617188, 1.0130258178710938, 1.0123018188476562, 1.0121328735351562, 1.01246875, 1.0121953125, 1.0118369140625, 1.0122199096679687, 1.0124267578125, 1.0122546997070312, 1.0124503173828125, 1.0120703735351562, 1.0122537231445312, 1.0119536743164061, 1.0123397216796874, 1.0123960571289063, 1.0120325317382812, 1.0122772216796876, 1.012115478515625, 1.012005859375, 1.011937255859375, 1.0124103393554686, 1.0128527221679688, 1.012220947265625, 1.0126182250976563, 1.0130596313476563, 1.012464599609375, 1.0132828369140625, 1.0123468627929688, 1.0119547119140626, 1.0120048828125, 1.0124175415039063, 1.012010986328125, 1.0151219482421876, 1.0123069458007812, 1.0124124145507813, 1.0122076416015624, 1.0128138427734374, 1.0126878662109375, 1.0123939819335936, 1.0121318359375, 1.01235302734375, 1.011895263671875, 1.0121062622070311, 1.0122885131835937, 1.0123724975585937, 1.012242431640625, 1.012421630859375, 1.0123786010742188, 1.0121113891601563, 1.0120017700195312, 1.0122393798828124, 2.10916650390625, 1.0120038452148437, 1.0127247314453125, 1.0125916137695312, 1.0125660400390626, 1.0119761962890625, 1.0121748657226564, 1.0119721069335939, 1.0121370239257812, 1.0121942138671876, 1.0123048706054687, 1.0119403686523438, 1.0117099609375, 1.01187890625, 
1.0121195678710937, 1.0117509155273436, 1.0118615112304687, 1.013433349609375, 1.012052978515625, 1.0117959594726562, 1.0120274047851563, 1.0121123657226563, 1.012421630859375, 1.012052978515625, 1.0121298217773438, 1.0119987182617187, 1.0118973388671875, 1.0125066528320312, 1.0120836791992187, 1.0121307983398438, 1.01264697265625, 1.0120714111328124, 1.0120857543945312, 1.0123540649414062, 1.0124544067382812, 1.0121564331054687, 1.0122383422851562, 1.0122454833984376, 1.0119935913085938, 1.0119239501953126, 1.0125363159179688, 1.0117755126953125, 1.011968017578125, 1.0119495239257812, 1.0124840698242188, 1.0122025146484375, 1.0122854614257812, 1.01258447265625, 1.0125885620117188, 1.0125301513671876, 1.012516845703125, 1.012400146484375, 1.0123970336914063, 1.0127892456054688, 1.01247998046875, 1.01256396484375, 1.0124237060546875, 1.0165504150390625, 1.0126458740234374, 1.0126848754882813, 1.0138418579101562, 1.0126428833007812, 1.0132418212890626, 2.10956298828125, 1.0119423828125, 1.0119608154296875, 1.0120847778320312, 1.011984375, 1.0120007934570312, 1.0124031982421875, 1.01182568359375, 1.0117908325195313, 1.0122680053710937, 1.012263916015625, 1.0119035034179686, 1.0124677734375, 1.0120057983398438, 1.0118276977539062, 1.011904541015625, 1.0125209350585938, 1.012252685546875, 1.0120653076171875, 1.0124205932617187, 1.0122495727539063, 1.0123724975585937, 1.0126407470703125, 1.012590576171875, 1.0119659423828125, 1.0125209350585938, 1.0127493286132812, 1.0124779663085937, 1.0127708129882813, 1.0121123657226563, 1.0121646118164063, 1.0116229248046875, 1.0122229614257812, 1.0120335083007812, 1.0120325317382812, 1.0120078735351563, 1.0126233520507812, 1.0124185791015625, 1.01650634765625, 1.0135787353515624, 1.012727783203125, 1.0122936401367189, 1.0128947143554687, 1.0121277465820313, 1.012600830078125, 1.0126612548828124, 1.0123960571289063, 1.012105224609375, 1.0127769775390625, 1.01275341796875, 1.012274169921875, 1.012279296875, 1.0123386840820312, 1.0121942749023438, 1.0125885620117188, 1.012947998046875, 1.0124779663085937, 1.0123930053710937, 1.0128373413085938, 1.0122721557617187, 1.01233251953125, 1.0123386840820312, 1.0127564697265625, 2.110841796875, 1.0128209838867188, 1.0126233520507812, 1.012595703125, 1.0131865844726562, 1.01278515625, 1.0128342895507811, 1.0131220703125, 1.0128527221679688, 1.012337646484375, 1.0124810180664063, 1.0129080200195313, 1.012747314453125, 1.0124318237304688, 1.0134036254882812, 1.0129408569335938, 1.0123612060546876, 1.0120929565429688, 1.0134886474609375, 1.0132265014648438, 1.0129080200195313, 1.0132152099609375, 1.0127984619140624, 1.0145208129882812, 1.013728271484375, 1.0132162475585937, 1.0127083740234375, 1.0127789916992187, 1.012833251953125, 1.0120120239257813, 1.0122465209960938, 1.0125762329101562, 1.0127605590820312, 1.0129448852539062, 1.01275341796875, 1.0117826538085937, 1.0119331665039062, 1.0120591430664063, 1.0131445922851563, 1.0131220703125, 1.0128814086914062, 1.0128875732421876, 1.0127513427734376, 1.013012451171875, 1.0131896362304686, 1.0130308837890625, 1.0130473022460937, 1.0133401489257812, 1.0132930297851563, 1.0126981201171874, 1.0128568115234375, 1.012906982421875, 1.0124503173828125, 1.0124984130859376, 1.0132520751953125, 1.0128076782226563, 1.0122936401367189, 1.0126827392578126, 1.0122035522460937, 1.0120509033203124, 1.0123540649414062, 1.0125946655273437, 1.0123171997070313, 2.112021484375, 1.0123406982421874, 1.0120253295898438, 1.0121513061523437, 1.0124144897460938, 1.012274169921875, 
1.012401123046875, 1.0119710693359374, 1.012263916015625, 1.0128281860351562, 1.0122537231445312, 1.0122373046875, 1.0121615600585938, 1.0122373046875, 1.0120325317382812, 1.0120929565429688, 1.01382861328125, 1.0119669799804687, 1.0119239501953126, 1.0123489379882813, 1.0119403686523438, 1.0117539672851563, 1.0119976806640625, 1.0121533203125, 1.0122137451171875, 1.0119065551757813, 1.0129019165039062, 1.0124779052734374, 1.0123939819335936, 1.0126571655273438, 1.0125834350585938, 1.0119721069335939, 1.0120038452148437, 1.012474853515625, 1.0120499267578125, 1.0122383422851562, 1.0124830932617188, 1.0124758911132812, 1.0127083740234375, 1.012632568359375, 1.0122034912109374, 1.0120222778320314, 1.012178955078125, 1.0122977294921875, 1.0122034912109374, 1.01212158203125, 1.0132428588867188, 1.0123274536132814, 1.0125435180664062, 1.0123755493164062, 1.0122383422851562, 1.01226904296875, 1.0125250854492187, 1.0119833374023437, 1.0120601806640626, 1.0121134033203125, 1.012974609375, 1.0124656372070313, 1.01243798828125, 1.012580322265625, 1.0127124633789062, 1.0127247314453125, 1.0126704711914063, 2.11262353515625, 1.0118778686523437, 1.0124564208984375, 1.0118143920898437, 1.0125772705078124, 1.0121441040039063, 1.0118225708007813, 1.0119639892578125, 1.0122833251953125, 1.011894287109375, 1.0119280395507813, 1.0119198608398436, 1.0127401123046875, 1.01235205078125, 1.012570068359375, 1.011726318359375, 1.0117744750976563, 1.013375, 1.0122465209960938, 1.01218505859375, 1.0120325317382812, 1.011989501953125, 1.0127339477539063, 1.0121031494140624, 1.0123878173828125, 1.0119874267578124, 1.011962890625, 1.0119680786132812, 1.0123140258789063, 1.0120407104492188, 1.012220947265625, 1.0121410522460939, 1.0122772216796876, 1.011979248046875, 1.0125343017578126, 1.01197412109375, 1.01228955078125, 1.0118973388671875, 1.0122465209960938, 1.0119423828125, 1.012041748046875, 1.0123131103515626, 1.0124677124023438, 1.0119342041015624, 1.012621337890625, 1.012105224609375, 1.0122291259765626, 1.0119669799804687, 1.0125332641601563, 1.0122034912109374, 1.0122045288085937, 1.0123519897460938, 1.012875244140625, 1.0126397705078125, 1.0126233520507812, 1.0123182373046875, 1.0127708129882813, 1.0124646606445313, 1.0125987548828126, 1.0120775756835938, 1.012442138671875, 1.012552734375, 1.012252685546875, 2.11210546875, 1.011620849609375, 1.0120519409179687, 1.0119721069335939, 1.0122485961914063, 1.0120714111328124, 1.0120621948242188, 1.0127001342773438, 1.0125178833007813, 1.012548583984375, 1.01243701171875, 1.0118604736328125, 1.0117130126953124, 1.0120304565429687, 1.0122587890625, 1.01167822265625, 1.0118010864257811, 1.0119813232421875, 1.011894287109375, 1.0122905883789062, 1.01184716796875, 1.0117959594726562, 1.0116761474609375, 1.0118410034179688, 1.0117744750976563, 1.0119515991210937, 1.0144215087890625, 1.0122987670898438, 1.012021240234375, 1.0126315307617189, 1.0124862060546875, 1.0122158203125, 1.0121727905273437, 1.0125875244140625, 1.0124472045898438, 1.0122403564453124, 1.0122117309570313, 1.0127349853515626, 1.0121431274414063, 1.012495361328125, 1.012738037109375, 1.0124758911132812, 1.0128773193359375, 1.0123079833984374, 1.0123099975585939, 1.0121298217773438, 1.0122557373046874, 1.0126878662109375, 1.0124810180664063, 1.013000244140625, 1.012516845703125, 1.0124298095703126, 1.012358154296875, 1.0124656372070313, 1.0123171997070313, 1.01239501953125, 1.01268994140625, 1.0127656860351562, 1.01236328125, 1.0124308471679688, 1.012527099609375, 1.0123038940429687, 
1.0129132080078125]",tokens/s,0.972675384993313,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3905.671168,12732.33408,0.0,12085.886976,11337.501696,s,10,10.970613281250001,1.097061328125,0.0017840508518984434,1.097439453125,1.0989015869140624,1.0993332397460938,1.0996785620117187,"[1.0981041259765625, 1.099764892578125, 1.0942474365234376, 1.09466943359375, 1.09612890625, 1.0954654541015625, 1.0967747802734376, 1.0988056640625, 1.098486328125, 1.098166259765625]",tokens/s,233.35067369253872,kWh,1.2926500191291174e-05,7.083296092205274e-06,6.255157781898113e-05,8.256137410247758e-05,tokens/kWh,3100723.5863376665,MB,3909.746688,12732.33408,0.0,12085.886976,11686.806016,s,10,636.987296875,63.6987296875,0.008792231338471245,63.69969140625,63.70640390625,63.70996171875,63.71280796875,"[63.682359375, 63.687671875, 63.70561328125, 63.70497265625, 63.69943359375, 63.69994921875, 63.70396484375, 63.71351953125, 63.6979921875, 63.6918203125]",tokens/s,0.9890307123716925,kWh,0.0007521740069488685,0.00041225789324771534,0.003629864737222821,0.0047942966374194046,tokens/kWh,13140.613684244328,,s,629,645.8482131958002,1.0267857125529425,0.13035148276579694,1.0110596313476563,1.01170830078125,1.0120460571289063,2.107026708984375,"[1.0111242065429686, 1.0104995727539063, 1.0103193359375, 1.0103952026367187, 1.0104954223632812, 1.0104678955078126, 1.0100704956054687, 1.0106204223632813, 1.0101810913085938, 1.0110658569335937, 1.0104442749023437, 1.0103756713867187, 1.0101524658203125, 1.0103828735351563, 1.0101749877929687, 1.0103490600585938, 1.0100950927734376, 1.0109429931640626, 1.0103602905273437, 1.0109215698242187, 1.0110667724609375, 1.0112665405273438, 1.0110863647460937, 1.01104638671875, 1.0108724365234374, 1.0105343017578126, 1.0103214111328125, 1.010872314453125, 1.0106912841796876, 1.0113380737304687, 1.01102490234375, 1.011188720703125, 1.0115625, 1.0118072509765625, 1.011420166015625, 1.0109522094726562, 1.010282470703125, 1.0109522094726562, 1.0106019897460938, 1.0113290405273438, 1.0105897216796875, 1.0110146484375, 1.0105538330078125, 1.0109419555664063, 1.0105497436523438, 1.0111878051757812, 1.0105476684570311, 1.0111273193359376, 1.0112327880859375, 1.0111181640625, 1.0113422241210936, 1.0108223266601561, 1.0107902221679688, 1.0107567749023438, 1.0107431030273437, 1.0107125854492187, 1.0108497924804687, 1.0110812377929688, 1.0113054809570312, 1.011472412109375, 1.010951171875, 1.011072998046875, 2.110793701171875, 1.0105712890625, 1.0109173583984374, 1.0111078491210939, 1.0105538940429688, 1.0114539184570313, 1.0104873046875, 1.0103736572265625, 1.01060302734375, 1.0104473876953124, 1.0103705444335938, 1.0103173217773438, 1.0103797607421876, 1.0106593017578125, 1.0105569458007813, 1.0109276123046875, 1.0109112548828125, 1.0105016479492188, 1.0110904541015624, 1.0104453125, 1.0106286010742187, 1.0105743408203125, 1.0106060791015625, 1.0110525512695312, 1.0107782592773438, 
1.0118408813476563, 1.0113218383789062, 1.0111395874023437, 1.0107064208984375, 1.010524169921875, 1.0105466918945312, 1.0102415161132812, 1.010682861328125, 1.0110791625976563, 1.0108632202148438, 1.01125830078125, 1.0108221435546876, 1.0107473754882812, 1.0111160278320312, 1.0105282592773437, 1.0108026733398439, 1.0104678344726563, 1.0108098754882813, 1.0106849365234376, 1.0112696533203125, 1.0114283447265624, 1.01136181640625, 1.01125634765625, 1.0113484497070313, 1.0112225341796874, 1.0113013916015625, 1.0110914306640626, 1.0109788208007813, 1.011293212890625, 1.0114918212890625, 1.0115665893554688, 1.0111375122070312, 1.0111918334960937, 1.0110986328125, 1.0106838989257811, 1.011178466796875, 1.01096142578125, 1.0111539306640625, 2.106599365234375, 1.0114857177734375, 1.0112235717773437, 1.011198974609375, 1.01125732421875, 1.010946044921875, 1.0106123657226562, 1.010948974609375, 1.0108016357421874, 1.0105702514648438, 1.011140625, 1.0107391967773438, 1.0105272216796874, 1.010386962890625, 1.0104515991210938, 1.010242431640625, 1.0104063720703125, 1.010171875, 1.0105518188476563, 1.0103695068359375, 1.01096142578125, 1.0108364868164061, 1.0114160766601563, 1.0112081909179687, 1.0110771484375, 1.01035107421875, 1.0108743896484376, 1.01146728515625, 1.0117959594726562, 1.0120355834960937, 1.012463623046875, 1.0119403686523438, 1.01194140625, 1.0119802856445312, 1.011323974609375, 1.0110104370117188, 1.0110330810546875, 1.0113382568359375, 1.0116249389648437, 1.0108795166015625, 1.01194140625, 1.0112030639648437, 1.0113484497070313, 1.011093505859375, 1.0115000610351563, 1.0112604370117189, 1.0109788208007813, 1.0104954833984374, 1.011493896484375, 1.01127783203125, 1.0117058715820313, 1.0108528442382811, 1.0114898071289062, 1.0111221923828124, 1.0114129638671876, 1.011178466796875, 1.0114631958007811, 1.0113863525390625, 1.0115481567382814, 1.01125732421875, 1.0124105834960937, 1.0113810424804688, 1.0117324829101562, 2.10709814453125, 1.0106634521484374, 1.0110371704101562, 1.0109081420898438, 1.0120171508789062, 1.0121226196289062, 1.011262451171875, 1.0106112060546875, 1.0108251953125, 1.0108651733398437, 1.0108108520507812, 1.0108057861328126, 1.0110238647460938, 1.0108446655273438, 1.01113037109375, 1.0110238647460938, 1.0114150390625, 1.01075146484375, 1.0109603881835938, 1.0105835571289064, 1.010745361328125, 1.0105211181640625, 1.0108262329101563, 1.01054052734375, 1.0108231811523438, 1.0112245483398437, 1.0115286865234374, 1.0111610717773438, 1.0113402709960937, 1.0108292846679687, 1.0113024291992188, 1.0110156860351562, 1.011251220703125, 1.0109337768554687, 1.0112122802734376, 1.0112041015625, 1.0106275634765625, 1.0104483642578126, 1.0109224853515626, 1.0123694458007813, 1.0121512451171875, 1.0115277099609374, 1.011831787109375, 1.0113003540039063, 1.0119905395507813, 1.0121932983398438, 1.01144677734375, 1.0107801513671875, 1.011119140625, 1.0112194213867187, 1.0110924682617188, 1.0110105590820313, 1.0112767944335936, 1.0110873413085937, 1.0113557739257812, 1.0116627807617187, 1.0114488525390626, 1.01161474609375, 1.0121482543945313, 1.0110945434570313, 1.0111918334960937, 1.0114703369140625, 1.011420166015625, 2.107243408203125, 1.011304443359375, 1.0116792602539062, 1.0110904541015624, 1.0111897583007812, 1.0108170166015624, 1.0107166748046874, 1.010555908203125, 1.0108446655273438, 1.0105784301757812, 1.0112839965820313, 1.0110791625976563, 1.011198974609375, 1.0106951904296875, 1.0111498413085938, 1.0108477172851562, 1.0110279541015625, 1.0106736450195313, 1.01078125, 
1.0110596313476563, 1.0108917846679688, 1.0110576782226564, 1.0108016357421874, 1.01058251953125, 1.0113638305664063, 1.01103515625, 1.0112214965820312, 1.0109030151367187, 1.0114221801757812, 1.01172021484375, 1.011646484375, 1.0109634399414062, 1.0110392456054687, 1.01072998046875, 1.0113239135742187, 1.0107545776367188, 1.0113812255859376, 1.0107801513671875, 1.0113116455078126, 1.011267578125, 1.0112327880859375, 1.0109522094726562, 1.0110904541015624, 1.0106736450195313, 1.0117355346679688, 1.0115389404296875, 1.0110474243164063, 1.0108436279296875, 1.01097265625, 1.0110955810546876, 1.011800048828125, 1.0115277099609374, 1.0110167236328125, 1.0107422485351563, 1.0111610717773438, 1.0109552612304689, 1.01119384765625, 1.0112645263671876, 1.0109450073242188, 1.0111027221679687, 1.0116802368164062, 1.0114508666992188, 1.0115563354492187, 2.106843017578125, 1.01136181640625, 1.0113515625, 1.0108784790039063, 1.0107105102539062, 1.0108948364257813, 1.0118154296875, 1.0109542236328124, 1.0106787719726562, 1.0108887329101564, 1.0112225341796874, 1.0112604370117189, 1.0115020751953125, 1.010966552734375, 1.0110965576171875, 1.0111488037109375, 1.0121779174804688, 1.01123583984375, 1.0113885498046875, 1.011718017578125, 1.0113597412109374, 1.0111610717773438, 1.0113341674804688, 1.0110709838867187, 1.01110986328125, 1.0114406127929687, 1.0113106079101561, 1.0103971557617188, 1.0105989379882812, 1.0106224365234375, 1.010735107421875, 1.0104473876953124, 1.010703369140625, 1.0104063720703125, 1.01077099609375, 1.0111979370117188, 1.01096240234375, 1.0108262329101563, 1.011472412109375, 1.011409912109375, 1.0115133666992187, 1.0109255981445313, 1.0116690063476563, 1.0109921264648438, 1.0113484497070313, 1.011146728515625, 1.0115604248046874, 1.0110126342773438, 1.0111477661132813, 1.0111273193359376, 1.01097265625, 1.0108907470703126, 1.0107822265625, 1.0106972045898437, 1.0111293334960938, 1.011146728515625, 1.0112236938476562, 1.0110370483398436, 1.0111190795898437, 1.0111826171875, 1.01117236328125, 1.0109869995117187, 1.0112276611328126, 2.108168212890625, 1.0113248901367187, 1.0116497192382812, 1.0111066284179688, 1.0111590576171876, 1.010862060546875, 1.0108671875, 1.0107658081054687, 1.0111181030273437, 1.010798583984375, 1.0113054809570312, 1.0110842895507812, 1.01054052734375, 1.0102753295898437, 1.0105231323242188, 1.0106941528320312, 1.0107473754882812, 1.0110699462890624, 1.01083544921875, 1.0106234741210938, 1.0105692138671876, 1.0113546142578125, 1.0107955322265625, 1.01106689453125, 1.011330078125, 1.0107811889648437, 1.010820068359375, 1.0105374755859375, 1.0116658935546874, 1.011794921875, 1.0121381225585937, 1.0115756225585937, 1.0113126220703126, 1.0110075073242188, 1.0110658569335937, 1.01071875, 1.0106624145507812, 1.0105538330078125, 1.0108600463867188, 1.0104791259765624, 1.0111047973632812, 1.010713623046875, 1.0108712768554688, 1.0103910522460937, 1.0111826171875, 1.0108528442382811, 1.0117273559570312, 1.0112481079101563, 1.0113095703125, 1.0112542724609375, 1.0117642211914062, 1.0118338623046874, 1.0117969970703125, 1.0115205078125, 1.0120662841796875, 1.0122158203125, 1.011726318359375, 1.0116546630859375, 1.011726318359375, 1.0113873901367187, 1.0115338134765626, 1.0122117309570313, 1.0120550537109374, 2.11171728515625, 1.0110648193359375, 1.0112655639648438, 1.011040283203125, 1.0112337646484375, 1.011103759765625, 1.0117447509765625, 1.011694580078125, 1.0115451049804687, 1.0108006591796874, 1.0108999633789062, 1.0106654663085937, 1.0113208618164062, 
1.0110341186523437, 1.0105753784179687, 1.0110699462890624, 1.0127493286132812, 1.0114774780273437, 1.0117990112304687, 1.0114754638671875, 1.01243701171875, 1.0126602172851562, 1.0127821044921874, 1.012316162109375, 1.0116639404296874, 1.0120989990234375, 1.0121307983398438, 1.0107197265625, 1.0107975463867187, 1.0107320556640624, 1.0111702880859375, 1.0104524536132813, 1.0106388549804688, 1.01058251953125, 1.0108671875, 1.0108385009765626, 1.0115369262695313, 1.0105947875976562, 1.010924560546875, 1.0109214477539064, 1.0113648681640626, 1.0106675415039061, 1.0112532348632812, 1.0106183471679688, 1.0113659057617188, 1.0111826171875, 1.0120530395507812, 1.0109584350585938, 1.0110103759765625, 1.0113085327148437, 1.0110975952148438, 1.0110146484375, 1.01137109375, 1.0108394775390626, 1.0119804077148438, 1.01148046875, 1.0113474731445313, 1.0114826049804688, 1.0118276977539062, 1.0114006958007813, 1.011209228515625, 1.0110105590820313, 1.0114006958007813, 2.110498779296875, 1.0119219360351563, 1.01136181640625, 1.0108313598632812, 1.0110064697265626, 1.0115112915039062, 1.0112849731445313, 1.011282958984375, 1.0111334228515625, 1.01097265625, 1.011146728515625, 1.0113351440429688, 1.0111365356445312, 1.010808837890625, 1.010713623046875, 1.0107698974609376, 1.0108856201171874, 1.0107218017578126, 1.01076171875, 1.0107422485351563, 1.0111477661132813, 1.011072998046875, 1.01098291015625, 1.0104760131835937, 1.0106972045898437, 1.01064501953125, 1.0107012939453126, 1.010724853515625, 1.0107740478515626, 1.0106009521484376, 1.010820068359375, 1.0108784790039063, 1.0107863159179689, 1.0108590698242188, 1.01104736328125, 1.0110914306640626, 1.0109132690429687, 1.0108549194335938, 1.0113065795898437, 1.0108507690429687, 1.0111047973632812, 1.0121011352539062, 1.0114979858398438, 1.0112440185546876, 1.0114396362304687, 1.0111139526367188, 1.0110658569335937, 1.01098291015625, 1.011177490234375, 1.0114918212890625, 1.0113802490234376, 1.0121666259765625, 1.0110453491210938, 1.0111324462890625, 1.0114744262695312, 1.0106736450195313, 1.0109685668945312, 1.0110310668945313, 1.0110238647460938, 1.0108549194335938, 1.0109911499023438, 1.0115542602539063, 1.010951171875, 2.10921875, 1.0104708862304688, 1.0106941528320312, 1.0109368286132812, 1.0107924194335938, 1.0110361328125, 1.0113505249023438, 1.0111631469726563, 1.010882568359375, 1.011051513671875, 1.0112184448242187, 1.0114017333984375, 1.0117243041992188, 1.0111027221679687, 1.0108477172851562, 1.0110914306640626, 1.0114437255859374, 1.01085595703125, 1.0109983520507813, 1.0107094116210937, 1.0110496215820313, 1.010912109375, 1.0108661499023437, 1.010777099609375, 1.0107576904296875, 1.0106285400390624, 1.0111324462890625, 1.011009521484375, 1.01085693359375, 1.0107545776367188, 1.0111273193359376, 1.010572265625, 1.0108016357421874, 1.0107443237304687, 1.0111344604492187, 1.010861083984375, 1.0115419921875, 1.0107218017578126, 1.0108344116210937, 1.0103900146484375, 1.0109389038085939, 1.0106685180664063, 1.0109644775390625, 1.0104258422851562, 1.0109030151367187, 1.0110167236328125, 1.0119915771484376, 1.011103759765625, 1.0113802490234376, 1.0111365356445312, 1.0113638305664063, 1.0108446655273438, 1.0109859619140624, 1.01060400390625, 1.0110167236328125, 1.0108129272460937, 1.0115399780273437, 1.0108231811523438, 1.0112481079101563, 1.0108765869140626, 1.0110595703125, 1.0108630981445312, 1.0109900512695313]",tokens/s,0.9739130451218064,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,5984.595968,19933.954048,0.0,19287.506944,18376.399872,s,10,24.56320629882812,2.456320629882813,0.001691664801696018,2.4561865234375,2.4583423828125,2.4586625244140623,2.4589186376953127,"[2.456571533203125, 2.458982666015625, 2.4545791015625, 2.455032470703125, 2.454942626953125, 2.453741943359375, 2.455801513671875, 2.458271240234375, 2.45821728515625, 2.45706591796875]",tokens/s,104.22092168489151,kWh,2.8975297907988232e-05,1.5879395596475657e-05,0.00013865674981416998,0.00018351144331863386,tokens/kWh,1395008.3731590684,MB,5988.143104,19933.954048,0.0,19287.506944,18871.04,s,10,1458.421484375,145.84214843750001,0.012211079453744932,145.84282812499998,145.852815625,145.8578921875,145.8619534375,"[145.82440625, 145.837078125, 145.840359375, 145.8516875, 145.845296875, 145.8391875, 145.86296875, 145.8513125, 145.82034375, 145.84884375]",tokens/s,0.43197388872119064,kWh,0.001721854172928466,0.000943728472916573,0.008154870135002046,0.010820452780847084,tokens/kWh,5822.307187691273,,s,629,1478.2121892089838,2.350098869966589,0.29170661243792756,2.3148583984375,2.3159804199218748,2.3165888183593752,4.76929474609375,"[2.31657470703125, 2.316729248046875, 2.31575048828125, 2.31400439453125, 2.31361328125, 2.314355712890625, 2.31430859375, 2.313990234375, 2.314439697265625, 2.31400439453125, 2.314337158203125, 2.31450830078125, 2.3143525390625, 2.313754638671875, 2.3145595703125, 2.314746826171875, 2.313860107421875, 2.314616943359375, 2.31397998046875, 2.314455078125, 2.314322021484375, 2.315072509765625, 2.314198974609375, 2.314560546875, 2.31431689453125, 2.314648681640625, 2.314249267578125, 2.3142431640625, 2.31486767578125, 2.314310546875, 2.31423681640625, 2.314349609375, 2.31522509765625, 2.314227783203125, 2.314047607421875, 2.314390380859375, 2.31465380859375, 2.31459130859375, 2.3147294921875, 2.31495263671875, 2.31514306640625, 2.314708984375, 2.314705810546875, 2.314702880859375, 2.31510009765625, 2.314967041015625, 2.314390625, 2.3144326171875, 2.31436279296875, 2.3152353515625, 2.31499169921875, 2.31524560546875, 2.314324951171875, 2.3151728515625, 2.31494970703125, 2.314418212890625, 2.315260009765625, 2.3155927734375, 2.314987548828125, 2.3148759765625, 2.314437744140625, 2.314975341796875, 4.77530908203125, 2.315552734375, 2.313754638671875, 2.31431787109375, 2.314470458984375, 2.31491064453125, 2.313483154296875, 2.3140751953125, 2.314690673828125, 2.313935791015625, 2.31429541015625, 2.313943115234375, 2.3145830078125, 2.314388427734375, 2.3144130859375, 2.314663818359375, 2.314819580078125, 2.31438134765625, 2.31452978515625, 2.313989013671875, 2.314265625, 2.31451025390625, 2.31416015625, 2.313996337890625, 2.314390625, 2.314997802734375, 2.31469970703125, 2.31465380859375, 2.314850341796875, 2.315021240234375, 2.31556396484375, 2.314933349609375, 2.315074462890625, 2.314526611328125, 2.31516162109375, 2.314416015625, 
2.31450634765625, 2.3146474609375, 2.31455322265625, 2.314646484375, 2.315080810546875, 2.314736572265625, 2.315146240234375, 2.316416015625, 2.316883056640625, 2.317212646484375, 2.317498291015625, 2.316708984375, 2.314590087890625, 2.31486669921875, 2.314609619140625, 2.314794921875, 2.3152783203125, 2.314463134765625, 2.31488720703125, 2.3152783203125, 2.3158486328125, 2.315106201171875, 2.31514208984375, 2.315134033203125, 2.314939453125, 2.31448974609375, 2.31490966796875, 4.76900048828125, 2.314322998046875, 2.31459423828125, 2.314704833984375, 2.3145400390625, 2.314397705078125, 2.315212890625, 2.315070556640625, 2.3151708984375, 2.314818603515625, 2.31474072265625, 2.314556396484375, 2.314756103515625, 2.314300537109375, 2.314501220703125, 2.314758056640625, 2.31469580078125, 2.314715087890625, 2.314291259765625, 2.313954345703125, 2.3144990234375, 2.3141162109375, 2.314789794921875, 2.3138662109375, 2.314201171875, 2.31396044921875, 2.314891357421875, 2.313994140625, 2.3142041015625, 2.31410595703125, 2.31444384765625, 2.314310546875, 2.3143935546875, 2.3148564453125, 2.314733642578125, 2.314330078125, 2.31438134765625, 2.3142685546875, 2.3148154296875, 2.315052978515625, 2.31476318359375, 2.31545751953125, 2.315313232421875, 2.315279296875, 2.315707275390625, 2.314758056640625, 2.3152158203125, 2.31581689453125, 2.315241455078125, 2.3146220703125, 2.315763671875, 2.3165615234375, 2.31669873046875, 2.3161025390625, 2.31678759765625, 2.31653173828125, 2.316707763671875, 2.3152138671875, 2.31585693359375, 2.314685546875, 2.315296875, 2.3153837890625, 2.315890625, 4.7694091796875, 2.314258544921875, 2.3142666015625, 2.31421435546875, 2.314051513671875, 2.314406982421875, 2.314616943359375, 2.314188720703125, 2.31503662109375, 2.31619287109375, 2.31446533203125, 2.313934814453125, 2.315040771484375, 2.314817626953125, 2.315828125, 2.3156357421875, 2.316775390625, 2.314610595703125, 2.31668017578125, 2.31729052734375, 2.31545556640625, 2.315461669921875, 2.315554931640625, 2.31465576171875, 2.314517578125, 2.3154697265625, 2.31606689453125, 2.314260498046875, 2.314863525390625, 2.314272705078125, 2.314649658203125, 2.3157421875, 2.315529296875, 2.3159091796875, 2.314768310546875, 2.3160771484375, 2.315504638671875, 2.314666015625, 2.314771484375, 2.315051025390625, 2.3154248046875, 2.314206298828125, 2.314478515625, 2.31497021484375, 2.315460693359375, 2.314807373046875, 2.31501708984375, 2.315440185546875, 2.315284423828125, 2.31450634765625, 2.314502197265625, 2.314387451171875, 2.314957763671875, 2.3149384765625, 2.314912841796875, 2.314670166015625, 2.3156572265625, 2.316516357421875, 2.3157412109375, 2.315861083984375, 2.315828125, 2.315040771484375, 2.3151728515625, 4.7702314453125, 2.31434033203125, 2.314682373046875, 2.313740234375, 2.314062744140625, 2.31486767578125, 2.314884033203125, 2.314498046875, 2.314965087890625, 2.314472412109375, 2.314133544921875, 2.31417041015625, 2.313879638671875, 2.313923583984375, 2.314146728515625, 2.314799072265625, 2.3140986328125, 2.314390625, 2.315798583984375, 2.31642822265625, 2.315537353515625, 2.316564453125, 2.314660888671875, 2.313965576171875, 2.314789794921875, 2.31402294921875, 2.314662841796875, 2.314347412109375, 2.315102294921875, 2.3144150390625, 2.315137939453125, 2.31604833984375, 2.315811767578125, 2.3149404296875, 2.31493115234375, 2.315124755859375, 2.3152158203125, 2.314859619140625, 2.315197509765625, 2.314984375, 2.315462646484375, 2.31446533203125, 2.3143095703125, 2.31438037109375, 2.31520556640625, 2.31512158203125, 
2.314987548828125, 2.314483642578125, 2.31514208984375, 2.314712158203125, 2.31526513671875, 2.314966064453125, 2.314924072265625, 2.314987548828125, 2.31532958984375, 2.31476318359375, 2.3151298828125, 2.315618408203125, 2.316012451171875, 2.317046875, 2.316642333984375, 2.317076416015625, 2.317365234375, 4.7678056640625, 2.314080322265625, 2.31505908203125, 2.315978759765625, 2.315252685546875, 2.31602392578125, 2.3155966796875, 2.31449609375, 2.31440087890625, 2.31406396484375, 2.31448583984375, 2.314968017578125, 2.314263671875, 2.31400146484375, 2.314314697265625, 2.314521484375, 2.3153408203125, 2.31357861328125, 2.3147294921875, 2.31446728515625, 2.315052978515625, 2.314033203125, 2.314588134765625, 2.314469482421875, 2.31505517578125, 2.3144765625, 2.3147109375, 2.31465478515625, 2.315134033203125, 2.31459521484375, 2.31478466796875, 2.314600341796875, 2.314883056640625, 2.315136962890625, 2.315252685546875, 2.31488818359375, 2.315322265625, 2.31524755859375, 2.31511767578125, 2.3150263671875, 2.3153603515625, 2.315388916015625, 2.31535009765625, 2.31446826171875, 2.31495068359375, 2.31469775390625, 2.315263916015625, 2.314312744140625, 2.3146865234375, 2.314904541015625, 2.31448974609375, 2.314483642578125, 2.314174560546875, 2.314915771484375, 2.314462158203125, 2.3165869140625, 2.317093994140625, 2.31678759765625, 2.315781005859375, 2.315177978515625, 2.315336669921875, 2.31464453125, 2.31521484375, 4.770552734375, 2.314354736328125, 2.31482568359375, 2.313882568359375, 2.31376171875, 2.313788330078125, 2.315693115234375, 2.31585986328125, 2.31478271484375, 2.315926513671875, 2.3154453125, 2.315216796875, 2.314701904296875, 2.315554931640625, 2.31552197265625, 2.315801513671875, 2.316396484375, 2.314292236328125, 2.314648681640625, 2.31490771484375, 2.315061279296875, 2.3164384765625, 2.316370849609375, 2.316866455078125, 2.31659521484375, 2.31632080078125, 2.315591796875, 2.31587646484375, 2.3159716796875, 2.315903076171875, 2.315663330078125, 2.31596240234375, 2.316310546875, 2.314661865234375, 2.315462646484375, 2.31440380859375, 2.31526611328125, 2.315337646484375, 2.315576416015625, 2.31480419921875, 2.314577880859375, 2.316158935546875, 2.316590087890625, 2.31474072265625, 2.314672119140625, 2.315419677734375, 2.315107421875, 2.315031494140625, 2.314947509765625, 2.31512158203125, 2.31507861328125, 2.315275146484375, 2.31488916015625, 2.314337158203125, 2.315274169921875, 2.315357177734375, 2.314990478515625, 2.314895263671875, 2.315843505859375, 2.315281494140625, 2.315041748046875, 2.314649658203125, 2.3151708984375, 4.7742177734375, 2.314451904296875, 2.314017822265625, 2.316307373046875, 2.31636376953125, 2.315851806640625, 2.3162041015625, 2.315658203125, 2.315419677734375, 2.315569091796875, 2.31410693359375, 2.3144345703125, 2.3141181640625, 2.3143701171875, 2.314511474609375, 2.314544189453125, 2.31514208984375, 2.314577880859375, 2.314501220703125, 2.314337158203125, 2.315443115234375, 2.314441650390625, 2.314269775390625, 2.3141171875, 2.31440380859375, 2.315146240234375, 2.31492919921875, 2.314220458984375, 2.31446533203125, 2.3145595703125, 2.314896484375, 2.3150439453125, 2.314417236328125, 2.31532861328125, 2.315260009765625, 2.315807861328125, 2.31515234375, 2.315829345703125, 2.315790283203125, 2.3159296875, 2.31573193359375, 2.315969482421875, 2.316851318359375, 2.3155537109375, 2.314724365234375, 2.3145625, 2.315716552734375, 2.31531005859375, 2.315375732421875, 2.315345947265625, 2.31587744140625, 2.31564599609375, 2.315336669921875, 2.3145, 
2.31510107421875, 2.31535400390625, 2.314984375, 2.31453076171875, 2.3153857421875, 2.314976318359375, 2.315102294921875, 2.31464453125, 2.31491064453125, 4.77313525390625, 2.31535205078125, 2.314526611328125, 2.3150869140625, 2.315890625, 2.314620849609375, 2.3140322265625, 2.313732177734375, 2.31478466796875, 2.31486669921875, 2.3156357421875, 2.31585986328125, 2.314743896484375, 2.314322021484375, 2.3139072265625, 2.31398291015625, 2.314142822265625, 2.31423388671875, 2.31518408203125, 2.31429833984375, 2.314175537109375, 2.3156572265625, 2.3145810546875, 2.313807861328125, 2.314330078125, 2.31499169921875, 2.314958740234375, 2.3144365234375, 2.3145185546875, 2.31440185546875, 2.315052978515625, 2.314460205078125, 2.3145419921875, 2.3140576171875, 2.314851318359375, 2.314546142578125, 2.314408935546875, 2.31389892578125, 2.31480419921875, 2.31436279296875, 2.3144580078125, 2.314177490234375, 2.314638427734375, 2.3145419921875, 2.31480322265625, 2.314745849609375, 2.314291259765625, 2.31537255859375, 2.31572265625, 2.31389599609375, 2.313974853515625, 2.314586181640625, 2.314895263671875, 2.314090576171875, 2.314375244140625, 2.314859619140625, 2.3148583984375, 2.3144326171875, 2.314412109375, 2.314270751953125, 2.31518115234375, 2.314324951171875, 2.314473388671875, 4.7714609375, 2.31414794921875, 2.313611328125, 2.314270751953125, 2.314892333984375, 2.31461572265625, 2.315198486328125, 2.3143720703125, 2.314142822265625, 2.31568701171875, 2.315873291015625, 2.31440478515625, 2.314210205078125, 2.31602587890625, 2.316198974609375, 2.31553857421875, 2.315966552734375, 2.31545947265625, 2.31545849609375, 2.31590185546875, 2.31619287109375, 2.31564599609375, 2.316030029296875, 2.315187255859375, 2.314642333984375, 2.313966552734375, 2.3152138671875, 2.31450830078125, 2.315548583984375, 2.314388427734375, 2.315707275390625, 2.3161650390625, 2.31663623046875, 2.316718994140625, 2.315875244140625, 2.315216796875, 2.31624609375, 2.315987060546875, 2.315421630859375, 2.315747314453125, 2.314558349609375, 2.313786376953125, 2.313956298828125, 2.3144541015625, 2.3148984375, 2.314367919921875, 2.31458203125, 2.315187255859375, 2.31473046875, 2.314349609375, 2.314651611328125, 2.31442626953125, 2.315454345703125, 2.31490771484375, 2.314850341796875, 2.315431884765625, 2.31543798828125, 2.314609619140625, 2.31511767578125, 2.314015625, 2.31457177734375, 2.314642333984375, 2.31451953125]",tokens/s,0.4255140125292758,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1562.181632,1957.167104,0.0,1310.72,1163.955712,s,10,1.3245970916748049,0.13245970916748048,0.001266998397567483,0.13200260162353517,0.1341699493408203,0.13443305816650392,0.1346435452270508,"[0.1346961669921875, 0.1341114807128906, 0.1312022705078125, 0.13115029907226564, 0.13112367248535156, 0.1315875244140625, 0.13163551330566406, 0.13236968994140624, 0.13365353393554688, 
0.13306694030761718]",tokens/s,1932.6631593031557,kWh,1.5487117942793545e-06,8.486256414471426e-07,6.508118482104666e-06,8.905455917831163e-06,tokens/kWh,28746422.683134936,MB,1562.181632,1959.264256,0.0,1312.817152,1232.77568,s,10,76.92137255859376,7.692137255859374,0.017487914910414803,7.693824951171875,7.702772509765625,7.711682641601563,7.718810747070313,"[7.69504345703125, 7.69082470703125, 7.6926064453125, 7.6866689453125, 7.7205927734375, 7.69639892578125, 7.6467568359375, 7.70079248046875, 7.69208056640625, 7.699607421875]",tokens/s,8.190181467707257,kWh,9.084797721340027e-05,4.979003748630931e-05,0.0003722497123017,0.0005128877270014096,tokens/kWh,122833.9004489902,,s,629,77.98684770965575,0.12398544945891218,0.01575529620495015,0.121744384765625,0.12330782623291016,0.12368689422607422,0.2534359008789062,"[0.12507750701904297, 0.12424604797363281, 0.1234441909790039, 0.12341862487792969, 0.12180786895751954, 0.1224120330810547, 0.12155903625488282, 0.12088832092285157, 0.12079001617431641, 0.12080332946777343, 0.1209518051147461, 0.12134912109375, 0.12099378967285156, 0.12220416259765625, 0.12147408294677735, 0.12154876708984375, 0.12117708587646485, 0.12111360168457032, 0.12086067199707032, 0.12105318450927735, 0.12099174499511718, 0.12216934204101562, 0.12297727966308594, 0.12166143798828125, 0.12153446197509765, 0.12155801391601563, 0.12161023712158203, 0.12131839752197265, 0.12115455627441406, 0.12147609710693359, 0.12320972442626953, 0.12328857421875, 0.12294041442871094, 0.1213460464477539, 0.12115360260009765, 0.12151289367675781, 0.12153343963623046, 0.12244172668457032, 0.12324454498291015, 0.12321791839599609, 0.1221396484375, 0.123504638671875, 0.12190930938720704, 0.12093536376953125, 0.12353228759765625, 0.1231247329711914, 0.12405248260498047, 0.12286566162109375, 0.12216831970214843, 0.1221580810546875, 0.12220928192138672, 0.12256153869628907, 0.12320460510253907, 0.12468940734863282, 0.12308889770507812, 0.1220997085571289, 0.12128972625732422, 0.12146380615234376, 0.12163587188720704, 0.12112380981445313, 0.12155289459228516, 0.12149759674072266, 0.2558975982666016, 0.12277452850341797, 0.12251545715332031, 0.12256665802001954, 0.12305101013183593, 0.12229837036132812, 0.1212968978881836, 0.1239582748413086, 0.12347596740722656, 0.12176076507568359, 0.12155289459228516, 0.12141977691650391, 0.1213685760498047, 0.1208463363647461, 0.12073062133789063, 0.1210040283203125, 0.12351385498046875, 0.12159487915039062, 0.12144640350341797, 0.12170649719238281, 0.12249292755126953, 0.12232396697998046, 0.12123442840576172, 0.12141567993164062, 0.12308889770507812, 0.12184268951416016, 0.1215098876953125, 0.12254208374023437, 0.12142387390136719, 0.12173619079589844, 0.12151910400390625, 0.12153446197509765, 0.12306739044189453, 0.12236492919921875, 0.12201881408691406, 0.12173824310302735, 0.12155699157714844, 0.12209356689453126, 0.12139315032958985, 0.12139520263671875, 0.12346572875976562, 0.12301107025146485, 0.12293836975097656, 0.12192870330810547, 0.12156313323974609, 0.12141056060791015, 0.12382415771484374, 0.12120060729980468, 0.12172902679443359, 0.12317593383789062, 0.12274995422363282, 0.12246323394775391, 0.12149657440185548, 0.12139110565185547, 0.12291788482666016, 0.12123340606689453, 0.12196150207519531, 0.12387631988525391, 0.12276838684082031, 0.12148223876953125, 0.12265267181396484, 0.1214044189453125, 0.12125389099121094, 0.25443635559082034, 0.12313497924804688, 0.12153446197509765, 0.12169830322265625, 0.12147097778320312, 0.12127334594726563, 
0.12249088287353516, 0.1221396484375, 0.12196044921875, 0.12192460632324219, 0.12144742584228516, 0.1228226547241211, 0.12275917053222657, 0.12112697601318359, 0.12176787567138672, 0.1217228775024414, 0.12139424133300782, 0.12269356536865235, 0.1223720932006836, 0.12153343963623046, 0.1215478057861328, 0.12162351989746094, 0.12261682891845703, 0.12271308898925781, 0.1215098876953125, 0.12233932495117188, 0.12170342254638672, 0.121238525390625, 0.12113817596435547, 0.12111769866943359, 0.12136653137207032, 0.12165529632568359, 0.1218897933959961, 0.121744384765625, 0.12428594970703125, 0.12338790130615235, 0.12270489501953125, 0.1215129623413086, 0.12190310668945313, 0.12194918060302734, 0.12152114868164063, 0.12212838745117187, 0.12310630035400391, 0.12293427276611328, 0.12179046630859375, 0.12117298889160157, 0.12185906982421875, 0.12301107025146485, 0.1216522216796875, 0.120953857421875, 0.12247654724121093, 0.12268236541748047, 0.12308684539794922, 0.12277657318115234, 0.12169420623779297, 0.12158668518066407, 0.12113510131835938, 0.1211361312866211, 0.1227540512084961, 0.12328550720214844, 0.12320972442626953, 0.12324147033691406, 0.12298242950439453, 0.25302217102050784, 0.1215467529296875, 0.12238642883300781, 0.12331622314453125, 0.12284416198730469, 0.12259225463867188, 0.1225902099609375, 0.12293222045898437, 0.12212531280517579, 0.12088832092285157, 0.12144127655029296, 0.12321894073486328, 0.12279398345947265, 0.12161740875244141, 0.12199628448486328, 0.12130611419677734, 0.12155699157714844, 0.12218367767333985, 0.12152627563476562, 0.12282061004638672, 0.12332236480712891, 0.12172697448730468, 0.1214730224609375, 0.12131743621826171, 0.12206380462646485, 0.12272434997558594, 0.12205158233642578, 0.12277043151855468, 0.12318822479248047, 0.12324352264404297, 0.12150886535644531, 0.12119551849365234, 0.12129280090332031, 0.12111567687988281, 0.1214658203125, 0.12191129302978515, 0.12111974334716796, 0.1216358413696289, 0.12116377258300781, 0.12117708587646485, 0.12161539459228515, 0.1231072998046875, 0.12339097595214844, 0.12407295989990234, 0.12407295989990234, 0.12135116577148437, 0.12155903625488282, 0.12125183868408203, 0.12111154937744141, 0.1215272979736328, 0.12139315032958985, 0.12157440185546875, 0.12285030364990235, 0.12157644653320313, 0.1217976303100586, 0.12137881469726562, 0.12175360107421875, 0.12123750305175782, 0.12148838043212891, 0.12119859313964844, 0.12210585784912109, 0.12211199951171875, 0.12212940979003906, 0.2530303955078125, 0.12125593566894531, 0.12254003143310546, 0.1212252197265625, 0.12286156463623046, 0.12248985290527344, 0.12266598510742187, 0.12236185455322265, 0.12159385681152343, 0.1212590103149414, 0.12165631866455077, 0.12115660858154297, 0.12235878753662109, 0.12386713409423829, 0.1217802276611328, 0.12304691314697265, 0.12239974212646484, 0.12245503997802734, 0.12413849639892578, 0.12299366760253906, 0.12299263763427734, 0.12301107025146485, 0.12142489624023438, 0.12230963134765625, 0.12332236480712891, 0.12175052642822265, 0.12143718719482421, 0.12149247741699219, 0.1212938232421875, 0.12226150512695312, 0.12151193237304687, 0.12162457275390624, 0.12326092529296875, 0.12302438354492187, 0.12452352142333985, 0.12339405059814453, 0.12212735748291016, 0.12276131439208984, 0.12299766540527343, 0.1216911392211914, 0.12096409606933593, 0.12123033905029297, 0.12140953826904297, 0.12123238372802735, 0.12380467224121093, 0.12290969848632813, 0.12300806427001953, 0.12272940826416015, 0.12320665740966796, 0.12299878692626953, 0.12275917053222657, 
0.12293119812011719, 0.12306432342529297, 0.12317798614501953, 0.12322406768798828, 0.12217958068847656, 0.1228267822265625, 0.12593353271484375, 0.1234872283935547, 0.12296601867675781, 0.12340940856933594, 0.12347187042236328, 0.12346470642089843, 0.25439436340332033, 0.12301414489746093, 0.1228062744140625, 0.1231431655883789, 0.12312268829345703, 0.12464742279052735, 0.12372991943359375, 0.1234708480834961, 0.12335411071777344, 0.12233523559570313, 0.12217958068847656, 0.12264959716796875, 0.12329881286621094, 0.12281549072265625, 0.12341248321533203, 0.123109375, 0.12312268829345703, 0.12331110382080078, 0.12178943634033203, 0.12153343963623046, 0.12128153228759765, 0.12127027130126954, 0.12139008331298828, 0.12159487915039062, 0.12141875457763672, 0.12109516906738281, 0.12095590209960938, 0.12107469177246094, 0.12224205017089844, 0.1224263687133789, 0.12177005004882813, 0.12150163269042968, 0.12146585845947265, 0.12140953826904297, 0.12113005065917969, 0.12179347229003906, 0.12241817474365234, 0.12146585845947265, 0.1236316146850586, 0.12164096069335938, 0.12152934265136718, 0.12139622497558594, 0.12146688079833984, 0.12121600341796875, 0.12141567993164062, 0.12133478546142579, 0.12168498992919922, 0.1214167709350586, 0.12150675201416015, 0.12333977508544922, 0.12334899139404297, 0.12139826965332032, 0.12266291046142579, 0.12162457275390624, 0.1213675537109375, 0.12148838043212891, 0.123219970703125, 0.12320358276367188, 0.12155494689941407, 0.12110848236083985, 0.12138294219970704, 0.12153238677978516, 0.12358963012695312, 0.25359359741210935, 0.12145458984375, 0.12112588500976562, 0.12119039916992187, 0.12124671936035156, 0.12149964904785156, 0.12119961547851563, 0.12119245147705078, 0.12146688079833984, 0.12120269012451172, 0.12097740936279297, 0.1208616943359375, 0.12124671936035156, 0.12146176147460938, 0.12102349090576171, 0.1214505615234375, 0.12145657348632813, 0.12126924896240235, 0.12115869140625, 0.12115657806396485, 0.12137574768066406, 0.12156928253173828, 0.12129894256591797, 0.1242234878540039, 0.12195740509033202, 0.12128765106201173, 0.12127436828613282, 0.12119142150878906, 0.12085968017578125, 0.12119548797607421, 0.12113203430175781, 0.12153139495849609, 0.12129587554931641, 0.12157234954833984, 0.12127641296386718, 0.12110438537597656, 0.12128870391845703, 0.12145970916748047, 0.12127846527099609, 0.12160921478271484, 0.12154879760742188, 0.1213306884765625, 0.12123238372802735, 0.12116070556640625, 0.12110643005371094, 0.12110031890869141, 0.12115042877197266, 0.12125593566894531, 0.12130406188964844, 0.12108185577392579, 0.12092108917236329, 0.12128562927246093, 0.12140748596191406, 0.12148838043212891, 0.12150784301757812, 0.12131737518310547, 0.12162969970703125, 0.12153958129882812, 0.122281982421875, 0.12196249389648438, 0.12154879760742188, 0.1211883544921875, 0.12114022064208985, 0.25422848510742185, 0.12144435119628906, 0.12121600341796875, 0.12331622314453125, 0.12173926544189453, 0.12114329528808594, 0.1231800308227539, 0.1233438720703125, 0.1232721939086914, 0.12304691314697265, 0.12367155456542969, 0.12309913635253907, 0.12304998779296875, 0.12310630035400391, 0.12295680236816406, 0.12294348907470704, 0.12386918640136718, 0.12152320098876954, 0.12160205078125, 0.121059326171875, 0.12094258880615234, 0.12096717071533203, 0.12145868682861329, 0.12323331451416016, 0.12289020538330078, 0.12314832305908203, 0.1236971206665039, 0.12315955352783203, 0.12311039733886718, 0.12137471771240234, 0.12235366058349609, 0.1222451171875, 0.1222973403930664, 
0.12167884826660157, 0.12279296112060546, 0.12148735809326172, 0.12146995544433593, 0.12277760314941406, 0.12156723022460937, 0.1216358413696289, 0.12169728088378906, 0.12312166595458984, 0.12145664215087891, 0.12275507354736329, 0.1232353286743164, 0.12311347198486328, 0.123072509765625, 0.1227171859741211, 0.12111154937744141, 0.12276019287109376, 0.12255232238769531, 0.12235059356689452, 0.12264141082763672, 0.1211514892578125, 0.12123545837402344, 0.12166246032714843, 0.12119347381591797, 0.12130201721191407, 0.12149964904785156, 0.12166246032714843, 0.12141875457763672, 0.12106034851074218, 0.12132147216796875, 0.25745306396484374, 0.12312064361572266, 0.12307865905761718, 0.12312166595458984, 0.12121913909912109, 0.12116063690185547, 0.1212774429321289, 0.12236185455322265, 0.1215498275756836, 0.12132454681396485, 0.12093132781982421, 0.12260454559326171, 0.12201676940917969, 0.12116480255126953, 0.12119449615478516, 0.121206787109375, 0.12122112274169922, 0.12207103729248046, 0.12143001556396485, 0.12134400177001953, 0.12148531341552735, 0.12378009796142578, 0.12321279907226562, 0.12303257751464844, 0.12391117095947266, 0.12308889770507812, 0.12307762908935548, 0.12230758666992188, 0.12212636566162109, 0.12145769500732422, 0.12122207641601562, 0.12131123352050781, 0.12108799743652343, 0.1211156463623047, 0.12251033782958984, 0.12306432342529297, 0.12318310546875, 0.12159795379638672, 0.12140850830078125, 0.12139520263671875, 0.12131839752197265, 0.12117196655273438, 0.12120371246337891, 0.12156723022460937, 0.12148941040039063, 0.12145664215087891, 0.12146892547607421, 0.1215447006225586, 0.12250316619873047, 0.12295782470703125, 0.12217036437988281, 0.12270387268066406, 0.12305919647216797, 0.12331520080566406, 0.12305203247070312, 0.12250521850585938, 0.12303667449951172, 0.12276838684082031, 0.12293427276611328, 0.12293427276611328, 0.12159283447265624, 0.12137062072753907, 0.12141670227050781, 0.2548654022216797, 0.1212416000366211, 0.12151193237304687, 0.12308480072021484, 0.12140338897705077, 0.12150377655029297, 0.12124156951904297, 0.121275390625, 0.12150374603271484, 0.12148838043212891, 0.12146482849121094, 0.12227788543701172, 0.12289740753173828, 0.12292819213867187, 0.12136339569091797, 0.1211822052001953, 0.12195027160644531, 0.12174432373046876, 0.12153241729736328, 0.12099174499511718, 0.12128463745117188, 0.12138390350341798, 0.121385986328125, 0.12258918762207031, 0.1230161895751953, 0.12225638580322265, 0.12275917053222657, 0.12151602935791016, 0.12140850830078125, 0.12134809875488281, 0.12176076507568359, 0.1217791976928711, 0.12272946929931641, 0.12169420623779297, 0.12219187164306641, 0.12145561981201172, 0.12164096069335938, 0.12145254516601563, 0.12175360107421875, 0.12436787414550782, 0.1233602523803711, 0.12332339477539063, 0.1232701416015625, 0.12218675231933594, 0.1233039321899414, 0.12280217742919922, 0.12283596801757812, 0.1221048355102539, 0.12304793548583984, 0.1227509765625, 0.12293023681640625, 0.12202182769775391, 0.12274483489990234, 0.1233070068359375, 0.12290560150146485, 0.12302130889892578, 0.1229496307373047, 0.12324864196777344, 0.12319129943847656, 0.12199935913085938, 0.12175363159179688, 0.12325577545166015, 0.12313600158691407]",tokens/s,8.06546255519598,,,main,False,False,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa508-06e716dc7bd8955e0cad2f3c;2b0f0b48-bf56-40d7-b02f-9d9f146c3922) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1938.939904,5480.382464,0.0,4833.93536,4503.41376,s,10,5.748573669433593,0.5748573669433594,0.0017742246285147622,0.5745578002929688,0.5774038818359375,0.5778909057617188,0.5782805249023437,"[0.5783779296875, 0.577295654296875, 0.57393994140625, 0.573126220703125, 0.5733052978515625, 0.5724807739257812, 0.5741940307617187, 0.5749215698242187, 0.5756366577148437, 0.5752955932617188]",tokens/s,445.32785821499897,kWh,6.7656206220020495e-06,3.7063076173277895e-06,3.206590528233145e-05,4.2537833521661295e-05,tokens/kWh,6018172.0319545325,MB,1940.045824,5480.382464,0.0,4833.93536,4688.700416,s,10,334.972359375,33.49723593750001,0.0033039843001841955,33.498505859375,33.500188281250004,33.500191796875,33.500194609375,"[33.5001953125, 33.49962890625, 33.4886484375, 33.4951328125, 33.496859375, 33.49850390625, 33.49586328125, 33.5001875, 33.49883203125, 33.4985078125]",tokens/s,1.8807521945257513,kWh,0.0003954924626188515,0.00021676498843461862,0.0018363540061190774,0.0024486114571725475,tokens/kWh,25728.867605948046,,s,629,339.6100241088865,0.5399205470729518,0.06828609533356728,0.531684326171875,0.532189599609375,0.5323219116210938,1.1051400146484376,"[0.5314457397460938, 0.5318953247070313, 0.5313341674804688, 0.5320222778320313, 0.53230078125, 0.5322966918945312, 0.532052978515625, 0.5319219360351563, 0.5317222290039062, 0.5324605712890625, 0.5316915283203125, 0.5319751586914062, 0.5320540161132813, 0.532116455078125, 0.5318584594726562, 0.5316935424804687, 0.5311477661132813, 0.5319874267578125, 0.5314037475585938, 0.5320007934570312, 0.5313106079101563, 0.5315860595703125, 0.5312788696289062, 0.5318707275390625, 0.5317847290039063, 0.5322864379882812, 0.5312890625, 0.5320038452148438, 0.5311918334960938, 0.5317908325195313, 0.53142529296875, 0.5319515991210938, 0.5317119750976562, 0.5320806274414063, 0.5313812255859375, 0.53172119140625, 0.5311549682617187, 0.532105224609375, 0.5318379516601562, 0.5316167602539063, 0.5311580200195313, 0.5316771850585937, 0.5315532836914062, 0.532158447265625, 0.531968017578125, 0.5320550537109375, 0.5314191284179688, 0.5322158203125, 0.5313853149414063, 0.5317304077148437, 0.5311661987304688, 0.5319700317382813, 0.5315399780273438, 0.5316935424804687, 0.531431396484375, 0.5315706787109375, 0.53121435546875, 0.5316372680664062, 0.5313402709960937, 0.531820556640625, 0.5316812744140625, 0.532168701171875, 1.109749755859375, 0.531346435546875, 0.5321318359375, 0.5310955810546875, 0.5315061645507813, 0.531493896484375, 0.5316331787109375, 0.5316638793945313, 0.5315604248046875, 0.531146728515625, 0.5316085815429688, 0.5309306640625, 0.5317273559570312, 0.5312214965820312, 0.5316608276367187, 0.5313013916015625, 0.5319352416992188, 0.5315829467773437, 0.5317447509765625, 0.5317273559570312, 0.53187890625, 0.5315963134765626, 0.53159423828125, 
0.5313740844726562, 0.532463623046875, 0.5318870849609375, 0.5320325317382812, 0.5313720092773437, 0.531726318359375, 0.5309389038085938, 0.5318615112304688, 0.5310996704101563, 0.5315245971679687, 0.5312379150390625, 0.5315072021484375, 0.5316474609375, 0.5321410522460938, 0.5322495727539063, 0.5316792602539062, 0.5317243041992188, 0.5318717651367187, 0.531431396484375, 0.5317796020507812, 0.5316239624023438, 0.5317867431640625, 0.5317509155273438, 0.533918701171875, 0.5316608276367187, 0.5317805786132812, 0.5316536254882812, 0.5319649047851562, 0.531599365234375, 0.531768310546875, 0.5318400268554687, 0.5323099975585938, 0.5319802856445313, 0.5322926025390625, 0.5316300659179688, 0.532094970703125, 0.5318133544921875, 0.531968017578125, 0.5319454956054688, 0.5321953125, 1.106044921875, 0.5311734008789063, 0.5317723999023437, 0.5311109008789062, 0.5316505737304688, 0.5316710205078125, 0.5314662475585937, 0.531103759765625, 0.5316044921875, 0.531599365234375, 0.5319229736328125, 0.5315625, 0.5319280395507813, 0.5312767944335938, 0.5315369262695312, 0.5314559936523438, 0.531651611328125, 0.5313187866210938, 0.5317222290039062, 0.5312973022460937, 0.5317273559570312, 0.5311549682617187, 0.531431396484375, 0.531472412109375, 0.532068359375, 0.5310628051757813, 0.5315819702148438, 0.5311876831054687, 0.5319567260742187, 0.5312583618164063, 0.5321011352539062, 0.5313074951171874, 0.5321062622070313, 0.531146728515625, 0.5314539794921875, 0.5311528930664062, 0.5316218872070313, 0.531188720703125, 0.531778564453125, 0.53129931640625, 0.53172021484375, 0.53125634765625, 0.5316557006835938, 0.5312962646484375, 0.5317734375, 0.531356689453125, 0.5318450927734375, 0.5314447631835938, 0.5319024658203125, 0.531715087890625, 0.5318553466796875, 0.5314600830078124, 0.5316638793945313, 0.5312317504882812, 0.5315184936523437, 0.5314949340820313, 0.5321676635742187, 0.5318441162109375, 0.5320765380859375, 0.5313792114257813, 0.5318953247070313, 0.5314150390625, 0.53174169921875, 1.1051417236328125, 0.531535888671875, 0.5323489379882812, 0.5317929077148438, 0.53146728515625, 0.5314027709960938, 0.5317069091796875, 0.5312337646484375, 0.5317662963867188, 0.531146728515625, 0.5315819702148438, 0.531715087890625, 0.5320007934570312, 0.531135498046875, 0.5316823120117188, 0.5312440185546875, 0.531652587890625, 0.5312808837890625, 0.53197314453125, 0.53145703125, 0.5322874755859375, 0.5318850708007813, 0.5318379516601562, 0.5312071533203125, 0.531473388671875, 0.531135498046875, 0.531435546875, 0.5312982788085937, 0.5317243041992188, 0.5311580200195313, 0.5318113403320313, 0.5314652099609375, 0.5317734375, 0.53153076171875, 0.5319024658203125, 0.5314232177734375, 0.5322066040039063, 0.5310986328125, 0.5315245971679687, 0.53191064453125, 0.5318922119140626, 0.53157275390625, 0.5320130615234375, 0.5313024291992188, 0.5326827392578125, 0.5315870971679687, 0.5319966430664063, 0.5312655639648437, 0.5317652587890624, 0.5317560424804687, 0.532094970703125, 0.5314744262695312, 0.5314744262695312, 0.5314232177734375, 0.5318430786132813, 0.531373046875, 0.5317376098632812, 0.5314959106445313, 0.53226904296875, 0.5319168090820312, 0.5326305541992188, 0.5315430297851562, 0.5318215942382812, 1.1051356201171876, 0.5312973022460937, 0.5315819702148438, 0.5314774780273438, 0.531794921875, 0.5313423461914063, 0.5317007446289063, 0.5312061157226563, 0.532337646484375, 0.5311897583007813, 0.5318450927734375, 0.5311968994140625, 0.5319700317382813, 0.5320519409179687, 0.5319915771484375, 0.531267578125, 0.5318911743164062, 
0.531177490234375, 0.5319485473632812, 0.5313751220703125, 0.5316874389648437, 0.5312163696289063, 0.5318246459960938, 0.5318041381835937, 0.5321021728515625, 0.5312501831054688, 0.5314898071289063, 0.5313935546875, 0.5318809814453125, 0.5312296752929687, 0.5316013793945312, 0.5316351928710937, 0.5323223266601562, 0.5317406616210938, 0.5318369140625, 0.5311907958984375, 0.5315348510742187, 0.5314457397460938, 0.531673095703125, 0.53165771484375, 0.5315717163085938, 0.531652587890625, 0.5317069091796875, 0.5313167114257813, 0.53161474609375, 0.5311897583007813, 0.5315440673828125, 0.5312553100585937, 0.5322280883789062, 0.5312880859375, 0.5325035400390625, 0.5322506103515625, 0.5319270629882813, 0.5315594482421875, 0.53212158203125, 0.5314180908203125, 0.5320048828125, 0.5329141845703125, 0.5321277465820312, 0.5319035034179688, 0.5321103515625, 0.5317222290039062, 0.5319536743164063, 1.104712646484375, 0.531314697265625, 0.531794921875, 0.531409912109375, 0.5321390380859375, 0.5317406616210938, 0.5324042358398438, 0.5315665893554687, 0.531862548828125, 0.5314232177734375, 0.5315747680664062, 0.5311682739257813, 0.5318154296875, 0.53142529296875, 0.5318092651367188, 0.5314898071289063, 0.5318870849609375, 0.531378173828125, 0.53172119140625, 0.5311692504882812, 0.531430419921875, 0.5313966064453125, 0.531857421875, 0.5313710327148438, 0.5323397216796875, 0.5316075439453125, 0.5323120727539062, 0.5317294311523437, 0.5321543579101562, 0.5312010498046875, 0.5318461303710937, 0.5319188232421875, 0.532210693359375, 0.5314221801757812, 0.5321881713867187, 0.5314324340820312, 0.5315921630859375, 0.5312501831054688, 0.5330370483398438, 0.531240966796875, 0.5315143432617188, 0.5314150390625, 0.5320355834960937, 0.531736572265625, 0.532005859375, 0.5315584106445312, 0.5322998046875, 0.5318276977539063, 0.5317406616210938, 0.53136279296875, 0.5315768432617187, 0.531726318359375, 0.5317335205078125, 0.5318584594726562, 0.5316566772460938, 0.531314697265625, 0.5316884765625, 0.5317488403320313, 0.5318338623046875, 0.5312901000976562, 0.53187890625, 0.5315584106445312, 0.5323212890625, 1.1061114501953124, 0.5317314453125, 0.5319290771484375, 0.531072998046875, 0.5316290283203124, 0.5314488525390625, 0.5314508666992187, 0.5313126220703125, 0.5319659423828125, 0.5314447631835938, 0.5315645141601563, 0.5322833862304688, 0.5317775268554688, 0.5313228759765625, 0.5322045288085937, 0.53157373046875, 0.5320222778320313, 0.53178369140625, 0.5322998046875, 0.5316710205078125, 0.5317069091796875, 0.531219482421875, 0.5317181396484375, 0.5315245971679687, 0.531736572265625, 0.5315369262695312, 0.53231103515625, 0.5319618530273438, 0.5321339111328125, 0.5312696533203125, 0.5317406616210938, 0.53157275390625, 0.5317908325195313, 0.5312593994140625, 0.5319639282226563, 0.5313003540039063, 0.5317816162109374, 0.5313792114257813, 0.531936279296875, 0.5312071533203125, 0.5314293823242188, 0.5310996704101563, 0.531589111328125, 0.5313955688476563, 0.53193115234375, 0.5313556518554687, 0.5319588012695312, 0.531583984375, 0.532021240234375, 0.5315205078125, 0.5315798950195313, 0.5314150390625, 0.5316608276367187, 0.53139453125, 0.5319014282226563, 0.5313863525390625, 0.5320611572265626, 0.5318748168945312, 0.5321339111328125, 0.5318461303710937, 0.5317355346679687, 0.5316792602539062, 0.5318819580078125, 1.1075174560546874, 0.531072021484375, 0.5316382446289063, 0.5312890625, 0.53142529296875, 0.531535888671875, 0.5324411010742187, 0.5314232177734375, 0.5321287841796875, 0.5315972900390625, 0.5322034912109375, 
0.5314857177734374, 0.5315389404296875, 0.531357666015625, 0.5317713623046875, 0.5311488037109375, 0.5317294311523437, 0.5311682739257813, 0.531652587890625, 0.5319782104492188, 0.5317386474609375, 0.5313341674804688, 0.5316669311523438, 0.5313074951171874, 0.5316454467773437, 0.5311918334960938, 0.5317662963867188, 0.5315706787109375, 0.5321380004882813, 0.5316188354492187, 0.5322280883789062, 0.5312737426757812, 0.5317304077148437, 0.5311057739257813, 0.5317775268554688, 0.5316751098632813, 0.5317723999023437, 0.5313269653320313, 0.532220947265625, 0.531409912109375, 0.531726318359375, 0.5313054809570312, 0.5317427368164063, 0.5314866943359375, 0.5319229736328125, 0.5314406127929687, 0.532294677734375, 0.5323673706054688, 0.5322874755859375, 0.5322711181640625, 0.5320068969726562, 0.5316044921875, 0.5322977294921875, 0.531583984375, 0.5319116821289063, 0.5315798950195313, 0.532516845703125, 0.5323622436523437, 0.5323622436523437, 0.5315491943359375, 0.5319935913085938, 0.5320017700195312, 0.531962890625, 1.1080765380859374, 0.5319547119140625, 0.5317703857421875, 0.53150927734375, 0.5314652099609375, 0.5311805419921874, 0.53163623046875, 0.5313218383789062, 0.531493896484375, 0.531631103515625, 0.5318482055664062, 0.5316925659179688, 0.5316792602539062, 0.5311713256835937, 0.53180517578125, 0.5314037475585938, 0.531620849609375, 0.5318748168945312, 0.5327401123046875, 0.5320171508789062, 0.5328353271484375, 0.5319864501953125, 0.5319833374023437, 0.5312788696289062, 0.53235302734375, 0.5315451049804687, 0.5321615600585937, 0.5315635375976563, 0.5320089721679687, 0.5313218383789062, 0.5321666259765625, 0.5312501831054688, 0.5315911865234375, 0.5312686157226563, 0.5321390380859375, 0.531409912109375, 0.531768310546875, 0.5311641845703124, 0.5316935424804687, 0.5315829467773437, 0.5322066040039063, 0.5317447509765625, 0.5320120239257813, 0.5312399291992187, 0.5316751098632813, 0.5313863525390625, 0.5317621459960937, 0.531684326171875, 0.5316658935546875, 0.5314549560546875, 0.5319035034179688, 0.531262451171875, 0.5315963134765626, 0.5311375122070312, 0.5317283935546875, 0.5315194702148438, 0.5318932495117188, 0.532084716796875, 0.5324154663085937, 0.5319188232421875, 0.5318717651367187, 0.5313474731445312, 0.5320181884765625, 1.10763720703125, 0.5311539306640625, 0.5317109985351562, 0.5316484985351563, 0.5316484985351563, 0.5312767944335938, 0.5317509155273438, 0.5310279541015624, 0.5314611206054688, 0.5312849731445313, 0.53146728515625, 0.5310648193359375, 0.5322076416015625, 0.53161572265625, 0.5324257202148438, 0.5314857177734374, 0.5316536254882812, 0.5312450561523437, 0.5317069091796875, 0.5315901489257813, 0.5321072387695313, 0.5315020751953125, 0.531820556640625, 0.5321041870117188, 0.5318809814453125, 0.5312317504882812, 0.532041748046875, 0.5315451049804687, 0.5318154296875, 0.5316792602539062, 0.5321062622070313, 0.5322495727539063, 0.5319905395507812, 0.531694580078125, 0.5317621459960937, 0.5315451049804687, 0.5317488403320313, 0.5312747802734376, 0.5321328735351563, 0.531746826171875, 0.5318154296875, 0.5317119750976562, 0.531684326171875, 0.5313290405273438, 0.5314979858398438, 0.5311682739257813, 0.5318164672851563, 0.5316884765625, 0.53195263671875, 0.5318246459960938, 0.532116455078125, 0.5315665893554687, 0.5320376586914063, 0.5314426879882812, 0.5319772338867188, 0.5315123291015625, 0.53215234375, 0.53184716796875, 0.5321748657226563, 0.5318123779296875, 0.5321236572265625, 0.5316618041992187, 0.5318829956054687]",tokens/s,1.8521243642629592,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1888.280576,15194.390528,0.0,14547.943424,13898.252288,s,10,16.972713500976564,1.6972713500976564,0.0011802396473078822,1.6969771728515626,1.6988580200195311,1.699245782470703,1.6995559924316406,"[1.6969825439453126, 1.6969718017578126, 1.6960919189453125, 1.6958106689453125, 1.6964619140625, 1.69635986328125, 1.6974122314453124, 1.6982171630859375, 1.699633544921875, 1.6987718505859375]",tokens/s,150.83033127570937,kWh,2.0026979611979592e-05,1.097496669508473e-05,9.603446571639917e-05,0.0001270364120234635,tokens/kWh,2015170.2643547354,MB,1888.280576,15194.390528,0.0,14547.943424,14315.97312,s,10,986.8612499999999,98.68612499999999,0.014850841233799409,98.68501953125,98.70599609374999,98.707103515625,98.707989453125,"[98.6797109375, 98.687671875, 98.6747890625, 98.6555546875, 98.6919140625, 98.6783203125, 98.6969609375, 98.7082109375, 98.70575, 98.6823671875]",tokens/s,0.6383876152802637,kWh,0.0011649224273860455,0.0006384806384544572,0.00558826860950019,0.007391671675340693,tokens/kWh,8523.105836826315,,s,629,1000.5712542724599,1.5907333136287136,0.2015788363996436,1.5663739013671876,1.5674868164062499,1.5676927978515625,3.262457841796875,"[1.5671644287109374, 1.5661424560546875, 1.56664013671875, 1.5672913818359375, 1.566525390625, 1.5666636962890625, 1.56687158203125, 1.566798828125, 1.565517822265625, 1.56556494140625, 1.56592333984375, 1.56533251953125, 1.5655465087890625, 1.5658004150390625, 1.565998046875, 1.5670538330078125, 1.5664508056640625, 1.56584033203125, 1.5667855224609375, 1.5674542236328124, 1.567055908203125, 1.5672022705078126, 1.5673907470703126, 1.56657666015625, 1.5663001708984374, 1.566665771484375, 1.5659376220703125, 1.5659520263671876, 1.5661055908203125, 1.566899169921875, 1.5660892333984375, 1.566604248046875, 1.5656611328125, 1.5667138671875, 1.565654052734375, 1.566857177734375, 1.5661802978515624, 1.5664854736328124, 1.5667210693359375, 1.5670538330078125, 1.56666162109375, 1.566857177734375, 1.5659581298828125, 1.565955078125, 1.5662294921875, 1.5661065673828125, 1.566191650390625, 1.5659642333984376, 1.5664527587890624, 1.5659898681640625, 1.5656510009765625, 1.5664803466796875, 1.566614501953125, 1.566317626953125, 1.5665059814453124, 1.565919189453125, 1.5657093505859374, 1.5657308349609376, 1.5665244140625, 1.565981689453125, 1.5660799560546874, 1.56632373046875, 3.263318115234375, 1.5650416259765625, 1.5667425537109374, 1.5671868896484376, 1.5674766845703125, 1.566899169921875, 1.5670477294921874, 1.567247314453125, 1.5669462890625, 1.566482421875, 1.56721044921875, 1.567352783203125, 1.56718603515625, 1.5668541259765625, 1.565632568359375, 1.5655035400390624, 1.565454345703125, 1.566171142578125, 1.5655423583984376, 1.5656632080078126, 1.565739013671875, 1.5655546875, 1.56594384765625, 1.565739990234375, 1.5656090087890624, 1.5654093017578126, 1.5656141357421876, 1.5657379150390625, 
1.565550537109375, 1.5656468505859376, 1.566182373046875, 1.56573388671875, 1.5660994873046874, 1.566614501953125, 1.5671705322265626, 1.5668858642578125, 1.5673907470703126, 1.5655372314453124, 1.5656827392578125, 1.567224853515625, 1.5674173583984374, 1.567141845703125, 1.567562744140625, 1.56704150390625, 1.566593994140625, 1.5660902099609375, 1.56824267578125, 1.56689404296875, 1.5673763427734375, 1.5663564453125, 1.566992431640625, 1.567836181640625, 1.5679334716796876, 1.5667803955078126, 1.5659478759765626, 1.566908447265625, 1.566277587890625, 1.5658690185546875, 1.5664803466796875, 1.5664271240234375, 1.5662049560546876, 1.5662171630859374, 1.5660595703125, 3.26247021484375, 1.5656990966796875, 1.5657799072265626, 1.566017578125, 1.5667598876953126, 1.565744140625, 1.566393310546875, 1.56580859375, 1.56586083984375, 1.5654072265625, 1.56617529296875, 1.5659202880859375, 1.5659725341796875, 1.5665029296875, 1.5664486083984375, 1.5655260009765626, 1.5660780029296875, 1.5663062744140626, 1.5667117919921876, 1.566983154296875, 1.5657728271484375, 1.565895751953125, 1.566373779296875, 1.5677890625, 1.566457763671875, 1.5663277587890625, 1.566962646484375, 1.566697509765625, 1.5665489501953125, 1.566899169921875, 1.56623046875, 1.566688232421875, 1.5666124267578125, 1.5657093505859374, 1.566341064453125, 1.5660462646484374, 1.5663482666015625, 1.566123046875, 1.5662838134765624, 1.5664619140625, 1.56621826171875, 1.566017578125, 1.5659622802734374, 1.565760498046875, 1.5661669921875, 1.566396484375, 1.5663912353515625, 1.5682620849609374, 1.5664476318359375, 1.5661475830078124, 1.56608203125, 1.56626123046875, 1.5659765625, 1.5659632568359374, 1.565811767578125, 1.56657763671875, 1.5658956298828124, 1.5657799072265626, 1.5660042724609375, 1.566275634765625, 1.5663533935546874, 1.56682958984375, 1.566255126953125, 3.26236865234375, 1.5657420654296874, 1.5653990478515625, 1.5662427978515625, 1.565697021484375, 1.5651962890625, 1.56592333984375, 1.5652525634765626, 1.5654676513671875, 1.5651666259765624, 1.56598583984375, 1.5653150634765625, 1.5663011474609374, 1.5655894775390624, 1.5655516357421875, 1.565338623046875, 1.5655577392578126, 1.5656806640625, 1.56554345703125, 1.56598779296875, 1.5655843505859375, 1.5654718017578124, 1.5656263427734376, 1.5660400390625, 1.5658824462890626, 1.5657850341796875, 1.5657625732421876, 1.5657196044921875, 1.56554443359375, 1.566350341796875, 1.5656375732421874, 1.5655997314453125, 1.566192626953125, 1.565496337890625, 1.5660390625, 1.566271484375, 1.5658936767578124, 1.565685791015625, 1.5660216064453125, 1.5656141357421876, 1.565828125, 1.566434326171875, 1.5664906005859376, 1.5654307861328125, 1.56860107421875, 1.56617529296875, 1.566096435546875, 1.5658311767578126, 1.5665806884765625, 1.5662591552734375, 1.5666104736328126, 1.56617724609375, 1.566076904296875, 1.5665244140625, 1.566587890625, 1.56634521484375, 1.5658741455078125, 1.5661434326171875, 1.566371826171875, 1.5663533935546874, 1.566328857421875, 1.5666011962890625, 1.5661240234375, 3.263153076171875, 1.56556494140625, 1.565706298828125, 1.5666165771484375, 1.566034912109375, 1.566002197265625, 1.5660831298828124, 1.565319091796875, 1.5665797119140625, 1.5655751953125, 1.5664332275390624, 1.5658896484375, 1.5659744873046875, 1.5658916015625, 1.566130126953125, 1.5668695068359375, 1.5666226806640624, 1.5660933837890625, 1.566509033203125, 1.5661363525390626, 1.5657738037109374, 1.5658199462890625, 1.5663206787109376, 1.5657840576171875, 1.566213134765625, 1.5667742919921874, 
1.5661240234375, 1.5659315185546876, 1.565498291015625, 1.5660472412109374, 1.5656407470703124, 1.56573486328125, 1.566254150390625, 1.56568359375, 1.5663575439453126, 1.566123046875, 1.5667579345703124, 1.5663963623046875, 1.5667947998046876, 1.5661865234375, 1.5662919921875, 1.5661158447265624, 1.5670897216796875, 1.56710302734375, 1.567573974609375, 1.5670128173828124, 1.5670006103515626, 1.5675535888671874, 1.568489501953125, 1.5677030029296875, 1.5673804931640625, 1.5675074462890626, 1.5675924072265626, 1.56819970703125, 1.56786181640625, 1.5679241943359374, 1.5680113525390624, 1.5678955078125, 1.56760888671875, 1.5664681396484375, 1.5662049560546876, 1.5662213134765626, 1.5661905517578125, 3.262426025390625, 1.5655628662109375, 1.56493212890625, 1.5661669921875, 1.5657431640625, 1.5655537109375, 1.5660482177734374, 1.5657359619140625, 1.5666063232421874, 1.5661004638671876, 1.5656785888671876, 1.5670230712890625, 1.5668284912109376, 1.5666944580078126, 1.566755859375, 1.566581787109375, 1.5667763671875, 1.5658946533203124, 1.5655372314453124, 1.5659530029296875, 1.565708251953125, 1.5669320068359376, 1.56744091796875, 1.566341064453125, 1.5660360107421876, 1.5659837646484376, 1.5658629150390626, 1.5660892333984375, 1.565887451171875, 1.56593359375, 1.5656898193359374, 1.5661455078125, 1.566086181640625, 1.5658076171875, 1.5659263916015624, 1.5658218994140625, 1.56609033203125, 1.56613525390625, 1.5673641357421875, 1.5665848388671875, 1.5666175537109375, 1.5664681396484375, 1.5664619140625, 1.566224365234375, 1.5664588623046876, 1.5661363525390626, 1.5667579345703124, 1.5667978515625, 1.5667178955078125, 1.5658916015625, 1.5658680419921875, 1.5657728271484375, 1.5670528564453126, 1.567161376953125, 1.5674490966796875, 1.567394775390625, 1.567477783203125, 1.56653369140625, 1.5658792724609376, 1.56665966796875, 1.5675699462890624, 1.56628076171875, 1.5665899658203124, 3.26359765625, 1.5656744384765624, 1.565328369140625, 1.566392333984375, 1.565917236328125, 1.566066650390625, 1.567247314453125, 1.56674560546875, 1.567057861328125, 1.566623779296875, 1.5667916259765624, 1.5669647216796876, 1.56680810546875, 1.5664117431640625, 1.566813232421875, 1.5673231201171876, 1.566376953125, 1.5661844482421876, 1.567056884765625, 1.567025146484375, 1.566773193359375, 1.5668214111328125, 1.5673548583984376, 1.568668701171875, 1.567552490234375, 1.566614501953125, 1.566899169921875, 1.567447021484375, 1.5675135498046875, 1.5669381103515625, 1.5667794189453126, 1.567139892578125, 1.5667916259765624, 1.566656494140625, 1.566287841796875, 1.5665806884765625, 1.5664046630859374, 1.5664854736328124, 1.56649169921875, 1.56577587890625, 1.5662652587890624, 1.5666688232421875, 1.5665521240234375, 1.56607080078125, 1.5668214111328125, 1.5663809814453125, 1.5665684814453125, 1.5666226806640624, 1.566630859375, 1.5663360595703124, 1.566734375, 1.5659100341796874, 1.566339111328125, 1.5664271240234375, 1.5667056884765624, 1.5661854248046876, 1.5660308837890624, 1.56668310546875, 1.5665264892578126, 1.5664742431640626, 1.5667752685546874, 1.5658568115234375, 1.566477294921875, 3.26474853515625, 1.5660062255859375, 1.5659560546875, 1.5659263916015624, 1.5657471923828126, 1.5661905517578125, 1.5662623291015625, 1.5657779541015624, 1.5659345703125, 1.5662991943359375, 1.5659427490234374, 1.565487060546875, 1.566613525390625, 1.56559765625, 1.56596533203125, 1.566318603515625, 1.566983154296875, 1.5657728271484375, 1.5661322021484374, 1.566572509765625, 1.5666114501953126, 1.5659100341796874, 1.56704150390625, 
1.56611376953125, 1.5677716064453124, 1.5673876953125, 1.5676068115234374, 1.5671654052734374, 1.5670262451171875, 1.5657738037109374, 1.5667547607421874, 1.5669217529296875, 1.5676558837890624, 1.56761083984375, 1.567972412109375, 1.567614990234375, 1.5675115966796875, 1.5676702880859374, 1.56752587890625, 1.567363037109375, 1.567826904296875, 1.567635498046875, 1.5676488037109375, 1.5662724609375, 1.5680296630859376, 1.567309814453125, 1.567677490234375, 1.567458251953125, 1.5668489990234375, 1.565865966796875, 1.566993408203125, 1.5673487548828124, 1.567581298828125, 1.566005126953125, 1.5679825439453126, 1.567009765625, 1.566161865234375, 1.5676138916015625, 1.56758935546875, 1.5667568359375, 1.5663258056640625, 1.566224365234375, 1.5670302734375, 3.26736279296875, 1.5667579345703124, 1.5664261474609376, 1.5665531005859374, 1.5668193359375, 1.566329833984375, 1.5663739013671876, 1.566214111328125, 1.5671173095703126, 1.565612060546875, 1.5653099365234375, 1.565854736328125, 1.5658486328125, 1.5660902099609375, 1.566256103515625, 1.5667547607421874, 1.5674736328125, 1.56900244140625, 1.5678065185546874, 1.567677490234375, 1.5673057861328126, 1.5676702880859374, 1.5678648681640626, 1.566625732421875, 1.566234619140625, 1.5665244140625, 1.565811767578125, 1.566467041015625, 1.5663380126953126, 1.5663995361328125, 1.56670458984375, 1.566562255859375, 1.56580859375, 1.5658076171875, 1.567499267578125, 1.566665771484375, 1.5675023193359374, 1.5676190185546874, 1.5666585693359374, 1.5654676513671875, 1.5674849853515624, 1.5671900634765625, 1.567220703125, 1.5676558837890624, 1.5666514892578125, 1.5663369140625, 1.5666441650390626, 1.566841796875, 1.566866455078125, 1.56685107421875, 1.567130615234375, 1.5664896240234376, 1.5665152587890625, 1.5667691650390625, 1.56735693359375, 1.5668797607421876, 1.5669544677734375, 1.565843505859375, 1.5657564697265625, 1.5662509765625, 1.5679661865234376, 1.567494140625, 1.5670753173828125, 3.26488671875, 1.56588232421875, 1.5666165771484375, 1.56659912109375, 1.5660400390625, 1.565919189453125, 1.5653775634765625, 1.5651685791015626, 1.5665255126953126, 1.5655526123046875, 1.56554443359375, 1.565875244140625, 1.565812744140625, 1.56569091796875, 1.56594384765625, 1.566340087890625, 1.565750244140625, 1.5661793212890625, 1.5670374755859375, 1.5664691162109374, 1.5666708984375, 1.5669124755859376, 1.5670743408203125, 1.565885498046875, 1.567392822265625, 1.56645068359375, 1.567561767578125, 1.5667598876953126, 1.566843994140625, 1.566246826171875, 1.56635546875, 1.5664219970703126, 1.565961181640625, 1.5663524169921874, 1.5675914306640626, 1.56609326171875, 1.5661405029296875, 1.5667711181640624, 1.566587890625, 1.5662694091796876, 1.566982177734375, 1.566482421875, 1.566496826171875, 1.5665531005859374, 1.5663370361328126, 1.5661158447265624, 1.566561279296875, 1.566202880859375, 1.566286865234375, 1.5659674072265626, 1.5661629638671875, 1.5660155029296876, 1.566634033203125, 1.5668284912109376, 1.5664476318359375, 1.5665438232421875, 1.5667864990234375, 1.5668736572265625, 1.56661865234375, 1.5671204833984376, 1.566720947265625, 1.5667332763671875, 1.566159912109375]",tokens/s,0.6286408862079101,,,main,False,False,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3203.223552,5128.060928,0.0,4481.613824,4276.256768,s,10,3.1983958740234373,0.31983958740234375,0.001538131138588488,0.31976911926269536,0.32137343139648433,0.3217927795410156,0.3221282580566406,"[0.31961990356445313, 0.3212218933105469, 0.3187176513671875, 0.3199183349609375, 0.3183829040527344, 0.3169226989746094, 0.31905938720703125, 0.32106072998046875, 0.32128024291992185, 0.32221212768554686]",tokens/s,800.4012326277909,kWh,3.750147018581629e-06,2.054515901244258e-06,1.6957018773937002e-05,2.276168169376289e-05,tokens/kWh,11246972.14574214,MB,3203.223552,5128.060928,0.0,4481.613824,4465.662976,s,10,186.97800781249998,18.69780078125,0.017296888320766722,18.693263671875002,18.720369140625,18.729755859375,18.737265234375002,"[18.718283203125, 18.689103515625, 18.695333984375, 18.678984375, 18.739142578125, 18.694673828125, 18.691853515625, 18.6855234375, 18.701947265625, 18.683162109375]",tokens/s,3.369380214125283,kWh,0.00022053018352016812,0.00012086672241809653,0.0009756264419588673,0.001317023347897132,tokens/kWh,47835.14286257035,,s,629,189.55983416748046,0.3013669859578386,0.038019298519587436,0.29649102783203124,0.2979925964355469,0.29949728393554687,0.6154752392578126,"[0.29722113037109377, 0.2962104187011719, 0.2994401550292969, 0.3005357666015625, 0.30041702270507814, 0.2999255065917969, 0.30030029296875, 0.3005870056152344, 0.30005453491210937, 0.3003166809082031, 0.2982318115234375, 0.29764404296875, 0.29678591918945313, 0.2977320861816406, 0.2972508239746094, 0.2988697509765625, 0.29594931030273436, 0.29598825073242185, 0.29595953369140626, 0.2961408081054687, 0.29663333129882813, 0.2959800415039063, 0.2984028015136719, 0.29736856079101565, 0.29788876342773435, 0.2962769775390625, 0.2963660888671875, 0.2973767700195312, 0.2967070617675781, 0.2959728698730469, 0.2965258178710938, 0.29611212158203126, 0.2976409606933594, 0.2969599914550781, 0.2967398376464844, 0.2962391052246094, 0.29641522216796873, 0.2959902648925781, 0.29629336547851565, 0.29671832275390625, 0.29604147338867187, 0.2959872131347656, 0.2960855102539062, 0.2966855773925781, 0.29605990600585935, 0.29604147338867187, 0.29675726318359374, 0.29607012939453126, 0.2964500427246094, 0.29650942993164064, 0.2962022399902344, 0.2960516967773438, 0.29594830322265625, 0.296783935546875, 0.296870849609375, 0.29710232543945314, 0.29665484619140625, 0.29788363647460936, 0.29600152587890627, 0.29640191650390624, 0.29616229248046877, 0.2960711669921875, 0.6185267333984374, 0.29601278686523436, 0.29589199829101565, 0.29706646728515623, 0.29594418334960937, 0.29600460815429686, 0.29592166137695314, 0.2975078430175781, 0.2961551208496094, 0.29602200317382815, 0.2963240966796875, 0.2960445556640625, 0.2958970947265625, 0.2962135009765625, 0.29608447265625, 0.29607730102539065, 0.29639883422851565, 0.29659750366210935, 0.29723443603515626, 0.29676031494140626, 0.2970972290039062, 
0.296911865234375, 0.29686373901367186, 0.29711358642578123, 0.29724774169921875, 0.29758157348632813, 0.29762457275390625, 0.2963056640625, 0.29703271484375, 0.29604864501953126, 0.2962616271972656, 0.296153076171875, 0.2962769775390625, 0.2961919860839844, 0.2968739929199219, 0.29633843994140624, 0.2965504150390625, 0.29617355346679686, 0.29583154296875, 0.2962042846679688, 0.2970060729980469, 0.2965278625488281, 0.29644491577148435, 0.29606298828125, 0.29650225830078125, 0.2965186462402344, 0.3018721313476562, 0.29692620849609375, 0.2963681640625, 0.29674798583984374, 0.296806396484375, 0.29622067260742185, 0.2960076904296875, 0.2968924255371094, 0.2960343017578125, 0.2965831604003906, 0.2970480651855469, 0.29702047729492187, 0.296217529296875, 0.29626266479492186, 0.29842022705078125, 0.29761843872070315, 0.29686373901367186, 0.6160445556640625, 0.2960199890136719, 0.2965903015136719, 0.2964684753417969, 0.29644390869140624, 0.2965688171386719, 0.2976983032226562, 0.29753759765625, 0.29599737548828126, 0.29653094482421877, 0.2970838928222656, 0.2961479797363281, 0.29596878051757813, 0.29587353515625, 0.2960445556640625, 0.2966005859375, 0.2975467529296875, 0.2962995300292969, 0.2958847961425781, 0.2957864990234375, 0.2960732116699219, 0.295973876953125, 0.2962944030761719, 0.29851339721679687, 0.295984130859375, 0.29607730102539065, 0.2961203308105469, 0.2960425109863281, 0.29593295288085936, 0.29733270263671874, 0.29703067016601564, 0.29734808349609376, 0.29631283569335937, 0.296816650390625, 0.29681048583984376, 0.296489990234375, 0.29625653076171876, 0.29661590576171876, 0.29632000732421876, 0.2966937561035156, 0.2978652038574219, 0.2965299072265625, 0.2963240966796875, 0.29751806640625, 0.29830859375, 0.297481201171875, 0.29719039916992185, 0.29953536987304685, 0.29737368774414064, 0.29692108154296876, 0.296585205078125, 0.296052734375, 0.29637939453125, 0.29625753784179687, 0.29655654907226564, 0.29799423217773435, 0.2968268737792969, 0.29685043334960937, 0.29692108154296876, 0.29714944458007814, 0.29722930908203127, 0.2967623596191406, 0.297270263671875, 0.6168934326171875, 0.29688626098632814, 0.296279052734375, 0.29670297241210936, 0.2962923583984375, 0.29618795776367185, 0.2966793518066406, 0.2960650329589844, 0.29615924072265626, 0.29594418334960937, 0.2973972473144531, 0.29612338256835935, 0.2971064453125, 0.2982297668457031, 0.29685043334960937, 0.2960855102539062, 0.296089599609375, 0.29661489868164065, 0.2965350341796875, 0.29597695922851563, 0.2961705017089844, 0.29597695922851563, 0.29592276000976564, 0.29643975830078123, 0.29586328125, 0.29596466064453125, 0.2959923095703125, 0.2958981018066406, 0.29643777465820315, 0.2959923095703125, 0.29621759033203127, 0.2963650512695313, 0.29609677124023437, 0.29788980102539064, 0.2977914733886719, 0.29713613891601565, 0.2964951171875, 0.29727642822265626, 0.2966794128417969, 0.2964142150878906, 0.2965821533203125, 0.2960855102539062, 0.2957701416015625, 0.2966097717285156, 0.29938388061523435, 0.29615609741210935, 0.29603640747070314, 0.2962728271484375, 0.29596978759765624, 0.2960455627441406, 0.296300537109375, 0.29595040893554686, 0.29649197387695314, 0.29635379028320313, 0.296342529296875, 0.29667330932617186, 0.2967008972167969, 0.29659442138671877, 0.29601690673828124, 0.29651455688476563, 0.29645925903320314, 0.2973665466308594, 0.29616226196289064, 0.615267333984375, 0.29617767333984374, 0.29771673583984376, 0.29707980346679685, 0.298039306640625, 0.297238525390625, 0.2971115417480469, 0.29701837158203126, 
0.29682891845703124, 0.2972641296386719, 0.2971668395996094, 0.29752627563476564, 0.2972252197265625, 0.29701632690429686, 0.29692620849609375, 0.29619302368164063, 0.2964234619140625, 0.2987468566894531, 0.296131591796875, 0.2958981018066406, 0.29623501586914064, 0.29586944580078123, 0.295846923828125, 0.29605377197265625, 0.29625753784179687, 0.29595443725585935, 0.29606195068359376, 0.29590631103515624, 0.29597491455078123, 0.29599948120117187, 0.296310791015625, 0.2964664306640625, 0.29622885131835935, 0.2963630065917969, 0.29594317626953126, 0.2961397705078125, 0.29812017822265624, 0.3001466979980469, 0.3003924560546875, 0.3000289306640625, 0.29983538818359373, 0.29985791015625, 0.29986712646484376, 0.29981594848632814, 0.30056344604492186, 0.29986407470703125, 0.2999111633300781, 0.29782833862304686, 0.2960773315429687, 0.2960137939453125, 0.29763687133789063, 0.2979921875, 0.2981877746582031, 0.2967510986328125, 0.29627288818359376, 0.29777716064453125, 0.2970634155273438, 0.30057470703125, 0.2982738037109375, 0.2991800231933594, 0.30042315673828124, 0.2963568725585938, 0.2961418151855469, 0.6139617309570312, 0.29619406127929687, 0.29618585205078124, 0.29586431884765624, 0.296079345703125, 0.29643060302734375, 0.2977740783691406, 0.2977832946777344, 0.2964633483886719, 0.29604147338867187, 0.2960609130859375, 0.29627084350585936, 0.29602822875976564, 0.2961315307617188, 0.29630160522460935, 0.29599432373046874, 0.2961469421386719, 0.296531982421875, 0.2960977783203125, 0.2960373840332031, 0.2965381164550781, 0.2960506896972656, 0.29609063720703127, 0.2962063293457031, 0.2960169677734375, 0.29643975830078123, 0.2966312866210937, 0.2963077087402344, 0.2960865173339844, 0.2961162109375, 0.2960281677246094, 0.2961766357421875, 0.296352783203125, 0.2976133117675781, 0.29756414794921876, 0.29727435302734373, 0.29644595336914065, 0.2974146423339844, 0.30141543579101565, 0.2976788330078125, 0.2980526123046875, 0.2980894775390625, 0.29788058471679685, 0.2963497009277344, 0.29696920776367186, 0.29795635986328123, 0.29711566162109376, 0.29751296997070314, 0.29756314086914065, 0.2974320678710938, 0.29744332885742186, 0.29654324340820315, 0.29747711181640624, 0.2984744873046875, 0.29649203491210935, 0.2962442321777344, 0.2966384582519531, 0.2963189697265625, 0.29589913940429685, 0.2962176208496094, 0.2958837585449219, 0.29590936279296876, 0.29634048461914064, 0.6155560913085938, 0.29629541015625, 0.2960916442871094, 0.2964561767578125, 0.29645208740234374, 0.2960373840332031, 0.2963875732421875, 0.2963620300292969, 0.2962861633300781, 0.29651251220703123, 0.29747915649414064, 0.2981519470214844, 0.29662823486328127, 0.2976133117675781, 0.29637530517578126, 0.29608038330078124, 0.29605682373046877, 0.2960496520996094, 0.29625140380859377, 0.29587966918945313, 0.2968821716308594, 0.29635174560546873, 0.29616537475585936, 0.3003904113769531, 0.2970214538574219, 0.2966312866210937, 0.29685556030273436, 0.29636813354492186, 0.29714739990234373, 0.29626266479492186, 0.29595135498046876, 0.2962196350097656, 0.29621759033203127, 0.29663333129882813, 0.2961408081054687, 0.2965729370117188, 0.2961131591796875, 0.2963865661621094, 0.296479736328125, 0.296900634765625, 0.2964776611328125, 0.296848388671875, 0.29701937866210937, 0.29931622314453127, 0.29786627197265625, 0.2962636413574219, 0.29623806762695315, 0.2966210632324219, 0.2961336364746094, 0.2963711853027344, 0.29625140380859377, 0.2967357482910156, 0.29616845703125, 0.2963599853515625, 0.29687493896484374, 0.296300537109375, 0.2967142333984375, 
0.29815499877929685, 0.29702349853515625, 0.29747610473632813, 0.296627197265625, 0.29662515258789063, 0.2977822570800781, 0.6183147583007812, 0.29672344970703124, 0.296764404296875, 0.29664154052734376, 0.2967080993652344, 0.29716583251953127, 0.29694155883789064, 0.2967244873046875, 0.2972221374511719, 0.2974392395019531, 0.2972866516113281, 0.2969518127441406, 0.2982625427246094, 0.296279052734375, 0.29735015869140624, 0.2961131591796875, 0.2980966491699219, 0.29623806762695315, 0.29628826904296873, 0.2956605529785156, 0.29607012939453126, 0.2962001953125, 0.29612442016601564, 0.29620736694335936, 0.29724774169921875, 0.2960086975097656, 0.297548828125, 0.2967602844238281, 0.29649102783203124, 0.2963619384765625, 0.2962708740234375, 0.29639984130859376, 0.2968350830078125, 0.2963292236328125, 0.296099853515625, 0.29622784423828125, 0.29627093505859375, 0.296197021484375, 0.2974996337890625, 0.2962104187011719, 0.2960639953613281, 0.29599948120117187, 0.2959902648925781, 0.2965739440917969, 0.2961817626953125, 0.2968320007324219, 0.29708084106445315, 0.2968320007324219, 0.29710540771484373, 0.2960558166503906, 0.2963138427734375, 0.29637326049804685, 0.29592166137695314, 0.2962083740234375, 0.29609573364257813, 0.2962114562988281, 0.2961203308105469, 0.29699481201171873, 0.29659442138671877, 0.29610906982421875, 0.29717401123046877, 0.29671218872070315, 0.2965350341796875, 0.61785498046875, 0.29675213623046875, 0.29679409790039063, 0.29726925659179687, 0.29639578247070314, 0.29744537353515627, 0.2977556457519531, 0.29687704467773435, 0.2974320678710938, 0.297416748046875, 0.29766244506835937, 0.29807000732421873, 0.2972119140625, 0.2973388671875, 0.2967439270019531, 0.2972999572753906, 0.2976030578613281, 0.2986280822753906, 0.29623602294921875, 0.29600152587890627, 0.2964695739746094, 0.2963936767578125, 0.2961868896484375, 0.29702349853515625, 0.29616024780273437, 0.29621148681640624, 0.2964879150390625, 0.29709210205078124, 0.2963056640625, 0.2973767700195312, 0.2969722900390625, 0.29783346557617185, 0.2965718994140625, 0.2977576904296875, 0.2961418151855469, 0.2967091064453125, 0.2973644714355469, 0.29671218872070315, 0.29666201782226564, 0.296838134765625, 0.2965801086425781, 0.2962391052246094, 0.296089599609375, 0.2963804016113281, 0.29701119995117187, 0.2961469421386719, 0.29617767333984374, 0.296384521484375, 0.296369140625, 0.29728460693359376, 0.2963015747070312, 0.2967838745117187, 0.29621554565429686, 0.2963097534179687, 0.29686373901367186, 0.2963947448730469, 0.2962995300292969, 0.29706649780273436, 0.2967142333984375, 0.29672857666015623, 0.2966722412109375, 0.2982359008789062, 0.2979543151855469, 0.6189783325195313, 0.2962237548828125, 0.2969938049316406, 0.29632614135742186, 0.2961541748046875, 0.2965380554199219, 0.2963343505859375, 0.29643777465820315, 0.29634866333007814, 0.2962872314453125, 0.2963630065917969, 0.29629644775390623, 0.29665484619140625, 0.296310791015625, 0.29620120239257813, 0.29598513793945314, 0.29617767333984374, 0.2959667053222656, 0.2962749328613281, 0.2967930908203125, 0.29673779296875, 0.29692825317382815, 0.29633331298828125, 0.29688934326171873, 0.2962821044921875, 0.29612954711914063, 0.2987386779785156, 0.2962083740234375, 0.296374267578125, 0.2962135009765625, 0.29653402709960935, 0.2968299560546875, 0.2963097534179687, 0.2981970520019531, 0.2961192321777344, 0.2962135009765625, 0.2961069946289063, 0.295920654296875, 0.2961459655761719, 0.29606500244140627, 0.29621453857421876, 0.29600564575195315, 0.29583566284179685, 0.2993377380371094, 
0.29691802978515625, 0.2961561584472656, 0.2958428039550781, 0.2965329895019531, 0.29672549438476564, 0.29595547485351564, 0.2960772705078125, 0.29593496704101563, 0.2966548767089844, 0.2969466552734375, 0.2978447265625, 0.296627197265625, 0.29650433349609373, 0.2975867004394531, 0.2982461853027344, 0.29639166259765626, 0.29643161010742186, 0.29704702758789064, 0.29672344970703124]",tokens/s,3.3182134958203435,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1868.201984,3349.676032,0.0,2703.228928,2578.238464,s,10,1.410495346069336,0.1410495346069336,0.0017689689177857657,0.14056432342529296,0.14309543151855467,0.14402269134521484,0.14476449920654297,"[0.144949951171875, 0.1412518768310547, 0.1397279357910156, 0.13956182861328126, 0.13982972717285155, 0.13892410278320313, 0.13987677001953125, 0.14288937377929686, 0.1413212127685547, 0.14216256713867187]",tokens/s,1814.9652227737006,kWh,1.6439394082552122e-06,9.00797525271931e-07,6.874183740083411e-06,9.418920673610555e-06,tokens/kWh,27179334.96533712,MB,1868.201984,3349.676032,0.0,2703.228928,2667.098624,s,10,83.44500195312501,8.3445001953125,0.03302115511867202,8.3336259765625,8.37593193359375,8.403280419921876,8.425159208984375,"[8.33203515625, 8.335076171875, 8.333888671875, 8.3144501953125, 8.32368359375, 8.3150146484375, 8.3698544921875, 8.43062890625, 8.3570068359375, 8.33336328125]",tokens/s,7.549882979856609,kWh,9.832901556366757e-05,5.38916025818177e-05,0.0003962753054459165,0.0005484959235914018,tokens/kWh,114859.55918777513,,s,629,84.57831109619147,0.13446472352335676,0.016756456539684012,0.13199974060058595,0.1339588592529297,0.13428590698242188,0.27203026611328124,"[0.131915771484375, 0.13208883666992188, 0.13340570068359375, 0.13261517333984374, 0.1320437774658203, 0.13189222717285157, 0.13191372680664062, 0.13180621337890625, 0.13191270446777345, 0.1318174743652344, 0.13188607788085938, 0.13165977478027344, 0.13220761108398438, 0.13279539489746095, 0.132642822265625, 0.1319086151123047, 0.13183590698242187, 0.13183692932128907, 0.13191372680664062, 0.1319710693359375, 0.13185536193847655, 0.13166387939453125, 0.13191888427734375, 0.13172015380859375, 0.1317058563232422, 0.13175704956054687, 0.1318830108642578, 0.13170687866210937, 0.1325506591796875, 0.13195468139648436, 0.13218412780761718, 0.13190444946289062, 0.1321625671386719, 0.13238067626953126, 0.13378150939941405, 0.13233255004882813, 0.13199667358398437, 0.13365965270996094, 0.13226495361328125, 0.13205401611328124, 0.13234483337402345, 0.13211033630371094, 0.13187481689453126, 0.13180210876464843, 0.13335859680175782, 0.13234176635742187, 0.13278311157226563, 0.13272679138183593, 0.1324042205810547, 0.13250457763671875, 0.13250662231445312, 0.13336883544921874, 0.13242880249023437, 0.13245030212402345, 0.13245234680175783, 0.13265408325195313, 0.1332162628173828, 0.13244825744628907, 0.1322782745361328, 0.13222093200683593, 0.13194239807128907, 
0.1318461456298828, 0.2743060607910156, 0.13452493286132813, 0.13292338562011718, 0.13269094848632812, 0.13221580505371094, 0.13191372680664062, 0.13204888916015625, 0.1319393310546875, 0.13207244873046875, 0.13193624877929688, 0.1332346954345703, 0.13184307861328126, 0.13165977478027344, 0.13174169921875, 0.1319331817626953, 0.13257011413574218, 0.13245234680175783, 0.13182669067382813, 0.13220249938964843, 0.1319772186279297, 0.13190451049804688, 0.13176934814453126, 0.13176832580566405, 0.13202943420410157, 0.131842041015625, 0.13187992858886718, 0.13245132446289062, 0.1319014434814453, 0.13174374389648438, 0.13188096618652342, 0.1329971160888672, 0.1325875244140625, 0.1321922607421875, 0.13208473205566407, 0.1319751739501953, 0.13210829162597656, 0.132173828125, 0.1320202178955078, 0.13244108581542968, 0.1323520050048828, 0.13193624877929688, 0.1318707275390625, 0.13215335083007812, 0.1319833526611328, 0.13199871826171874, 0.13258546447753905, 0.1363056640625, 0.13410917663574218, 0.132347900390625, 0.13224557495117187, 0.13346809387207031, 0.1320499267578125, 0.1319823303222656, 0.13196595764160157, 0.13193113708496093, 0.13199974060058595, 0.13258444213867188, 0.13302787780761718, 0.13199562072753906, 0.13191885375976561, 0.13196493530273437, 0.1318461456298828, 0.13196800231933595, 0.27239935302734375, 0.1318348846435547, 0.13182975769042968, 0.13207449340820313, 0.13185638427734375, 0.1317724151611328, 0.13244415283203126, 0.13195263671875, 0.1320099792480469, 0.13171302795410156, 0.13188505554199217, 0.13182054138183594, 0.13208575439453124, 0.1338101806640625, 0.13198439025878905, 0.13201919555664063, 0.13190348815917968, 0.13199871826171874, 0.13165977478027344, 0.13169357299804688, 0.13205708312988282, 0.13223014831542967, 0.13255679321289063, 0.13336166381835937, 0.1323274230957031, 0.1321175079345703, 0.13262745666503906, 0.13243597412109376, 0.13281074523925782, 0.13219737243652344, 0.13270425415039064, 0.1319403839111328, 0.13225782775878905, 0.13328172302246094, 0.13259266662597657, 0.13259056091308594, 0.13216152954101562, 0.1320622100830078, 0.13242469787597655, 0.1341696014404297, 0.13195263671875, 0.13167718505859374, 0.13170278930664062, 0.13178675842285156, 0.13180108642578126, 0.13196902465820312, 0.13176319885253907, 0.1331517791748047, 0.1319188232421875, 0.13173248291015624, 0.13183795166015624, 0.13228440856933593, 0.1320273895263672, 0.13176934814453126, 0.13183897399902345, 0.13179600524902343, 0.13171708679199218, 0.13180108642578126, 0.13352243041992187, 0.1350635528564453, 0.13252915954589845, 0.13388493347167968, 0.1324451904296875, 0.2718658447265625, 0.1323663330078125, 0.1318041534423828, 0.1317375946044922, 0.13224755859375, 0.13172837829589842, 0.13183692932128907, 0.13180825805664062, 0.13196493530273437, 0.13201612854003905, 0.13178060913085937, 0.13186866760253907, 0.13176422119140624, 0.13189222717285157, 0.13185331726074218, 0.13174476623535156, 0.13189631652832032, 0.1319833526611328, 0.13183180236816405, 0.13173965454101563, 0.13178880310058594, 0.13191474914550783, 0.13178163146972657, 0.13190553283691406, 0.132674560546875, 0.13195161437988281, 0.13180313110351563, 0.13180108642578126, 0.13176524353027344, 0.13174578857421876, 0.1317232666015625, 0.13185740661621093, 0.1315359344482422, 0.13187271118164062, 0.13189427185058594, 0.13176217651367186, 0.13183999633789062, 0.131842041015625, 0.13198028564453124, 0.1318461456298828, 0.13185740661621093, 0.13189222717285157, 0.1318656005859375, 0.13177548217773438, 0.13433549499511718, 
0.1318778839111328, 0.13178163146972657, 0.1320099792480469, 0.13189529418945312, 0.13173043823242186, 0.13199974060058595, 0.1317908477783203, 0.13167718505859374, 0.1318707580566406, 0.1319085693359375, 0.1320079345703125, 0.1317375946044922, 0.1329776611328125, 0.13234278869628907, 0.13224960327148438, 0.13266841125488282, 0.13238578796386719, 0.13234994506835937, 0.2727505798339844, 0.13219532775878906, 0.1322977294921875, 0.1321318359375, 0.13251072692871094, 0.13448908996582032, 0.13275852966308593, 0.1344153594970703, 0.13294796752929688, 0.1335828552246094, 0.13194137573242187, 0.1318604736328125, 0.13178469848632812, 0.13185023498535156, 0.13183282470703125, 0.13243084716796874, 0.13204582214355468, 0.13287628173828125, 0.13193522644042968, 0.13247999572753907, 0.13198028564453124, 0.13178880310058594, 0.1318225860595703, 0.13189529418945312, 0.1317969970703125, 0.1317898254394531, 0.13174783325195313, 0.13180825805664062, 0.1317611541748047, 0.13181951904296876, 0.13173043823242186, 0.1318901824951172, 0.1316822967529297, 0.1317908477783203, 0.13169664001464843, 0.1318656005859375, 0.1333289031982422, 0.1319536590576172, 0.13187890625, 0.13186457824707032, 0.13181951904296876, 0.1317969970703125, 0.1317580871582031, 0.13171200561523438, 0.13176934814453126, 0.13164851379394532, 0.13176524353027344, 0.13194239807128907, 0.13188812255859375, 0.13191270446777345, 0.1322117156982422, 0.13192909240722656, 0.13183999633789062, 0.1317693786621094, 0.13185020446777343, 0.13187583923339843, 0.1317611541748047, 0.13403237915039062, 0.13205503845214844, 0.13186151123046874, 0.13183795166015624, 0.13185536193847655, 0.13172531127929688, 0.2711296081542969, 0.13194137573242187, 0.131852294921875, 0.13176217651367186, 0.13171609497070313, 0.13177548217773438, 0.13163827514648438, 0.1318164520263672, 0.1318707275390625, 0.13187277221679689, 0.13203762817382814, 0.13187174987792968, 0.13186968994140624, 0.13186866760253907, 0.13186972045898437, 0.13196592712402344, 0.1320099792480469, 0.1333217315673828, 0.13214207458496094, 0.13328793334960937, 0.13255679321289063, 0.13287628173828125, 0.1319505920410156, 0.13182566833496093, 0.13165158081054687, 0.13177650451660156, 0.13287423706054688, 0.13261311340332033, 0.13195878601074218, 0.1316505584716797, 0.1316495361328125, 0.13174989318847657, 0.13171302795410156, 0.13187686157226564, 0.13190553283691406, 0.13175296020507812, 0.13174989318847657, 0.13191474914550783, 0.13393820190429687, 0.13187989807128905, 0.13174681091308593, 0.1316864013671875, 0.13167205810546875, 0.13172019958496095, 0.13174783325195313, 0.1315952606201172, 0.13168435668945314, 0.13165362548828125, 0.13327769470214842, 0.13195161437988281, 0.1319024963378906, 0.1318594207763672, 0.13180313110351563, 0.13170381164550782, 0.13171507263183593, 0.13190451049804688, 0.13232640075683594, 0.13192294311523436, 0.13172940063476563, 0.1317406768798828, 0.13174578857421876, 0.13170381164550782, 0.13169561767578125, 0.2720942077636719, 0.1323653106689453, 0.1319772186279297, 0.13174887084960937, 0.1318461456298828, 0.13169049072265626, 0.13180825805664062, 0.13170994567871094, 0.13199974060058595, 0.13190348815917968, 0.13172940063476563, 0.13172735595703125, 0.13171612548828124, 0.13159523010253907, 0.13188914489746092, 0.13179904174804688, 0.13198130798339844, 0.13195578002929687, 0.13164845275878906, 0.13183590698242187, 0.13194752502441406, 0.13176524353027344, 0.13180621337890625, 0.13325619506835937, 0.1319086456298828, 0.13182666015625, 0.13163929748535155, 0.13185125732421876, 
0.1320396728515625, 0.13438668823242186, 0.13385317993164061, 0.13466111755371094, 0.13423411560058593, 0.133897216796875, 0.1338357696533203, 0.1337620849609375, 0.13375177001953126, 0.1338419189453125, 0.1338470458984375, 0.13377127075195314, 0.13373440551757812, 0.1337507781982422, 0.13362892150878905, 0.13362687683105468, 0.133718017578125, 0.1337139129638672, 0.13433139038085937, 0.1335930938720703, 0.13229158020019532, 0.13321932983398438, 0.1331998748779297, 0.1333289031982422, 0.13277183532714842, 0.13196185302734376, 0.13300531005859376, 0.13356031799316406, 0.13304013061523437, 0.13298892211914062, 0.1336432647705078, 0.13445120239257813, 0.13455258178710938, 0.1344040985107422, 0.13423922729492188, 0.27732583618164064, 0.1339084777832031, 0.13382144165039062, 0.1346867218017578, 0.13416653442382812, 0.13392076110839843, 0.13295001220703126, 0.13354495239257813, 0.13410508728027343, 0.1338173370361328, 0.13408869934082032, 0.1340200958251953, 0.1339043884277344, 0.13345074462890624, 0.1340712890625, 0.133653564453125, 0.1348218231201172, 0.13366886901855468, 0.13357466125488282, 0.1335029754638672, 0.1333729248046875, 0.13342617797851564, 0.1340518341064453, 0.1341563262939453, 0.13418496704101562, 0.13429244995117187, 0.13391769409179688, 0.13419314575195312, 0.13331149291992186, 0.13398121643066407, 0.1340333709716797, 0.13403135681152345, 0.13413682556152343, 0.13416447448730467, 0.1341071319580078, 0.13356748962402343, 0.13255885314941407, 0.1329449005126953, 0.13408767700195312, 0.13382655334472657, 0.13446556091308592, 0.134451171875, 0.1342740478515625, 0.13432524108886718, 0.13397196960449217, 0.133928955078125, 0.13388800048828126, 0.13408154296875, 0.1339463653564453, 0.13414707946777343, 0.1341204528808594, 0.1346938934326172, 0.13456895446777345, 0.13381427001953125, 0.13382553100585937, 0.13402316284179688, 0.13395558166503907, 0.13448602294921874, 0.13234994506835937, 0.13215437316894532, 0.13213081359863282, 0.13221376037597657, 0.13244825744628907, 0.27383807373046876, 0.13208677673339844, 0.1318748779296875, 0.13186143493652344, 0.1319342041015625, 0.1316986846923828, 0.13210009765625, 0.13194956970214844, 0.1318338623046875, 0.13179904174804688, 0.13189427185058594, 0.1324390411376953, 0.1331261444091797, 0.13414399719238282, 0.13440205383300782, 0.13397196960449217, 0.13388394165039064, 0.13371900939941406, 0.13379379272460937, 0.1336944580078125, 0.13435395812988282, 0.133012451171875, 0.1330391082763672, 0.13337496948242186, 0.13211033630371094, 0.1332316131591797, 0.1331374053955078, 0.1329827880859375, 0.13283839416503906, 0.13333401489257812, 0.1330401611328125, 0.1329510040283203, 0.13266636657714845, 0.132642822265625, 0.1325813751220703, 0.1328547821044922, 0.13313536071777343, 0.13244313049316406, 0.13259365844726562, 0.13253018188476562, 0.1329459228515625, 0.13234585571289062, 0.13299200439453124, 0.13250764465332032, 0.13244210815429688, 0.13175296020507812, 0.1317580871582031, 0.1318338623046875, 0.1318666229248047, 0.1317580871582031, 0.13186358642578125, 0.13192803955078125, 0.13181849670410156, 0.13414501953125, 0.13427609252929687, 0.13285580444335937, 0.13174476623535156, 0.1318656005859375, 0.13187277221679689, 0.13214002990722656, 0.13324185180664064, 0.13186151123046874, 0.13166592407226563, 0.27445761108398437, 0.13168025207519532, 0.1318492126464844, 0.13196595764160157, 0.13203353881835939, 0.13289573669433594, 0.13374771118164064, 0.13365350341796875, 0.13377433776855469, 0.13357772827148437, 0.13372621154785155, 0.1335900115966797, 
0.1336494140625, 0.13360946655273437, 0.1339095001220703, 0.13365863037109374, 0.13385317993164061, 0.1333053436279297, 0.13203660583496094, 0.13213388061523437, 0.13183897399902345, 0.132063232421875, 0.13191372680664062, 0.13187992858886718, 0.13165875244140626, 0.1317560272216797, 0.13234994506835937, 0.13201510620117188, 0.13194342041015625, 0.13241856384277345, 0.13182566833496093, 0.1321871337890625, 0.13201715087890625, 0.13212979125976562, 0.13185331726074218, 0.13186764526367187, 0.1317969970703125, 0.13172940063476563, 0.13185433959960938, 0.1317959747314453, 0.13192909240722656, 0.1318144073486328, 0.1319772186279297, 0.13174783325195313, 0.13176422119140624, 0.13171916198730468, 0.13182464599609375, 0.1319086151123047, 0.13198745727539063, 0.13206431579589845, 0.13207244873046875, 0.1318757781982422, 0.13179391479492186, 0.13187379455566406, 0.13178060913085937, 0.1318113250732422, 0.13175196838378905, 0.13170889282226564, 0.13177754211425782, 0.13175091552734375, 0.1317611541748047, 0.13186968994140624, 0.1329213409423828]",tokens/s,7.43689477654188,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise 
ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2170.855424,7227.31008,0.0,6580.862976,6226.036224,s,10,5.757403198242186,0.5757403198242188,0.0009922788511109175,0.5756337280273438,0.5767303344726562,0.5773405700683594,0.5778287585449219,"[0.5763906860351562, 0.5754805908203126, 0.5742902221679688, 0.5758182373046875, 0.5751873168945313, 0.574752685546875, 0.575786865234375, 0.5751510620117187, 0.5779508056640625, 0.5765947265625]",tokens/s,444.6449053249566,kWh,6.784125003549789e-06,3.717420948032668e-06,3.101488592300367e-05,4.151643187458613e-05,tokens/kWh,6166233.1862557735,MB,2170.855424,7227.31008,0.0,6580.862976,6478.658048,s,10,336.90312500000005,33.6903125,0.0038578817961585758,33.690537109375,33.6951453125,33.6965375,33.69765125,"[33.6891484375, 33.6979296875, 33.68431640625, 33.69015234375, 33.69138671875, 33.68830859375, 33.690921875, 33.6910078125, 33.6948359375, 33.6851171875]",tokens/s,1.8699737498724598,kWh,0.00039786197528243067,0.0002180628312197768,0.001800652746076575,0.0024165775525787823,tokens/kWh,26069.926840448938,,s,629,341.54916522216826,0.5430034423245914,0.06841490320905963,0.5347440795898437,0.5353432861328125,0.5355950317382813,1.1101828955078124,"[0.5346764526367187, 0.534593505859375, 0.53607421875, 0.5346488037109375, 0.5349376220703125, 0.5348045043945312, 0.5348710327148437, 0.534361083984375, 0.5351505737304687, 0.5344501953125, 0.5342750854492188, 0.5343682861328125, 0.5348853759765625, 0.5342218017578125, 0.5351854248046874, 0.5346846923828125, 0.5351168212890625, 0.5340159912109375, 0.5345996704101562, 0.5340569458007812, 0.5348679809570313, 0.5342392578125, 0.5346948852539063, 0.5346427001953125, 0.5350113525390625, 0.5345361938476563, 0.534276123046875, 0.5349284057617187, 0.5341531982421875, 0.5347072143554688, 0.534413330078125, 0.5346447143554688, 0.5343323974609375, 0.534677490234375, 0.5346836547851562, 0.5348843383789063, 0.5341747436523437, 0.53469287109375, 0.5349488525390625, 0.5349376220703125, 0.5349048461914062, 0.5350922241210937, 0.5349293823242187, 0.5348905029296875, 0.5348187866210937, 0.5352499389648437, 0.5351372680664063, 0.5351874389648438, 0.5344020385742188, 0.5347307739257813, 0.534319091796875, 0.5348607788085937, 0.53452490234375, 0.5352724609375, 0.5355673828125, 0.53477685546875, 0.5353441162109375, 0.5347553100585938, 0.535225341796875, 0.5340364990234375, 0.5348362426757812, 0.53452392578125, 1.1103118896484374, 0.5353318481445313, 0.5341173706054687, 0.5345740966796875, 0.5343201293945312, 0.5346703491210938, 0.5341255493164062, 0.5354373168945312, 0.534645751953125, 0.5345771484375, 0.5340641479492187, 0.535025634765625, 0.5342791748046875, 
0.5352877807617188, 0.5341102294921874, 0.5349335327148438, 0.53408154296875, 0.5348935546875, 0.5341614379882812, 0.5348187866210937, 0.5348075561523438, 0.5348945922851562, 0.5341737060546875, 0.5358602294921875, 0.5343109130859375, 0.53522021484375, 0.5344286499023437, 0.5350133666992187, 0.5340877075195313, 0.5351638793945312, 0.5342811889648438, 0.535573486328125, 0.5346611328125, 0.5351854248046874, 0.5346078491210937, 0.53532568359375, 0.5343160400390625, 0.53486181640625, 0.5339535522460938, 0.5352847290039062, 0.5343467407226562, 0.5350491943359375, 0.5341419677734375, 0.5348751220703125, 0.534150146484375, 0.5347573852539063, 0.5344307250976562, 0.5354004516601563, 0.5344286499023437, 0.5352489013671875, 0.5350963134765625, 0.5354454956054687, 0.537660400390625, 0.5361489868164062, 0.53553662109375, 0.5356922607421875, 0.5343662109375, 0.5351168212890625, 0.535394287109375, 0.5363414916992187, 0.5349365844726562, 0.5353850708007812, 0.5346826171875, 1.110350830078125, 0.5346856689453126, 0.5343385620117187, 0.5345433349609375, 0.5344020385742188, 0.5345515747070313, 0.5340989379882812, 0.5348003540039062, 0.5341388549804688, 0.53479833984375, 0.5344071655273438, 0.5343672485351563, 0.5352960205078126, 0.5340405883789062, 0.5349048461914062, 0.5344491577148438, 0.535067626953125, 0.5343518676757812, 0.5349970092773437, 0.534128662109375, 0.5347153930664063, 0.53432421875, 0.5345413208007812, 0.5351311645507812, 0.5347737426757813, 0.534719482421875, 0.5343641357421876, 0.5350553588867187, 0.5344020385742188, 0.5347225341796875, 0.5345587158203124, 0.5351331787109375, 0.5342013549804687, 0.5347911376953125, 0.5345372314453125, 0.5347225341796875, 0.5342412719726563, 0.5348792114257812, 0.5342689208984375, 0.5345525512695313, 0.5345115966796875, 0.5345474853515625, 0.53495703125, 0.53488232421875, 0.535014404296875, 0.5344368896484375, 0.53495703125, 0.534287353515625, 0.5348126831054687, 0.5351044921875, 0.535436279296875, 0.53448193359375, 0.5350891723632812, 0.5339913940429688, 0.5349908447265626, 0.5347307739257813, 0.5351055297851562, 0.5346611328125, 0.5345126342773437, 0.5350236206054687, 0.5345863647460938, 0.5352509155273437, 0.5350174560546875, 1.1105382080078126, 0.5355950317382813, 0.5344696044921875, 0.5347164306640625, 0.5345341186523438, 0.5346948852539063, 0.5343866577148437, 0.535103515625, 0.5347359008789062, 0.5351536865234375, 0.5343518676757812, 0.5350891723632812, 0.5348259887695312, 0.5349007568359375, 0.5342996215820313, 0.5354127197265625, 0.5349365844726562, 0.5348239135742188, 0.5341798095703125, 0.534992919921875, 0.534329345703125, 0.5348515625, 0.5342095336914062, 0.53524072265625, 0.5343549194335937, 0.534803466796875, 0.534345703125, 0.534719482421875, 0.5346047973632813, 0.5349662475585938, 0.5341337890625, 0.5350051879882812, 0.5342566528320313, 0.5347942504882812, 0.5347379150390625, 0.5350942993164063, 0.5344153442382813, 0.5352182006835937, 0.534846435546875, 0.5348956298828125, 0.5342689208984375, 0.5350553588867187, 0.5349601440429688, 0.53513525390625, 0.534561767578125, 0.535309326171875, 0.5342945556640625, 0.5351065673828125, 0.5344860229492188, 0.53475634765625, 0.534240234375, 0.5348341674804687, 0.5342648315429688, 0.534993896484375, 0.5347952880859375, 0.5356380004882813, 0.534824951171875, 0.5348976440429688, 0.5344174194335938, 0.5349017333984375, 0.5350840454101562, 0.5351629028320313, 0.5344470825195312, 1.1098358154296875, 0.5345259399414063, 0.5348362426757812, 0.534181884765625, 0.5347102661132812, 0.534091796875, 
0.5347164306640625, 0.534054931640625, 0.5361285400390625, 0.5355950317382813, 0.5359862060546875, 0.5356984252929687, 0.5359247436523438, 0.5353123779296876, 0.5349468383789062, 0.53486181640625, 0.5354373168945312, 0.5347164306640625, 0.5351342163085937, 0.5345413208007812, 0.5349837036132813, 0.53418701171875, 0.5347440795898437, 0.5342218017578125, 0.5345996704101562, 0.5342843017578125, 0.5351854248046874, 0.5345218505859375, 0.5348915405273438, 0.5342116088867187, 0.5350205688476563, 0.534297607421875, 0.534667236328125, 0.5343784790039062, 0.5350491943359375, 0.5344635009765625, 0.535109619140625, 0.5345679321289063, 0.5353430786132812, 0.5343150024414063, 0.5344952392578125, 0.5345228881835937, 0.5347676391601562, 0.53477783203125, 0.534740966796875, 0.5348997192382813, 0.5346795654296875, 0.53429248046875, 0.534645751953125, 0.5347973022460938, 0.5346099243164063, 0.5341378784179688, 0.5352919311523437, 0.5342832641601563, 0.5348894653320313, 0.53421875, 0.5347788696289062, 0.534635498046875, 0.5353768920898437, 0.5347850341796875, 0.5351485595703125, 0.5345423583984374, 0.5350031127929687, 1.1105545654296876, 0.5355919189453126, 0.5343006591796875, 0.5348720703125, 0.53427197265625, 0.5345126342773437, 0.5340037231445313, 0.535520263671875, 0.534739990234375, 0.535130126953125, 0.5343836059570313, 0.5350440673828125, 0.5346948852539063, 0.5351239624023437, 0.5344174194335938, 0.5351260375976562, 0.5345014038085938, 0.5347993774414063, 0.534582275390625, 0.5357168579101562, 0.5343109130859375, 0.5353820190429688, 0.5346990356445313, 0.5351629028320313, 0.5347451171875, 0.5354332275390625, 0.5348679809570313, 0.5348095703125, 0.5343733520507813, 0.5351966552734375, 0.5344102172851563, 0.5353021240234375, 0.5340569458007812, 0.534813720703125, 0.5341829223632812, 0.5347389526367188, 0.5341173706054687, 0.5348935546875, 0.5345310668945312, 0.535056396484375, 0.5342576904296875, 0.5349898071289062, 0.5346007080078125, 0.5354352416992187, 0.53414501953125, 0.5347205200195313, 0.5340692749023438, 0.5347993774414063, 0.5341737060546875, 0.53488330078125, 0.5342361450195312, 0.5347962646484375, 0.53441845703125, 0.53486181640625, 0.5344409790039063, 0.5348505859375, 0.5344778442382813, 0.5346652221679687, 0.5343754272460938, 0.5350758666992188, 0.5344163818359375, 0.5350686645507813, 0.5343856811523438, 1.1107747802734376, 0.5348556518554688, 0.5345413208007812, 0.5346017456054688, 0.5344235229492188, 0.5347625122070313, 0.534202392578125, 0.5346734008789062, 0.534513671875, 0.5345167236328126, 0.5345884399414063, 0.5348331298828125, 0.534603759765625, 0.5347123413085938, 0.535151611328125, 0.5338849487304688, 0.5345567016601562, 0.53408154296875, 0.5351454467773438, 0.5341777954101562, 0.5348731079101563, 0.534076416015625, 0.5348925170898438, 0.5345361938476563, 0.534635498046875, 0.534403076171875, 0.5345423583984374, 0.5348433837890625, 0.5344429931640625, 0.5346826171875, 0.5351076049804687, 0.5349652709960937, 0.5341634521484375, 0.5351157836914062, 0.534329345703125, 0.5346119384765625, 0.5346826171875, 0.5350000610351563, 0.5348444213867187, 0.5346211547851563, 0.5343887329101562, 0.5350768432617188, 0.53532568359375, 0.5349631958007812, 0.5353236694335938, 0.534540283203125, 0.5354454956054687, 0.5352796020507813, 0.5357506713867187, 0.5357987670898438, 0.5363568725585938, 0.5354977416992187, 0.5355079956054688, 0.5346631469726563, 0.5348648681640625, 0.53481982421875, 0.5350850830078125, 0.5346867065429688, 0.534488037109375, 0.5345198364257813, 0.5341511840820312, 
0.5348761596679688, 0.5343672485351563, 1.1098511962890625, 0.5344020385742188, 0.5343488159179688, 0.5351004028320312, 0.5345003662109375, 0.5340426025390625, 0.5349335327148438, 0.534129638671875, 0.5345115966796875, 0.534624267578125, 0.5344778442382813, 0.5346898193359375, 0.5347532958984375, 0.5341439819335938, 0.5348208618164062, 0.5342740478515625, 0.534593505859375, 0.5343160400390625, 0.5345218505859375, 0.5341788330078125, 0.5349027709960937, 0.5340282592773438, 0.5352099609375, 0.5348515625, 0.5344050903320312, 0.534667236328125, 0.534846435546875, 0.5345792236328125, 0.5350031127929687, 0.5352366333007812, 0.5349683227539063, 0.5348577270507813, 0.5350656127929687, 0.5350123291015625, 0.5349437255859375, 0.5353164672851562, 0.535109619140625, 0.5345955810546875, 0.5346232299804687, 0.5350543212890625, 0.5351209106445313, 0.5352017822265625, 0.5351526489257813, 0.534813720703125, 0.5347174682617187, 0.53507275390625, 0.5347962646484375, 0.535278564453125, 0.5350348510742188, 0.5344235229492188, 0.5348864135742187, 0.5350287475585938, 0.5347123413085938, 0.5351557006835937, 0.5351854248046874, 0.534824951171875, 0.5352898559570313, 0.5345700073242188, 0.5350369262695313, 0.5348126831054687, 0.5350420532226563, 0.5343068237304688, 0.5352109985351563, 1.1140484619140625, 0.5357752075195312, 0.5347020874023437, 0.5347215576171875, 0.5340579833984375, 0.5347225341796875, 0.5339064331054687, 0.5348648681640625, 0.5343109130859375, 0.5346764526367187, 0.5340579833984375, 0.534751220703125, 0.5342074584960937, 0.53477685546875, 0.5345331420898437, 0.5362677612304687, 0.5344603881835938, 0.535562255859375, 0.534560791015625, 0.5345679321289063, 0.53437646484375, 0.5348444213867187, 0.5341480712890625, 0.5348433837890625, 0.5341439819335938, 0.5348792114257812, 0.5344849853515625, 0.5353707275390625, 0.534392822265625, 0.5349959716796875, 0.534276123046875, 0.5353820190429688, 0.5345740966796875, 0.5356973876953125, 0.534592529296875, 0.5353021240234375, 0.5345812377929687, 0.5350471801757812, 0.5342730102539063, 0.5356431274414063, 0.5344461059570312, 0.5352222900390625, 0.534319091796875, 0.5352243041992187, 0.5342218017578125, 0.5354874877929687, 0.5344635009765625, 0.5353492431640625, 0.5346590576171875, 0.5352969970703125, 0.5345413208007812, 0.5364439086914062, 0.5349744873046876, 0.5354383544921875, 0.5349171142578125, 0.5353850708007812, 0.5352386474609375, 0.5347727661132813, 0.5347440795898437, 0.5347891235351563, 0.53429248046875, 0.5350348510742188, 0.5342208251953126, 1.1111966552734376, 0.5346058349609375, 0.5340743408203125, 0.5344491577148438, 0.5340753784179687, 0.5344573364257813, 0.5351997680664062, 0.5346652221679687, 0.5347676391601562, 0.5348843383789063, 0.5340805053710938, 0.5348229370117188, 0.5342904052734375, 0.5346806030273438, 0.5341122436523438, 0.5346744384765625, 0.5341951904296875, 0.53463037109375, 0.5346017456054688, 0.5344931640625, 0.5348495483398438, 0.5341675415039062, 0.534192138671875, 0.5346129760742188, 0.5353421020507813, 0.5345730590820312, 0.5347010498046875, 0.5343364868164062, 0.53553564453125, 0.5343969116210937, 0.534887451171875, 0.5349539794921875, 0.5349130249023437, 0.5349970092773437, 0.5343682861328125, 0.534813720703125, 0.5341798095703125, 0.5349908447265626, 0.5340886840820313, 0.5348864135742187, 0.53427197265625, 0.5349468383789062, 0.5341091918945312, 0.5351629028320313, 0.5349376220703125, 0.5348556518554688, 0.53496728515625, 0.5342669067382813, 0.5347440795898437, 0.53432421875, 0.53492529296875, 0.5341470947265625, 
0.5350174560546875, 0.5343016967773437, 0.5352642822265625, 0.5343908081054688, 0.535689208984375, 0.535041015625, 0.5345894165039062, 0.5347891235351563, 0.5347962646484375, 0.5358428344726562, 0.5355755615234375]",tokens/s,1.8416089513521527,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1585.569792,9676.783616,0.0,9030.336512,8583.573504,s,10,9.573078491210937,0.9573078491210938,0.0009060771854647526,0.9570158996582031,0.9584730651855469,0.958728140258789,0.9589322003173828,"[0.9589832153320312, 0.956114501953125, 0.95637109375, 0.9565009765625, 0.9567921142578125, 0.9570035400390625, 0.9570282592773437, 0.95793017578125, 0.957938232421875, 0.9584163818359375]",tokens/s,267.41658938139295,kWh,1.1295311258296775e-05,6.189367993647465e-06,5.412797259527177e-05,7.161265184721602e-05,tokens/kWh,3574787.323141869,MB,1585.569792,9676.783616,0.0,9030.336512,8872.967168,s,10,567.96033984375,56.796033984375,0.005905783414625482,56.796150390625,56.802221875,56.803097265625,56.803797578125,"[56.78812109375, 56.796453125, 56.795734375, 56.80109765625, 56.800734375, 56.79584765625, 56.80202734375, 56.80397265625, 56.7909765625, 56.785375]",tokens/s,1.1092323808618707,kWh,0.0006704276505549146,0.00036745356669733154,0.0031837375343617275,0.004221618751613973,tokens/kWh,14923.185561442368,,s,629,575.6730700073241,0.9152195071658572,0.113749717069217,0.901496826171875,0.9019905639648437,0.9021755371093749,1.8586116552734375,"[0.9012899780273438, 0.9015009155273438, 0.9013851928710938, 0.9014036254882812, 0.9017272338867187, 0.9014722290039062, 0.9017364501953125, 0.9016832275390625, 0.901676025390625, 0.9014937744140625, 0.9013677978515625, 0.90115380859375, 0.9009315795898437, 0.9009326171875, 0.9006499633789062, 0.9008291625976562, 0.9008558349609375, 0.9008486328125, 0.9011026000976563, 0.9014824829101562, 0.901602294921875, 0.901296142578125, 0.9016278686523438, 0.9010196533203125, 0.901043212890625, 0.9009264526367188, 0.9010216674804687, 0.9008496704101563, 0.901128173828125, 0.9008875732421875, 0.901443603515625, 0.9013667602539063, 0.9012459716796875, 0.90106982421875, 0.9016514282226562, 0.9009028930664063, 0.9010933837890625, 0.9009028930664063, 0.9010616455078125, 0.9009520874023438, 0.901349365234375, 0.9014415283203125, 0.901707763671875, 0.9015367431640625, 0.9014681396484375, 0.9018613891601562, 0.901622802734375, 0.9018931274414063, 0.901707763671875, 0.9014057006835937, 0.901602294921875, 0.90210302734375, 0.9018941650390625, 0.9015654296875, 0.9017302856445313, 0.9016248168945312, 0.9020457153320313, 0.9013043212890625, 0.9014906616210937, 0.9013822021484375, 0.9013165283203125, 0.901349365234375, 1.86075439453125, 0.901201904296875, 0.9012777099609375, 0.9009407958984375, 0.9011107788085938, 0.9012059936523438, 0.901411865234375, 0.9011742553710937, 0.9014906616210937, 0.9008762817382813, 0.9011650390625, 0.9011548461914063, 0.9017159423828125, 0.9009899291992187, 
0.9010339965820312, 0.9014087524414063, 0.901095458984375, 0.9012418212890625, 0.9011476440429688, 0.90099609375, 0.901496826171875, 0.9016145629882812, 0.9013197021484375, 0.9020405883789062, 0.9015439453125, 0.9012941284179687, 0.9013165893554688, 0.9014087524414063, 0.9009602661132813, 0.9014149169921875, 0.901086181640625, 0.901359619140625, 0.9010995483398437, 0.9015562133789062, 0.9015613403320313, 0.9021737060546875, 0.9016637573242188, 0.9020088500976563, 0.9014948120117188, 0.90166064453125, 0.901233642578125, 0.901607421875, 0.90140673828125, 0.9016708984375, 0.9019443359375, 0.9020333862304688, 0.90393701171875, 0.9017435913085937, 0.90208154296875, 0.9017467041015625, 0.901749755859375, 0.9017620239257812, 0.9014108276367188, 0.901612548828125, 0.9014630126953125, 0.901897216796875, 0.9016043701171875, 0.9021522216796874, 0.9015429077148438, 0.9017200927734375, 0.9018798217773437, 0.901327880859375, 0.9020098266601563, 1.8579189453125, 0.9009674072265625, 0.9019514770507813, 0.9013217163085937, 0.9013043212890625, 0.9010616455078125, 0.901369873046875, 0.9010882568359375, 0.9010647583007813, 0.9008700561523437, 0.901254150390625, 0.9011005249023437, 0.9012029418945312, 0.901781494140625, 0.9017036743164063, 0.9013206787109375, 0.901180419921875, 0.9015664672851562, 0.9012275390625, 0.901060546875, 0.9013350219726562, 0.9012828369140625, 0.9011179809570312, 0.902530029296875, 0.901454833984375, 0.9017108764648437, 0.9014998779296876, 0.9013156127929688, 0.9013903198242188, 0.901664794921875, 0.9012408447265625, 0.9015879516601563, 0.9012008666992187, 0.9010565185546875, 0.9010083618164062, 0.9016893310546875, 0.9017681884765625, 0.9018941650390625, 0.9012459716796875, 0.9014087524414063, 0.9018050537109376, 0.9015992431640625, 0.9015572509765625, 0.9015726318359375, 0.9014558715820312, 0.9014138793945312, 0.901760009765625, 0.9032969970703125, 0.9014159545898438, 0.9015982055664062, 0.90126953125, 0.9023529052734375, 0.901634033203125, 0.9018787841796875, 0.9014353637695313, 0.9016453247070313, 0.9016279296875, 0.901738525390625, 0.9017559204101563, 0.902043701171875, 0.9015582275390625, 0.9019248657226563, 0.9020723266601562, 1.8581309814453124, 0.9010073852539062, 0.9013156127929688, 0.9008977661132812, 0.9008128051757812, 0.9016401977539062, 0.9013319702148438, 0.9011097412109375, 0.9010739135742187, 0.9013370971679687, 0.9015664672851562, 0.9017160034179688, 0.9023712768554687, 0.9015736083984375, 0.9013422241210938, 0.901080078125, 0.9018327026367188, 0.90148046875, 0.9011190185546875, 0.901212158203125, 0.9018624267578125, 0.9014783935546875, 0.9012479858398438, 0.9013688354492188, 0.90138623046875, 0.9015767211914063, 0.90112109375, 0.901693359375, 0.901707763671875, 0.9020232543945312, 0.901571533203125, 0.9019771118164063, 0.901349365234375, 0.9013718872070312, 0.9014671630859376, 0.9014220581054687, 0.9012838134765625, 0.9017538452148437, 0.9016350708007812, 0.9019105224609375, 0.9021767578125, 0.901813232421875, 0.9018296508789062, 0.9015695190429688, 0.903024658203125, 0.902118408203125, 0.9014558715820312, 0.9017098388671875, 0.901375, 0.901796875, 0.90182861328125, 0.9018388671875, 0.901644287109375, 0.9020845947265625, 0.9015941162109375, 0.9019043579101562, 0.9019207763671875, 0.901602294921875, 0.9014589233398438, 0.9020272827148438, 0.9018408813476563, 0.9020088500976563, 0.9018572998046875, 1.8591446533203124, 0.9011109008789062, 0.9014906005859376, 0.9012500610351563, 0.9013309326171876, 0.9015132446289063, 0.9010267944335938, 0.90169140625, 0.9015234375, 
0.9012705078125, 0.90149169921875, 0.90165869140625, 0.90100830078125, 0.9009920043945312, 0.9015848999023437, 0.9015347290039063, 0.9014989013671875, 0.9024532470703125, 0.9021880493164063, 0.9016944580078124, 0.9015951538085938, 0.9013145751953126, 0.90147021484375, 0.9014896850585937, 0.9010872192382813, 0.9012111206054687, 0.9012377319335938, 0.9011046142578125, 0.90134423828125, 0.9021696166992188, 0.9015510864257813, 0.9017293090820313, 0.9014609985351563, 0.9018982543945312, 0.9016985473632813, 0.9015664672851562, 0.9012459716796875, 0.9016145629882812, 0.9012357177734375, 0.9017845458984375, 0.9017640991210938, 0.9023355102539062, 0.9016002807617187, 0.9013688354492188, 0.9013800659179687, 0.9019535522460937, 0.9013340454101563, 0.9017763671875, 0.9015776977539063, 0.90165966796875, 0.9015429077148438, 0.9014620361328125, 0.9018777465820312, 0.9020886840820312, 0.9014589233398438, 0.9016187133789062, 0.901876708984375, 0.903267333984375, 0.9016299438476563, 0.9016832275390625, 0.901592041015625, 0.9018306274414063, 0.9016535034179688, 1.8594969482421875, 0.90172314453125, 0.901623779296875, 0.9014261474609375, 0.9010974731445313, 0.9016514282226562, 0.9012612915039062, 0.9012162475585938, 0.9010811157226563, 0.900874267578125, 0.9016196899414063, 0.9018163452148438, 0.9015050048828125, 0.9010811157226563, 0.9016350708007812, 0.9010995483398437, 0.9011435546875, 0.9012008666992187, 0.9010196533203125, 0.90096337890625, 0.9009110717773438, 0.9015501098632812, 0.9013790893554687, 0.901444580078125, 0.9009868774414063, 0.9019638061523437, 0.9014251708984375, 0.9013463134765625, 0.9010083618164062, 0.9014886474609375, 0.9014528198242188, 0.9012612915039062, 0.902129638671875, 0.9023948974609375, 0.9013688354492188, 0.9017241821289063, 0.90169140625, 0.9015562133789062, 0.9032264404296875, 0.90172509765625, 0.9013514404296875, 0.9015767211914063, 0.9013986206054687, 0.9023374633789063, 0.9012766723632812, 0.90123876953125, 0.9009448852539063, 0.9015643920898437, 0.9012295532226563, 0.9011148681640625, 0.9011896362304688, 0.9011199951171875, 0.9018316650390625, 0.9017835693359375, 0.9014640502929687, 0.9016084594726562, 0.90189208984375, 0.901802978515625, 0.9020620727539063, 0.9019801635742187, 0.9017937622070312, 0.9018091430664062, 0.90191357421875, 1.858798583984375, 0.9011435546875, 0.9014241333007813, 0.90132275390625, 0.9010053100585937, 0.9013289184570312, 0.9013585815429688, 0.9011660766601562, 0.9014292602539062, 0.901317626953125, 0.901381103515625, 0.901228515625, 0.9015480346679687, 0.9016514282226562, 0.9015521240234375, 0.90144873046875, 0.9019647827148437, 0.9018716430664062, 0.9012531127929687, 0.9012572021484375, 0.901212158203125, 0.9014169311523438, 0.901138427734375, 0.9026416625976562, 0.9016320190429687, 0.9017907104492188, 0.9014876098632812, 0.9016504516601562, 0.901855224609375, 0.9019913940429688, 0.9016053466796875, 0.9015654296875, 0.9016565551757812, 0.9016135864257813, 0.901591064453125, 0.9022617797851562, 0.901928955078125, 0.9018091430664062, 0.901897216796875, 0.9014537963867187, 0.9010780029296875, 0.9012725830078125, 0.9011988525390625, 0.9014630126953125, 0.90176416015625, 0.9015735473632812, 0.9017302856445313, 0.9021542358398438, 0.9014528198242188, 0.9019176635742188, 0.9015859375, 0.9015582275390625, 0.901423095703125, 0.9016873168945313, 0.9015879516601563, 0.9017313232421875, 0.9018091430664062, 0.902240234375, 0.9021122436523438, 0.9021419677734375, 0.9018674926757813, 0.9018511352539063, 0.9022258911132812, 1.8597550048828124, 0.9009326171875, 
0.90142822265625, 0.901043212890625, 0.9013135375976562, 0.9019094848632813, 0.9014773559570313, 0.9016790771484375, 0.9016719360351563, 0.9012367553710937, 0.9012531127929687, 0.9010974731445313, 0.901064697265625, 0.90119677734375, 0.9015040283203125, 0.901970947265625, 0.90260888671875, 0.9018121948242187, 0.9016063842773437, 0.9018624267578125, 0.9015449829101563, 0.901992431640625, 0.9017988891601563, 0.901696533203125, 0.9012930297851562, 0.9012428588867187, 0.90197607421875, 0.9014159545898438, 0.90151220703125, 0.9018674926757813, 0.9016678466796875, 0.9016135864257813, 0.9015623779296875, 0.9015643920898437, 0.9014937744140625, 0.902150146484375, 0.9015090942382813, 0.9018839111328125, 0.9017354125976562, 0.9015787353515625, 0.9013851928710938, 0.9017507934570312, 0.9015869140625, 0.9017579345703125, 0.901917724609375, 0.9019903564453124, 0.90163916015625, 0.9023303833007813, 0.901960693359375, 0.9023068237304688, 0.9017538452148437, 0.9017886962890626, 0.901560302734375, 0.9016637573242188, 0.9014384765625, 0.9020631103515625, 0.9018910522460938, 0.9014251708984375, 0.9013934326171875, 0.9016893310546875, 0.9019873657226563, 0.9017251586914062, 0.901432373046875, 1.8595020751953124, 0.9010237426757812, 0.9012930297851562, 0.9010022583007813, 0.9012469482421875, 0.9015675048828125, 0.9011814575195313, 0.9016350708007812, 0.9019985961914062, 0.9014097900390625, 0.9013739624023438, 0.9014691772460938, 0.901528564453125, 0.9014323120117187, 0.90157568359375, 0.9015634155273438, 0.9018613891601562, 0.9021675415039062, 0.9016709594726563, 0.9019002685546875, 0.9019852905273438, 0.9018562622070313, 0.900979736328125, 0.90132373046875, 0.901507080078125, 0.9013309936523437, 0.9011680908203125, 0.9011712036132813, 0.9011783447265626, 0.9020948486328125, 0.9016350708007812, 0.9018470458984374, 0.9016832275390625, 0.901180419921875, 0.9011251220703125, 0.901518310546875, 0.9013657836914063, 0.9012398071289063, 0.90106982421875, 0.9011405029296875, 0.901254150390625, 0.9014476928710937, 0.9010718994140625, 0.9012920532226563, 0.90113330078125, 0.9011609497070312, 0.901159912109375, 0.9013043212890625, 0.9009213256835937, 0.9009336547851563, 0.90114453125, 0.9015357666015625, 0.9015296020507813, 0.9014456176757812, 0.9014261474609375, 0.9017907104492188, 0.9016565551757812, 0.9016832275390625, 0.90138623046875, 0.9014343872070313, 0.9012725830078125, 0.9016371459960938, 0.902470703125, 1.8604583740234375, 0.9015695190429688, 0.9012644653320312, 0.9009939575195313, 0.9012254638671875, 0.9013217163085937, 0.9012008666992187, 0.9012828369140625, 0.9010513916015624, 0.9015951538085938, 0.9012674560546875, 0.9009285278320313, 0.9009203491210938, 0.9009633178710937, 0.9009141845703125, 0.9008394165039062, 0.9009039306640625, 0.901094482421875, 0.9009048461914062, 0.9008230590820312, 0.9013002319335938, 0.9019085083007813, 0.9010206909179688, 0.9013463134765625, 0.9014251708984375, 0.9011159057617187, 0.9022166748046875, 0.9010892944335938, 0.900806640625, 0.9012715454101563, 0.9008599243164063, 0.9013524780273438, 0.9015029907226563, 0.9012828369140625, 0.9010565185546875, 0.9016555786132813, 0.9012265014648437, 0.90136474609375, 0.9013237915039063, 0.9012940673828125, 0.90121728515625, 0.9013165893554688, 0.901396484375, 0.9020753784179687, 0.9011875610351563, 0.9012091064453125, 0.9015101318359375, 0.9015930786132812, 0.90141796875, 0.9013585815429688, 0.9015480346679687, 0.9018736572265625, 0.9019412231445313, 0.9015439453125, 0.9019586791992188, 0.9018480834960938, 0.9013289184570312, 
0.9018142700195313, 0.9015265502929688, 0.901791748046875, 0.9013944091796875, 0.9017569580078125, 0.9013258056640625]",tokens/s,1.0926340535470893,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2628.173824,8389.132288,0.0,7742.685184,7007.145472,s,10,5.8011931152343745,0.5801193115234377,0.0010375594919882804,0.5799924316406251,0.5812619140624999,0.5815937561035156,0.5818592297363281,"[0.5800941772460938, 0.5819255981445313, 0.5788403930664062, 0.5789609375, 0.5798906860351563, 0.5787822265625, 0.57971533203125, 0.5806090698242188, 0.5811865234375, 0.5811881713867187]",tokens/s,441.28853309110576,kWh,6.83268827420694e-06,3.744032605603327e-06,3.3321369249667614e-05,4.3898090129477875e-05,tokens/kWh,5831688.787483131,MB,2628.173824,8389.132288,0.0,7742.685184,7283.985408,s,10,337.06498046875,33.706498046875,0.006009418787724905,33.706857421875,33.7119703125,33.715088671875,33.717583359375006,"[33.70903515625, 33.70971875, 33.7039609375, 33.70134375, 33.71127734375, 33.70996875, 33.71820703125, 33.69992578125, 33.69686328125, 33.7046796875]",tokens/s,1.869075805869452,kWh,0.0003979287963388143,0.0002180992035304977,0.0019156344121359354,0.0025316624120052473,tokens/kWh,24884.8344476149,,s,629,341.74955133056636,0.5433220211932693,0.06898772267341372,0.5349846801757813,0.5355790161132813,0.5358112670898437,1.1147944775390626,"[0.5351260375976562, 0.535109619140625, 0.534408203125, 0.5347737426757813, 0.5349273681640625, 0.5355181884765625, 0.5343795166015625, 0.5353154296875, 0.5344747314453125, 0.5348731079101563, 0.5341849365234375, 0.5352171630859375, 0.5348259887695312, 0.5350338745117188, 0.5346682739257812, 0.5355919189453126, 0.5350399780273437, 0.535109619140625, 0.53498779296875, 0.5350379638671875, 0.5342330932617188, 0.534898681640625, 0.5348997192382813, 0.535445556640625, 0.5350767822265625, 0.5357158203125, 0.5351649169921875, 0.53486181640625, 0.5343006591796875, 0.5351065673828125, 0.5353646240234375, 0.5352591552734375, 0.53469091796875, 0.5347808227539063, 0.5347430419921875, 0.5353215942382813, 0.5346262817382812, 0.5358970947265626, 0.5353533325195312, 0.535372802734375, 0.5349212646484375, 0.5354249877929688, 0.5347993774414063, 0.5352724609375, 0.5352263793945312, 0.5359462280273437, 0.5349867553710937, 0.5358069458007813, 0.5356390380859375, 0.5357721557617188, 0.5345842895507813, 0.5350502319335938, 0.5345679321289063, 0.5353901977539063, 0.534408203125, 0.5348423461914062, 0.5349427490234375, 0.5352744750976562, 0.5348515625, 0.5350000610351563, 0.5347225341796875, 0.5350891723632812, 1.117781005859375, 0.5343908081054688, 0.5352744750976562, 0.5350615234375, 0.5358694458007812, 0.5350860595703125, 0.5349007568359375, 0.5344706420898437, 0.5346253051757812, 0.534213623046875, 0.5346007080078125, 0.534765625, 0.5350972900390625, 0.5347195434570312, 0.5356298217773438, 0.5349805908203125, 0.5357711181640625, 0.5348290405273437, 0.5349857177734375, 0.5353421020507813, 
0.5351044921875, 0.5350942993164063, 0.5355233154296875, 0.5351280517578125, 0.536111083984375, 0.53477783203125, 0.5354240112304688, 0.5351700439453125, 0.5359093627929687, 0.5350686645507813, 0.5352868041992187, 0.5346743774414062, 0.534993896484375, 0.534455322265625, 0.5347471313476563, 0.5344000244140625, 0.5349601440429688, 0.5342822265625, 0.5353840942382813, 0.5345341186523438, 0.5348598022460938, 0.5351505737304687, 0.5354915771484375, 0.5350184936523438, 0.535066650390625, 0.5345361328125, 0.53806591796875, 0.5347102661132812, 0.535362548828125, 0.5352243041992187, 0.5350154418945312, 0.5345475463867188, 0.5348187255859375, 0.5344368896484375, 0.5351710815429688, 0.5345422973632813, 0.5351874389648438, 0.5350225830078125, 0.5353594970703125, 0.5346467895507813, 0.53593701171875, 0.5349805908203125, 0.535109619140625, 1.11461376953125, 0.5346682739257812, 0.5355612182617188, 0.535363525390625, 0.5350051879882812, 0.5345147094726562, 0.5349580688476563, 0.5345648803710937, 0.5349765014648438, 0.5342883911132813, 0.534782958984375, 0.5348720703125, 0.5348485107421875, 0.5344696044921875, 0.5351680297851562, 0.535014404296875, 0.5347891235351563, 0.5343958740234375, 0.534729736328125, 0.5343323974609375, 0.5347850341796875, 0.534245361328125, 0.5351393432617187, 0.5362525024414062, 0.53484130859375, 0.5343948974609375, 0.53480859375, 0.5345147094726562, 0.535352294921875, 0.5345781860351563, 0.5349120483398437, 0.5347604370117187, 0.5352069091796875, 0.5347593994140625, 0.5351116943359375, 0.5349611206054687, 0.5350625, 0.5345709838867188, 0.535088134765625, 0.5350850830078125, 0.535235595703125, 0.53505126953125, 0.5352868041992187, 0.5351475219726562, 0.5351701049804688, 0.535184326171875, 0.5351956176757813, 0.536447998046875, 0.5354301147460937, 0.53513427734375, 0.5351146850585937, 0.5348946533203125, 0.5352826538085937, 0.5349775390625, 0.5352734985351563, 0.5348341674804687, 0.5351444702148438, 0.5347440185546875, 0.53526220703125, 0.5349683227539063, 0.5353143920898438, 0.5344860229492188, 0.534940673828125, 1.1146219482421875, 0.5341777954101562, 0.5348720703125, 0.5346262817382812, 0.5350717163085937, 0.5346693725585937, 0.5351198120117188, 0.5349754638671875, 0.53484033203125, 0.5343754272460938, 0.5346856689453126, 0.534470703125, 0.5363936767578125, 0.5344901123046875, 0.5354598388671875, 0.5345167236328126, 0.535103515625, 0.5345525512695313, 0.5347041015625, 0.5342689208984375, 0.53492431640625, 0.5343795166015625, 0.5349335327148438, 0.5343723754882812, 0.53469287109375, 0.5351454467773438, 0.5353369750976562, 0.5347051391601563, 0.5347532958984375, 0.5346580200195312, 0.5352058715820313, 0.5351024780273438, 0.5352191772460938, 0.53522021484375, 0.5348075561523438, 0.534297607421875, 0.534724609375, 0.5345833129882812, 0.5349539794921875, 0.5344808959960937, 0.53492529296875, 0.5351454467773438, 0.5350830078125, 0.5344010009765625, 0.5376593627929688, 0.5356707763671875, 0.5352120361328125, 0.5348014526367187, 0.5355078735351563, 0.5344962768554687, 0.5350451049804688, 0.5349385986328125, 0.5351188354492188, 0.5344829711914062, 0.5351536865234375, 0.5346395874023437, 0.5351792602539063, 0.534635498046875, 0.5352366333007812, 0.534640625, 0.5355222778320312, 0.53492431640625, 0.5352960205078126, 1.1157667236328126, 0.5344829711914062, 0.5347286987304688, 0.53443994140625, 0.5350502319335938, 0.5349959716796875, 0.5357506713867187, 0.5345075073242187, 0.5348935546875, 0.5344307250976562, 0.5347952880859375, 0.5344050903320312, 0.5348792114257812, 0.5342566528320313, 
0.5350963745117188, 0.5344285888671875, 0.534782958984375, 0.5359862060546875, 0.5356195678710938, 0.53477783203125, 0.5348853759765625, 0.534413330078125, 0.5349529418945312, 0.5347205200195313, 0.5349427490234375, 0.5349754638671875, 0.5349099731445313, 0.5344389038085937, 0.5352949829101562, 0.5348628540039062, 0.5350799560546875, 0.5344368896484375, 0.5352714233398438, 0.5348720703125, 0.5350604858398438, 0.534603759765625, 0.5353707275390625, 0.5352437744140625, 0.535731201171875, 0.5351393432617187, 0.535103515625, 0.5343733520507813, 0.534950927734375, 0.53507275390625, 0.535462890625, 0.5348690185546875, 0.5352427368164062, 0.5344849853515625, 0.5353912353515625, 0.5345740966796875, 0.5349765014648438, 0.5348229370117188, 0.5356441650390625, 0.5349171142578125, 0.5356072998046875, 0.5357066040039062, 0.5356267700195313, 0.5386843872070313, 0.535677978515625, 0.53519970703125, 0.5360302124023437, 0.5351813354492188, 0.5363681030273437, 1.114861572265625, 0.534814697265625, 0.5358141479492188, 0.5348894653320313, 0.535541748046875, 0.5344706420898437, 0.5349130249023437, 0.5350021362304688, 0.5350850830078125, 0.534813720703125, 0.5355775756835938, 0.5344389038085937, 0.53571484375, 0.5351659545898437, 0.5355847778320313, 0.53517822265625, 0.5354332275390625, 0.53538818359375, 0.5351044921875, 0.5344112548828125, 0.5350000610351563, 0.5343201293945312, 0.5348331298828125, 0.5342853393554687, 0.5347276611328124, 0.5348331298828125, 0.534877197265625, 0.534382568359375, 0.5348782348632812, 0.5349212036132812, 0.5350440673828125, 0.5346990356445313, 0.5352007446289062, 0.5345945434570313, 0.5349151000976563, 0.5347184448242187, 0.5349703979492187, 0.5343866577148437, 0.5375672607421875, 0.5345842895507813, 0.535604248046875, 0.5345730590820312, 0.5350553588867187, 0.5347706909179688, 0.5352007446289062, 0.5347727661132813, 0.535562255859375, 0.5352611694335937, 0.5354332275390625, 0.5348392944335938, 0.5353809814453125, 0.5349007568359375, 0.5352796020507813, 0.5347973022460938, 0.535889892578125, 0.535098388671875, 0.5354475708007812, 0.5351219482421875, 0.5354977416992187, 0.5351229248046875, 0.5353697509765625, 0.5347573852539063, 0.5351884765625, 1.11545751953125, 0.534709228515625, 0.5355346069335938, 0.5345218505859375, 0.53505126953125, 0.5347532958984375, 0.5353421020507813, 0.5347676391601562, 0.535130126953125, 0.53502978515625, 0.5353748779296875, 0.5347379150390625, 0.53549462890625, 0.5350656127929687, 0.53538818359375, 0.5347123413085938, 0.53515673828125, 0.5347584228515625, 0.5352509155273437, 0.5351055297851562, 0.5358919677734375, 0.5351751708984375, 0.535287841796875, 0.5365626831054687, 0.5354188842773437, 0.5353696899414062, 0.5352734985351563, 0.5352540283203125, 0.5353543701171875, 0.5349550170898437, 0.5355130615234375, 0.5348792114257812, 0.535568359375, 0.5348311767578126, 0.5353768310546875, 0.5346519165039062, 0.53517822265625, 0.5345648803710937, 0.5352017822265625, 0.535309326171875, 0.5357240600585937, 0.5350317993164062, 0.5355181884765625, 0.5348699951171875, 0.5352345581054687, 0.5348505859375, 0.5359411010742188, 0.5351085815429687, 0.5355888671875, 0.5350133666992187, 0.5354229736328125, 0.5347727661132813, 0.5353400268554688, 0.5348945922851562, 0.5355601806640625, 0.5356461791992188, 0.5350768432617188, 0.5353328857421875, 0.5356503295898437, 0.5349284057617187, 0.53524169921875, 0.53501953125, 0.5352315063476563, 1.1161282958984375, 0.5346826171875, 0.53496630859375, 0.5344050903320312, 0.5346508178710937, 0.5344050903320312, 0.5348168334960938, 
0.5343477172851563, 0.5350819702148437, 0.5347593994140625, 0.5352212524414063, 0.534572021484375, 0.5355878295898437, 0.53490380859375, 0.5348966674804687, 0.5346734008789062, 0.5364859008789062, 0.53452392578125, 0.5349498901367188, 0.5348843383789063, 0.5350963134765625, 0.5344942016601563, 0.5349078979492188, 0.5345894165039062, 0.5349498901367188, 0.5344747314453125, 0.53500927734375, 0.5347010498046875, 0.535119873046875, 0.5345147094726562, 0.53530419921875, 0.5354342651367188, 0.535141357421875, 0.5348782348632812, 0.5348505859375, 0.5344890747070312, 0.5351444702148438, 0.53507275390625, 0.535056396484375, 0.5347092895507812, 0.5349498291015625, 0.534508544921875, 0.5350236206054687, 0.5345904541015625, 0.535593994140625, 0.5347225341796875, 0.5352581176757812, 0.5349776000976563, 0.5351546020507812, 0.5346416625976562, 0.5357752075195312, 0.5347604370117187, 0.5349846801757813, 0.5344286499023437, 0.5350154418945312, 0.5346324462890625, 0.5349918823242188, 0.5349908447265626, 0.5351976928710938, 0.5346375732421875, 0.5349151000976563, 0.5348065185546875, 0.5351188354492188, 1.1169342041015624, 0.534445068359375, 0.5351669921875, 0.5345443725585938, 0.53528369140625, 0.534782958984375, 0.5349293823242187, 0.5344174194335938, 0.5348372192382812, 0.5343057861328125, 0.53471435546875, 0.534593505859375, 0.5350717163085937, 0.5345064697265625, 0.5349273681640625, 0.5345054931640625, 0.5349867553710937, 0.535751708984375, 0.5350215454101562, 0.5345771484375, 0.535593994140625, 0.534761474609375, 0.5349488525390625, 0.5347625122070313, 0.5349222412109375, 0.5344276733398438, 0.53481982421875, 0.5344440307617188, 0.5348536376953125, 0.5344583740234375, 0.5348925170898438, 0.5345833129882812, 0.5350532836914063, 0.5344010009765625, 0.5347758178710937, 0.5343866577148437, 0.5347593994140625, 0.534382568359375, 0.5348362426757812, 0.53475634765625, 0.5349376831054687, 0.5345422973632813, 0.5351393432617187, 0.53452490234375, 0.5351270141601563, 0.5344603881835938, 0.5351065673828125, 0.5345259399414063, 0.5350799560546875, 0.5351802978515625, 0.5355919189453126, 0.5348382568359376, 0.5353246459960938, 0.5350021362304688, 0.535816162109375, 0.5346324462890625, 0.5352212524414063, 0.5346836547851562, 0.5353799438476563, 0.5346682739257812, 0.5351454467773438, 0.5346416625976562, 0.53532568359375, 1.116949462890625, 0.5343743896484375, 0.5350113525390625, 0.5348218994140626, 0.5349816284179687, 0.5344235229492188, 0.5348731079101563, 0.534445068359375, 0.534877197265625, 0.53441943359375, 0.5347359008789062, 0.5345535888671875, 0.5347891235351563, 0.5345515747070313, 0.5351004028320312, 0.53458740234375, 0.5349119873046875, 0.5347153930664063, 0.5349099731445313, 0.5344542846679687, 0.5348812866210938, 0.5343672485351563, 0.5352898559570313, 0.5349918823242188, 0.5351209106445313, 0.5350277099609375, 0.5352345581054687, 0.5350748291015625, 0.5352857666015625, 0.5347102661132812, 0.5349519653320313, 0.5349539794921875, 0.5350645751953125, 0.534740966796875, 0.5353809814453125, 0.5346959228515625, 0.5358069458007813, 0.53460888671875, 0.5353052368164063, 0.5350338745117188, 0.5352212524414063, 0.53507275390625, 0.5351802978515625, 0.5347666015625, 0.5349642333984375, 0.53477783203125, 0.535235595703125, 0.5350543212890625, 0.5352120361328125, 0.5350082397460938, 0.5354352416992187, 0.5352120361328125, 0.5354669799804688, 0.535140380859375, 0.535362548828125, 0.534782958984375, 0.5354803466796875, 0.5349232788085938, 0.5352908935546875, 0.535625732421875, 0.5352755126953125, 0.53500732421875, 
0.5353860473632812]",tokens/s,1.8405291171591998,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1767.067648,22129.672192,0.0,21483.225088,20799.168,s,10,28.05056616210938,2.805056616210938,0.0027095714336187707,2.804907958984375,2.8082408447265625,2.808313903808594,2.808372351074219,"[2.805282470703125, 2.808224609375, 2.803196533203125, 2.80234619140625, 2.80277294921875, 2.800419677734375, 2.804533447265625, 2.80722705078125, 2.80817626953125, 2.808386962890625]",tokens/s,91.26375507735885,kWh,3.3081144508388305e-05,1.8129723654910774e-05,0.00015725384802519894,0.000208464716188498,tokens/kWh,1228025.5607789264,MB,1772.261376,22129.672192,0.0,21483.225088,20902.144,s,10,1667.162484375,166.71624843749996,0.015085835868376713,166.71732031250002,166.73279531249997,166.73349140624998,166.73404828124998,"[166.730125, 166.715734375, 166.68440625, 166.724703125, 166.7341875, 166.71721875, 166.708265625, 166.717421875, 166.69778125, 166.732640625]",tokens/s,0.377887581987055,kWh,0.0019678901955319777,0.0010785770549249355,0.009270038554913802,0.012316505805370715,tokens/kWh,5115.087103074991,,s,629,1689.7617126464822,2.686425616290118,0.33327430639158717,2.64620947265625,2.6474256835937497,2.647894970703125,5.4507228125000005,"[2.646453369140625, 2.646373291015625, 2.64631591796875, 2.6454580078125, 2.646531982421875, 2.647617431640625, 2.64826171875, 2.647658447265625, 2.647793701171875, 2.64637646484375, 2.64711376953125, 2.64620947265625, 2.646602783203125, 2.646426513671875, 2.64635498046875, 2.64557568359375, 2.644958251953125, 2.647402587890625, 2.64601904296875, 2.646287353515625, 2.647202880859375, 2.64688525390625, 2.646246337890625, 2.646867919921875, 2.64715478515625, 2.647743408203125, 2.646212646484375, 2.647287841796875, 2.648404052734375, 2.64638671875, 2.64559912109375, 2.647835693359375, 2.648161376953125, 2.6474189453125, 2.647456787109375, 2.6468076171875, 2.646687744140625, 2.64502880859375, 2.646285400390625, 2.646274169921875, 2.64652490234375, 2.64616455078125, 2.64710546875, 2.6460908203125, 2.646740966796875, 2.645139404296875, 2.646036376953125, 2.646411376953125, 2.64627099609375, 2.64452197265625, 2.64525830078125, 2.647901123046875, 2.64601806640625, 2.645980224609375, 2.64719677734375, 2.64639990234375, 2.64538720703125, 2.645042236328125, 2.64540576171875, 2.646120361328125, 2.646118408203125, 2.64612353515625, 5.45506494140625, 2.64526025390625, 2.6462626953125, 2.64690576171875, 2.646042724609375, 2.64546826171875, 2.646393798828125, 2.647192626953125, 2.644919189453125, 2.644935791015625, 2.645579833984375, 2.647086181640625, 2.646035400390625, 2.644955078125, 2.645644287109375, 2.645537841796875, 2.644991943359375, 2.645560302734375, 2.646414306640625, 2.645199951171875, 2.646506591796875, 2.646548583984375, 2.64702880859375, 2.64570166015625, 2.644788330078125, 2.64700732421875, 2.646330322265625, 2.64543017578125, 2.645243896484375, 2.646043701171875, 
2.645831787109375, 2.6459453125, 2.64614697265625, 2.64631494140625, 2.64669091796875, 2.646116455078125, 2.647741455078125, 2.6478857421875, 2.645671875, 2.647362548828125, 2.646822998046875, 2.648393798828125, 2.647374755859375, 2.64627294921875, 2.646519775390625, 2.645937255859375, 2.6523720703125, 2.647458740234375, 2.646916015625, 2.6467646484375, 2.644612060546875, 2.645245849609375, 2.64698779296875, 2.646232177734375, 2.645567626953125, 2.645909423828125, 2.64679541015625, 2.64620947265625, 2.645465087890625, 2.646067138671875, 2.6467060546875, 2.6458798828125, 2.6457109375, 5.4513837890625, 2.647235595703125, 2.645803955078125, 2.64654443359375, 2.64496533203125, 2.64464892578125, 2.645158935546875, 2.646266845703125, 2.64560546875, 2.646096923828125, 2.64475537109375, 2.64629052734375, 2.644760498046875, 2.64480859375, 2.6463427734375, 2.64760009765625, 2.645761962890625, 2.645760986328125, 2.646381591796875, 2.646036376953125, 2.64513134765625, 2.646679443359375, 2.6458369140625, 2.6495498046875, 2.646917236328125, 2.644770751953125, 2.645843017578125, 2.644592529296875, 2.6452919921875, 2.6457333984375, 2.64617578125, 2.64542822265625, 2.645474365234375, 2.644977783203125, 2.6475703125, 2.645327880859375, 2.64601611328125, 2.64604052734375, 2.644948974609375, 2.645347412109375, 2.64539453125, 2.64572314453125, 2.645024658203125, 2.64540771484375, 2.64523681640625, 2.64541796875, 2.644529052734375, 2.649079833984375, 2.645666748046875, 2.646816650390625, 2.645528564453125, 2.645729248046875, 2.6472724609375, 2.646464599609375, 2.644356201171875, 2.644589599609375, 2.64574267578125, 2.645088134765625, 2.6443017578125, 2.644094970703125, 2.64595556640625, 2.645572509765625, 2.644812744140625, 5.4503955078125, 2.645255126953125, 2.645544921875, 2.64656884765625, 2.648037353515625, 2.645263427734375, 2.64638671875, 2.6460732421875, 2.6460478515625, 2.645971923828125, 2.645147705078125, 2.646581298828125, 2.6468291015625, 2.64502880859375, 2.645927978515625, 2.64683935546875, 2.64574365234375, 2.645297119140625, 2.64578662109375, 2.645689453125, 2.64529296875, 2.645930908203125, 2.64618798828125, 2.64557373046875, 2.645583984375, 2.64784375, 2.6465341796875, 2.645923828125, 2.648280029296875, 2.647232421875, 2.64726220703125, 2.646688720703125, 2.646456298828125, 2.647128173828125, 2.64785009765625, 2.6451201171875, 2.645867431640625, 2.645412841796875, 2.647185302734375, 2.644790283203125, 2.645506103515625, 2.64781005859375, 2.6459228515625, 2.64651171875, 2.6510693359375, 2.647773193359375, 2.6475908203125, 2.648330322265625, 2.646329345703125, 2.64608349609375, 2.64578759765625, 2.64604150390625, 2.646233154296875, 2.647012451171875, 2.644991943359375, 2.6461533203125, 2.646445068359375, 2.6459423828125, 2.646255615234375, 2.647103515625, 2.64721826171875, 2.6459013671875, 2.646496337890625, 5.45085009765625, 2.646131591796875, 2.647123046875, 2.64625244140625, 2.6469755859375, 2.645900146484375, 2.64694384765625, 2.6482001953125, 2.647193603515625, 2.646340576171875, 2.646576171875, 2.647742431640625, 2.645885986328125, 2.64563916015625, 2.645662841796875, 2.646833251953125, 2.64673291015625, 2.648217529296875, 2.645130126953125, 2.648642578125, 2.64700732421875, 2.6464482421875, 2.645592041015625, 2.646584228515625, 2.64501953125, 2.64555224609375, 2.64648388671875, 2.6452490234375, 2.646088623046875, 2.64539453125, 2.646591552734375, 2.645818359375, 2.6455, 2.645107666015625, 2.647025634765625, 2.64500830078125, 2.647201904296875, 2.6471865234375, 2.647762939453125, 
2.6468916015625, 2.646411376953125, 2.645792724609375, 2.647160888671875, 2.64576611328125, 2.645645263671875, 2.646044677734375, 2.647015380859375, 2.64671435546875, 2.645159912109375, 2.646703125, 2.646371337890625, 2.646978515625, 2.6460908203125, 2.648290283203125, 2.646096923828125, 2.646834228515625, 2.6468515625, 2.654116943359375, 2.646571044921875, 2.646265869140625, 2.647033935546875, 2.645507080078125, 2.64506884765625, 5.44818896484375, 2.646950927734375, 2.64728466796875, 2.64570068359375, 2.647045166015625, 2.645780517578125, 2.64650341796875, 2.647185302734375, 2.647396240234375, 2.64726416015625, 2.645887939453125, 2.646220947265625, 2.646308837890625, 2.6452724609375, 2.645916748046875, 2.6456513671875, 2.64530029296875, 2.645833740234375, 2.64532275390625, 2.646921142578125, 2.645572509765625, 2.646182861328125, 2.646026123046875, 2.646593505859375, 2.646531005859375, 2.646508544921875, 2.646131591796875, 2.647275634765625, 2.646612060546875, 2.646274169921875, 2.645583984375, 2.646960205078125, 2.647244873046875, 2.6451845703125, 2.6467060546875, 2.646052001953125, 2.64616650390625, 2.645667724609375, 2.652001220703125, 2.64635693359375, 2.645919677734375, 2.646162353515625, 2.64641845703125, 2.645307373046875, 2.644989013671875, 2.646035400390625, 2.645531494140625, 2.645157958984375, 2.645906494140625, 2.645951416015625, 2.646036376953125, 2.645796875, 2.64467041015625, 2.645960693359375, 2.64549267578125, 2.64652294921875, 2.64587255859375, 2.647626708984375, 2.646477783203125, 2.646447021484375, 2.646128662109375, 2.64745263671875, 2.646246337890625, 5.45296484375, 2.646928466796875, 2.647333984375, 2.646259765625, 2.64629443359375, 2.64618701171875, 2.6448466796875, 2.646443115234375, 2.646686767578125, 2.6447626953125, 2.64540576171875, 2.64494287109375, 2.645835693359375, 2.645675048828125, 2.64589404296875, 2.64648095703125, 2.64646044921875, 2.646432861328125, 2.646467529296875, 2.645669921875, 2.64584814453125, 2.6454580078125, 2.64523486328125, 2.650271728515625, 2.647341064453125, 2.64589306640625, 2.64591064453125, 2.64768505859375, 2.64700830078125, 2.64631201171875, 2.645865478515625, 2.647047119140625, 2.646507568359375, 2.64665087890625, 2.647560302734375, 2.645760986328125, 2.645769287109375, 2.645876708984375, 2.645303466796875, 2.646839111328125, 2.646023193359375, 2.645792724609375, 2.64610400390625, 2.646148193359375, 2.646067138671875, 2.64551416015625, 2.645665771484375, 2.64494482421875, 2.645905517578125, 2.645917724609375, 2.645688232421875, 2.64422705078125, 2.64477587890625, 2.64602001953125, 2.646118408203125, 2.645832763671875, 2.64635888671875, 2.646981689453125, 2.645694580078125, 2.6462197265625, 2.6456484375, 2.647057373046875, 2.646053955078125, 5.45440771484375, 2.64606103515625, 2.64624853515625, 2.645525390625, 2.6462197265625, 2.646288330078125, 2.64601806640625, 2.6458369140625, 2.6465341796875, 2.64682080078125, 2.6460498046875, 2.645517333984375, 2.646379638671875, 2.645887939453125, 2.645572509765625, 2.645886962890625, 2.6497626953125, 2.645772216796875, 2.64593603515625, 2.64641748046875, 2.64673681640625, 2.645414794921875, 2.644895751953125, 2.6456689453125, 2.646624267578125, 2.64646142578125, 2.645294921875, 2.646984619140625, 2.646635498046875, 2.64559619140625, 2.644748291015625, 2.64690576171875, 2.64658642578125, 2.645821533203125, 2.6462392578125, 2.647307373046875, 2.64610205078125, 2.647371826171875, 2.64780078125, 2.64745263671875, 2.646867919921875, 2.64559814453125, 2.646322265625, 2.6461328125, 2.6461328125, 
2.6467666015625, 2.64734619140625, 2.647150634765625, 2.6465341796875, 2.646138916015625, 2.645937255859375, 2.64439697265625, 2.645222412109375, 2.646958984375, 2.646992919921875, 2.647083984375, 2.64567822265625, 2.646795166015625, 2.645927978515625, 2.646295654296875, 2.64530224609375, 2.646948974609375, 2.646352783203125, 5.45413623046875, 2.646625244140625, 2.6464501953125, 2.647415771484375, 2.64749462890625, 2.64658837890625, 2.645314453125, 2.647509033203125, 2.647132080078125, 2.64675537109375, 2.646547607421875, 2.646182861328125, 2.64610302734375, 2.6463896484375, 2.646077392578125, 2.64599853515625, 2.646762451171875, 2.64707080078125, 2.646077392578125, 2.64540576171875, 2.646277099609375, 2.64477392578125, 2.645159912109375, 2.6459638671875, 2.64671240234375, 2.646478759765625, 2.645445556640625, 2.646115234375, 2.646445068359375, 2.645536865234375, 2.64454443359375, 2.646118408203125, 2.645505126953125, 2.644704345703125, 2.6460517578125, 2.64701953125, 2.645275634765625, 2.64492431640625, 2.645380126953125, 2.645751708984375, 2.64485888671875, 2.64445654296875, 2.645286865234375, 2.646550537109375, 2.646221923828125, 2.645315673828125, 2.6454169921875, 2.64634375, 2.645382080078125, 2.6473984375, 2.646042724609375, 2.64547119140625, 2.644828125, 2.64538720703125, 2.646898681640625, 2.646388671875, 2.64596484375, 2.6477158203125, 2.64626171875, 2.6452685546875, 2.645350341796875, 2.6460498046875, 2.644905029296875, 5.4557060546875, 2.64730419921875, 2.64740966796875, 2.646350830078125, 2.645677978515625, 2.6465546875, 2.64831689453125, 2.64722119140625, 2.646279052734375, 2.645445556640625, 2.646408203125, 2.646245361328125, 2.646921142578125, 2.64698974609375, 2.646667236328125, 2.646288330078125, 2.64876953125, 2.647458740234375, 2.645729248046875, 2.645494873046875, 2.646150146484375, 2.6453955078125, 2.6452890625, 2.6467861328125, 2.647785400390625, 2.646699951171875, 2.65012744140625, 2.64613671875, 2.646697998046875, 2.6466845703125, 2.646642578125, 2.6476943359375, 2.64681787109375, 2.645788818359375, 2.646158203125, 2.647160888671875, 2.645675048828125, 2.64652490234375, 2.64439306640625, 2.64669189453125, 2.646427734375, 2.64625146484375, 2.646435791015625, 2.645409912109375, 2.646538330078125, 2.64618505859375, 2.6468310546875, 2.647814208984375, 2.645505126953125, 2.645622802734375, 2.6466826171875, 2.646696044921875, 2.6457548828125, 2.6469130859375, 2.6469990234375, 2.64591259765625, 2.64690185546875, 2.64622705078125, 2.64720703125, 2.646657958984375, 2.645370849609375, 2.645916748046875, 2.646266845703125]",tokens/s,0.3722418346281902,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = 
launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa98c-2707b1f61d036e837cb54abf;30c4bd9e-012e-4e4d-85c7-b872135caccb) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1740.263424,9941.024768,0.0,9294.577664,8910.102528,s,10,10.642888305664064,1.0642888305664062,0.0008435144191809837,1.06450927734375,1.0651936889648437,1.065327557373047,1.0654346520996094,"[1.065136474609375, 1.06546142578125, 1.063631591796875, 1.063218505859375, 1.06368701171875, 1.0628612060546876, 1.0644658203125, 1.064552734375, 1.0647095947265626, 1.0651639404296875]",tokens/s,240.5362084498799,kWh,1.2557922783825134e-05,6.880571250694628e-06,6.097363211219897e-05,8.041212614671872e-05,tokens/kWh,3183599.4428614704,MB,1740.86144,9941.024768,0.0,9294.577664,9220.867072,s,10,630.7354296875,63.07354296875,0.00490218127220892,63.07426171875,63.081011718750005,63.081064453125,63.081106640625,"[63.0750703125, 63.07307421875, 63.081, 63.075046875, 63.07416015625, 63.0811171875, 63.07436328125, 63.0665078125, 63.06765234375, 63.0674375]",tokens/s,0.9988340124037992,kWh,0.0007445980064074199,0.0004081038876869388,0.003603297882636,0.004755999776730358,tokens/kWh,13246.426189555263,,s,629,639.312124511719,1.016394474581429,0.12643667238363906,1.0011002807617186,1.0017415283203124,1.002017578125,2.065029130859375,"[1.0008678588867188, 1.0004398193359374, 1.001291748046875, 1.0011023559570313, 1.0010675048828126, 1.0010203857421875, 1.0008780517578124, 1.000616943359375, 1.0006917114257812, 1.0008555297851562, 1.0007982177734376, 1.000685546875, 1.0008023071289063, 1.000985595703125, 1.0008780517578124, 1.0010081787109375, 1.0006251220703124, 1.0007255249023437, 1.000806396484375, 1.0009149169921876, 1.0006405029296874, 1.0007285766601564, 1.0012835693359374, 1.0011812133789062, 1.0010265502929687, 1.0012303466796875, 1.0011586303710938, 1.0011637573242187, 1.0010664672851564, 1.0014730224609374, 1.0011238403320313, 1.0014033813476562, 1.0011033325195313, 1.0013900756835938, 1.0010501098632814, 1.0017617797851563, 1.0014638061523438, 1.0016256103515624, 1.0009415893554687, 1.0013726806640626, 1.000985595703125, 1.0008145751953126, 1.001359375, 1.0016143188476563, 1.0011217651367188, 1.0013255615234375, 1.0009302978515624, 1.001133056640625, 1.0009508056640626, 1.001533447265625, 1.0019143676757813, 1.0015897827148437, 1.0020361938476563, 1.0020464477539062, 1.00206591796875, 1.0012640991210937, 1.0014044189453124, 1.001734130859375, 1.0015846557617187, 1.00234033203125, 1.0014207763671874, 1.0013255615234375, 2.067092529296875, 1.0008361206054688, 1.0011576538085938, 1.0006384887695312, 1.001079833984375, 1.0007879638671875, 1.0012293090820312, 1.0010501098632814, 1.001322509765625, 1.0013265991210938, 1.0013900756835938, 1.000838134765625, 1.001486328125, 1.0010675048828126, 1.0011484375, 1.0016091918945313, 1.001449462890625, 1.0011023559570313, 1.0005330200195313, 1.0008074340820312, 1.0010623779296874, 1.0006220703125, 1.0009927978515625, 
1.0007716064453125, 1.001026611328125, 1.0008770141601562, 1.0014136352539063, 1.0009508056640626, 1.0009508056640626, 1.00097021484375, 1.001290771484375, 1.00075830078125, 1.0016583862304687, 1.0007131958007813, 1.0006619873046876, 1.0005339965820312, 1.0010706176757813, 1.0007859497070313, 1.0006456298828126, 1.0009036865234375, 1.00139111328125, 1.0010776977539062, 1.0015672607421875, 1.0008135375976563, 1.0011463623046875, 1.000784912109375, 1.0035189819335937, 1.00113818359375, 1.0015252685546876, 1.0012507934570312, 1.0016573486328124, 1.0014893798828124, 1.001533447265625, 1.0010839233398436, 1.0011525268554688, 1.0011566162109375, 1.0015396118164062, 1.0014955444335938, 1.0011074829101563, 1.0009037475585938, 1.0016132202148438, 1.0009343872070313, 1.002029052734375, 2.06504150390625, 1.0005913696289062, 1.0010398559570313, 1.00097021484375, 1.0005196533203125, 1.0015057983398437, 1.0011084594726563, 1.0007337036132813, 1.0012190551757814, 1.0008473510742188, 1.0012252197265625, 1.0008402099609375, 1.0011617431640625, 1.0010194091796876, 1.0008719482421875, 1.0009927978515625, 1.0008237915039062, 1.00075927734375, 1.0011688842773439, 1.000806396484375, 1.0008606567382812, 1.0010132446289062, 1.0011740112304688, 1.00221337890625, 1.0005995483398438, 1.000721435546875, 1.00090673828125, 1.0007039794921875, 1.001343994140625, 1.001470947265625, 1.0014852905273437, 1.00143408203125, 1.0017413330078124, 1.002018798828125, 1.0018693237304688, 1.0017587280273437, 1.0011954956054687, 1.0007203979492187, 1.0011064453125, 1.00105419921875, 1.0013839111328124, 1.0012108764648437, 1.001881591796875, 1.0012119140625, 1.0016245727539062, 1.0019154052734376, 1.0013501586914062, 1.0026547241210937, 1.0016737060546874, 1.0011678466796874, 1.00153857421875, 1.0014085083007813, 1.0018191528320313, 1.001275390625, 1.0014157104492187, 1.0018908081054687, 1.0011791381835937, 1.0009927978515625, 1.0019522705078125, 1.0013368530273437, 1.0016307373046875, 1.0016552734375, 1.001829345703125, 2.064997314453125, 1.001175048828125, 1.0011033935546876, 1.0011688232421876, 1.001164794921875, 1.0007736206054687, 1.001064453125, 1.0007973022460936, 1.00076025390625, 1.0006077270507812, 1.0007183227539063, 1.0011361083984376, 1.0011351318359376, 1.0006005859375, 1.0008688354492188, 1.000642578125, 1.00075830078125, 1.0010562744140625, 1.001421875, 1.0009476318359376, 1.000995849609375, 1.0008811645507814, 1.001438232421875, 1.000816650390625, 1.0011361083984376, 1.0006569213867187, 1.0008370971679688, 1.0008350830078125, 1.00099072265625, 1.0007787475585936, 1.0014464111328125, 1.0013634643554687, 1.0017720336914062, 1.001069580078125, 1.001523193359375, 1.0014351196289062, 1.002029052734375, 1.001865234375, 1.0020095825195312, 1.0007572631835937, 1.0010081176757812, 1.0010040283203125, 1.000943603515625, 1.001080810546875, 1.0031155395507811, 1.000857666015625, 1.00139208984375, 1.001006103515625, 1.0015139770507813, 1.0011719970703126, 1.0016010131835937, 1.0014443359375, 1.0011443481445312, 1.0013952026367188, 1.0011340942382811, 1.0010603637695312, 1.0010203857421875, 1.0011033935546876, 1.0015077514648438, 1.00125390625, 1.002071044921875, 1.0015109252929688, 1.0014412841796876, 2.065207275390625, 1.0004561767578124, 1.0008197021484375, 1.0008104858398437, 1.0015027465820312, 1.0008872680664063, 1.0009497680664063, 1.0020311279296874, 1.0013726806640626, 1.0015405883789064, 1.00103271484375, 1.0013511962890624, 1.0011668701171874, 1.0011986083984374, 1.00210791015625, 1.0007920532226562, 1.0011351318359376, 
1.0023803100585937, 1.0009579467773437, 1.0010460205078124, 1.0012252197265625, 1.0007500610351563, 1.001091064453125, 1.0007879638671875, 1.0017576904296874, 1.0010828857421874, 1.0014474487304688, 1.0011094970703125, 1.0007890014648437, 1.0009169921875, 1.0007787475585936, 1.0004838256835937, 1.0008545532226563, 1.0006609497070313, 1.0009682006835938, 1.0007777099609374, 1.0011300048828125, 1.00124365234375, 1.000796142578125, 1.0009343872070313, 1.0009251708984375, 1.000722412109375, 1.0007971801757813, 1.0006896362304687, 1.0014924926757813, 1.0007080688476562, 1.0007705688476562, 1.0013204345703124, 1.001218017578125, 1.0010511474609376, 1.0009517822265626, 1.0008607177734374, 1.0009199829101563, 1.00119140625, 1.0017146606445313, 1.0010194091796876, 1.0013358154296874, 1.003936767578125, 1.001802734375, 1.0014443359375, 1.0012477416992187, 1.0015396118164062, 1.0018938598632812, 2.064691162109375, 1.0003916625976563, 1.0009784545898437, 1.0010337524414064, 1.0008944702148437, 1.00107470703125, 1.0008616943359374, 1.001354248046875, 1.00143408203125, 1.0012088623046875, 1.0020802612304687, 1.0009651489257811, 1.001312255859375, 1.0014505004882812, 1.0007481079101563, 1.0010009155273438, 1.0012723388671876, 1.0009712524414063, 1.0011033325195313, 1.0011371459960938, 1.0015723266601562, 1.0016163940429688, 1.0008381958007813, 1.0017801513671876, 1.0015897827148437, 1.0014259033203126, 1.0019799194335937, 1.0012518310546874, 1.0014515380859375, 1.0014484252929687, 1.001759765625, 1.00107568359375, 1.0014505004882812, 1.0014423217773438, 1.001112548828125, 1.0011678466796874, 1.0010286254882812, 1.0009210815429688, 1.0033643798828125, 1.0010767211914062, 1.001469970703125, 1.0009139404296874, 1.001143310546875, 1.00132763671875, 1.0011791381835937, 1.001169921875, 1.0012620849609375, 1.0009456787109374, 1.0010347290039063, 1.00092724609375, 1.0015047607421874, 1.00096923828125, 1.0009251708984375, 1.0013870239257812, 1.0013132934570312, 1.0014105834960938, 1.0011658325195312, 1.0012948608398438, 1.0014935302734376, 1.0009978637695311, 1.0015344848632812, 1.001290771484375, 1.0020157470703126, 2.0664228515625, 1.001302001953125, 1.000827880859375, 1.0009938354492187, 1.0006937255859376, 1.0006784057617188, 1.000911865234375, 1.0006487426757813, 1.0011852416992189, 1.0008043823242188, 1.0012477416992187, 1.0013726806640626, 1.0013101806640625, 1.0008023071289063, 1.0008514404296875, 1.0008248291015625, 1.0011658325195312, 1.001006103515625, 1.0012498168945312, 1.0011207885742188, 1.0012610473632813, 1.0012415771484375, 1.0010951538085937, 1.0023833618164062, 1.0010685424804688, 1.0005288696289063, 1.000853515625, 1.000543212890625, 1.0008811645507814, 1.0009784545898437, 1.0013153076171875, 1.0010859375, 1.0009425659179687, 1.0009343872070313, 1.000748046875, 1.000453125, 1.0013388671875, 1.00075927734375, 1.000826904296875, 1.00143310546875, 1.0011207885742188, 1.0015027465820312, 1.001175048828125, 1.0009395141601563, 1.0011279296875, 1.0010224609375, 1.0013798217773437, 1.0013460693359375, 1.00193896484375, 1.00166552734375, 1.0014197998046874, 1.0018252563476562, 1.0011893920898438, 1.0011268920898437, 1.0015764770507813, 1.0016574096679687, 1.0018969116210938, 1.0011760864257813, 1.0016829223632813, 1.0013952026367188, 1.0015078125, 1.0017791748046876, 1.001365478515625, 2.065933349609375, 1.00049609375, 1.0008944702148437, 1.00080126953125, 1.0008135375976563, 1.0008340454101563, 1.0008822021484376, 1.001302001953125, 1.0008237915039062, 1.0006292724609376, 1.0011105346679687, 
1.0010654907226562, 1.0013255615234375, 1.0006692504882813, 1.0007797241210938, 1.0010091552734375, 1.0015211791992187, 1.0007367553710937, 1.0007234497070312, 1.0005473022460938, 1.0006712036132812, 1.00077978515625, 1.0011351318359376, 1.001279541015625, 1.0010480346679687, 1.0010921020507813, 1.000892333984375, 1.001080810546875, 1.00067431640625, 1.0010296020507812, 1.0010767822265625, 1.0005595703125, 1.0013173828125, 1.0010501098632814, 1.0013409423828126, 1.0012006225585937, 1.0011443481445312, 1.0012139282226562, 1.0008053588867187, 1.0007408447265624, 1.0009384765625, 1.0005883178710937, 1.0010715942382813, 1.0009569091796875, 1.0010900268554688, 1.0009027099609376, 1.000849365234375, 1.0009651489257811, 1.0008524780273438, 1.001027587890625, 1.0014033813476562, 1.0012498168945312, 1.00135009765625, 1.001059326171875, 1.0014095458984376, 1.0020269775390624, 1.0020638427734374, 1.0020095825195312, 1.0013235473632813, 1.0012037353515626, 1.0014484252929687, 1.0009476928710936, 1.0012733154296876, 2.066282470703125, 1.0010859375, 1.0004767456054688, 1.0010264892578125, 1.0008135375976563, 1.0007572631835937, 1.0011146240234374, 1.0009620361328124, 1.0013931274414063, 1.0009630737304687, 1.0009375, 1.0007900390625, 1.0008340454101563, 1.0009886474609375, 1.0007900390625, 1.0007756958007812, 1.0007982177734376, 1.0019952392578124, 1.0009794311523437, 1.0006138916015626, 1.0008309936523438, 1.0008893432617187, 1.0010368041992188, 1.0011248779296875, 1.0005238037109374, 1.0006343383789063, 1.0008145751953126, 1.0007039794921875, 1.0007521362304688, 1.0009896850585938, 1.0010880126953126, 1.0012303466796875, 1.00091796875, 1.0013450317382813, 1.000784912109375, 1.0007367553710937, 1.0009825439453126, 1.0009108276367187, 1.001238525390625, 1.0008944702148437, 1.0009989013671876, 1.0009886474609375, 1.0009651489257811, 1.0011422729492188, 1.0012374877929688, 1.0012569580078126, 1.00099072265625, 1.0008780517578124, 1.0009682006835938, 1.0007080688476562, 1.001248779296875, 1.0015518798828125, 1.0010582885742187, 1.0014033813476562, 1.001016357421875, 1.0009476318359376, 1.0012354736328124, 1.0020833129882813, 1.0019143676757813, 1.0018539428710938, 1.0019850463867188, 1.0015999755859375, 1.0015641479492188, 2.0665712890625, 1.0007060546875, 1.0006599731445311, 1.00056884765625, 1.000685546875, 1.0005678100585937, 1.0009722900390625, 1.000806396484375, 1.0007203979492187, 1.0008207397460938, 1.0005872802734375, 1.0005545043945312, 1.00069384765625, 1.0008534545898438, 1.0013716430664064, 1.0008811645507814, 1.0013511962890624, 1.0014893798828124, 1.0010726318359375, 1.0017157592773438, 1.0011299438476562, 1.00086376953125, 1.00071728515625, 1.0005729370117187, 1.0008995971679688, 1.0005995483398438, 1.0024898681640626, 1.0008678588867188, 1.000722412109375, 1.0012149658203124, 1.0008237915039062, 1.00071728515625, 1.00101220703125, 1.0010562744140625, 1.0010572509765625, 1.00170751953125, 1.00140234375, 1.000974365234375, 1.0007828369140626, 1.0009139404296874, 1.0011300048828125, 1.0010675048828126, 1.0013562622070313, 1.000953857421875, 1.0009722900390625, 1.0011146240234374, 1.0015631103515625, 1.000975341796875, 1.0017495727539063, 1.0013224487304688, 1.0012518310546874, 1.001439208984375, 1.0012415771484375, 1.0010685424804688, 1.0010286254882812, 1.00105419921875, 1.0016696166992187, 1.0011002807617186, 1.0011443481445312, 1.0017423095703124, 1.0011033325195313, 1.0011924438476563, 1.0012406005859376]",tokens/s,0.9838699688049951,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1310.69952,921.174016,0.0,274.726912,220.646912,s,10,0.36048070526123044,0.03604807052612304,0.0010977874344766398,0.03588955307006836,0.036477619552612306,0.03779876155853271,0.03885567516326904,"[0.03911990356445313, 0.03553478240966797, 0.03469347381591797, 0.035817951202392576, 0.036184032440185546, 0.035873409271240234, 0.03543529510498047, 0.03591993713378906, 0.03590569686889648, 0.03599622344970703]",tokens/s,7101.628360787961,kWh,4.1762897090252873e-07,2.2884077887720709e-07,9.141624902616015e-07,1.5606322400413373e-06,tokens/kWh,164036083.2179266,MB,1311.0272,921.174016,0.0,274.726912,250.723328,s,10,21.866080322265628,2.186608032226563,0.01854961703991556,2.191078857421875,2.2041080810546876,2.2073608032226564,2.2099629809570316,"[2.1803974609375, 2.17111474609375, 2.154228759765625, 2.200171142578125, 2.210613525390625, 2.181986572265625, 2.161680419921875, 2.20071923828125, 2.20338525390625, 2.201783203125]",tokens/s,28.81174818325753,kWh,2.578141039891902e-05,1.4128932798644376e-05,5.28597971955373e-05,9.27701403931007e-05,tokens/kWh,679097.8188999841,,s,629,22.151173107147223,0.03521649142630718,0.004356343174673542,0.03486105728149414,0.035241778564453125,0.03557929000854492,0.07092830078125002,"[0.03474124908447265, 0.03529420852661133, 0.03632339096069336, 0.03650860977172852, 0.036083713531494144, 0.037152767181396484, 0.03645132827758789, 0.036311038970947264, 0.03472588729858399, 0.0354252815246582, 0.03531980895996094, 0.03479657745361328, 0.03465929412841797, 0.03616358566284179, 0.035585025787353515, 0.03515903854370117, 0.03486105728149414, 0.034651134490966795, 0.034900993347167966, 0.03449958419799805, 0.03328102493286133, 0.03349708938598633, 0.033560577392578124, 0.03324620819091797, 0.033328128814697267, 0.033255424499511715, 0.033754112243652344, 0.033223743438720706, 0.03327481460571289, 0.033438720703125, 0.0335175666809082, 0.03376332855224609, 0.03339468765258789, 0.03345305633544922, 0.03355033493041992, 0.034203647613525394, 0.03342233657836914, 0.03335372924804687, 0.0336732177734375, 0.03367116928100586, 0.03360870361328125, 0.03362508773803711, 0.03436441421508789, 0.03604377746582031, 0.03504127883911133, 0.03473920059204102, 0.0348037109375, 0.03472895812988281, 0.03472895812988281, 0.03479865646362305, 0.03479955291748047, 0.03479449462890625, 0.03474739074707031, 0.03478220748901367, 0.0347064323425293, 0.034907135009765625, 0.03474431991577148, 0.03473715209960938, 0.03485388946533203, 0.034683902740478514, 0.035019775390625, 0.0350750732421875, 0.0711178207397461, 0.0347955207824707, 0.034948097229003904, 0.03439616012573242, 0.03447808074951172, 0.03496755218505859, 0.034920448303222655, 0.0348590087890625, 0.034699264526367186, 0.0346163215637207, 0.03491430282592774, 0.03490304183959961, 0.03508019256591797, 0.034423809051513675, 0.03472793579101562, 0.03449446487426758, 
0.03476377487182617, 0.03476582336425781, 0.03468902587890625, 0.03387494277954101, 0.0342210578918457, 0.034008094787597656, 0.03374998474121094, 0.03369062423706055, 0.033459201812744144, 0.03354937744140625, 0.03469715118408203, 0.03489484786987305, 0.03487027359008789, 0.03510067367553711, 0.03503513717651367, 0.03503923034667969, 0.03469619369506836, 0.03455590438842773, 0.03517852783203125, 0.03441353607177734, 0.03464704132080078, 0.03492659378051758, 0.034506752014160154, 0.03476070404052734, 0.03487846374511719, 0.03493478393554687, 0.03499212646484375, 0.034945056915283206, 0.035192798614501954, 0.03450265502929688, 0.03448524856567383, 0.03455487823486328, 0.034563072204589845, 0.03503104019165039, 0.03474431991577148, 0.03377872085571289, 0.03339875030517578, 0.03340800094604492, 0.03347558212280274, 0.03381350326538086, 0.033650688171386715, 0.033535999298095705, 0.033334270477294925, 0.03363123321533203, 0.03390771102905273, 0.03361996841430664, 0.03386880111694336, 0.06886093139648437, 0.03341823959350586, 0.03395993423461914, 0.03393228912353516, 0.03374387359619141, 0.03333529663085937, 0.0334919662475586, 0.03423027038574219, 0.03382476806640625, 0.03374387359619141, 0.0336824951171875, 0.033724353790283206, 0.03363532638549805, 0.033941505432128906, 0.03381760025024414, 0.033503231048583985, 0.03344998550415039, 0.033301502227783206, 0.033445888519287106, 0.033957889556884766, 0.03387801742553711, 0.03366092681884766, 0.03360153579711914, 0.03381043243408203, 0.033825790405273434, 0.03360870361328125, 0.03370703887939453, 0.03348271942138672, 0.03366502380371094, 0.03378995132446289, 0.03319193649291992, 0.03369267272949219, 0.03357900619506836, 0.033358848571777344, 0.0341104621887207, 0.03495731353759766, 0.03543961715698242, 0.03482316970825195, 0.034435073852539064, 0.034557952880859374, 0.034900993347167966, 0.034678783416748044, 0.03517542266845703, 0.03521023941040039, 0.035046398162841795, 0.03481702423095703, 0.03453235244750977, 0.03470131301879883, 0.03486412811279297, 0.03474739074707031, 0.03477196884155274, 0.03457843017578125, 0.034781185150146485, 0.03486310577392578, 0.035125247955322264, 0.03486617660522461, 0.03496243286132812, 0.03503513717651367, 0.03523891067504883, 0.03469823837280273, 0.0339128303527832, 0.03414220809936523, 0.035117088317871095, 0.0711659164428711, 0.03465318298339844, 0.03465727996826172, 0.03458355331420898, 0.034283519744873044, 0.034304000854492187, 0.03577958297729492, 0.03595161437988281, 0.03544473648071289, 0.03500543975830078, 0.03521843338012695, 0.03495935821533203, 0.0349194221496582, 0.0354252815246582, 0.03515903854370117, 0.0349306869506836, 0.03492454528808594, 0.036211711883544925, 0.03564441680908203, 0.03498086547851562, 0.034841598510742186, 0.034941951751708986, 0.03537612915039062, 0.034994174957275394, 0.0350382080078125, 0.03486105728149414, 0.03484672164916992, 0.03494297790527344, 0.03524095916748047, 0.03526041412353516, 0.03520614242553711, 0.03488153457641602, 0.03502796936035156, 0.03500543975830078, 0.03404185485839844, 0.033903617858886716, 0.03378073501586914, 0.03472281646728516, 0.03411251068115234, 0.03379097747802735, 0.03348787307739258, 0.0343633918762207, 0.03492659378051758, 0.03500339126586914, 0.03493788909912109, 0.03485590362548828, 0.034895870208740236, 0.034948097229003904, 0.03518771362304687, 0.034479103088378905, 0.03459174346923828, 0.035253280639648436, 0.034963424682617185, 0.034976768493652347, 0.03491843032836914, 0.034911201477050784, 0.035046398162841795, 
0.03507199859619141, 0.03536383819580078, 0.03499929428100586, 0.03523379135131836, 0.03531161499023437, 0.03527679824829102, 0.07160934448242187, 0.0352624626159668, 0.03535769653320313, 0.03481087875366211, 0.03554921722412109, 0.034968544006347656, 0.03489491271972656, 0.034890689849853516, 0.03483238220214844, 0.034993152618408206, 0.035932159423828124, 0.03508838272094727, 0.03496755218505859, 0.03476377487182617, 0.03488358306884766, 0.03527884674072266, 0.03521228790283203, 0.035040256500244144, 0.03476684951782227, 0.03499622344970703, 0.03482316970825195, 0.03518259048461914, 0.03503411102294922, 0.036178943634033206, 0.039564289093017575, 0.035097599029541016, 0.03457843017578125, 0.03505561447143555, 0.034756607055664065, 0.0350013427734375, 0.03477811050415039, 0.035004417419433595, 0.03501772689819336, 0.0348671989440918, 0.03473100662231445, 0.0349224967956543, 0.03482931137084961, 0.03578777694702148, 0.0350115852355957, 0.03483443069458008, 0.03486412811279297, 0.03486207962036133, 0.034938880920410156, 0.03505561447143555, 0.035004417419433595, 0.034887680053710936, 0.034531326293945314, 0.03493580627441406, 0.03510067367553711, 0.03492454528808594, 0.034887680053710936, 0.03578879928588867, 0.034974720001220705, 0.03519180679321289, 0.034850879669189455, 0.03495315170288086, 0.035060768127441404, 0.034809825897216796, 0.03377766418457031, 0.03550620651245117, 0.034968544006347656, 0.034915328979492184, 0.03489177703857422, 0.07134719848632813, 0.03493273544311523, 0.03501465606689453, 0.03539148712158203, 0.03512319946289062, 0.034813953399658204, 0.03508019256591797, 0.03486515045166016, 0.035079166412353514, 0.03493580627441406, 0.034939903259277344, 0.034925567626953126, 0.03743027114868164, 0.034576385498046876, 0.03397836685180664, 0.03377356719970703, 0.03403878402709961, 0.03371724700927734, 0.03380223846435547, 0.03486310577392578, 0.03486412811279297, 0.03496243286132812, 0.03475046539306641, 0.03492659378051758, 0.03499622344970703, 0.034783233642578126, 0.033949695587158206, 0.033876991271972655, 0.03427123260498047, 0.03477811050415039, 0.03472076797485352, 0.034127872467041014, 0.03329740905761719, 0.0333834228515625, 0.03379814529418945, 0.03392409515380859, 0.03363532638549805, 0.03386982345581055, 0.03498086547851562, 0.034928638458251955, 0.03496345520019531, 0.03495423889160156, 0.03523891067504883, 0.03508838272094727, 0.034769920349121096, 0.03489279937744141, 0.03482624053955078, 0.034955265045166016, 0.03508838272094727, 0.03486412811279297, 0.03486515045166016, 0.03471769714355469, 0.034560001373291016, 0.034579456329345705, 0.03481292724609375, 0.03479244613647461, 0.034802688598632815, 0.03475254440307617, 0.03517948913574219, 0.03433884811401367, 0.033702880859375, 0.03359539031982422, 0.034111488342285154, 0.07044096374511719, 0.03437670516967774, 0.03483238220214844, 0.0348671989440918, 0.03470438385009766, 0.035230720520019534, 0.034816001892089846, 0.03467366409301758, 0.0347770881652832, 0.03501055908203125, 0.03429580688476563, 0.03443609619140625, 0.03509964752197266, 0.03524505615234375, 0.0347248649597168, 0.034753536224365236, 0.03496345520019531, 0.03496448135375976, 0.034770942687988284, 0.034799617767333986, 0.03484467315673828, 0.03481190490722656, 0.034353153228759765, 0.034062335968017575, 0.03349094390869141, 0.033584129333496096, 0.033587200164794925, 0.034141185760498044, 0.0347740478515625, 0.0345906867980957, 0.0344268798828125, 0.03489484786987305, 0.03420979309082031, 0.03364352035522461, 0.03377356719970703, 
0.033686527252197264, 0.033290241241455076, 0.0337520637512207, 0.03401932907104492, 0.03371417617797851, 0.03375513458251953, 0.033691646575927735, 0.03370809555053711, 0.03350316619873047, 0.03325439834594727, 0.03360255813598633, 0.033860607147216795, 0.03376230239868164, 0.033691646575927735, 0.033783809661865234, 0.033797119140625, 0.03425177764892578, 0.03381452941894531, 0.033791999816894534, 0.033675262451171875, 0.03391385650634766, 0.034165760040283204, 0.03502899169921875, 0.03486822509765625, 0.034702335357666016, 0.035046398162841795, 0.03521535873413086, 0.034825214385986326, 0.07150080108642579, 0.03484364700317383, 0.034351104736328124, 0.03453747177124023, 0.035064830780029296, 0.03482931137084961, 0.03483238220214844, 0.03492659378051758, 0.034974720001220705, 0.03501465606689453, 0.034869247436523435, 0.034830337524414064, 0.034277374267578126, 0.034797569274902344, 0.03501363372802734, 0.03530342483520508, 0.0351016960144043, 0.03491635131835937, 0.03483238220214844, 0.03484672164916992, 0.03495116806030273, 0.03500646209716797, 0.03499827194213867, 0.03480678558349609, 0.03486207962036133, 0.03505152130126953, 0.034784255981445314, 0.034783233642578126, 0.034835456848144535, 0.03493580627441406, 0.034770942687988284, 0.035037185668945314, 0.035019775390625, 0.0342476806640625, 0.03505049514770508, 0.034315265655517575, 0.03477913665771484, 0.034791454315185544, 0.0353023681640625, 0.03562700653076172, 0.03510374450683594, 0.03493580627441406, 0.035156993865966796, 0.03526144027709961, 0.03488256072998047, 0.034933761596679686, 0.03517337417602539, 0.0348590087890625, 0.0352911376953125, 0.035888126373291016, 0.03522048187255859, 0.03506073760986328, 0.03483955383300781, 0.03486105728149414, 0.03498400115966797, 0.03498387145996094, 0.03485494232177734, 0.03438998413085938, 0.03479244613647461, 0.03507712173461914, 0.03500339126586914, 0.03471257781982422, 0.03476684951782227, 0.07122022247314454, 0.03482316970825195, 0.03523276901245117, 0.034941951751708986, 0.03477196884155274, 0.034991104125976565, 0.03495731353759766, 0.034939903259277344, 0.03488153457641602, 0.034900993347167966, 0.0347955207824707, 0.0350300178527832, 0.03487948989868164, 0.03520614242553711, 0.035297279357910154, 0.03513651275634765, 0.03494297790527344, 0.0349378547668457, 0.034948097229003904, 0.03488051223754883, 0.03481497573852539, 0.03454873657226563, 0.03495935821533203, 0.035062782287597655, 0.035043327331542966, 0.0349306869506836, 0.03497881698608398, 0.034909183502197266, 0.03493791961669922, 0.03483539199829101, 0.034869247436523435, 0.035031105041503904, 0.03493983840942383, 0.03483340835571289, 0.03487744140625, 0.03489484786987305, 0.03516108703613281, 0.03484364700317383, 0.034969600677490234, 0.035146751403808595, 0.035194881439208986, 0.03479244613647461, 0.03557068634033203, 0.03522969436645508, 0.035337215423583986, 0.03480473709106445, 0.03436236953735351, 0.033745918273925785, 0.03496755218505859, 0.03541401672363281, 0.03512934494018555, 0.035163135528564454, 0.0348487663269043, 0.03500339126586914, 0.03484467315673828, 0.03498495864868164, 0.03576115036010742, 0.03517030334472656, 0.03490816116333008, 0.034825214385986326, 0.0347586555480957, 0.03516723251342774, 0.035053569793701174, 0.07151315307617187, 0.03519071960449219, 0.03483852767944336, 0.03482419204711914, 0.03502592086791992, 0.03529216003417969, 0.035125247955322264, 0.034872318267822264, 0.034939903259277344, 0.0347658576965332, 0.03481699371337891, 0.03497267150878906, 0.03488665771484375, 
0.034958335876464845, 0.035007488250732424, 0.03504537582397461, 0.03489382553100586, 0.03520716857910156, 0.034988033294677735, 0.034874366760253905, 0.03459481430053711, 0.03430297470092773, 0.0346879997253418, 0.03478732681274414, 0.03508736038208008, 0.0354856948852539, 0.034981952667236325, 0.0348732795715332, 0.03489382553100586, 0.03475558471679688, 0.03478732681274414, 0.03488358306884766, 0.03473920059204102, 0.03476172637939453, 0.034685951232910156, 0.035111934661865234, 0.03508736038208008, 0.03526553726196289, 0.03492147064208984, 0.03475763320922851, 0.034767871856689454, 0.03482316970825195, 0.03498092651367188, 0.03539142227172851, 0.03607961654663086, 0.03499008178710938, 0.03491123199462891, 0.034990142822265625, 0.03488249588012695, 0.03484467315673828, 0.03486207962036133, 0.0349378547668457, 0.034953216552734374, 0.03488051223754883, 0.03490816116333008, 0.0349378547668457, 0.03494604873657227, 0.03508838272094727, 0.03482726287841797, 0.03486003112792969, 0.03481702423095703, 0.034729984283447264, 0.0349224967956543]",tokens/s,28.395787300179112,,,main,False,False,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2935.840768,9548.857344,0.0,8902.41024,8265.321472,s,10,10.732376098632812,1.0732376098632814,0.0018882051518454717,1.0727980346679686,1.0744577026367188,1.076272918701172,1.0777250915527343,"[1.078088134765625, 1.0709527587890626, 1.071809814453125, 1.072758056640625, 1.0723739013671876, 1.0717447509765625, 1.0728380126953125, 1.07379296875, 1.0740543212890625, 1.07396337890625]",tokens/s,238.53058972897117,kWh,1.2652489013142056e-05,6.933077765861525e-06,5.8725796980607646e-05,7.831136375961123e-05,tokens/kWh,3269001.939307702,MB,2940.198912,9548.857344,0.0,8902.41024,8556.582912,s,10,636.0469335937499,63.60469335937499,0.008753951962243356,63.60478125,63.61318203125,63.615917187499996,63.6181053125,"[63.59721875, 63.61257421875, 63.61096484375, 63.6103046875, 63.61865234375, 63.5982421875, 63.6112421875, 63.58923828125, 63.5992578125, 63.59923828125]",tokens/s,0.9904929443499021,kWh,0.0007508853809701072,0.00041155162046943583,0.003498196520777186,0.00466063352221673,tokens/kWh,13517.475617785843,,s,629,644.6920160522462,1.0249475612913292,0.12743236447478887,1.0094981079101562,1.01038447265625,1.0106333129882812,2.0813571875,"[1.0103838500976563, 1.011209228515625, 1.0106275634765625, 1.010282470703125, 1.0090455322265626, 1.00902294921875, 1.0092001342773438, 1.0092001342773438, 1.0087454833984375, 1.0091202392578125, 1.0090209350585937, 1.0097459106445312, 1.009122314453125, 1.0094325561523438, 1.0090762329101564, 1.008990234375, 1.009164306640625, 1.0090936279296876, 1.0088233032226563, 1.009649658203125, 1.0089727783203124, 1.0095328979492189, 1.009112060546875, 1.0095298461914062, 1.0089553833007812, 1.0094817504882811, 1.0095001831054689, 1.0089779052734376, 1.0089512939453125, 1.0092308349609376, 1.0093475952148439, 1.0095062866210938, 
1.0100172729492187, 1.0097244262695313, 1.0091735229492187, 1.0095360107421876, 1.0095134887695312, 1.0093209838867188, 1.009239013671875, 1.00967529296875, 1.0096640014648437, 1.00973974609375, 1.0092687377929688, 1.009269775390625, 1.0091714477539062, 1.0091786499023438, 1.0094950561523437, 1.0094622802734374, 1.0091345825195313, 1.009670166015625, 1.0100039672851562, 1.0096732177734375, 1.0092994384765626, 1.0093557739257812, 1.0091253662109374, 1.00938134765625, 1.0091427612304686, 1.0090465087890625, 1.0089072875976564, 1.0098462524414062, 1.010165771484375, 1.0093465576171874, 2.08412060546875, 1.0087465209960937, 1.0093834228515626, 1.0092031860351562, 1.0096558227539063, 1.0095739135742188, 1.0104360961914063, 1.0100264892578126, 1.0097346801757812, 1.0098964233398438, 1.0101534423828125, 1.00982373046875, 1.009934326171875, 1.0099998779296875, 1.0098063354492188, 1.0099056396484376, 1.0100838623046875, 1.0100254516601563, 1.00965478515625, 1.0096107788085937, 1.0098421630859375, 1.0097664184570312, 1.0100264892578126, 1.0098104248046875, 1.0099118041992188, 1.0101432495117189, 1.010134033203125, 1.0106337280273439, 1.0090772705078126, 1.0097838134765624, 1.0102476806640626, 1.0099251098632813, 1.0096466064453125, 1.00933837890625, 1.0092185668945313, 1.0092902221679687, 1.0095257568359375, 1.00948583984375, 1.0100172729492187, 1.010408447265625, 1.0100695190429687, 1.0098995361328125, 1.0105394897460938, 1.00971826171875, 1.0095902709960938, 1.0096414794921875, 1.009565673828125, 1.00931787109375, 1.0096066284179688, 1.0097407836914063, 1.009343505859375, 1.0096947021484375, 1.0099138793945313, 1.009455078125, 1.0092472534179688, 1.0096680908203124, 1.0096250610351563, 1.0100101318359376, 1.0093414306640625, 1.0092892456054687, 1.009311767578125, 1.009322998046875, 1.0097203369140626, 2.081078369140625, 1.009328125, 1.0090977172851563, 1.0092052612304687, 1.0094520263671876, 1.0091028442382812, 1.0091796264648438, 1.0086420288085938, 1.0094274291992187, 1.0089891967773437, 1.0090137329101563, 1.0094161987304688, 1.0096568603515625, 1.0095421142578125, 1.009850341796875, 1.0090895385742187, 1.0094059448242187, 1.009059814453125, 1.0094509887695313, 1.0088161010742187, 1.008974853515625, 1.0091888427734375, 1.0095595703125, 1.009016845703125, 1.0097346801757812, 1.0089810180664063, 1.00931787109375, 1.0090147705078125, 1.0099138793945313, 1.0100213623046874, 1.0104381713867188, 1.010629638671875, 1.010713623046875, 1.0101360473632812, 1.0105651245117186, 1.0090690307617187, 1.0092144775390626, 1.009344482421875, 1.01054052734375, 1.0098954467773438, 1.0101473388671875, 1.010071533203125, 1.0109757690429688, 1.0099885864257812, 1.010386962890625, 1.0099199829101562, 1.0098933715820313, 1.0100418701171876, 1.0103040161132812, 1.0101207275390625, 1.0105702514648438, 1.0102599487304686, 1.00997119140625, 1.0098646850585937, 1.0102732543945312, 1.0096066284179688, 1.00906494140625, 1.0095984497070312, 1.0102210693359375, 1.0098585815429688, 1.0101514282226562, 1.010039794921875, 1.0104360961914063, 2.082943115234375, 1.009787841796875, 1.0099476318359375, 1.0097244262695313, 1.0104217529296875, 1.0104340209960938, 1.0096947021484375, 1.0098309326171875, 1.009544189453125, 1.0100009155273437, 1.01024560546875, 1.0097223510742188, 1.0100695190429687, 1.0102835083007813, 1.009902587890625, 1.0104699096679688, 1.0107535400390626, 1.0102671508789063, 1.010640869140625, 1.0102630615234376, 1.0101463012695313, 1.0099834594726562, 1.0103336791992188, 1.0099384155273436, 1.0103685302734375, 
1.0097254638671875, 1.0096906127929688, 1.0098125, 1.0103756713867187, 1.0102271728515626, 1.010640869140625, 1.0090096435546876, 1.0095994873046874, 1.0097633056640625, 1.0101729125976562, 1.0090198974609375, 1.0095718383789063, 1.0087034912109376, 1.009523681640625, 1.00916943359375, 1.0090352783203125, 1.0087833862304687, 1.0096363525390626, 1.0092359619140625, 1.0092656860351563, 1.0091233520507812, 1.00946533203125, 1.008932861328125, 1.009027099609375, 1.0091468505859376, 1.0092257080078124, 1.0089072875976564, 1.008837646484375, 1.0091827392578125, 1.0094110717773437, 1.0090751953125, 1.0094848022460938, 1.0091868286132812, 1.009480712890625, 1.0097039184570313, 1.0093670654296876, 1.0094059448242187, 1.0094376831054688, 2.081429443359375, 1.009227783203125, 1.0094479370117186, 1.010234375, 1.0106552124023438, 1.010882568359375, 1.0105589599609375, 1.0087034912109376, 1.0110679321289062, 1.0107822265625, 1.0108876953125, 1.0104412231445312, 1.0089584350585938, 1.0093711547851563, 1.0090792846679688, 1.0109276123046875, 1.0108334350585937, 1.0103521118164063, 1.0105712890625, 1.0102968139648438, 1.0089482421875, 1.0090025024414062, 1.0100203247070312, 1.0104258422851562, 1.009818603515625, 1.0096087036132813, 1.0098165893554687, 1.0094642944335936, 1.0101575927734374, 1.0103203735351562, 1.0114457397460936, 1.0106500854492189, 1.010361328125, 1.010103271484375, 1.0099415283203126, 1.0093311767578126, 1.0099465942382813, 1.0090475463867188, 1.0093302001953126, 1.0094981079101562, 1.0090823974609375, 1.0102753295898437, 1.0096322631835937, 1.0092564697265625, 1.0091519775390625, 1.0096640014648437, 1.009281005859375, 1.0090864868164062, 1.0091448364257813, 1.0099537963867187, 1.0093250732421875, 1.0095186157226563, 1.0094940185546875, 1.00933837890625, 1.0094080200195312, 1.0091744995117187, 1.0092451782226564, 1.0095390625, 1.0101217041015624, 1.01006640625, 1.0101780395507813, 1.009744873046875, 1.0093660278320313, 2.08117138671875, 1.0091130981445313, 1.009100830078125, 1.0086492309570312, 1.0088734741210938, 1.0091294555664063, 1.0090188598632812, 1.0089410400390626, 1.009344482421875, 1.0095523681640626, 1.0093916015625, 1.0090680541992187, 1.00940185546875, 1.009154052734375, 1.0091970825195313, 1.0093629150390624, 1.0088673095703125, 1.0088601684570313, 1.009817626953125, 1.009006591796875, 1.0100746459960936, 1.009349609375, 1.0093670654296876, 1.0091519775390625, 1.0093035278320313, 1.0091581420898437, 1.0092021484375, 1.0091058959960937, 1.01058251953125, 1.0097705078125, 1.0106183471679688, 1.0098779907226563, 1.0096998291015624, 1.0104105224609374, 1.0094213256835938, 1.009344482421875, 1.0096097412109375, 1.0097407836914063, 1.0099261474609376, 1.009217529296875, 1.0095718383789063, 1.0091837158203125, 1.0093905639648437, 1.0092267456054687, 1.0091878662109375, 1.0096301879882812, 1.0098125, 1.0094929809570312, 1.009860595703125, 1.0093219604492187, 1.0101544799804687, 1.009301513671875, 1.0094714965820313, 1.009460205078125, 1.0093588256835937, 1.0097899780273438, 1.0095984497070312, 1.0096742553710938, 1.010693115234375, 1.0096957397460937, 1.010155517578125, 1.010134033203125, 1.00997119140625, 2.082757568359375, 1.0091796264648438, 1.0095452270507812, 1.0092155151367188, 1.0096373901367188, 1.00949609375, 1.0099005737304687, 1.0090813598632813, 1.009080322265625, 1.008932861328125, 1.0091448364257813, 1.008964599609375, 1.00949609375, 1.0093455200195312, 1.0097909545898438, 1.0094417724609375, 1.00999267578125, 1.0092533569335937, 1.009428466796875, 1.0093148193359376, 
1.0098770141601563, 1.0099015502929687, 1.0105159912109376, 1.0107003173828124, 1.0108948364257813, 1.009523681640625, 1.010176025390625, 1.009207275390625, 1.0096517333984374, 1.0096107788085937, 1.010208740234375, 1.0101217041015624, 1.0100725708007812, 1.0099415283203126, 1.01014013671875, 1.009311767578125, 1.0097100830078125, 1.0091714477539062, 1.0097858276367186, 1.0092830810546876, 1.0094745483398437, 1.009259521484375, 1.0102118530273438, 1.0100234375, 1.0097469482421875, 1.010239501953125, 1.0100633544921875, 1.0095165405273439, 1.0095984497070312, 1.0094940185546875, 1.0099507446289062, 1.0094642944335936, 1.0101094360351563, 1.0097879028320313, 1.0109890747070311, 1.0111426391601563, 1.0094315795898436, 1.0090772705078126, 1.009227783203125, 1.0092001342773438, 1.009723388671875, 1.00982373046875, 1.0097694702148436, 2.08317333984375, 1.0093322143554688, 1.00935986328125, 1.0087188720703124, 1.0093055419921875, 1.008848876953125, 1.0092216186523437, 1.0090895385742187, 1.0095001831054689, 1.0094622802734374, 1.0091847534179688, 1.0095789794921874, 1.0093875122070313, 1.0095748901367188, 1.0098646850585937, 1.0092850952148438, 1.0101329956054688, 1.0093475952148439, 1.0094888916015625, 1.00940185546875, 1.0091079711914062, 1.0090505981445312, 1.0093823852539063, 1.0089502563476562, 1.009269775390625, 1.0090301513671875, 1.00952783203125, 1.0098544921875, 1.0100684814453125, 1.0095472412109374, 1.0094541015625, 1.0092247314453124, 1.0096537475585938, 1.0091100463867186, 1.0094458618164062, 1.0091038818359375, 1.0096301879882812, 1.0089943237304688, 1.0096271362304687, 1.009623046875, 1.0094694213867188, 1.009554443359375, 1.0093568115234375, 1.0092083129882812, 1.0091427612304686, 1.0093660278320313, 1.010423828125, 1.0095114135742187, 1.0090711059570312, 1.0094817504882811, 1.009312744140625, 1.0092687377929688, 1.009132568359375, 1.0089052124023437, 1.0089922485351563, 1.0089625854492188, 1.0091192016601562, 1.00922265625, 1.0094295043945312, 1.0091376342773437, 1.0093772583007812, 1.0091202392578125, 1.0095810546875, 2.08344580078125, 1.0095042724609375, 1.0109685668945312, 1.0099476928710938, 1.01062548828125, 1.0095011596679688, 1.0104494018554688, 1.008996337890625, 1.0095462646484374, 1.008911376953125, 1.00917041015625, 1.0089707641601562, 1.00910693359375, 1.0093240356445312, 1.0093403930664062, 1.0089093017578126, 1.009713134765625, 1.0089676513671875, 1.0090844116210937, 1.0088786010742188, 1.00897998046875, 1.0090782470703126, 1.0091878662109375, 1.0094356689453126, 1.009923095703125, 1.0092708129882813, 1.0097356567382811, 1.008616455078125, 1.0090946655273438, 1.0091868286132812, 1.0094305419921874, 1.0093619384765624, 1.0100254516601563, 1.0098093872070313, 1.0101801147460938, 1.0097049560546876, 1.010398193359375, 1.0095513305664063, 1.0100848388671875, 1.0106326904296874, 1.011041259765625, 1.010619384765625, 1.0100930786132813, 1.0094878540039063, 1.0097684326171874, 1.0093609008789062, 1.0095360107421876, 1.009154052734375, 1.0090618896484376, 1.009249267578125, 1.009196044921875, 1.0090895385742187, 1.0093352661132813, 1.0091714477539062, 1.0094796752929687, 1.0091049194335937, 1.0094694213867188, 1.0094202880859375, 1.009227783203125, 1.0093311767578126, 1.0093629150390624, 1.0093004760742188, 1.0094376831054688, 2.083451904296875, 1.0096097412109375, 1.0098380737304689, 1.009165283203125, 1.0100848388671875, 1.0104432373046874, 1.0090741577148437, 1.0095851440429688, 1.0092666625976563, 1.0088263549804688, 1.0094428100585937, 1.0090045166015624, 
1.0094745483398437, 1.0092830810546876, 1.0091581420898437, 1.0089246826171876, 1.0092728271484375, 1.009238037109375, 1.0095165405273439, 1.0092728271484375, 1.009455078125, 1.00973876953125, 1.009924072265625, 1.0094458618164062, 1.0094541015625, 1.0086737670898438, 1.009306640625, 1.009659912109375, 1.0100695190429687, 1.0101934204101561, 1.0098739013671876, 1.0097745971679688, 1.0097152099609374, 1.0097776489257813, 1.0097192993164064, 1.0093025512695313, 1.0096865234375, 1.009438720703125, 1.0101616821289063, 1.0098831176757812, 1.0098104248046875, 1.0097673950195312, 1.01049853515625, 1.0099834594726562, 1.0104944458007812, 1.0097633056640625, 1.0092708129882813, 1.009201171875, 1.0093056030273437, 1.0092236938476562, 1.0090127563476563, 1.0091929321289062, 1.00919091796875, 1.0090670166015625, 1.0091888427734375, 1.008996337890625, 1.0091161499023438, 1.0092789916992186, 1.0097572021484376, 1.0099056396484376, 1.0091837158203125, 1.0094663696289063, 1.0093506469726563]",tokens/s,0.9756596705690017,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaa90-0a22c30224ea2bc804eea870;cc92f1c3-cd4c-4719-ba14-ff7660a5b350) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1390.702592,1340.604416,0.0,694.157312,598.499328,s,10,0.6226632919311523,0.062266329193115234,0.002317832326909016,0.06196879959106445,0.06357897109985351,0.06612861976623535,0.06816833869934083,"[0.06867826843261719, 0.06029948806762695, 0.06301238250732422, 0.06221964645385742, 0.06201440048217773, 0.06192319869995117, 0.06178224182128906, 0.06027980804443359, 0.06031568145751953, 0.06213817596435547]",tokens/s,4111.3713192571795,kWh,7.181983443623039e-07,3.9353970671072603e-07,2.1138383914060147e-06,3.2255764424790445e-06,tokens/kWh,79365658.99621,MB,1391.030272,1340.604416,0.0,694.157312,659.032576,s,10,37.91430688476563,3.791430688476563,0.033463632163375424,3.808232421875,3.8238944580078122,3.8254893432617187,3.8267652514648436,"[3.8235400390625, 3.807294921875, 3.809169921875, 3.815871337890625, 3.827084228515625, 3.813744140625, 3.745643798828125, 3.73081494140625, 3.753345703125, 3.7877978515625]",tokens/s,16.616418754924958,kWh,4.459946840148583e-05,2.4442959093412353e-05,0.00012145201046119378,0.00019049443795609197,tokens/kWh,330718.3174267859,,s,629,38.408798217773445,0.06106327220631707,0.007411049818688859,0.06058700942993164,0.06105620422363281,0.061356236267089845,0.1198044711303711,"[0.05935715103149414, 0.059200511932373044, 0.06025625610351563, 0.06658048248291015, 0.06115430450439453, 0.06075187301635742, 0.06081740951538086, 0.06116556930541992, 0.06187519836425781, 0.060846080780029295, 0.0618015022277832, 0.061259742736816405, 0.06081024169921875, 0.06082559967041016, 0.06123110580444336, 0.06096486282348633, 0.06082559967041016, 0.06090752029418945, 0.06103142547607422, 0.05951590347290039, 0.05919027328491211, 0.060930049896240235, 0.06138163375854492, 0.06130278396606445, 0.0615464973449707, 0.061396991729736325, 0.06103859329223633, 0.060919807434082034, 0.06081228637695312, 0.06087680053710937, 0.06095257568359375, 0.06107648086547852, 0.06047641754150391, 0.05975551986694336, 0.060652542114257815, 0.060800033569335936, 0.06089315032958984, 0.060319744110107425, 0.06068940734863281, 0.06150451278686524, 0.06105190277099609, 0.061037567138671874, 0.059990016937255856, 0.060765182495117184, 0.06077849578857422, 0.06057881546020508, 0.060082176208496096, 0.0589219856262207, 0.059184127807617185, 0.05976166534423828, 0.060783649444580076, 0.06094025421142578, 0.06090854263305664, 0.060641281127929686, 0.05964799880981445, 0.05914112091064453, 0.05936537551879883, 0.06067507171630859, 0.06081740951538086, 0.060158977508544924, 0.06099660873413086, 0.05982624053955078, 0.11980588531494141, 0.06112768173217773, 0.06088294219970703, 0.060576766967773435, 0.06073548889160156, 0.06021529769897461, 0.06116966247558594, 0.06180044937133789, 0.06070483016967773, 0.05943494415283203, 0.059440128326416014, 0.060862464904785155, 0.060211200714111325, 0.05941452789306641, 0.06035456085205078, 0.060521472930908204, 
0.06083174514770508, 0.06059929656982422, 0.060902400970458986, 0.06084096145629883, 0.06026649475097656, 0.05905920028686523, 0.059622398376464845, 0.06106521606445312, 0.06076313781738281, 0.05981081771850586, 0.05929369735717773, 0.059216896057128904, 0.061050880432128904, 0.06076620864868164, 0.059224063873291016, 0.06019583892822265, 0.06055321502685547, 0.060796928405761716, 0.061064193725585934, 0.06083071899414062, 0.060793857574462894, 0.05939507293701172, 0.058877952575683595, 0.05913702392578125, 0.05904793548583984, 0.05907865524291992, 0.06065049743652344, 0.06088294219970703, 0.06102732849121094, 0.06089215850830078, 0.06101504135131836, 0.060728321075439455, 0.06069760131835938, 0.060909599304199216, 0.06107542419433594, 0.060761089324951174, 0.06076416015625, 0.060911617279052734, 0.06071091079711914, 0.06042009735107422, 0.06087372970581055, 0.06067302322387695, 0.06060851287841797, 0.060984321594238285, 0.06090956878662109, 0.06082867050170898, 0.060943359375, 0.12434432220458984, 0.059063297271728515, 0.05924863815307617, 0.059273216247558595, 0.059202560424804686, 0.0593807373046875, 0.05934284973144531, 0.06072115325927734, 0.060693504333496094, 0.06077439880371094, 0.0608798713684082, 0.05987430572509766, 0.05933158493041992, 0.05966438293457031, 0.05923430252075195, 0.06037811279296875, 0.060916736602783204, 0.06084505462646484, 0.060793857574462894, 0.06016204833984375, 0.06031257629394531, 0.060813312530517576, 0.06093008041381836, 0.06087472152709961, 0.06037811279296875, 0.06069964981079102, 0.06112972640991211, 0.06072524642944336, 0.060851200103759766, 0.06091468811035156, 0.0610334701538086, 0.0608901138305664, 0.06094847869873047, 0.06083071899414062, 0.060846080780029295, 0.06088806533813477, 0.06078566360473633, 0.06029625701904297, 0.06034220886230469, 0.06101094436645508, 0.06097919845581055, 0.061350910186767575, 0.05945548629760742, 0.05929779052734375, 0.059101184844970706, 0.059858943939208986, 0.05947903823852539, 0.05919948959350586, 0.059469825744628904, 0.06209024047851563, 0.06109183883666992, 0.06083071899414062, 0.06089318466186523, 0.06092083358764649, 0.06089113616943359, 0.06088601684570313, 0.06026956939697266, 0.06099763107299805, 0.06088499069213867, 0.06075494384765625, 0.06154547119140625, 0.0613570556640625, 0.06097919845581055, 0.12348928070068359, 0.060916736602783204, 0.060777473449707034, 0.0607303695678711, 0.06054297637939453, 0.06096694564819336, 0.06082352066040039, 0.060835838317871094, 0.06073855972290039, 0.060864513397216796, 0.05947903823852539, 0.059015167236328124, 0.059270145416259766, 0.05909404754638672, 0.05935817718505859, 0.05992550277709961, 0.06107955169677735, 0.061110271453857425, 0.06092083358764649, 0.06084505462646484, 0.06058803176879883, 0.06083891296386719, 0.06079487991333008, 0.060827648162841794, 0.060832767486572265, 0.06084403228759765, 0.06105702209472656, 0.06056345748901367, 0.06058598327636719, 0.06076006317138672, 0.06095462417602539, 0.0609617919921875, 0.06127206420898437, 0.05955788803100586, 0.06099967956542969, 0.06095667266845703, 0.06068633651733398, 0.060887039184570314, 0.06139187240600586, 0.061454334259033204, 0.06054912185668945, 0.06096281433105469, 0.06029414367675781, 0.06135500717163086, 0.062328830718994144, 0.06116147232055664, 0.0603770866394043, 0.05916262435913086, 0.059469825744628904, 0.0593438720703125, 0.05933055877685547, 0.05895884704589844, 0.05933670425415039, 0.06046003341674805, 0.06081024169921875, 0.06076620864868164, 0.061451263427734375, 0.06086348724365234, 
0.06087168121337891, 0.06075699234008789, 0.060631038665771485, 0.0605296630859375, 0.06076006317138672, 0.12325580596923828, 0.060826625823974606, 0.06067612838745117, 0.06101910400390625, 0.06079283142089844, 0.06072115325927734, 0.06085529708862305, 0.06092902374267578, 0.0608983039855957, 0.060955646514892575, 0.06091059112548828, 0.060889087677001956, 0.06080716705322266, 0.059873279571533204, 0.05963673782348633, 0.06089932632446289, 0.061224960327148435, 0.06137139129638672, 0.06110105514526367, 0.06112870407104492, 0.06075699234008789, 0.0609249267578125, 0.0607457275390625, 0.06121267318725586, 0.060843006134033206, 0.06098124694824219, 0.061001728057861325, 0.06096588897705078, 0.06096895980834961, 0.06099558258056641, 0.06101708984375, 0.06083174514770508, 0.06094643020629883, 0.06118297576904297, 0.06099148941040039, 0.06102937698364258, 0.06078771209716797, 0.06082457733154297, 0.059865089416503904, 0.060216320037841796, 0.059087871551513675, 0.05941452789306641, 0.06077439880371094, 0.06028083038330078, 0.06068121719360352, 0.06069760131835938, 0.06096691131591797, 0.06012416076660156, 0.06075494384765625, 0.06013849639892578, 0.06086963272094727, 0.060609535217285154, 0.06098739242553711, 0.06088294219970703, 0.06077337646484375, 0.06087782287597656, 0.060805118560791016, 0.06075801467895508, 0.060829696655273435, 0.060837886810302735, 0.060200958251953124, 0.060911617279052734, 0.06101094436645508, 0.1231247329711914, 0.060744705200195315, 0.06079897689819336, 0.0600002555847168, 0.059115520477294924, 0.0603351058959961, 0.06076620864868164, 0.060727294921875, 0.060744705200195315, 0.060527614593505856, 0.06128844833374023, 0.06094438552856445, 0.060677120208740234, 0.06176768112182617, 0.062110721588134764, 0.0609249267578125, 0.060832767486572265, 0.060432384490966794, 0.06098636627197265, 0.06112972640991211, 0.060895233154296874, 0.062491649627685546, 0.060980224609375, 0.06099558258056641, 0.06110310363769531, 0.06090956878662109, 0.060767230987548826, 0.06076211166381836, 0.06077542495727539, 0.06094847869873047, 0.06072115325927734, 0.06075392150878906, 0.060911617279052734, 0.06179123306274414, 0.06146355056762695, 0.06151174545288086, 0.05984864044189453, 0.0592097282409668, 0.059241470336914064, 0.0594442253112793, 0.05915238571166992, 0.05912063980102539, 0.059140094757080076, 0.05916159820556641, 0.05927219009399414, 0.059033599853515625, 0.059902976989746094, 0.06051942443847656, 0.060739585876464844, 0.06076313781738281, 0.060609535217285154, 0.059551742553710936, 0.05947699356079102, 0.06035257720947266, 0.060338111877441404, 0.06081126403808594, 0.06072524642944336, 0.060757022857666015, 0.06074979019165039, 0.060472320556640625, 0.06065459060668945, 0.06086656188964844, 0.06003609466552735, 0.12312576293945313, 0.06074060821533203, 0.06058700942993164, 0.060955646514892575, 0.060897281646728516, 0.060668926239013675, 0.05904793548583984, 0.059138046264648435, 0.0591984977722168, 0.059074527740478514, 0.05915545654296875, 0.05922918319702149, 0.05921279907226563, 0.05931417465209961, 0.05925068664550781, 0.05924665451049805, 0.05927315139770508, 0.062034942626953124, 0.06089625549316406, 0.05932646560668945, 0.05910630416870117, 0.05935615921020508, 0.059224063873291016, 0.0590909423828125, 0.05909299087524414, 0.05916159820556641, 0.05918105697631836, 0.05901004791259765, 0.05914828872680664, 0.05935718536376953, 0.059216896057128904, 0.05914214324951172, 0.05914214324951172, 0.05903564834594727, 0.05917388916015625, 0.059701248168945314, 
0.05926399993896484, 0.05921177673339844, 0.059167774200439456, 0.05920764923095703, 0.05906227111816406, 0.05908992004394531, 0.059063297271728515, 0.05934694290161133, 0.05926707077026367, 0.05919027328491211, 0.05903462219238281, 0.0592988166809082, 0.05978316879272461, 0.06010367965698242, 0.05926502227783203, 0.059202560424804686, 0.05930092620849609, 0.05915334320068359, 0.059085823059082034, 0.059358207702636716, 0.05939199829101562, 0.05905929565429688, 0.05903247833251953, 0.05921382522583008, 0.059084800720214846, 0.05944319915771484, 0.059240447998046876, 0.11974553680419922, 0.059086910247802736, 0.05899875259399414, 0.05906224060058594, 0.059066368103027345, 0.059218944549560545, 0.05904281616210937, 0.05910220718383789, 0.05922304153442383, 0.059017215728759766, 0.05900185775756836, 0.05915750503540039, 0.059101184844970706, 0.05916262435913086, 0.05922099304199219, 0.059033599853515625, 0.05897625732421875, 0.05926604843139648, 0.059210750579833986, 0.059121662139892575, 0.05920358276367187, 0.05924556732177735, 0.059066368103027345, 0.05899776077270508, 0.05931008148193359, 0.06076620864868164, 0.059312126159667966, 0.059284481048583984, 0.059046913146972656, 0.05910425567626953, 0.059017215728759766, 0.05894041442871094, 0.05906227111816406, 0.05915955352783203, 0.059063297271728515, 0.05913600158691406, 0.05902233505249024, 0.059099136352539064, 0.059030529022216796, 0.05913497543334961, 0.0589035530090332, 0.05907353591918945, 0.059066368103027345, 0.058962944030761716, 0.059486209869384764, 0.059453441619873044, 0.058998783111572264, 0.058987518310546876, 0.05903564834594727, 0.05900185775756836, 0.05933977508544922, 0.05908070373535156, 0.059009025573730466, 0.06041702270507812, 0.05994598388671875, 0.058993663787841794, 0.05916262435913086, 0.059085823059082034, 0.05953638458251953, 0.05912473678588867, 0.05950054550170898, 0.060255264282226564, 0.05989884948730469, 0.11980083465576172, 0.059218944549560545, 0.05915238571166992, 0.059079681396484375, 0.05905715179443359, 0.059115520477294924, 0.05936025619506836, 0.0590643196105957, 0.05912575912475586, 0.05918207931518555, 0.06097919845581055, 0.059486209869384764, 0.05931622314453125, 0.059044864654541014, 0.05903363037109375, 0.059218910217285155, 0.059025409698486325, 0.05917491149902344, 0.05916364669799805, 0.05909401702880859, 0.0590489616394043, 0.05904076766967774, 0.059096065521240235, 0.059121662139892575, 0.05902438354492188, 0.05902131271362305, 0.05898854446411133, 0.05910015869140625, 0.059300865173339844, 0.05925785446166992, 0.06097612762451172, 0.060662784576416016, 0.060878849029541014, 0.06074060821533203, 0.06073446273803711, 0.06080412673950195, 0.06049481582641601, 0.06058700942993164, 0.060581886291503906, 0.0608092155456543, 0.059243518829345705, 0.059063297271728515, 0.05907251358032226, 0.05892403030395508, 0.05899776077270508, 0.05919744110107422, 0.05904793548583984, 0.059481086730957033, 0.05923942565917969, 0.05914419174194336, 0.05912985610961914, 0.059145217895507814, 0.05928550338745117, 0.059261951446533206, 0.05939199829101562, 0.05910323333740235, 0.05878988647460937, 0.059832321166992185, 0.061055999755859375, 0.060690433502197265, 0.06087174224853516, 0.06086547088623047, 0.060862464904785155, 0.12235059356689452, 0.05892607879638672, 0.05911859130859375, 0.05902950286865234, 0.05892403030395508, 0.058929153442382816, 0.05973811340332031, 0.059853824615478515, 0.058992641448974606, 0.059038719177246096, 0.05915545654296875, 0.05913190460205078, 0.058912769317626956, 
0.05937152099609375, 0.060902400970458986, 0.06070272064208984, 0.06099353790283203, 0.060695552825927736, 0.059014144897460936, 0.05908172988891602, 0.05928140640258789, 0.059154430389404294, 0.06019583892822265, 0.06073241424560547, 0.0606668815612793, 0.060614654541015625, 0.06086252975463867, 0.06079072189331055, 0.06075801467895508, 0.06062899017333984, 0.06069247817993164, 0.060826625823974606, 0.061156352996826174, 0.06073241424560547, 0.06075392150878906, 0.060655616760253904, 0.06082252883911133, 0.06071705627441406, 0.060818431854248046, 0.060819454193115234, 0.06071091079711914, 0.06072012710571289, 0.0607723503112793, 0.061059070587158204, 0.06127308654785156, 0.060447742462158206, 0.05913907241821289, 0.05907251358032226, 0.06083174514770508, 0.06084505462646484, 0.06077644729614258, 0.060709888458251954, 0.0603054084777832, 0.059133953094482425, 0.05898342514038086, 0.06038323211669922, 0.06078668975830078, 0.059170848846435545, 0.060978145599365235, 0.06088601684570313, 0.06031568145751953, 0.05907247924804687, 0.05908889770507812]",tokens/s,16.3764561555309,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4421.566464,24111.480832,0.0,23465.033728,21691.057664,s,10,26.201972412109374,2.6201972412109376,0.0014392080916773324,2.620363525390625,2.621498388671875,2.622221728515625,2.622800400390625,"[2.62030126953125, 2.620416259765625, 2.620310791015625, 2.621337646484375, 2.620593017578125, 2.619945556640625, 2.622945068359375, 2.618053466796875, 2.6175732421875, 2.62049609375]",tokens/s,97.70256833095829,kWh,3.092599951558643e-05,1.6948578058490964e-05,0.00015162839908040572,0.00019950297665448312,tokens/kWh,1283188.874135765,MB,4421.566464,24111.480832,0.0,23465.033728,21890.213376,s,10,1554.3346406249998,155.43346406249998,0.013784362336340894,155.428859375,155.4548328125,155.45787734375,155.46031296875,"[155.44153125, 155.42890625, 155.45415625, 155.426875, 155.41909375, 155.416984375, 155.4288125, 155.42403125, 155.460921875, 155.433328125]",tokens/s,0.4053181236099044,kWh,0.0018348197203377882,0.0010056435987538315,0.008919082301926796,0.011759545621018413,tokens/kWh,5357.349852650515,,s,629,1575.449455322266,2.504689118159405,0.3111804707409938,2.46706884765625,2.4683868164062504,2.4691709960937502,5.086480859375,"[2.467560546875, 2.468384765625, 2.46702587890625, 2.467464111328125, 2.46763427734375, 2.468833251953125, 2.467610595703125, 2.46744580078125, 2.467310546875, 2.4678994140625, 2.46620263671875, 2.466724853515625, 2.46737109375, 2.46790234375, 2.466193359375, 2.46736279296875, 2.4661279296875, 2.467852294921875, 2.469380126953125, 2.468475830078125, 2.467101806640625, 2.46930322265625, 2.4674384765625, 2.46818505859375, 2.46793115234375, 2.468096923828125, 2.466873291015625, 2.466653076171875, 2.466144287109375, 2.467576904296875, 2.466427978515625, 2.46687744140625, 2.467862548828125, 2.468832275390625, 2.46677001953125, 2.46746533203125, 2.4666767578125, 
2.467177490234375, 2.4664228515625, 2.466610107421875, 2.466058349609375, 2.46723388671875, 2.470784912109375, 2.466979736328125, 2.466400146484375, 2.4673740234375, 2.467078125, 2.466891845703125, 2.46717041015625, 2.467664794921875, 2.4676455078125, 2.467311767578125, 2.4667421875, 2.46812060546875, 2.467968017578125, 2.467040283203125, 2.466512939453125, 2.4665068359375, 2.467083251953125, 2.466418701171875, 2.465967041015625, 2.466157470703125, 5.087494140625, 2.466189208984375, 2.466884521484375, 2.46639501953125, 2.46648828125, 2.466607177734375, 2.467287109375, 2.46839599609375, 2.46702587890625, 2.467313720703125, 2.467323974609375, 2.467330078125, 2.466697265625, 2.46641455078125, 2.4657080078125, 2.46809912109375, 2.46658984375, 2.467029052734375, 2.46753076171875, 2.468769775390625, 2.467987548828125, 2.468222900390625, 2.4690595703125, 2.469578857421875, 2.46847900390625, 2.4680283203125, 2.46656298828125, 2.46801513671875, 2.46639013671875, 2.4670986328125, 2.46615234375, 2.46773974609375, 2.4667216796875, 2.46727685546875, 2.4663193359375, 2.46763525390625, 2.46688671875, 2.467874755859375, 2.46723779296875, 2.46824853515625, 2.466218017578125, 2.466986083984375, 2.46668505859375, 2.4669931640625, 2.4665302734375, 2.465965087890625, 2.468173828125, 2.467091552734375, 2.4666787109375, 2.466904052734375, 2.466720703125, 2.466922607421875, 2.46651806640625, 2.466711669921875, 2.46662158203125, 2.4666767578125, 2.466606201171875, 2.466504638671875, 2.46702294921875, 2.467114990234375, 2.46698291015625, 2.466469970703125, 2.46719384765625, 5.0873857421875, 2.46711181640625, 2.4676669921875, 2.4682373046875, 2.46677294921875, 2.465900634765625, 2.466285400390625, 2.46675244140625, 2.4686142578125, 2.467852294921875, 2.468147216796875, 2.469138427734375, 2.468021240234375, 2.46839501953125, 2.467322998046875, 2.46803857421875, 2.4694794921875, 2.467284912109375, 2.467267578125, 2.46729736328125, 2.471918701171875, 2.4674765625, 2.467620849609375, 2.468518798828125, 2.46965966796875, 2.468581298828125, 2.467287109375, 2.4676025390625, 2.46909130859375, 2.4678798828125, 2.466585693359375, 2.46625390625, 2.468111328125, 2.466431884765625, 2.4665302734375, 2.46625390625, 2.46766796875, 2.46662646484375, 2.46632763671875, 2.466314208984375, 2.468085693359375, 2.467567626953125, 2.47077490234375, 2.466901123046875, 2.46776416015625, 2.466997314453125, 2.468117431640625, 2.471785400390625, 2.468166748046875, 2.46658154296875, 2.467800048828125, 2.4673310546875, 2.46755029296875, 2.466908203125, 2.4666357421875, 2.4657724609375, 2.4668466796875, 2.46647509765625, 2.46641455078125, 2.4653935546875, 2.46607861328125, 2.465657958984375, 2.46717041015625, 5.08862353515625, 2.46618017578125, 2.46677001953125, 2.465965087890625, 2.4666962890625, 2.46651806640625, 2.466512939453125, 2.4657919921875, 2.46704833984375, 2.466091064453125, 2.4665322265625, 2.466198486328125, 2.466378662109375, 2.4663388671875, 2.466697265625, 2.4661689453125, 2.467324951171875, 2.46691748046875, 2.46706884765625, 2.46692041015625, 2.466785400390625, 2.467284912109375, 2.467230712890625, 2.4668681640625, 2.4700732421875, 2.46681396484375, 2.466840576171875, 2.46628857421875, 2.4691845703125, 2.469140380859375, 2.468798583984375, 2.46765869140625, 2.4666748046875, 2.46800390625, 2.466842529296875, 2.46696044921875, 2.46841552734375, 2.46970166015625, 2.468170654296875, 2.467095458984375, 2.46616259765625, 2.468423583984375, 2.46754296875, 2.467493896484375, 2.469568603515625, 2.467686279296875, 2.467119140625, 
2.46765576171875, 2.4675615234375, 2.46753076171875, 2.46786962890625, 2.467504150390625, 2.466217041015625, 2.46670751953125, 2.466275390625, 2.46649951171875, 2.465642578125, 2.4665927734375, 2.466114501953125, 2.46563134765625, 2.466107421875, 2.466840576171875, 2.46594775390625, 5.0872607421875, 2.466586669921875, 2.467085205078125, 2.466087890625, 2.46624658203125, 2.468030517578125, 2.466228271484375, 2.46631201171875, 2.465871826171875, 2.466620361328125, 2.466470947265625, 2.4659375, 2.465469482421875, 2.467073974609375, 2.467386474609375, 2.466375732421875, 2.46649658203125, 2.469547119140625, 2.46750634765625, 2.466642822265625, 2.46601123046875, 2.466673583984375, 2.466423828125, 2.466227294921875, 2.466232421875, 2.467124267578125, 2.466873291015625, 2.46641162109375, 2.466154541015625, 2.46636962890625, 2.466863037109375, 2.46698291015625, 2.467325927734375, 2.46662744140625, 2.4677744140625, 2.466999267578125, 2.467263427734375, 2.466723876953125, 2.46856201171875, 2.46679150390625, 2.466046875, 2.46635107421875, 2.46673828125, 2.467541015625, 2.4671416015625, 2.467443603515625, 2.466511962890625, 2.467031982421875, 2.46706787109375, 2.4669921875, 2.468369384765625, 2.467056640625, 2.46666455078125, 2.467033203125, 2.468075439453125, 2.468128662109375, 2.466417724609375, 2.471729248046875, 2.467071044921875, 2.466891845703125, 2.46647509765625, 2.466440185546875, 2.46868798828125, 5.08691357421875, 2.466028564453125, 2.4677333984375, 2.467010498046875, 2.467205078125, 2.4664013671875, 2.4672451171875, 2.4667392578125, 2.46585546875, 2.466406494140625, 2.46685693359375, 2.4661708984375, 2.4663828125, 2.46592822265625, 2.466884521484375, 2.466239501953125, 2.46583203125, 2.465574951171875, 2.46738330078125, 2.467092529296875, 2.4664677734375, 2.467158935546875, 2.467737548828125, 2.4660244140625, 2.4675830078125, 2.46628662109375, 2.467053466796875, 2.4666142578125, 2.46684375, 2.466788330078125, 2.467851318359375, 2.46658056640625, 2.468338623046875, 2.467275634765625, 2.46681201171875, 2.466661376953125, 2.466303955078125, 2.46763720703125, 2.473092041015625, 2.46727685546875, 2.467389404296875, 2.467167236328125, 2.467786865234375, 2.4663837890625, 2.4663388671875, 2.465721435546875, 2.467124267578125, 2.4679638671875, 2.46670751953125, 2.46626416015625, 2.467136474609375, 2.466255859375, 2.4674990234375, 2.466595947265625, 2.466490478515625, 2.4665712890625, 2.46690087890625, 2.466754638671875, 2.4670341796875, 2.467210205078125, 2.467547119140625, 2.467244140625, 2.466747314453125, 5.09081201171875, 2.467222412109375, 2.46835205078125, 2.467926025390625, 2.4673740234375, 2.467116943359375, 2.46747021484375, 2.467093505859375, 2.466975830078125, 2.46742626953125, 2.4675400390625, 2.467567626953125, 2.4670546875, 2.467200927734375, 2.467986328125, 2.46847900390625, 2.468297607421875, 2.466345947265625, 2.467420166015625, 2.468263916015625, 2.46740283203125, 2.466755615234375, 2.467157958984375, 2.471689208984375, 2.467622802734375, 2.4668671875, 2.467239013671875, 2.46835205078125, 2.467516357421875, 2.46651904296875, 2.466069580078125, 2.46681591796875, 2.466124755859375, 2.465919921875, 2.465881103515625, 2.46696240234375, 2.46622607421875, 2.46639208984375, 2.465967041015625, 2.46714990234375, 2.466946044921875, 2.466486328125, 2.465672119140625, 2.466734130859375, 2.466515869140625, 2.46664404296875, 2.4666142578125, 2.4672265625, 2.466231201171875, 2.46652001953125, 2.4661728515625, 2.46692041015625, 2.466157470703125, 2.46626611328125, 2.46582470703125, 
2.466809814453125, 2.467745849609375, 2.4677080078125, 2.466193359375, 2.4672470703125, 2.467043212890625, 2.47162060546875, 2.4657490234375, 5.08459130859375, 2.46683544921875, 2.46649755859375, 2.467766357421875, 2.467400634765625, 2.467850341796875, 2.467080078125, 2.46782470703125, 2.466931640625, 2.46681298828125, 2.466908203125, 2.467493896484375, 2.466670654296875, 2.46761767578125, 2.467306396484375, 2.46744873046875, 2.46968310546875, 2.467335205078125, 2.466291748046875, 2.46672900390625, 2.4669912109375, 2.468328369140625, 2.466957275390625, 2.46684375, 2.467378173828125, 2.4669912109375, 2.467179443359375, 2.46784521484375, 2.468820068359375, 2.466711669921875, 2.465966064453125, 2.465700927734375, 2.4670791015625, 2.46738427734375, 2.467485595703125, 2.4666328125, 2.4676455078125, 2.466926513671875, 2.46613720703125, 2.46624755859375, 2.4671220703125, 2.46669921875, 2.465594482421875, 2.46590771484375, 2.466947021484375, 2.466668701171875, 2.466886474609375, 2.46719189453125, 2.46757080078125, 2.466482177734375, 2.46595068359375, 2.46605712890625, 2.467263427734375, 2.466189208984375, 2.467197998046875, 2.46523095703125, 2.466417724609375, 2.466122802734375, 2.467242919921875, 2.466663330078125, 2.46723388671875, 2.466820068359375, 2.472427490234375, 5.0853681640625, 2.46607568359375, 2.46763818359375, 2.466788330078125, 2.467725341796875, 2.466460693359375, 2.467099609375, 2.46757177734375, 2.46760546875, 2.466769775390625, 2.46808056640625, 2.46717236328125, 2.468547607421875, 2.46778369140625, 2.4672236328125, 2.466810791015625, 2.467812255859375, 2.468862060546875, 2.467946533203125, 2.467600341796875, 2.46891015625, 2.46693994140625, 2.46805810546875, 2.46829052734375, 2.469150634765625, 2.468052978515625, 2.4687646484375, 2.467143798828125, 2.467037109375, 2.466817138671875, 2.468148193359375, 2.467322998046875, 2.467407958984375, 2.468336669921875, 2.468115478515625, 2.4671845703125, 2.4663388671875, 2.4676884765625, 2.467588134765625, 2.466388916015625, 2.466503662109375, 2.466964599609375, 2.4676494140625, 2.466975830078125, 2.466547607421875, 2.46734033203125, 2.467566650390625, 2.467642333984375, 2.466969482421875, 2.468360107421875, 2.467146728515625, 2.466821044921875, 2.466783203125, 2.46778466796875, 2.46734130859375, 2.467453857421875, 2.467220458984375, 2.468514892578125, 2.467493896484375, 2.4675439453125, 2.467715087890625, 2.47457275390625, 2.468958251953125, 5.0886162109375, 2.467864501953125, 2.469477294921875, 2.468490234375, 2.4670166015625, 2.467751953125, 2.46676171875, 2.466572265625, 2.467084228515625, 2.467091552734375, 2.4673525390625, 2.4678388671875, 2.46633154296875, 2.467618896484375, 2.46672900390625, 2.466831298828125, 2.4672685546875, 2.467834716796875, 2.46698193359375, 2.467683349609375, 2.46766796875, 2.46748974609375, 2.46799560546875, 2.4667822265625, 2.4664052734375, 2.467306396484375, 2.469897216796875, 2.46721630859375, 2.46658251953125, 2.46739453125, 2.467197998046875, 2.468125732421875, 2.466786376953125, 2.467189697265625, 2.467324951171875, 2.467589111328125, 2.46772314453125, 2.468556884765625, 2.466018310546875, 2.466438232421875, 2.467070068359375, 2.466093017578125, 2.466754638671875, 2.46706884765625, 2.46609521484375, 2.467577880859375, 2.46765771484375, 2.468675537109375, 2.46646875, 2.466482177734375, 2.466712646484375, 2.465977294921875, 2.46544384765625, 2.466545654296875, 2.466736083984375, 2.467407958984375, 2.466906005859375, 2.466165771484375, 2.46822900390625, 2.4675185546875, 2.466716552734375, 2.4658984375, 
2.466937744140625]",tokens/s,0.3992511456810495,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3047.755776,4521.984,0.0,3875.536896,3575.121408,s,10,3.925874938964844,0.3925874938964844,0.0032003814064900295,0.39131011962890627,0.3974839904785156,0.39779729309082035,0.39804793518066406,"[0.398110595703125, 0.3898521728515625, 0.39046603393554685, 0.39330859375, 0.38967181396484374, 0.38878271484375, 0.39181430053710936, 0.3956484069824219, 0.39741436767578125, 0.3908059387207031]",tokens/s,652.0839404718809,kWh,4.606836531152074e-06,2.5243247530642504e-06,1.9845015875998338e-05,2.697617716021466e-05,tokens/kWh,9489854.640247436,MB,3047.755776,4521.984,0.0,3875.536896,3800.975872,s,10,231.19119335937498,23.119119335937498,0.024652283207228468,23.1104677734375,23.1560455078125,23.16491630859375,23.17201294921875,"[23.104484375, 23.106837890625, 23.11409765625, 23.1257734375, 23.09865234375, 23.091490234375, 23.10330859375, 23.173787109375, 23.15407421875, 23.1186875]",tokens/s,2.7250172934602097,kWh,0.0002730506428477602,0.00014965489141425045,0.0011581440098478151,0.0015808495441098258,tokens/kWh,39851.989858702975,,s,629,234.351453125,0.3725778269077901,0.04661763996043978,0.3668070373535156,0.3683815368652344,0.36919970703125,0.7562686254882812,"[0.3660308532714844, 0.3664025573730469, 0.36736306762695314, 0.3682928771972656, 0.3671849060058594, 0.366424072265625, 0.36691455078125, 0.36642098999023437, 0.3666820983886719, 0.36670053100585936, 0.3671910400390625, 0.3671593017578125, 0.3675473937988281, 0.367383544921875, 0.36641281127929687, 0.3659335632324219, 0.366244873046875, 0.3652311096191406, 0.3665745849609375, 0.3662633056640625, 0.3673968505859375, 0.36597760009765623, 0.3667660827636719, 0.3654379577636719, 0.3664527282714844, 0.3673671569824219, 0.36636468505859376, 0.3656898498535156, 0.3663196105957031, 0.3655997314453125, 0.36726272583007813, 0.36568780517578126, 0.3668695068359375, 0.36599508666992187, 0.3665059204101562, 0.3655762023925781, 0.36666366577148435, 0.36643328857421875, 0.3669176330566406, 0.3657113647460937, 0.36776141357421877, 0.3660533752441406, 0.36670053100585936, 0.36581787109375, 0.36618853759765624, 0.3657318420410156, 0.36655206298828125, 0.3656663818359375, 0.36818527221679687, 0.36655206298828125, 0.3670487060546875, 0.366587890625, 0.3682109375, 0.3672012939453125, 0.3683788757324219, 0.3662612609863281, 0.36835430908203126, 0.3673917541503906, 0.3714969482421875, 0.36665652465820314, 0.3664425048828125, 0.36596124267578123, 0.7560806274414062, 0.36733746337890627, 0.36763238525390624, 0.3667384338378906, 0.3690987548828125, 0.3665008544921875, 0.3659049072265625, 0.3675392150878906, 0.36642611694335936, 0.36708966064453125, 0.36646810913085937, 0.3664025573730469, 0.3674142761230469, 0.36601959228515624, 0.36743270874023437, 0.36704461669921873, 0.36616806030273436, 0.36657357788085937, 0.3658526611328125, 0.3659263916015625, 
0.3657093200683594, 0.3664773254394531, 0.3665899658203125, 0.3660308532714844, 0.36575845336914065, 0.3663247375488281, 0.3659059143066406, 0.36675994873046874, 0.3660185546875, 0.3673917541503906, 0.36654180908203127, 0.36684698486328127, 0.367072265625, 0.3671255187988281, 0.36628070068359375, 0.3660789794921875, 0.36613528442382814, 0.3671644287109375, 0.3661455383300781, 0.36799591064453124, 0.36629299926757813, 0.36816485595703125, 0.36593869018554687, 0.3678883972167969, 0.3660892028808594, 0.3684013977050781, 0.36708865356445314, 0.36752896118164063, 0.36656741333007814, 0.3682099304199219, 0.36648345947265626, 0.36751565551757814, 0.3659735107421875, 0.3680624694824219, 0.3659202575683594, 0.3667906494140625, 0.36607489013671873, 0.3667957763671875, 0.36641485595703127, 0.36750439453125, 0.36619058227539064, 0.36707431030273435, 0.36645068359375, 0.7576248168945312, 0.3653406677246094, 0.3671552124023437, 0.3671224365234375, 0.3667906494140625, 0.3663052673339844, 0.36621209716796876, 0.36776141357421877, 0.3668439025878906, 0.36665139770507815, 0.3658373107910156, 0.36659506225585936, 0.36775115966796873, 0.36711117553710937, 0.36632986450195315, 0.36725247192382815, 0.36715213012695314, 0.36897998046875, 0.3676968994140625, 0.3678535766601562, 0.366065673828125, 0.367072265625, 0.36664523315429687, 0.3668899841308594, 0.366271484375, 0.3664445495605469, 0.3664271240234375, 0.366392333984375, 0.36568576049804685, 0.36705279541015623, 0.36768154907226563, 0.36760678100585936, 0.36624383544921874, 0.3671849060058594, 0.36741018676757814, 0.36703640747070315, 0.36581375122070314, 0.36656536865234374, 0.3663052673339844, 0.36876492309570313, 0.366455810546875, 0.3671715698242187, 0.36698419189453124, 0.3677542419433594, 0.36644046020507814, 0.36671795654296874, 0.36616293334960937, 0.36698828125, 0.3662899169921875, 0.36686746215820315, 0.3674449768066406, 0.36675994873046874, 0.366519287109375, 0.3665111083984375, 0.36600628662109375, 0.36778289794921876, 0.3663800354003906, 0.36820684814453125, 0.3660175476074219, 0.36745419311523436, 0.3680143432617187, 0.3678023681640625, 0.3672965087890625, 0.76101123046875, 0.36763442993164064, 0.3688806457519531, 0.3682867126464844, 0.3672862854003906, 0.36792831420898436, 0.36681729125976564, 0.36853555297851565, 0.36648446655273437, 0.36719000244140626, 0.3671961669921875, 0.3679938659667969, 0.3675965576171875, 0.3683921813964844, 0.3668418579101563, 0.3667855224609375, 0.36585470581054685, 0.36641998291015626, 0.3668428649902344, 0.3661527099609375, 0.3681187744140625, 0.3663144836425781, 0.367393798828125, 0.36705484008789063, 0.36674969482421876, 0.36767642211914064, 0.3666595764160156, 0.36638516235351565, 0.3659837341308594, 0.3676375122070312, 0.36701901245117186, 0.36602264404296875, 0.36724429321289065, 0.36727194213867187, 0.3677163391113281, 0.36646194458007814, 0.3668899841308594, 0.36752896118164063, 0.3668070373535156, 0.36688385009765623, 0.3675699157714844, 0.36781158447265627, 0.3655679931640625, 0.3665203247070313, 0.3669176330566406, 0.3664250793457031, 0.36553421020507815, 0.3678494567871094, 0.367056884765625, 0.3667271728515625, 0.3659151306152344, 0.36611276245117186, 0.3676180419921875, 0.3684751281738281, 0.3661475830078125, 0.3675002746582031, 0.3667466125488281, 0.36979815673828126, 0.3675893859863281, 0.3668500366210937, 0.36594073486328127, 0.3664721984863281, 0.3660421142578125, 0.7563417358398438, 0.36537957763671874, 0.3666237487792969, 0.3666851806640625, 0.36812799072265623, 0.3660308532714844, 
0.3663882141113281, 0.3670702209472656, 0.3671562194824219, 0.3671490478515625, 0.3683932189941406, 0.36664523315429687, 0.3658895263671875, 0.36612710571289064, 0.36652340698242186, 0.3672749938964844, 0.3655301208496094, 0.366635009765625, 0.3657994384765625, 0.36625100708007813, 0.36786483764648437, 0.3671715698242187, 0.36642816162109376, 0.3672166442871094, 0.3667108459472656, 0.36772549438476565, 0.36702822875976565, 0.367720458984375, 0.3670384521484375, 0.3671142272949219, 0.3670169677734375, 0.369112060546875, 0.36612506103515624, 0.3668643798828125, 0.3657052307128906, 0.3660328979492187, 0.3655086059570313, 0.3667589111328125, 0.3669944458007813, 0.36692889404296875, 0.3655577697753906, 0.3665080261230469, 0.36726373291015624, 0.36662066650390623, 0.3658455505371094, 0.36661654663085935, 0.365918212890625, 0.368047119140625, 0.3663114318847656, 0.36607794189453124, 0.36666778564453123, 0.36623870849609375, 0.36541131591796877, 0.36611993408203125, 0.36540826416015626, 0.36666470336914064, 0.3660861511230469, 0.3677306823730469, 0.3666360168457031, 0.36710296630859374, 0.366376953125, 0.3661721496582031, 0.36556698608398436, 0.75503515625, 0.36811160278320315, 0.3666186218261719, 0.3668746337890625, 0.36598681640625, 0.36689202880859373, 0.3665131530761719, 0.36666983032226563, 0.3660205993652344, 0.3664025573730469, 0.3666462707519531, 0.3671009216308594, 0.3655587768554687, 0.3668203430175781, 0.36630117797851564, 0.36666366577148435, 0.36585574340820315, 0.36627865600585935, 0.3661414489746094, 0.36637799072265625, 0.36607998657226565, 0.3658045349121094, 0.3679395751953125, 0.36621517944335935, 0.36576153564453123, 0.3663124389648437, 0.3655833740234375, 0.3661219787597656, 0.3678760986328125, 0.36634521484375, 0.36614862060546877, 0.3665377197265625, 0.3658229675292969, 0.36720025634765624, 0.3659571228027344, 0.36605746459960936, 0.36572467041015627, 0.36643429565429686, 0.3677470703125, 0.3671142272949219, 0.36663092041015627, 0.36711935424804687, 0.3663636474609375, 0.36681625366210935, 0.3660943298339844, 0.36676812744140624, 0.36628582763671874, 0.3674306640625, 0.3657963562011719, 0.36687359619140625, 0.36616293334960937, 0.3663595581054688, 0.3658577880859375, 0.367494140625, 0.3659980773925781, 0.3674224548339844, 0.3666606140136719, 0.3666483154296875, 0.36678964233398437, 0.367278076171875, 0.3672596435546875, 0.36691558837890625, 0.3659898986816406, 0.7581112060546875, 0.36696063232421877, 0.36686541748046875, 0.3666217041015625, 0.3667189636230469, 0.36658688354492186, 0.36612606811523435, 0.3663329162597656, 0.36610763549804687, 0.3663811340332031, 0.3662243347167969, 0.3669329833984375, 0.3673456726074219, 0.3678023681640625, 0.36764877319335937, 0.3677501525878906, 0.3669678039550781, 0.3668746337890625, 0.3683153991699219, 0.36921240234375, 0.368362548828125, 0.36775827026367186, 0.3663052673339844, 0.3672483825683594, 0.36686746215820315, 0.36620184326171873, 0.36589157104492187, 0.36683877563476563, 0.36737841796875, 0.36710400390625, 0.3661424560546875, 0.36658074951171876, 0.365517822265625, 0.36632781982421875, 0.36605645751953125, 0.366856201171875, 0.36679168701171877, 0.3667793884277344, 0.3662469177246094, 0.36760064697265626, 0.36884786987304685, 0.3667568664550781, 0.3658803100585937, 0.36786996459960936, 0.3664742431640625, 0.3666083984375, 0.36686541748046875, 0.367140869140625, 0.3666790466308594, 0.36664422607421876, 0.365802490234375, 0.36675277709960935, 0.365348876953125, 0.36615576171875, 0.3656755065917969, 0.36651724243164063, 
0.3666483154296875, 0.3665623168945312, 0.3659253845214844, 0.36593869018554687, 0.3652372436523437, 0.36640359497070313, 0.3656540222167969, 0.7624765625, 0.36612710571289064, 0.3665633239746094, 0.36761907958984374, 0.36593048095703123, 0.36634112548828124, 0.3662571411132812, 0.3672842102050781, 0.3657646179199219, 0.368606201171875, 0.36768359375, 0.3676794738769531, 0.36858981323242185, 0.3683133544921875, 0.369396728515625, 0.36714599609375, 0.36760577392578125, 0.3668357238769531, 0.3677470703125, 0.3677235107421875, 0.36642098999023437, 0.36621209716796876, 0.3658362731933594, 0.36801739501953123, 0.3667783813476562, 0.3668715515136719, 0.3696394348144531, 0.369375244140625, 0.3691806640625, 0.3698810729980469, 0.37041253662109375, 0.36909466552734377, 0.3689021301269531, 0.3667169189453125, 0.36602264404296875, 0.3657953186035156, 0.3676252136230469, 0.36923904418945314, 0.3685191650390625, 0.3708651428222656, 0.3690188903808594, 0.370050048828125, 0.36955239868164064, 0.366551025390625, 0.3658076171875, 0.3684198303222656, 0.3685857238769531, 0.3687065734863281, 0.36557516479492186, 0.3663943786621094, 0.3689195556640625, 0.3698083801269531, 0.3692380065917969, 0.3681546325683594, 0.3669350280761719, 0.36759039306640623, 0.3672842102050781, 0.3702108154296875, 0.3688243103027344, 0.36716030883789064, 0.36670465087890625, 0.36789248657226564, 0.3689768981933594, 0.7654307861328125, 0.367162353515625, 0.369691650390625, 0.3691734924316406, 0.370017333984375, 0.368702392578125, 0.36607794189453124, 0.36709375, 0.3667589111328125, 0.36747161865234373, 0.3679764404296875, 0.3669626770019531, 0.3658486022949219, 0.36790267944335936, 0.3670026245117187, 0.36758526611328124, 0.3668479919433594, 0.36762625122070314, 0.36738970947265626, 0.36922470092773435, 0.36768768310546873, 0.36928103637695314, 0.3660902404785156, 0.36697601318359374, 0.36616705322265625, 0.3675197448730469, 0.36707635498046876, 0.3673385009765625, 0.3668746337890625, 0.36747161865234373, 0.36656536865234374, 0.36796722412109373, 0.36610662841796876, 0.3665213317871094, 0.365781005859375, 0.36651007080078124, 0.36767025756835936, 0.3668213806152344, 0.36538470458984373, 0.3673118591308594, 0.365907958984375, 0.3671910400390625, 0.3668971557617188, 0.3673313293457031, 0.3669053344726563, 0.3664783935546875, 0.36691961669921874, 0.36783718872070315, 0.36801126098632814, 0.36738970947265626, 0.36602264404296875, 0.36634521484375, 0.3659909057617188, 0.36675787353515626, 0.3704688720703125, 0.37160037231445314, 0.370060302734375, 0.3701022644042969, 0.36780850219726563, 0.36857342529296877, 0.3668899841308594, 0.36739788818359376, 0.3669186706542969, 0.7624959716796875, 0.366529541015625, 0.367541259765625, 0.3666298828125, 0.367783935546875, 0.36849972534179687, 0.36857550048828125, 0.36766720581054685, 0.3665428466796875, 0.367025146484375, 0.3665684509277344, 0.3679231872558594, 0.3658014831542969, 0.3674449768066406, 0.36611276245117186, 0.367678466796875, 0.3667189636230469, 0.3687383117675781, 0.36729037475585935, 0.3665848388671875, 0.36641177368164063, 0.36767333984375, 0.3674972229003906, 0.36638516235351565, 0.3664025573730469, 0.36736306762695314, 0.3676334228515625, 0.3689072570800781, 0.3667712097167969, 0.3672862854003906, 0.3658874816894531, 0.36724429321289065, 0.3662274475097656, 0.3670732727050781, 0.3663523864746094, 0.3670425720214844, 0.3662489624023437, 0.36671282958984375, 0.3670978698730469, 0.3670241394042969, 0.3659970703125, 0.3673231506347656, 0.3663739013671875, 0.3662264404296875, 
0.36703436279296875, 0.36681729125976564, 0.3661414489746094, 0.3668623352050781, 0.366129150390625, 0.367130615234375, 0.36584756469726565, 0.3665489807128906, 0.3668070373535156, 0.3673395080566406, 0.3674347534179688, 0.3669698486328125, 0.36617010498046876, 0.3672535095214844, 0.36757708740234374, 0.3671152648925781, 0.3667189636230469, 0.3670374450683594, 0.3659479064941406]",tokens/s,2.684002986166677,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2593.296384,7298.613248,0.0,6652.166144,6323.352576,s,10,7.731728576660157,0.7731728576660156,0.0036666872570226623,0.7712686767578125,0.7772910278320313,0.7798124450683593,0.7818295788574219,"[0.7823338623046875, 0.776730712890625, 0.7708938598632813, 0.7714732666015625, 0.7710640869140625, 0.770478515625, 0.770045166015625, 0.773399658203125, 0.7746173706054688, 0.7706920776367188]",tokens/s,331.10319052428935,kWh,9.090474362556752e-06,4.981150633102515e-06,4.3684586229685554e-05,5.775621122534482e-05,tokens/kWh,4432423.709394238,MB,2593.296384,7298.613248,0.0,6652.166144,6382.565888,s,10,458.08653125,45.808653125,0.015714942284677954,45.80371484375,45.83385546875,45.83744140625,45.84031015625,"[45.8103125, 45.8045234375, 45.84102734375, 45.79598828125, 45.78718359375, 45.80095703125, 45.79937890625, 45.80290625, 45.8111953125, 45.83305859375]",tokens/s,1.3752860148079282,kWh,0.0005408116745630392,0.00029641227384910964,0.0025237221621385317,0.0033609461105506802,tokens/kWh,18744.721851454397,,s,629,464.3046874389652,0.7381632550698964,0.09178720852369766,0.7269785766601562,0.7282177978515625,0.72855,1.49778814453125,"[0.728890380859375, 0.7286569213867188, 0.728468505859375, 0.727841796875, 0.7261204223632812, 0.7262678833007813, 0.7260436401367187, 0.7263180541992188, 0.7261552734375, 0.7261777954101563, 0.727593994140625, 0.7266488037109375, 0.726371337890625, 0.7267962646484375, 0.7268167724609375, 0.72713623046875, 0.7261091918945313, 0.7263775024414062, 0.726086669921875, 0.7265126953125, 0.7268792114257813, 0.7265433349609375, 0.7264358520507812, 0.7269488525390625, 0.7277793579101562, 0.7264532470703124, 0.7258746948242187, 0.7262843017578124, 0.726619140625, 0.726530029296875, 0.726930419921875, 0.7266365356445312, 0.7263467407226563, 0.7267451171875, 0.7262003173828125, 0.7265435180664063, 0.7259226684570312, 0.7268444213867188, 0.727319580078125, 0.72618798828125, 0.725907470703125, 0.7261306762695312, 0.7267153930664062, 0.7260835571289063, 0.7304304809570312, 0.7278233642578125, 0.7278356323242188, 0.7286978759765625, 0.7283251342773438, 0.7282206420898437, 0.7269744873046875, 0.7285032958984375, 0.728458251953125, 0.7281141967773438, 0.7277547607421875, 0.727568359375, 0.7287777099609375, 0.727846923828125, 0.7280547485351563, 0.72765234375, 0.7284254760742187, 0.7282565307617187, 1.504901123046875, 0.7284664306640625, 0.7290664672851562, 0.7276646118164063, 0.7266221923828124, 
0.7263283081054688, 0.7270154418945313, 0.7276861572265625, 0.7269110717773437, 0.7259544677734375, 0.7266682739257813, 0.7265515747070312, 0.7271710815429687, 0.7266058349609374, 0.7279667358398437, 0.7284111328125, 0.7286005859375, 0.726703125, 0.7264102172851562, 0.7261973876953125, 0.7271340942382812, 0.72724072265625, 0.7264429931640625, 0.726245361328125, 0.7261767578125, 0.7261071166992188, 0.72650244140625, 0.726097900390625, 0.7263776245117187, 0.726574951171875, 0.728237060546875, 0.7274116821289063, 0.7262125854492187, 0.7267153930664062, 0.7282913208007813, 0.726614013671875, 0.7263006591796874, 0.7260282592773437, 0.726560791015625, 0.7266262817382813, 0.7263662109375, 0.7263231811523437, 0.7260938110351562, 0.727815185546875, 0.7281008911132812, 0.727647216796875, 0.7297269897460937, 0.726920166015625, 0.72753564453125, 0.7265023803710937, 0.726255615234375, 0.7262545776367187, 0.7261071166992188, 0.72612353515625, 0.726687744140625, 0.726403076171875, 0.7265781860351562, 0.7276553955078126, 0.728158203125, 0.7277864990234375, 0.7276748657226563, 0.7281663818359375, 0.7285872802734376, 1.497443359375, 0.726253662109375, 0.7272754516601563, 0.7280455932617188, 0.7274752197265625, 0.7277711181640625, 0.728121337890625, 0.7287439575195312, 0.7278479614257812, 0.7272069091796876, 0.728195068359375, 0.7279093627929687, 0.7278653564453125, 0.7270317993164063, 0.7276226806640625, 0.7280025634765624, 0.7273707275390625, 0.7268731079101562, 0.7277240600585938, 0.7278991088867187, 0.728922119140625, 0.7292661743164063, 0.7271915283203125, 0.72674609375, 0.72665087890625, 0.7293204345703125, 0.7278919677734375, 0.7269775390625, 0.7271495971679688, 0.7265064697265625, 0.7276011352539062, 0.7278345947265625, 0.7281715087890624, 0.7285504150390625, 0.72888525390625, 0.7278458862304688, 0.7282175903320313, 0.7279083251953125, 0.7274700927734375, 0.72768408203125, 0.7280230102539063, 0.727125, 0.7273963623046875, 0.7267492065429687, 0.7279011840820313, 0.7279144897460937, 0.7274905395507812, 0.7290408935546875, 0.7272591552734375, 0.7271188354492187, 0.7262003173828125, 0.7260712890625, 0.7269284057617188, 0.7274291381835938, 0.7278981323242187, 0.7281622924804687, 0.727773193359375, 0.72789501953125, 0.7277240600585938, 0.727568359375, 0.727462890625, 0.72740966796875, 0.7279739379882812, 1.49783544921875, 0.7277803344726562, 0.7284193115234375, 0.7274598388671875, 0.727204833984375, 0.7272642822265625, 0.7271331787109375, 0.7269990234375, 0.72734619140625, 0.726697998046875, 0.7275581665039063, 0.7281787109375, 0.7283179321289063, 0.7270953369140625, 0.7263682250976562, 0.7266856689453125, 0.7272109985351562, 0.7262371826171875, 0.7263467407226563, 0.726561767578125, 0.7266099243164063, 0.7263775024414062, 0.7269427490234375, 0.7270051879882813, 0.7267901611328125, 0.7268003540039063, 0.7271588134765625, 0.7267839965820313, 0.7270287475585937, 0.7264204711914063, 0.7269273681640624, 0.7266437377929688, 0.7264727172851563, 0.7263098754882813, 0.7269898071289063, 0.72724072265625, 0.7265023803710937, 0.726582275390625, 0.727125, 0.7272601318359375, 0.7271116943359375, 0.7265310668945313, 0.7264296875, 0.7270942993164062, 0.72810498046875, 0.7268444213867188, 0.7269007568359375, 0.72627197265625, 0.7266754760742188, 0.7263273315429688, 0.726993896484375, 0.72631298828125, 0.727208984375, 0.7275560913085938, 0.7275110473632812, 0.7266375732421875, 0.7264818725585938, 0.7270154418945313, 0.726582275390625, 0.7271209106445312, 0.7262269287109375, 0.7262637939453125, 0.7265955810546875, 
1.49766650390625, 0.7265485229492188, 0.7276533203125, 0.7271311645507812, 0.726666259765625, 0.7262494506835937, 0.7270901489257813, 0.7279677734375, 0.7269109497070313, 0.7260007934570313, 0.726369140625, 0.7265126342773438, 0.7262740478515625, 0.7264921875, 0.7263375244140625, 0.7265730590820313, 0.727419921875, 0.7267584228515624, 0.726582275390625, 0.7271137084960938, 0.7265505981445313, 0.726699951171875, 0.72631298828125, 0.7264921875, 0.7262166748046875, 0.7264942016601562, 0.727056396484375, 0.7262740478515625, 0.726329345703125, 0.72700927734375, 0.7266652221679688, 0.7271157836914063, 0.7266324462890625, 0.7262699584960938, 0.7262648315429687, 0.7263733520507812, 0.7264257202148438, 0.7263466796875, 0.7261910400390625, 0.726476806640625, 0.7270922241210938, 0.7264163818359375, 0.726287353515625, 0.726835205078125, 0.7277772827148438, 0.7272254638671874, 0.7271760864257812, 0.7273492431640625, 0.7281674194335938, 0.7272499389648438, 0.7273533325195313, 0.7268812866210937, 0.7270850830078125, 0.7263784790039063, 0.7263406372070312, 0.7266806030273437, 0.7265914916992188, 0.7289251708984374, 0.7269837036132812, 0.7268054809570312, 0.7262802124023438, 0.7263119506835938, 0.727593994140625, 1.498050537109375, 0.7272069091796876, 0.7269785766601562, 0.7263672485351562, 0.7267686157226563, 0.7265392456054688, 0.7266918334960938, 0.7279226684570312, 0.7270952758789062, 0.7273717651367188, 0.7269324951171875, 0.7273584594726562, 0.7267799072265625, 0.7266355590820313, 0.7265770874023437, 0.7268864135742188, 0.7267072143554687, 0.7268515625, 0.72646875, 0.7266506958007812, 0.7269765014648437, 0.7271884765625, 0.7277824096679687, 0.7268259887695312, 0.7264942016601562, 0.7269846801757812, 0.7266631469726562, 0.7266221923828124, 0.7265433959960937, 0.7265709228515626, 0.7269048461914063, 0.726640625, 0.7268331298828125, 0.7268157348632812, 0.7275376586914063, 0.7284869384765625, 0.72732568359375, 0.72707275390625, 0.7283681030273438, 0.7273052368164062, 0.727035888671875, 0.726771728515625, 0.7267860717773438, 0.7267205200195312, 0.7269447631835938, 0.7271106567382812, 0.7269212036132813, 0.7263958740234375, 0.7277291259765625, 0.7267225341796875, 0.7280814208984375, 0.726513671875, 0.72703076171875, 0.7273072509765625, 0.7264839477539062, 0.72660888671875, 0.7266047973632812, 0.7268126831054688, 0.7263416137695312, 0.7265904541015625, 0.727103515625, 0.72654541015625, 0.7273564453125, 1.4995804443359375, 0.7264389038085938, 0.72692529296875, 0.726814697265625, 0.7266785278320312, 0.7263928833007812, 0.7267000122070313, 0.726929443359375, 0.7269508666992187, 0.7267072143554687, 0.7272652587890625, 0.727456787109375, 0.7268905029296875, 0.7272182006835938, 0.7271659545898438, 0.7272396850585937, 0.727488525390625, 0.727024658203125, 0.7267123413085937, 0.7267573852539062, 0.72673486328125, 0.7267174682617188, 0.7264542846679688, 0.7265056762695312, 0.7268145141601563, 0.7267010498046875, 0.7269017333984376, 0.7276697387695312, 0.7267901611328125, 0.7262535400390625, 0.7270850830078125, 0.7274118041992188, 0.7274280395507813, 0.7267235717773437, 0.727041015625, 0.7266365356445312, 0.7266611328125, 0.7265853271484375, 0.7269519653320312, 0.7284623413085938, 0.7270697021484375, 0.7270390014648438, 0.7264204711914063, 0.7266826171875, 0.7274915771484375, 0.726845458984375, 0.72681982421875, 0.72749365234375, 0.72808447265625, 0.727667724609375, 0.726656005859375, 0.7265833129882813, 0.726640625, 0.7277496337890625, 0.7269703979492188, 0.7271680297851563, 0.7267593994140625, 0.7272683715820313, 
0.727320556640625, 0.7269324951171875, 0.7269560546875, 0.7263651733398437, 0.7267870483398438, 1.500390380859375, 0.7265679321289062, 0.7275233154296875, 0.7270062255859375, 0.7268106079101563, 0.7261430053710938, 0.7260712890625, 0.7270625, 0.7281285400390625, 0.728479736328125, 0.7286661376953125, 0.7285464477539062, 0.7271463623046875, 0.7275120849609376, 0.7265228881835938, 0.7265628051757812, 0.7272130737304687, 0.7273912353515625, 0.7264358520507812, 0.7261306762695312, 0.7267123413085937, 0.7264491577148438, 0.72669287109375, 0.726719482421875, 0.7263631591796875, 0.7265198364257812, 0.7262894287109375, 0.7263252563476562, 0.7271505737304688, 0.7277117309570312, 0.7283435668945313, 0.7273421020507812, 0.7282186279296875, 0.7270390014648438, 0.7294136352539062, 0.7282667236328125, 0.7285493774414062, 0.7274598388671875, 0.7267921752929688, 0.7266416625976563, 0.72656591796875, 0.72635595703125, 0.7266129760742187, 0.7267891235351562, 0.7269498901367187, 0.7263416137695312, 0.72686181640625, 0.7261245727539063, 0.7262761840820312, 0.7264869995117188, 0.7264901123046875, 0.7260047607421874, 0.726487060546875, 0.7265208129882812, 0.7264603881835937, 0.7263908081054687, 0.7275448608398437, 0.727146484375, 0.72680859375, 0.7264603881835937, 0.7277178955078125, 0.72709326171875, 0.728501220703125, 1.5012095947265625, 0.72654541015625, 0.7268945922851563, 0.7272919311523437, 0.7272028198242187, 0.7270809326171875, 0.7276093139648437, 0.7282923583984375, 0.7271884765625, 0.7268485717773437, 0.726763427734375, 0.7262822265625, 0.7274660034179687, 0.7266007080078125, 0.72632421875, 0.7261010131835938, 0.726930419921875, 0.7262833251953125, 0.727236572265625, 0.7263109130859375, 0.727277587890625, 0.729017333984375, 0.7280557861328125, 0.7287122192382812, 0.726213623046875, 0.72635595703125, 0.7278028564453125, 0.7271577758789063, 0.7265413208007813, 0.7267686157226563, 0.7265740966796875, 0.7276277465820312, 0.727436279296875, 0.7270174560546875, 0.7273318481445312, 0.72755712890625, 0.72652392578125, 0.7278878784179688, 0.7272489013671875, 0.7272007446289063, 0.727320556640625, 0.7268229370117187, 0.7260794677734375, 0.7274660034179687, 0.726408203125, 0.726381591796875, 0.7260671997070313, 0.7263037719726563, 0.7268433837890625, 0.726957275390625, 0.7271473388671875, 0.7270225830078125, 0.7275509643554687, 0.7284777221679688, 0.7270584106445312, 0.7272191772460938, 0.727357421875, 0.7268218994140625, 0.7266734008789062, 0.726677490234375, 0.7279493408203125, 0.7279093627929687, 0.7280087280273437, 1.50259716796875, 0.7271526489257812, 0.727667724609375, 0.7279851684570312, 0.7274424438476562, 0.7262618408203125, 0.7266088256835938, 0.7269324951171875, 0.7273543701171875, 0.7273164672851562, 0.7271823120117188, 0.7268035278320313, 0.7278939208984375, 0.7264112548828126, 0.7268864135742188, 0.726513671875, 0.7265802001953126, 0.72677685546875, 0.7268116455078125, 0.7273564453125, 0.72827392578125, 0.72766259765625, 0.7272315063476562, 0.7262218017578125, 0.726414306640625, 0.7278561401367187, 0.728342529296875, 0.7276656494140625, 0.7273707275390625, 0.7264389038085938, 0.7276830444335938, 0.7273564453125, 0.7274208984375, 0.7277598876953125, 0.72755712890625, 0.7271044921875, 0.7269417114257812, 0.7284859008789063, 0.728322021484375, 0.727647216796875, 0.7293214721679687, 0.7274178466796875, 0.7273226318359375, 0.7271823120117188, 0.72783154296875, 0.7280087280273437, 0.7273554077148437, 0.7271423950195313, 0.7276287841796875, 0.7275499267578125, 0.7273082885742187, 0.7275765991210937, 
0.7275704345703125, 0.7288955078125, 0.7285176391601562, 0.7280199584960938, 0.727841796875, 0.72796875, 0.7280548095703125, 0.7271168212890625, 0.7272642822265625, 0.7274669799804687, 0.7284869384765625]",tokens/s,1.3547138700439787,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1510.83008,1619.525632,0.0,973.078528,855.737856,s,10,0.7501975097656249,0.07501975097656251,0.003494001966293861,0.0744185905456543,0.07687975234985352,0.0807021327972412,0.08376003715515137,"[0.08452451324462891, 0.0724151382446289, 0.07226691436767578, 0.07602333068847657, 0.07370025634765626, 0.072499267578125, 0.07526127624511719, 0.07513692474365234, 0.07233955383300782, 0.07603033447265625]",tokens/s,3412.4346811012333,kWh,8.781473283414488e-07,4.811843592162606e-07,2.329853715734e-06,3.6891854032917087e-06,tokens/kWh,69392012.60299407,MB,1510.83008,1619.525632,0.0,973.078528,915.411456,s,10,44.923669921875,4.492366992187501,0.05186865648995985,4.49665234375,4.532765380859375,4.566419799804687,4.593343334960937,"[4.4801484375, 4.46079150390625, 4.396806640625, 4.52528662109375, 4.43945751953125, 4.50497265625, 4.49965625, 4.4936484375, 4.52282763671875, 4.60007421875]",tokens/s,14.023787484317472,kWh,5.2254403288717625e-05,2.8638463246623156e-05,0.00013152905892685864,0.00021242192546219938,tokens/kWh,296579.5544076776,,s,629,45.51549240112303,0.07236167313374095,0.008872551278845286,0.07199948883056641,0.07308881988525391,0.07345274963378906,0.1417435186767578,"[0.07422156524658204, 0.07438028717041016, 0.07465984344482422, 0.07427993774414063, 0.070793212890625, 0.07357952117919922, 0.07368294525146485, 0.07249817657470703, 0.07192473602294921, 0.0720373764038086, 0.07181107330322266, 0.07195545959472656, 0.07189401245117187, 0.07177318572998047, 0.07202201843261719, 0.07179264068603515, 0.07144448089599609, 0.07195033264160157, 0.07206502532958985, 0.0718704605102539, 0.07257292938232422, 0.07203839874267579, 0.07174246215820312, 0.07193804931640625, 0.0720373764038086, 0.07203123474121094, 0.07179058837890626, 0.07161753845214844, 0.07207730865478515, 0.06936883544921875, 0.06907904052734375, 0.06904524993896484, 0.06900326538085938, 0.0691599349975586, 0.0692305908203125, 0.0704000015258789, 0.07194111633300782, 0.07188582611083984, 0.07171788787841797, 0.07179673767089843, 0.0721244125366211, 0.07180902099609375, 0.07232717132568359, 0.06903193664550782, 0.06900838470458984, 0.0690483169555664, 0.07060889434814453, 0.07242137908935548, 0.06946304321289062, 0.06903091430664063, 0.06926335906982421, 0.06909337615966797, 0.06909645080566407, 0.06918144226074219, 0.06912204742431641, 0.0692152328491211, 0.06930022430419921, 0.06917120361328125, 0.06931763458251954, 0.06909951782226563, 0.06903091430664063, 0.06910975646972656, 0.14174925231933594, 0.07115980529785157, 0.07259852600097656, 0.07286988830566406, 0.07273677062988282, 0.07269068908691406, 0.06918656158447266, 0.07026585388183594, 
0.07330303955078125, 0.06960230255126953, 0.06916505432128907, 0.0694824981689453, 0.07036313629150391, 0.0726292495727539, 0.07242649841308593, 0.07292723083496094, 0.07256371307373047, 0.0725749740600586, 0.07288422393798828, 0.0727930908203125, 0.06923571014404296, 0.07285657501220703, 0.07292825317382813, 0.07260671997070313, 0.06921318054199219, 0.06922649383544922, 0.06944051361083985, 0.06901862335205078, 0.06910873413085937, 0.0694814682006836, 0.06917938995361328, 0.0691435546875, 0.06913843536376953, 0.06960230255126953, 0.07092428588867188, 0.07321907043457031, 0.07265586853027343, 0.07284429168701172, 0.07265996551513672, 0.07249919891357422, 0.0727388153076172, 0.06851993560791016, 0.06896537780761719, 0.06924390411376953, 0.06928793334960938, 0.06909951782226563, 0.06925926208496094, 0.06904115295410156, 0.06911590576171875, 0.07245005035400391, 0.07256063842773437, 0.07257087707519531, 0.0727162857055664, 0.0725555191040039, 0.07245414733886718, 0.06913638305664062, 0.06932685089111328, 0.06927565002441406, 0.06910566711425781, 0.06935040283203125, 0.06927974700927735, 0.06926643371582031, 0.06896947479248047, 0.1415485382080078, 0.06951423645019532, 0.06943334197998047, 0.06925619506835938, 0.06917324829101562, 0.06899097442626953, 0.06922752380371094, 0.06912000274658203, 0.06899199676513672, 0.06969446563720703, 0.07301529693603516, 0.06934323120117188, 0.06960537719726563, 0.07306034851074218, 0.07241011047363281, 0.07290675354003906, 0.07274700927734375, 0.0726476821899414, 0.07265996551513672, 0.0726824951171875, 0.07269068908691406, 0.06931763458251954, 0.06954188537597657, 0.06927257537841797, 0.06938623809814454, 0.06953062438964844, 0.06952960205078125, 0.06926233673095702, 0.06965862274169922, 0.06954803466796874, 0.06898995208740234, 0.06898892974853515, 0.0695767059326172, 0.06904729461669921, 0.06909030151367188, 0.0691230697631836, 0.06926131439208984, 0.06881587219238282, 0.06911795043945312, 0.0690145263671875, 0.069106689453125, 0.06901760101318359, 0.06876467132568359, 0.06921625518798828, 0.06918246459960938, 0.06903091430664063, 0.0689797134399414, 0.06893158721923828, 0.0690544662475586, 0.06931865692138672, 0.06913433837890624, 0.0691619873046875, 0.06918758392333985, 0.06922956848144532, 0.06901248168945312, 0.06939955139160156, 0.0690708465576172, 0.06908415985107422, 0.06909747314453125, 0.06877798461914063, 0.0688721923828125, 0.06958284759521484, 0.07407615661621093, 0.14876364135742187, 0.0725524444580078, 0.06927769470214844, 0.07208243560791015, 0.07297433471679687, 0.0731176986694336, 0.07251251220703125, 0.0712273941040039, 0.07263846588134766, 0.07023104095458985, 0.07284735870361328, 0.07289651489257812, 0.07278694152832031, 0.07275827026367188, 0.0733675537109375, 0.07290982055664062, 0.07269888305664063, 0.06938419342041016, 0.07068057250976563, 0.07282994842529297, 0.07284838104248047, 0.07352217864990235, 0.07396147155761719, 0.07278694152832031, 0.0727224349975586, 0.07267123413085938, 0.07291801452636719, 0.07321497344970704, 0.07286579132080079, 0.0730408935546875, 0.06961766052246093, 0.07272755432128906, 0.07313715362548828, 0.07269990539550782, 0.07287091064453124, 0.07342694091796875, 0.07279821014404297, 0.07299993896484375, 0.07290777587890625, 0.07300505828857422, 0.07294976043701172, 0.07384166717529297, 0.07301222229003906, 0.07304806518554688, 0.07300096130371093, 0.06921318054199219, 0.06943846130371094, 0.07083724975585938, 0.07355596923828125, 0.07319039916992187, 0.06939443206787109, 0.06938111877441407, 
0.06927155303955078, 0.0692520980834961, 0.06911283111572265, 0.06944153594970703, 0.06942515563964843, 0.06953266906738281, 0.06910975646972656, 0.06917017364501953, 0.06945382690429687, 0.0691568603515625, 0.0722903060913086, 0.1428500518798828, 0.07174861145019532, 0.07272755432128906, 0.07288422393798828, 0.07220531463623046, 0.07277977752685547, 0.07296819305419922, 0.07285759735107422, 0.0730439682006836, 0.06917120361328125, 0.06948044586181641, 0.06915174102783203, 0.06912921905517579, 0.06931148529052734, 0.06940672302246094, 0.06928179168701172, 0.07211110687255859, 0.07298047637939453, 0.07278079986572265, 0.07271218872070312, 0.07268966674804687, 0.06976306915283204, 0.0731504669189453, 0.07382220458984375, 0.07313919830322266, 0.07456563568115235, 0.0726456298828125, 0.07287091064453124, 0.07290777587890625, 0.072595458984375, 0.06927974700927735, 0.06974668884277344, 0.06926131439208984, 0.06974668884277344, 0.0696094741821289, 0.0693565444946289, 0.06932173156738282, 0.06926541137695312, 0.06931763458251954, 0.06929203033447266, 0.06896025848388672, 0.0688875503540039, 0.0692838363647461, 0.06925107574462891, 0.06969344329833985, 0.06929510498046874, 0.06936064147949218, 0.06916607666015626, 0.06940364837646484, 0.06927667236328125, 0.0688721923828125, 0.06915071868896484, 0.06949273681640625, 0.06910361480712891, 0.06893260955810547, 0.06888857269287109, 0.06916710662841796, 0.06919065856933594, 0.06913433837890624, 0.06897663879394532, 0.06965760040283203, 0.06907596588134765, 0.06904524993896484, 0.14172877502441406, 0.06887628936767579, 0.06923980712890625, 0.06865408325195313, 0.06912102508544922, 0.06938317108154297, 0.06938521575927735, 0.06910771179199218, 0.06913843536376953, 0.06909951782226563, 0.07147007751464844, 0.07286374664306641, 0.07196057891845703, 0.07204454040527344, 0.07199948883056641, 0.07233126068115234, 0.07187763214111328, 0.07217971038818359, 0.07222169494628906, 0.07219097900390625, 0.07204966735839843, 0.07197491455078125, 0.07207833862304687, 0.07198822021484375, 0.0723394546508789, 0.07189299011230468, 0.0720547866821289, 0.07195750427246093, 0.0719288330078125, 0.07224626922607422, 0.0721981430053711, 0.0721295394897461, 0.07224729919433594, 0.07194624328613282, 0.07236812591552734, 0.07330508422851563, 0.07221247863769531, 0.07189094543457031, 0.07187763214111328, 0.0722503662109375, 0.07257292938232422, 0.07226982116699218, 0.07204761505126953, 0.07212134552001953, 0.07196876525878906, 0.07214694213867187, 0.07219404602050782, 0.07190940856933593, 0.07228310394287109, 0.0723609619140625, 0.07222476959228516, 0.07254732513427735, 0.07211724853515625, 0.07198617553710937, 0.07232102203369141, 0.0694302749633789, 0.06944563293457032, 0.06930226898193359, 0.06912000274658203, 0.07197286224365235, 0.07240499114990234, 0.07262515258789062, 0.07231488037109375, 0.1474877471923828, 0.07206195068359375, 0.07203635406494141, 0.0718704605102539, 0.07204147338867188, 0.07205785369873047, 0.07204249572753907, 0.07156735992431641, 0.0718551025390625, 0.07209369659423828, 0.07219404602050782, 0.07199334716796875, 0.07205683135986328, 0.0719974365234375, 0.07223910522460937, 0.07229440307617188, 0.0720373764038086, 0.07192985534667969, 0.07207218933105469, 0.07226060485839844, 0.07216844940185548, 0.07213158416748047, 0.07198822021484375, 0.07197695922851563, 0.07198515319824218, 0.07197901153564454, 0.07212748718261719, 0.07194111633300782, 0.07195136260986328, 0.07207936096191406, 0.07236505889892578, 0.07226982116699218, 0.06943231964111328, 
0.06955929565429687, 0.069607421875, 0.06953164672851563, 0.07322624206542969, 0.07243981170654297, 0.07254937744140624, 0.07196979522705078, 0.07131033325195313, 0.07205785369873047, 0.07247872161865235, 0.07209471893310547, 0.07210291290283204, 0.06919782257080079, 0.06972108459472656, 0.06934835052490235, 0.06924082946777343, 0.06914765167236328, 0.06955519866943359, 0.06937907409667969, 0.06931148529052734, 0.06938829040527343, 0.06970674896240234, 0.06969548797607422, 0.06920089721679687, 0.07203942108154297, 0.07206809234619141, 0.07200972747802735, 0.07209779357910157, 0.07210291290283204, 0.07222681427001953, 0.14453248596191406, 0.06921011352539062, 0.0692326431274414, 0.06925312042236328, 0.06934220886230469, 0.07179571533203125, 0.07198617553710937, 0.07202098846435546, 0.07208243560791015, 0.07199436950683594, 0.07232921600341796, 0.07199027252197265, 0.07135231781005859, 0.07213772583007813, 0.07198207855224609, 0.07201996612548828, 0.07227597045898437, 0.0715315170288086, 0.07204863739013671, 0.0719319076538086, 0.07195442962646484, 0.0720148468017578, 0.07350169372558593, 0.07210291290283204, 0.07235584259033204, 0.07218790435791016, 0.07257804870605469, 0.07216844940185548, 0.07202713775634766, 0.07208550262451172, 0.07201689910888671, 0.0721786880493164, 0.07204557037353515, 0.07207218933105469, 0.0721070098876953, 0.0719482879638672, 0.07234662628173828, 0.07192985534667969, 0.07208345794677734, 0.07192371368408203, 0.07228108978271484, 0.07198925018310547, 0.07188787078857421, 0.07216639709472657, 0.07209062194824219, 0.07224217224121093, 0.07236914825439453, 0.07234559631347656, 0.07200768280029297, 0.07227903747558594, 0.07189913940429687, 0.06916403198242188, 0.06938009643554688, 0.0693903350830078, 0.06937190246582031, 0.06929714965820312, 0.06908108520507812, 0.06912102508544922, 0.06940876770019531, 0.06922752380371094, 0.06929100799560547, 0.06920191955566406, 0.06866534423828125, 0.14497279357910156, 0.07190630340576172, 0.07215411376953125, 0.07193702697753906, 0.07205785369873047, 0.07186431884765625, 0.07196672058105469, 0.07226060485839844, 0.07261901092529296, 0.07256473541259766, 0.0723609619140625, 0.07218585968017578, 0.07166259002685547, 0.07175373077392579, 0.0723773422241211, 0.0719626235961914, 0.07179878234863281, 0.072015869140625, 0.0722001953125, 0.07199641418457031, 0.07210495758056641, 0.07236300659179687, 0.07228006744384766, 0.07196672058105469, 0.07183257293701172, 0.07211519622802734, 0.07188480377197265, 0.07188070678710938, 0.07184076690673828, 0.07201689910888671, 0.07219404602050782, 0.07205068969726562, 0.07195442962646484, 0.07184076690673828, 0.07215513610839844, 0.07230873870849609, 0.07222067260742188, 0.07214284515380859, 0.07292928314208984, 0.07242649841308593, 0.07191654205322266, 0.07287398529052734, 0.07126732635498047, 0.07227391815185547, 0.07001599884033204, 0.07193292999267578, 0.07222681427001953, 0.07207218933105469, 0.07356108856201173, 0.07293440246582031, 0.07190835571289063, 0.07186943817138672, 0.06945689392089843, 0.06953164672851563, 0.06899814605712891, 0.06878720092773437, 0.06898278045654296, 0.06849638366699219, 0.06950809478759766, 0.07029452514648438, 0.07316889953613281, 0.07317708587646485, 0.07281254577636719, 0.14895103454589845, 0.07272550201416016, 0.07273779296875, 0.07310028839111328, 0.07281459045410156, 0.07278899383544922, 0.0730245132446289, 0.07308595275878907, 0.07223705291748046, 0.072880126953125, 0.07315660858154296, 0.07311974334716796, 0.07316172790527344, 0.07296717071533203, 
0.07294464111328125, 0.07245823669433593, 0.07288217926025391, 0.07276441955566407, 0.07286784362792968, 0.07301734161376953, 0.07289548492431641, 0.07284735870361328, 0.07271321868896484, 0.07318630218505859, 0.07307878112792969, 0.07253504180908203, 0.07293644714355468, 0.07297843170166016, 0.07267430114746094, 0.07276748657226563, 0.07329894256591797, 0.07337881469726562, 0.0731668472290039, 0.07399219512939453, 0.07357746887207031, 0.07269580841064453, 0.0729169921875, 0.07252992248535156, 0.07297126770019531, 0.07306240081787109, 0.07285964965820313, 0.07280947113037109, 0.07383859252929688, 0.07331942749023437, 0.07286271667480469, 0.07300812530517578, 0.07352012634277344, 0.07395225524902344, 0.073385986328125, 0.07318630218505859, 0.07355289459228516, 0.07336959838867188, 0.07346995544433593, 0.0732938232421875, 0.07325389099121093, 0.07335321807861328, 0.07319347381591797, 0.0742266845703125, 0.0733337631225586, 0.0729722900390625, 0.0729200668334961, 0.07297740936279297, 0.0695920639038086]",tokens/s,13.819470400466983,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1308.95872,1044.905984,0.0,398.45888,290.479104,s,10,0.7298112030029297,0.07298112030029298,0.0019203205238493336,0.07210595321655273,0.07568494338989258,0.07613730201721192,0.07649918891906739,"[0.07658966064453125, 0.07404208374023437, 0.0716014404296875, 0.07261046600341797, 0.07435145568847656, 0.07145033264160157, 0.07128550720214843, 0.07136418914794922, 0.07093164825439453, 0.07558441925048828]",tokens/s,3507.7565121862385,kWh,8.692573962940111e-07,4.763112833442766e-07,2.233504646279191e-06,3.579073325917479e-06,tokens/kWh,71526894.44672821,MB,1309.261824,1044.905984,0.0,398.45888,337.28256,s,10,45.10692724609375,4.510692724609375,0.02785183557667033,4.507988037109375,4.524690771484375,4.5543905517578125,4.578150375976563,"[4.4999814453125, 4.51792333984375, 4.49663525390625, 4.50728076171875, 4.5086953125, 4.47609033203125, 4.5180908203125, 4.4837802734375, 4.514359375, 4.58409033203125]",tokens/s,13.966812604255988,kWh,5.335015537424221e-05,2.9239079429191457e-05,0.0001294696562717161,0.00021205889107514978,tokens/kWh,297087.2840114681,,s,629,45.688359863281214,0.07263650216737877,0.008731184173797845,0.07209983825683594,0.0731078628540039,0.07340298156738281,0.14137104125976563,"[0.07185408020019532, 0.07194009399414063, 0.07217356872558593, 0.07269068908691406, 0.07360717010498047, 0.0735498275756836, 0.07288934326171875, 0.07259142303466797, 0.07237420654296875, 0.07188172912597657, 0.07213568115234376, 0.0719452133178711, 0.07213875579833984, 0.07207730865478515, 0.07207014465332032, 0.0719257583618164, 0.06970162963867188, 0.06967910766601562, 0.0696995849609375, 0.07190425872802735, 0.07216028594970703, 0.07225545501708984, 0.07087206268310547, 0.06890290832519531, 0.06927776336669922, 0.06949574279785156, 0.06937395477294922, 0.06961663818359375, 0.06992694091796875, 0.07223088073730469, 
0.07267635345458984, 0.07262620544433594, 0.07189091491699219, 0.07197593688964844, 0.07230976104736328, 0.07216025543212891, 0.07246540832519531, 0.07132262420654296, 0.07190016174316406, 0.07246745300292969, 0.07053619384765625, 0.06935552215576171, 0.06963097381591797, 0.07147932434082031, 0.06940361785888671, 0.0716410903930664, 0.07243264007568359, 0.07219302368164063, 0.07221145629882812, 0.07042355346679688, 0.06942002868652344, 0.06980608367919922, 0.06986752319335937, 0.07069696044921875, 0.0722165756225586, 0.0733306884765625, 0.07286374664306641, 0.07210086059570313, 0.07077069091796875, 0.07022796630859375, 0.07062940979003907, 0.07200457763671875, 0.14657945251464843, 0.07262617492675781, 0.07222579193115235, 0.07230054473876953, 0.07205580902099609, 0.07209779357910157, 0.07216537475585938, 0.07003648376464844, 0.07229849243164063, 0.07284019470214843, 0.07227597045898437, 0.07243981170654297, 0.07234457397460937, 0.07199948883056641, 0.07222169494628906, 0.07268147277832031, 0.07375667572021484, 0.07214591979980468, 0.07202201843261719, 0.07222271728515625, 0.07206604766845703, 0.0720025634765625, 0.07187251281738281, 0.0722913589477539, 0.072248291015625, 0.07194624328613282, 0.07201999664306641, 0.07205987548828124, 0.07217356872558593, 0.0712837142944336, 0.06940569305419922, 0.06932991790771484, 0.06942310333251953, 0.06952140808105468, 0.06934835052490235, 0.06939238739013671, 0.06940774536132813, 0.06954188537597657, 0.06909951782226563, 0.0694824981689453, 0.06964530944824218, 0.06932991790771484, 0.07233843231201172, 0.07286271667480469, 0.06985113525390625, 0.06934528350830078, 0.07116902160644531, 0.0729722900390625, 0.0716072998046875, 0.07257907104492188, 0.0729917449951172, 0.07322112274169922, 0.07285657501220703, 0.07319859313964844, 0.07252787017822265, 0.07272038269042969, 0.07294668579101563, 0.0729886703491211, 0.07285350036621094, 0.07272959899902344, 0.07295283508300782, 0.07268556976318359, 0.07238143920898438, 0.1415720977783203, 0.0693934097290039, 0.06967814636230468, 0.06940972900390625, 0.06931148529052734, 0.06929817962646484, 0.06965049743652343, 0.0701173095703125, 0.07237427520751953, 0.07200054168701171, 0.07237423706054688, 0.07208857727050781, 0.07215309143066406, 0.07220838165283203, 0.07202713775634766, 0.0719810562133789, 0.07221862030029297, 0.07206092834472656, 0.07223705291748046, 0.07211007690429687, 0.07236300659179687, 0.07231590270996094, 0.07095603179931641, 0.0694814682006836, 0.06937395477294922, 0.06936780548095703, 0.07176601409912109, 0.07184486389160157, 0.0720343017578125, 0.07231897735595703, 0.07199129486083984, 0.07193299102783203, 0.0723834228515625, 0.07228825378417969, 0.07213260650634766, 0.07228108978271484, 0.07208448028564453, 0.07200153350830078, 0.07205990600585938, 0.07209983825683594, 0.07197491455078125, 0.07196057891845703, 0.0720547866821289, 0.073133056640625, 0.07183769226074219, 0.07257807922363281, 0.0721899185180664, 0.07195340728759765, 0.0722677764892578, 0.07214284515380859, 0.07201487731933594, 0.06975177764892578, 0.06966681671142579, 0.06874931335449219, 0.06921011352539062, 0.06935346984863282, 0.06987059020996093, 0.07226265716552735, 0.07209164428710937, 0.07197081756591797, 0.07196057891845703, 0.0713338851928711, 0.07061196899414063, 0.1456865234375, 0.0722279052734375, 0.07271212768554687, 0.07285043334960938, 0.0725432357788086, 0.07223500823974609, 0.0720865249633789, 0.07014915466308594, 0.06968726348876954, 0.06959820556640625, 0.06964019012451172, 0.0694681625366211, 
0.07034880065917969, 0.07004876708984376, 0.06967193603515626, 0.06940160369873047, 0.07108914947509766, 0.07227597045898437, 0.07151821136474609, 0.06967501068115234, 0.06957772827148437, 0.0698071060180664, 0.07123967742919922, 0.07210086059570313, 0.07223603057861328, 0.07214284515380859, 0.07249510192871093, 0.07254835510253907, 0.07294361877441406, 0.07138508605957031, 0.07067545318603516, 0.07234969329833985, 0.07242546844482421, 0.07169741058349609, 0.07204966735839843, 0.06960543823242188, 0.06955718231201172, 0.07074406433105469, 0.07213772583007813, 0.07239683532714844, 0.07193702697753906, 0.07239676666259766, 0.0722012176513672, 0.07219200134277344, 0.07279411315917969, 0.07149260711669922, 0.07210393524169922, 0.07228211212158203, 0.07218380737304687, 0.07210598754882812, 0.07205171203613281, 0.07216435241699219, 0.07028326416015625, 0.06974668884277344, 0.07239577484130859, 0.07243673706054687, 0.07223094177246094, 0.07230358123779297, 0.07244390106201172, 0.07284838104248047, 0.07209062194824219, 0.072163330078125, 0.07201894378662109, 0.14699827575683594, 0.07176294708251953, 0.0707747802734375, 0.0720343017578125, 0.0722135009765625, 0.07226268768310547, 0.0720823974609375, 0.07234976196289063, 0.07255648040771484, 0.07198515319824218, 0.07197798156738282, 0.07256678771972656, 0.07222476959228516, 0.07208755493164062, 0.07251971435546875, 0.07147618865966797, 0.06935346984863282, 0.06952550506591797, 0.06944051361083985, 0.06955213165283203, 0.06944255828857422, 0.06934323120117188, 0.07061299133300782, 0.07203533172607422, 0.07238861083984376, 0.07203225708007813, 0.07213568115234376, 0.0720374755859375, 0.07249091339111328, 0.0709017562866211, 0.06946304321289062, 0.07154790496826172, 0.07224217224121093, 0.07243673706054687, 0.07215615844726563, 0.07217459106445312, 0.07221247863769531, 0.07201894378662109, 0.07098880004882813, 0.06991667175292969, 0.07119564819335937, 0.07190630340576172, 0.0721786880493164, 0.07199334716796875, 0.06973235321044922, 0.06972415924072266, 0.06972930908203125, 0.06958694458007812, 0.06966268920898437, 0.07221145629882812, 0.07257907104492188, 0.07218688201904297, 0.07213772583007813, 0.07218380737304687, 0.07208345794677734, 0.07205068969726562, 0.07378431701660157, 0.07249612426757812, 0.07221862030029297, 0.0720650863647461, 0.07227897644042969, 0.07233740997314453, 0.07240601348876953, 0.1438771514892578, 0.07196463775634766, 0.07234867095947266, 0.07229952239990234, 0.07298047637939453, 0.07293030548095703, 0.07180902099609375, 0.07185408020019532, 0.07234047698974609, 0.07194316864013672, 0.07218278503417969, 0.07320985412597657, 0.07245516967773437, 0.07180287933349609, 0.07000985717773438, 0.0729917449951172, 0.07227187347412109, 0.07225958251953125, 0.07234047698974609, 0.07209677124023438, 0.07210393524169922, 0.07231283569335938, 0.07199436950683594, 0.07164825439453125, 0.06944358062744141, 0.0694302749633789, 0.06980812835693359, 0.06952652740478515, 0.06984703826904297, 0.07122022247314454, 0.07212850952148438, 0.06986752319335937, 0.06965248107910156, 0.06977843475341797, 0.06978047943115234, 0.06932173156738282, 0.0697364501953125, 0.0695572509765625, 0.06944153594970703, 0.07092131042480469, 0.07262095642089844, 0.07213362884521485, 0.0723927001953125, 0.0719974365234375, 0.07194422149658203, 0.0721714859008789, 0.07187967681884766, 0.0721244125366211, 0.07195852661132812, 0.07079424285888672, 0.06962790679931641, 0.06972518157958985, 0.06952345275878906, 0.06946918487548828, 0.06943539428710938, 0.06939238739013671, 
0.06983372497558593, 0.06957260894775391, 0.06938521575927735, 0.06923776245117187, 0.07017164611816407, 0.06932582092285157, 0.06944461059570313, 0.1413570556640625, 0.06928076934814453, 0.07196774291992188, 0.0723220443725586, 0.07226573181152343, 0.07199231719970703, 0.07206502532958985, 0.07222476959228516, 0.07213875579833984, 0.06940057373046875, 0.06947840118408204, 0.07286784362792968, 0.07213260650634766, 0.0724295654296875, 0.07200563049316407, 0.07263334655761719, 0.07273677062988282, 0.07225856018066407, 0.0693411865234375, 0.0695367660522461, 0.07020543670654297, 0.07206809234619141, 0.07203020477294922, 0.0722001953125, 0.07224012756347656, 0.07218688201904297, 0.07218694305419922, 0.07206291198730469, 0.07192678070068359, 0.0715857925415039, 0.07136051177978515, 0.0714076156616211, 0.07242240142822266, 0.07221453094482422, 0.07244499206542969, 0.07197689819335938, 0.07119974517822265, 0.07206297302246094, 0.0721981430053711, 0.0724316177368164, 0.07192473602294921, 0.07172608184814454, 0.06988082885742188, 0.06962483215332031, 0.07035497283935546, 0.06945276641845703, 0.06980198669433593, 0.07191449737548829, 0.07256678771972656, 0.07234457397460937, 0.07288422393798828, 0.07266918182373047, 0.07230668640136718, 0.07237324523925781, 0.0723978271484375, 0.07207526397705079, 0.07200057220458984, 0.07223903656005859, 0.07222172546386718, 0.07205680084228516, 0.0722012176513672, 0.07250841522216797, 0.07297948455810546, 0.14137648010253906, 0.06947328186035157, 0.069607421875, 0.06972518157958985, 0.07188377380371094, 0.07261798095703124, 0.07253196716308594, 0.07225138854980469, 0.0723763198852539, 0.07227289581298828, 0.0724869155883789, 0.0720404510498047, 0.07224832153320312, 0.0722790756225586, 0.07235478210449219, 0.07247462463378906, 0.07243059539794922, 0.07224217224121093, 0.07231999969482422, 0.07176908874511718, 0.07236198425292968, 0.07177011108398437, 0.06941900634765626, 0.0709969940185547, 0.07273983764648438, 0.07211827087402344, 0.07241318511962891, 0.07507456207275391, 0.07245209503173829, 0.07226681518554688, 0.07216531372070313, 0.07257190704345703, 0.07180902099609375, 0.07219200134277344, 0.07004978942871094, 0.06961151885986328, 0.06961663818359375, 0.06986041259765625, 0.06977324676513671, 0.06963404846191407, 0.06956646728515625, 0.06976614379882813, 0.06975692749023438, 0.06940774536132813, 0.07025663757324219, 0.06947020721435547, 0.07162265777587891, 0.07189810943603515, 0.07244185638427734, 0.07230054473876953, 0.07227497863769532, 0.07341053009033204, 0.07264460754394532, 0.07066521453857422, 0.06972415924072266, 0.0694620132446289, 0.06928281402587891, 0.06948761749267578, 0.0691251220703125, 0.06890598297119141, 0.06905343627929687, 0.06960025787353516, 0.06945689392089843, 0.14089112854003907, 0.0694128646850586, 0.069570556640625, 0.06914662170410156, 0.06950399780273438, 0.07003545379638672, 0.06975794982910156, 0.06944870758056641, 0.070181884765625, 0.06986444854736328, 0.069607421875, 0.0728453140258789, 0.07321395111083985, 0.07279718780517579, 0.07399935913085938, 0.07330099487304688, 0.07279718780517579, 0.07354777526855469, 0.07338188934326172, 0.073301025390625, 0.07308284759521484, 0.0729917449951172, 0.07306854248046875, 0.0727388153076172, 0.07293440246582031, 0.0727357406616211, 0.07297023773193359, 0.07296102142333985, 0.07303270721435547, 0.07281462097167969, 0.07316886138916015, 0.073059326171875, 0.0725074234008789, 0.07300093078613282, 0.07273785400390625, 0.07161849975585938, 0.06959820556640625, 0.06925823974609376, 
0.06970883178710938, 0.06981629180908203, 0.06969036865234375, 0.06949683380126953, 0.06930738830566406, 0.06938419342041016, 0.06971298980712891, 0.0707419204711914, 0.07269376373291016, 0.07251353454589844, 0.07328562927246093, 0.06910873413085937, 0.06949581146240234, 0.06906163024902344, 0.07005286407470702, 0.07312281799316406, 0.07338393402099609, 0.07292108917236328, 0.07231078338623047, 0.07324467468261718, 0.07336345672607422, 0.07308595275878907, 0.07299993896484375, 0.07279001617431641, 0.07310745239257813, 0.14934938049316407, 0.07343206024169922, 0.07323033905029297, 0.07366553497314453, 0.07251865386962891, 0.07295795440673829, 0.07302963256835937, 0.073480224609375, 0.07338902282714843, 0.07340748596191406, 0.0733655014038086, 0.07314534759521485, 0.07351602935791016, 0.07292825317382813, 0.07283715057373047, 0.07243465423583985, 0.07391846466064453, 0.07298047637939453, 0.07312384033203125, 0.07325183868408203, 0.07271321868896484, 0.07309209442138671, 0.07304192352294922, 0.07337062072753907, 0.0729354248046875, 0.07303884887695312, 0.07325081634521484, 0.0731668472290039, 0.07339622497558594, 0.07318016052246094, 0.07310028839111328, 0.07337574768066406, 0.07324774169921874, 0.07300300598144531, 0.07360620880126953, 0.0735692138671875, 0.07393798065185547, 0.07294355010986328, 0.07357542419433594, 0.07405875396728516, 0.07427174377441406, 0.07364198303222656, 0.07310950469970703, 0.07334912109375, 0.0739277114868164, 0.07357746887207031, 0.07303472137451172, 0.07309414672851562, 0.07331635284423828, 0.07301529693603516, 0.0722135009765625, 0.07023104095458985, 0.07103794860839843, 0.06991667175292969, 0.0706355209350586, 0.0701685791015625, 0.06987264251708984, 0.0711445083618164, 0.0702627182006836, 0.0697343978881836, 0.06988800048828125, 0.07153561401367188, 0.07306034851074218]",tokens/s,13.767182754693582,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2022.883328,5539.10272,0.0,4892.655616,4542.741504,s,10,5.694402282714844,0.5694402282714843,0.0022278964552512335,0.5688670349121093,0.5707117797851562,0.5731342102050782,0.5750721545410157,"[0.575556640625, 0.5690338134765625, 0.5678692626953125, 0.5687002563476562, 0.5676268920898437, 0.5674249267578125, 0.5684844970703125, 0.5696422119140625, 0.5701734619140625, 0.5698903198242188]",tokens/s,449.56430418883286,kWh,6.703966662839608e-06,3.6734817678128214e-06,3.077389807590622e-05,4.115134650655865e-05,tokens/kWh,6220938.601831633,MB,2022.883328,5539.10272,0.0,4892.655616,4726.280192,s,10,334.554328125,33.4554328125,0.012952548332308819,33.45466796875,33.46486484375,33.476350390625,33.485538828125,"[33.4878359375, 33.44866796875, 33.443125, 33.44916015625, 33.43737890625, 33.4588671875, 33.4546875, 33.4623125, 33.4546484375, 
33.45764453125]",tokens/s,1.8831022259697452,kWh,0.00039510093122168826,0.00021654957452975094,0.0018051211817500937,0.0024167716875015325,tokens/kWh,26067.832690116305,,s,629,339.1397302856444,0.5391728621393395,0.06755475317116652,0.53097265625,0.5316083862304688,0.5317822387695312,1.09872478515625,"[0.5310126342773438, 0.53113037109375, 0.5317058715820312, 0.530935791015625, 0.5307023315429688, 0.531651611328125, 0.5313556518554687, 0.53151025390625, 0.5312081909179688, 0.5308344116210938, 0.5306572875976563, 0.531704833984375, 0.5313720092773437, 0.5314744262695312, 0.5315502319335937, 0.5319423828125, 0.5314641723632813, 0.53194140625, 0.5313074951171874, 0.53119384765625, 0.5315758056640625, 0.5314529418945313, 0.5310607299804687, 0.5313535766601563, 0.5317161254882813, 0.531689453125, 0.53146826171875, 0.531725341796875, 0.5313976440429687, 0.53174169921875, 0.5316290283203124, 0.5319515991210938, 0.5317816162109374, 0.5313218383789062, 0.5316331787109375, 0.5317826538085938, 0.5314375610351563, 0.5311324462890625, 0.5311549682617187, 0.5317427368164063, 0.5311119384765625, 0.5312276611328125, 0.5314365234375, 0.5316792602539062, 0.5316904907226563, 0.5320693969726562, 0.531862548828125, 0.5321605224609375, 0.5318092651367188, 0.5320345458984375, 0.5320376586914063, 0.531989501953125, 0.5317529296875, 0.5317816162109374, 0.532279296875, 0.531968994140625, 0.531651611328125, 0.5318492431640625, 0.5322158203125, 0.5313269653320313, 0.5311262817382812, 0.5317109985351562, 1.100517333984375, 0.531336181640625, 0.5311201171875, 0.5306900634765624, 0.5306705932617187, 0.5307289428710937, 0.5306470336914062, 0.5307658081054687, 0.530572265625, 0.5306316528320313, 0.5310679321289062, 0.5309183959960937, 0.5307955322265625, 0.5309910888671875, 0.5315430297851562, 0.531473388671875, 0.5305477294921875, 0.5311221923828126, 0.5307709350585937, 0.530966552734375, 0.5305128784179688, 0.5308323974609375, 0.5309839477539062, 0.5310494995117188, 0.5307473754882812, 0.5309757690429687, 0.5309235229492187, 0.5309337768554687, 0.53066650390625, 0.5311743774414063, 0.53091943359375, 0.5312245483398438, 0.5310628051757813, 0.5319270629882813, 0.5310371704101563, 0.5307791137695312, 0.5309276123046875, 0.530777099609375, 0.5308047485351562, 0.5308313598632812, 0.530946044921875, 0.5307647705078125, 0.5308211059570312, 0.5306654663085938, 0.530524169921875, 0.5307432861328125, 0.5308251953125, 0.53072998046875, 0.530977783203125, 0.5311867065429687, 0.5309869995117188, 0.5312542724609375, 0.5314273071289063, 0.5308969116210938, 0.5310075073242188, 0.5313853149414063, 0.5306746826171875, 0.53089892578125, 0.5307893676757812, 0.5309020385742188, 0.5307484130859375, 0.5309389038085938, 0.5306982421875, 1.0987735595703125, 0.5308344116210938, 0.530502685546875, 0.5315389404296875, 0.5309573364257812, 0.5307207641601562, 0.530703369140625, 0.531072021484375, 0.5305855712890625, 0.5308221435546875, 0.530577392578125, 0.530745361328125, 0.5306163330078125, 0.5306101684570312, 0.5306142578125, 0.5307074584960938, 0.5306920776367188, 0.530682861328125, 0.5304330444335937, 0.5306326904296875, 0.5307760620117188, 0.5308211059570312, 0.5306358032226562, 0.53072998046875, 0.5306920776367188, 0.5310648193359375, 0.5310341186523437, 0.5307627563476562, 0.530850830078125, 0.5307586669921875, 0.5305743408203125, 0.530819091796875, 0.5308047485351562, 0.5305784301757812, 0.5307218017578125, 0.5306890258789062, 0.5307053833007812, 0.5306900634765624, 0.5304514770507812, 0.5309265747070312, 0.5308579711914062, 
0.5307044067382812, 0.5306583251953125, 0.530681884765625, 0.5314078979492187, 0.5308272705078125, 0.53083544921875, 0.5314437255859376, 0.5309910888671875, 0.5313760986328125, 0.5309757690429687, 0.5308323974609375, 0.5307709350585937, 0.5311876831054687, 0.5309788208007813, 0.5310750732421875, 0.5309757690429687, 0.5311590576171875, 0.530819091796875, 0.5316198120117187, 0.5310689086914062, 0.5308856201171875, 0.530956298828125, 1.099652099609375, 0.5306685180664062, 0.5309573364257812, 0.5311047973632812, 0.5308251953125, 0.530680908203125, 0.5306971435546874, 0.5309389038085938, 0.5305805053710938, 0.5308150024414062, 0.530714599609375, 0.5305753784179688, 0.530609130859375, 0.5306388549804687, 0.5310023803710937, 0.5311734008789063, 0.5306429443359375, 0.5309942016601562, 0.5309910888671875, 0.5308006591796876, 0.5307330322265625, 0.5312122802734375, 0.5308692626953125, 0.53094091796875, 0.5309368286132813, 0.5310699462890625, 0.531135498046875, 0.5310894165039063, 0.5310156860351563, 0.5310453491210938, 0.5309798583984375, 0.5312214965820312, 0.5310709838867187, 0.5311631469726562, 0.5312348022460938, 0.5308098754882813, 0.5313177490234375, 0.5314129638671875, 0.5311539306640625, 0.5311221923828126, 0.5308692626953125, 0.5309265747070312, 0.5310023803710937, 0.5306695556640625, 0.5307924194335938, 0.5308477172851562, 0.5312583618164063, 0.5308375244140625, 0.5306859741210938, 0.5308375244140625, 0.5310238647460938, 0.5310392456054688, 0.5313167114257813, 0.5309163818359375, 0.530735107421875, 0.5309808349609375, 0.5313843383789062, 0.53096240234375, 0.530924560546875, 0.5308661499023437, 0.5307893676757812, 0.5308641357421875, 0.5307678833007813, 1.098599365234375, 0.5306071166992188, 0.5305692138671875, 0.5305211181640626, 0.53089794921875, 0.5305497436523438, 0.5308221435546875, 0.5307658081054687, 0.5307996215820312, 0.530661376953125, 0.5306900634765624, 0.5311692504882812, 0.530609130859375, 0.5308006591796876, 0.5306644287109376, 0.5308375244140625, 0.5307730102539062, 0.5307525024414063, 0.5305477294921875, 0.5310003051757812, 0.53079345703125, 0.5306757202148438, 0.5304637451171875, 0.5307637939453125, 0.5306132202148437, 0.530956298828125, 0.530988037109375, 0.5308764038085938, 0.5306808471679687, 0.5308743896484375, 0.5307279663085938, 0.5307576293945313, 0.5310965576171875, 0.5306583251953125, 0.53056103515625, 0.5305681762695312, 0.530555908203125, 0.5308845825195313, 0.5308344116210938, 0.5308221435546875, 0.530787353515625, 0.5306757202148438, 0.5306941528320313, 0.5308262329101563, 0.5310812377929688, 0.530934814453125, 0.5305538330078124, 0.5308098754882813, 0.5308231811523437, 0.5310986328125, 0.5306429443359375, 0.530845703125, 0.5307422485351563, 0.5307197265625, 0.530492431640625, 0.5308323974609375, 0.5305548706054688, 0.53087744140625, 0.5306767578125, 0.5308753662109374, 0.5305855712890625, 0.5308129272460937, 0.5303736572265625, 1.098567626953125, 0.5309030151367188, 0.5317017822265625, 0.5314866943359375, 0.531140625, 0.5313515625, 0.5313720092773437, 0.530951171875, 0.5308917846679687, 0.530724853515625, 0.5307863159179688, 0.5310587158203125, 0.530777099609375, 0.530819091796875, 0.53087744140625, 0.5309696044921876, 0.5308897094726562, 0.5310904541015625, 0.531515380859375, 0.5310259399414062, 0.5307739868164062, 0.5312061157226563, 0.5318901977539062, 0.5310761108398437, 0.5309962158203125, 0.5314529418945313, 0.5309951782226563, 0.5310013427734375, 0.5313760986328125, 0.5313331298828124, 0.5312849731445313, 0.5316055297851563, 0.5308313598632812, 
0.5314816284179688, 0.531324951171875, 0.5315655517578125, 0.5319567260742187, 0.5317283935546875, 0.5308856201171875, 0.5309910888671875, 0.5312266235351563, 0.53136279296875, 0.5310289916992188, 0.5309757690429687, 0.530524169921875, 0.5308047485351562, 0.5308436279296875, 0.5307371215820312, 0.5306552124023437, 0.5307576293945313, 0.530914306640625, 0.530629638671875, 0.530746337890625, 0.5308231811523437, 0.5308712768554688, 0.5312450561523437, 0.5309327392578125, 0.53103515625, 0.5309214477539063, 0.5310279541015624, 0.5311580200195313, 0.5310894165039063, 0.5313126220703125, 1.0993970947265626, 0.5308150024414062, 0.53054052734375, 0.53062451171875, 0.5311447143554687, 0.5309869995117188, 0.5307893676757812, 0.5311334228515625, 0.5309542236328125, 0.5308313598632812, 0.5310842895507812, 0.5308108520507813, 0.5306603393554687, 0.531177490234375, 0.5307023315429688, 0.5314396362304687, 0.5306491088867188, 0.5312952270507812, 0.5308108520507813, 0.530840576171875, 0.5308078002929687, 0.5307863159179688, 0.5308047485351562, 0.5311262817382812, 0.53103515625, 0.5310740356445313, 0.5309634399414063, 0.531051513671875, 0.5307965698242187, 0.5314877319335938, 0.530746337890625, 0.5310259399414062, 0.5311201171875, 0.5307340698242188, 0.5309931640625, 0.5309593505859375, 0.5321942749023437, 0.5316433715820312, 0.5312388916015625, 0.530872314453125, 0.5307698974609375, 0.5307975463867187, 0.53082421875, 0.5306982421875, 0.5306019897460937, 0.5307197265625, 0.5305917358398438, 0.5307258911132813, 0.53085693359375, 0.531398681640625, 0.53166796875, 0.5317969970703125, 0.531862548828125, 0.5320745239257813, 0.5316608276367187, 0.5313966064453125, 0.5307791137695312, 0.53075048828125, 0.5309102172851563, 0.5311057739257813, 0.5310842895507812, 0.5312010498046875, 0.5308037109375, 1.1007191162109375, 0.53106689453125, 0.5309931640625, 0.5313853149414063, 0.5308897094726562, 0.5309972534179688, 0.5310842895507812, 0.531324951171875, 0.5306767578125, 0.5316229248046875, 0.5309573364257812, 0.530756591796875, 0.5307310180664062, 0.53082421875, 0.5305927734375, 0.5316853637695312, 0.5315819702148438, 0.5312542724609375, 0.5311344604492187, 0.531040283203125, 0.5308897094726562, 0.5313392944335937, 0.5316823120117188, 0.53125634765625, 0.5309573364257812, 0.5317652587890624, 0.531435546875, 0.531162109375, 0.5312890625, 0.5313095703125, 0.5313320922851562, 0.5313013916015625, 0.5311661987304688, 0.531324951171875, 0.5308743896484375, 0.53102490234375, 0.5308999633789062, 0.53098291015625, 0.5310023803710937, 0.5311641845703124, 0.5315082397460937, 0.5309317016601562, 0.5308917846679687, 0.5309010009765625, 0.5306695556640625, 0.531409912109375, 0.5308856201171875, 0.531504150390625, 0.5308948364257813, 0.5309337768554687, 0.5309696044921876, 0.5308682250976563, 0.5308712768554688, 0.5310637817382813, 0.5310596923828125, 0.530924560546875, 0.5309398803710937, 0.5311754150390625, 0.5311918334960938, 0.5315389404296875, 0.5313822631835937, 0.5316004028320312, 0.5323612060546875, 1.101580322265625, 0.5309798583984375, 0.5314283447265625, 0.53097265625, 0.5306849365234375, 0.53102490234375, 0.5308344116210938, 0.5312901000976562, 0.5309429931640625, 0.5312133178710937, 0.53045556640625, 0.5307914428710937, 0.53096142578125, 0.5311590576171875, 0.531051513671875, 0.5311539306640625, 0.5308795166015625, 0.5314437255859376, 0.5311702880859375, 0.5311856689453125, 0.5311795043945312, 0.5310105590820312, 0.5306644287109376, 0.530904052734375, 0.5307525024414063, 0.530788330078125, 0.5305599975585937, 
0.5308651733398437, 0.5308682250976563, 0.5307955322265625, 0.5306275634765625, 0.5308323974609375, 0.53082421875, 0.5314150390625, 0.5311631469726562, 0.53131982421875, 0.5312214965820312, 0.5309327392578125, 0.5311282958984375, 0.5312542724609375, 0.530845703125, 0.5310156860351563, 0.53068798828125, 0.5308897094726562, 0.5308231811523437, 0.5310628051757813, 0.531072021484375, 0.5317027587890625, 0.5310494995117188, 0.5310187377929687, 0.5309296875, 0.531330078125, 0.5311447143554687, 0.53096044921875, 0.5310719604492188, 0.531030029296875, 0.5310156860351563, 0.53125634765625, 0.5312440185546875, 0.5312337646484375, 0.5311641845703124, 0.5312235717773437, 0.530788330078125, 1.10097412109375, 0.5311385498046876, 0.5308712768554688, 0.5309481201171875, 0.5307709350585937, 0.5306480712890626, 0.5311713256835937, 0.5311160278320313, 0.5307095336914063, 0.5308487548828125, 0.53075048828125, 0.5309020385742188, 0.5306849365234375, 0.5309808349609375, 0.53065625, 0.5309685668945312, 0.5306695556640625, 0.5307422485351563, 0.5307944946289063, 0.53087744140625, 0.53075048828125, 0.5307781372070313, 0.5308733520507812, 0.5309255981445312, 0.5307822265625, 0.531041259765625, 0.5312808837890625, 0.5311181030273437, 0.531198974609375, 0.5309603881835937, 0.5314375610351563, 0.5310955810546875, 0.5312604370117188, 0.5309583129882812, 0.5311651611328125, 0.530998291015625, 0.53083544921875, 0.5311876831054687, 0.531009521484375, 0.531167236328125, 0.5306757202148438, 0.531293212890625, 0.5311754150390625, 0.53102490234375, 0.5311047973632812, 0.5315072021484375, 0.531167236328125, 0.5316218872070313, 0.5310310668945313, 0.531662841796875, 0.5310525512695312, 0.5312481079101562, 0.5310341186523437, 0.5309962158203125, 0.5309798583984375, 0.5312184448242188, 0.5310965576171875, 0.5310013427734375, 0.5311498413085938, 0.53165771484375, 0.5311181030273437, 0.531272705078125, 0.5313116455078125]",tokens/s,1.8546927529553001,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in 
load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1575.784448,5448.925184,0.0,4802.47808,4489.252352,s,10,5.114569244384766,0.5114569244384766,0.0014045752365005437,0.51150537109375,0.5130841125488281,0.5135362274169921,0.5138979193115234,"[0.512983642578125, 0.5139883422851562, 0.5091934204101562, 0.5101927795410156, 0.5104193725585937, 0.5101881103515625, 0.5112746887207031, 0.5121220092773437, 0.5117360534667968, 0.5124708251953125]",tokens/s,500.5309103617276,kWh,6.020330099595917e-06,3.2988774182740595e-06,2.7782869448500503e-05,3.710207696637048e-05,tokens/kWh,6899883.266158921,MB,1575.784448,5448.925184,0.0,4802.47808,4557.794816,s,10,301.333625,30.133362499999997,0.011965382718741807,30.1356015625,30.146014453125,30.1513353515625,30.1555920703125,"[30.12017578125, 30.120552734375, 30.1369375, 30.140724609375, 30.15665625, 30.137947265625, 30.123359375, 30.14483203125, 30.118173828125, 30.134265625]",tokens/s,2.0907059409649356,kWh,0.00035569889715148347,0.00019495335105611957,0.001580753417379499,0.002131405665587102,tokens/kWh,29557.958401431977,,s,629,305.4536612854004,0.48561790347440437,0.060780789003776696,0.4781598815917969,0.47936102294921873,0.4797579284667969,0.9888164648437501,"[0.47731610107421873, 0.4777103271484375, 0.4780533752441406, 0.4779223022460938, 0.47746868896484373, 0.47739285278320315, 0.4777676696777344, 0.47874050903320314, 0.47792022705078124, 0.4776212463378906, 0.47766937255859376, 0.4774625244140625, 0.4779018249511719, 0.47790591430664064, 0.47894528198242187, 0.47799398803710935, 0.47878759765625, 0.47878964233398436, 0.47852645874023436, 0.47817422485351563, 0.4778905334472656, 0.47780557250976563, 0.47809637451171877, 0.47767245483398435, 0.477765625, 0.4778649597167969, 0.4779622497558594, 0.47779531860351565, 0.47874456787109376, 
0.47817214965820315, 0.4782950439453125, 0.47745944213867186, 0.47759051513671874, 0.4785008544921875, 0.47783013916015626, 0.4780267639160156, 0.4782438354492187, 0.4785858459472656, 0.4793190307617187, 0.47905487060546875, 0.47873635864257813, 0.47829400634765623, 0.4777697143554688, 0.4778240661621094, 0.47742047119140624, 0.4780851135253906, 0.4782008361816406, 0.47801651000976564, 0.47831243896484377, 0.47876199340820313, 0.4778547058105469, 0.47859405517578124, 0.4782591857910156, 0.47776461791992186, 0.47824075317382814, 0.4776304626464844, 0.4778383483886719, 0.4776642456054688, 0.47760385131835936, 0.478160888671875, 0.47876300048828124, 0.4783943786621094, 0.9927915649414063, 0.47756494140625, 0.4774000549316406, 0.47765298461914063, 0.47747378540039065, 0.4782704772949219, 0.477907958984375, 0.4783944091796875, 0.477813720703125, 0.47758950805664063, 0.47759051513671874, 0.4776069030761719, 0.47729867553710936, 0.4773724060058594, 0.47751473999023436, 0.47745742797851565, 0.4780113525390625, 0.4786903076171875, 0.47860427856445314, 0.4783482971191406, 0.47794790649414065, 0.47776461791992186, 0.4778291320800781, 0.4779427795410156, 0.4779376525878906, 0.47769189453125, 0.47908352661132814, 0.47773797607421875, 0.47901287841796875, 0.4777134094238281, 0.4779151306152344, 0.47777587890625, 0.47748709106445314, 0.47771136474609377, 0.4781527099609375, 0.47798886108398436, 0.47750042724609376, 0.47809127807617186, 0.47905892944335937, 0.4782591857910156, 0.4781598815917969, 0.4782981262207031, 0.4776468505859375, 0.47786392211914064, 0.4776560668945313, 0.478023681640625, 0.48116427612304685, 0.4793456726074219, 0.4777492370605469, 0.47873126220703127, 0.47907122802734375, 0.4783626098632813, 0.4779949951171875, 0.47790286254882813, 0.4776908874511719, 0.478065673828125, 0.4777738342285156, 0.477770751953125, 0.47832369995117185, 0.47939175415039065, 0.47875686645507814, 0.47773284912109376, 0.478023681640625, 0.988906494140625, 0.47783013916015626, 0.4776437683105469, 0.4778649597167969, 0.4783902587890625, 0.47753521728515624, 0.4795494384765625, 0.47817214965820315, 0.4789770202636719, 0.4776386413574219, 0.47787213134765627, 0.47758950805664063, 0.47774514770507814, 0.47762841796875, 0.47790386962890624, 0.479541259765625, 0.4791797790527344, 0.47914700317382813, 0.4779294738769531, 0.4777625732421875, 0.47816497802734376, 0.4782356567382812, 0.47807180786132814, 0.4797726745605469, 0.4777687072753906, 0.4803246154785156, 0.4800747375488281, 0.4793231506347656, 0.4796539001464844, 0.48039935302734377, 0.4796252136230469, 0.4790927429199219, 0.477655029296875, 0.4791510925292969, 0.4787394409179688, 0.47877017211914064, 0.47938558959960936, 0.47959552001953126, 0.47838516235351564, 0.47773593139648435, 0.478129150390625, 0.4781404113769531, 0.4776857604980469, 0.4779018249511719, 0.47747378540039065, 0.4776847229003906, 0.4781803588867187, 0.4799620971679687, 0.4786053161621094, 0.47794790649414065, 0.4777123718261719, 0.4776683654785156, 0.4774696960449219, 0.47712460327148437, 0.4774481811523438, 0.47752908325195315, 0.47732632446289064, 0.4782438354492187, 0.47871487426757814, 0.47837286376953125, 0.4777123718261719, 0.47754238891601564, 0.4779929504394531, 0.9885849609375, 0.47834625244140627, 0.47808615112304687, 0.47836468505859375, 0.47836468505859375, 0.4776908874511719, 0.47762738037109376, 0.4779254455566406, 0.477751220703125, 0.4784117736816406, 0.47790286254882813, 0.4781741943359375, 0.4787138671875, 0.47869439697265626, 0.47841998291015625, 0.4790876159667969, 
0.47936920166015623, 0.4789770202636719, 0.4778157958984375, 0.47849676513671874, 0.4780349426269531, 0.4785776672363281, 0.4781475830078125, 0.4785581970214844, 0.4792135620117188, 0.47874969482421875, 0.4787026062011719, 0.47837899780273435, 0.47872308349609377, 0.47839334106445314, 0.4786411437988281, 0.4790169677734375, 0.47768063354492185, 0.4778465270996094, 0.4781793212890625, 0.47906610107421876, 0.47883367919921876, 0.4782213134765625, 0.4785172424316406, 0.47826739501953125, 0.478087158203125, 0.4777277526855469, 0.4778680419921875, 0.47746456909179685, 0.4802908020019531, 0.47867086791992186, 0.47896060180664063, 0.4792279052734375, 0.4781783142089844, 0.47810867309570315, 0.4786063232421875, 0.4785162353515625, 0.4785848388671875, 0.4778659973144531, 0.4776212463378906, 0.4777840576171875, 0.4788787231445312, 0.47973171997070313, 0.47971328735351565, 0.4792842102050781, 0.477655029296875, 0.47770315551757814, 0.4779049377441406, 0.9883463134765625, 0.4791357421875, 0.4782458801269531, 0.4794173583984375, 0.478166015625, 0.47800833129882814, 0.4775157775878906, 0.4780421142578125, 0.47852032470703126, 0.4791224365234375, 0.47801651000976564, 0.47799398803710935, 0.4782530517578125, 0.47967129516601564, 0.47834417724609374, 0.4779847717285156, 0.4790302734375, 0.47956378173828124, 0.4773304443359375, 0.4774420471191406, 0.4774143981933594, 0.4773447570800781, 0.47814349365234377, 0.47828582763671873, 0.479710205078125, 0.4788910217285156, 0.479056884765625, 0.47832369995117185, 0.4782438354492187, 0.4783964233398437, 0.479025146484375, 0.4786606140136719, 0.4782909545898438, 0.47821823120117185, 0.4802129821777344, 0.47790286254882813, 0.47835751342773436, 0.4782561340332031, 0.478635009765625, 0.47870156860351565, 0.4777001037597656, 0.4775802917480469, 0.47765914916992186, 0.479578125, 0.4784527282714844, 0.47922894287109374, 0.47923199462890625, 0.4788213806152344, 0.47869439697265626, 0.4787804260253906, 0.47939993286132815, 0.47935186767578125, 0.4787639770507813, 0.4786268310546875, 0.4793589782714844, 0.48041677856445314, 0.48034817504882815, 0.482777099609375, 0.4785745849609375, 0.478571533203125, 0.47958123779296874, 0.47865029907226564, 0.4787384338378906, 0.990581787109375, 0.4781793212890625, 0.4788521423339844, 0.47797039794921875, 0.47851007080078123, 0.47790286254882813, 0.4778321838378906, 0.47813427734375, 0.4789043273925781, 0.47835134887695313, 0.47826226806640626, 0.47895858764648436, 0.4790947875976562, 0.4783759460449219, 0.47830938720703126, 0.4781803588867187, 0.477991943359375, 0.47756903076171875, 0.47764480590820313, 0.47726080322265624, 0.47773797607421875, 0.47950848388671874, 0.47766015625, 0.47899853515625, 0.47830322265625, 0.4781537170410156, 0.47825204467773436, 0.4785592346191406, 0.479025146484375, 0.47944705200195314, 0.47835134887695313, 0.478445556640625, 0.47876199340820313, 0.47891659545898435, 0.4791101379394531, 0.47820391845703125, 0.4782581787109375, 0.47862374877929686, 0.4814622802734375, 0.47808819580078127, 0.47835751342773436, 0.47878964233398436, 0.4785551452636719, 0.4777082824707031, 0.4787271728515625, 0.4784322509765625, 0.47835546875, 0.4775679931640625, 0.4779346008300781, 0.47834521484375, 0.478160888671875, 0.47767962646484374, 0.47781170654296873, 0.4783585205078125, 0.4791285705566406, 0.4783697814941406, 0.47800216674804685, 0.47763250732421875, 0.4782294921875, 0.47773696899414064, 0.47801651000976564, 0.4774718017578125, 0.47802670288085936, 0.9896888427734375, 0.47829608154296877, 0.47802975463867187, 
0.47756497192382813, 0.47750039672851563, 0.47811380004882814, 0.4781025390625, 0.4778936767578125, 0.47798370361328124, 0.47788851928710935, 0.4786268310546875, 0.4782899169921875, 0.4779632568359375, 0.47777484130859377, 0.47753521728515624, 0.4779007873535156, 0.47758544921875, 0.47753726196289065, 0.47787213134765627, 0.47847628784179685, 0.4782909545898438, 0.4778014831542969, 0.47860223388671874, 0.47986483764648435, 0.47783935546875, 0.47765298461914063, 0.4779980773925781, 0.47794073486328126, 0.4787517395019531, 0.4781506652832031, 0.4780155029296875, 0.47900875854492186, 0.47818548583984377, 0.47805029296875, 0.4782233581542969, 0.47802471923828127, 0.4779346008300781, 0.47827557373046875, 0.478497802734375, 0.47849063110351564, 0.47899237060546873, 0.4786196594238281, 0.47815167236328127, 0.4799846801757813, 0.47795709228515626, 0.47788851928710935, 0.4778270568847656, 0.4778547058105469, 0.478060546875, 0.47782608032226564, 0.4778434143066406, 0.477601806640625, 0.4788572082519531, 0.47828274536132814, 0.47832986450195314, 0.4779417724609375, 0.477633544921875, 0.477907958984375, 0.478065673828125, 0.4780482482910156, 0.47808819580078127, 0.47858380126953126, 0.47811892700195313, 0.9911572265625, 0.4780707702636719, 0.47788134765625, 0.4775536499023437, 0.4774696960449219, 0.47756185913085936, 0.47758642578125, 0.4777615356445313, 0.47794790649414065, 0.4787118225097656, 0.47779736328125, 0.47749325561523437, 0.4775475158691406, 0.4777697143554688, 0.4781363220214844, 0.4779141235351563, 0.4792012939453125, 0.4778874816894531, 0.47856845092773437, 0.47768267822265625, 0.4781311950683594, 0.4785070190429688, 0.47809127807617186, 0.4776365966796875, 0.4780707702636719, 0.47816497802734376, 0.47828582763671873, 0.47777279663085936, 0.4780728454589844, 0.47835751342773436, 0.47851211547851563, 0.47822235107421873, 0.4775055236816406, 0.4781465454101563, 0.47790386962890624, 0.4779018249511719, 0.478359619140625, 0.4794295654296875, 0.4784394226074219, 0.4795248718261719, 0.4780544128417969, 0.477949951171875, 0.48094821166992185, 0.4794306640625, 0.4784814147949219, 0.4792197265625, 0.4782981262207031, 0.47831655883789065, 0.47831243896484377, 0.4784998474121094, 0.47935488891601563, 0.47914599609375, 0.48005630493164064, 0.48013311767578126, 0.47997030639648436, 0.48005426025390624, 0.4799140625, 0.4794142150878906, 0.4800153503417969, 0.4793067626953125, 0.47961505126953125, 0.47790789794921873, 0.4778670043945312, 0.9900185546875, 0.4775372924804687, 0.47779840087890624, 0.477633544921875, 0.4782950439453125, 0.47761920166015626, 0.47800421142578126, 0.478803955078125, 0.4791029663085937, 0.4782643127441406, 0.47842098999023436, 0.4776908874511719, 0.4780185546875, 0.47770932006835937, 0.4777851257324219, 0.477681640625, 0.477812744140625, 0.47979006958007814, 0.47791717529296873, 0.4780451965332031, 0.4788193359375, 0.477955078125, 0.47790591430664064, 0.47803903198242187, 0.47824697875976563, 0.4786861572265625, 0.47816192626953125, 0.477655029296875, 0.47835134887695313, 0.478919677734375, 0.47831039428710936, 0.4777205810546875, 0.4773253173828125, 0.47773284912109376, 0.4778260498046875, 0.4776069030761719, 0.47773492431640624, 0.4777461853027344, 0.478587890625, 0.47767041015625, 0.4789801330566406, 0.47915618896484374, 0.4780257263183594, 0.47762841796875, 0.4778168334960938, 0.4776714172363281, 0.47783526611328125, 0.47757107543945315, 0.4779704284667969, 0.4786677856445313, 0.47908148193359373, 0.4783206481933594, 0.4781240234375, 0.47756494140625, 0.4773294067382812, 
0.47757208251953126, 0.477348876953125, 0.47757720947265625, 0.47773492431640624, 0.47816192626953125, 0.47770932006835937, 0.4789381103515625, 0.47819366455078127, 0.9904609375, 0.47848550415039065, 0.478013427734375, 0.4775751647949219, 0.4774912109375, 0.4782847900390625, 0.4794931335449219, 0.4796549072265625, 0.47851318359375, 0.4785254211425781, 0.47859506225585935, 0.4788695068359375, 0.47816293334960935, 0.47827969360351563, 0.4785244140625, 0.4797358093261719, 0.4782438354492187, 0.4776570739746094, 0.4789125061035156, 0.47882650756835937, 0.4777082824707031, 0.47753216552734373, 0.47809637451171877, 0.47831655883789065, 0.4776488952636719, 0.4774471740722656, 0.4796170349121094, 0.47986483764648435, 0.4793456726074219, 0.479599609375, 0.4791654357910156, 0.4776488952636719, 0.4775475158691406, 0.47792538452148436, 0.4778240051269531, 0.4774819946289062, 0.4782612915039062, 0.4777738037109375, 0.4777543640136719, 0.47963134765625, 0.47828070068359374, 0.4778486022949219, 0.4777625427246094, 0.477601806640625, 0.4777185363769531, 0.47762432861328125, 0.47770932006835937, 0.4777062377929687, 0.47948800659179686, 0.4784066467285156, 0.47847015380859376, 0.4775106506347656, 0.4777420654296875, 0.47842098999023436, 0.47805233764648436, 0.47766015625, 0.47758746337890623, 0.47821926879882815, 0.4781087036132812, 0.4776806640625, 0.47913876342773437, 0.47851828002929686, 0.4783575744628906]",tokens/s,2.059232151132392,,,main,False,False,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 
3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,,cuda,0,42,,,,,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,2236.862464,2932.342784,0.0,2285.89568,2082.706944,s,10,2.5091812438964842,0.25091812438964844,0.0016359490817563227,0.25024176025390626,0.25317278594970705,0.2535381980895996,0.25383052780151366,"[0.25309158325195313, 0.2539036102294922, 0.24987110900878906, 0.24888946533203124, 0.2497696075439453, 0.24914796447753906, 0.2504080047607422, 0.25179104614257813, 0.25223333740234377, 0.2500755157470703]",tokens/s,1020.253122897013,kWh,2.948670335578402e-06,1.6157431971317612e-06,1.3066561943753649e-05,1.763097547646381e-05,tokens/kWh,14519899.953450851,MB,2238.38208,2959.60576,0.0,2313.158656,2180.685312,s,10,143.8805810546875,14.38805810546875,0.011353783216855234,14.382828125,14.40145888671875,14.403668896484376,14.405436904296876,"[14.4009677734375, 14.40587890625, 14.39833203125, 14.3805361328125, 14.374423828125, 14.3830166015625, 14.400140625, 14.373748046875, 14.3808974609375, 14.3826396484375]",tokens/s,4.378631191102458,kWh,0.000169606630340582,9.295822156634463e-05,0.0007504743544440634,0.0010130392063509902,tokens/kWh,62189.10344736671,,s,629,145.90565167236326,0.2319644700673502,0.029875164751919897,0.22824755859375,0.22927770080566406,0.22946876831054688,0.47820606079101563,"[0.22992076110839843, 0.22815129089355468, 0.22794752502441407, 0.22817485046386718, 0.2277969970703125, 0.22800997924804686, 0.22908108520507814, 0.22838169860839844, 0.2285506591796875, 0.2286049346923828, 0.22830694580078126, 0.22801516723632811, 0.22815020751953125, 0.22767308044433593, 0.22776422119140624, 0.2276741180419922, 0.22829362487792967, 0.22767205810546876, 0.22779493713378907, 0.2277058563232422, 0.22765670776367186, 
0.22813594055175782, 0.22820147705078125, 0.2283335723876953, 0.2275809326171875, 0.22812364196777343, 0.2283008575439453, 0.22861715698242188, 0.2281564178466797, 0.22812979125976562, 0.2289090576171875, 0.2288476104736328, 0.22834585571289062, 0.22857522583007814, 0.2290155487060547, 0.22790553283691406, 0.22831922912597657, 0.22821784973144532, 0.22858444213867188, 0.2286612548828125, 0.22839808654785157, 0.22902169799804686, 0.22916812133789063, 0.22946917724609375, 0.22901350402832032, 0.22913536071777343, 0.22909747314453124, 0.229138427734375, 0.22936679077148436, 0.22892851257324218, 0.22936268615722658, 0.22932582092285156, 0.22920909118652344, 0.22904730224609374, 0.2291425323486328, 0.22898892211914063, 0.22877183532714843, 0.2286202850341797, 0.229064697265625, 0.22876876831054688, 0.2292131805419922, 0.22902476501464844, 0.48329931640625, 0.2282854461669922, 0.228822021484375, 0.2279741516113281, 0.22834687805175782, 0.22893157958984375, 0.22888038635253907, 0.22889677429199218, 0.22806629943847656, 0.22842678833007812, 0.22791778564453125, 0.22817791748046876, 0.22819737243652344, 0.228274169921875, 0.22817791748046876, 0.2278656005859375, 0.22779391479492186, 0.22778982543945311, 0.22769561767578125, 0.22855783081054687, 0.2280273895263672, 0.22766490173339843, 0.2277734375, 0.2279331817626953, 0.22785125732421874, 0.2278594512939453, 0.227852294921875, 0.2281492462158203, 0.2276505584716797, 0.22739762878417968, 0.22808883666992188, 0.22794650268554686, 0.22817485046386718, 0.228094970703125, 0.22861415100097657, 0.22928793334960937, 0.22927769470214843, 0.22896333312988282, 0.22915072631835937, 0.22908006286621094, 0.22935040283203126, 0.22938829040527345, 0.22915583801269532, 0.22902578735351561, 0.22928282165527344, 0.22933914184570312, 0.23262924194335938, 0.22968319702148438, 0.22875648498535156, 0.22789529418945312, 0.2278666229248047, 0.2295767059326172, 0.22941900634765625, 0.22929306030273439, 0.22928282165527344, 0.22947532653808594, 0.2292490234375, 0.22923365783691407, 0.22946815490722655, 0.22925106811523438, 0.22942207336425782, 0.22925315856933592, 0.2293155517578125, 0.4786954345703125, 0.22789324951171874, 0.22778778076171874, 0.22853427124023437, 0.2276505584716797, 0.22818611145019532, 0.22808677673339844, 0.22816160583496095, 0.22942201232910156, 0.22774887084960938, 0.22915277099609374, 0.2292162628173828, 0.22927772521972656, 0.22797821044921876, 0.2286940155029297, 0.22868377685546876, 0.22756658935546875, 0.22776832580566406, 0.22760652160644532, 0.22787379455566406, 0.2284400634765625, 0.22826905822753907, 0.22876876831054688, 0.22959616088867188, 0.22788198852539063, 0.22754815673828124, 0.2290493469238281, 0.22905958557128905, 0.22827314758300782, 0.22785842895507813, 0.22869094848632812, 0.2282854461669922, 0.22885580444335937, 0.22893157958984375, 0.2289971160888672, 0.22875340270996095, 0.22852301025390626, 0.22836224365234375, 0.22938829040527345, 0.2279147491455078, 0.22865306091308593, 0.22844825744628905, 0.22864588928222657, 0.22837759399414062, 0.2290636749267578, 0.2289213409423828, 0.22904115295410157, 0.23109939575195312, 0.22880665588378907, 0.22881996154785156, 0.2284707794189453, 0.229232666015625, 0.22840829467773438, 0.2283520050048828, 0.23005081176757813, 0.22963821411132812, 0.22803654479980467, 0.22828851318359375, 0.22792909240722656, 0.22795878601074218, 0.22777548217773438, 0.22785331726074218, 0.22797004699707032, 0.47824075317382814, 0.22801408386230468, 0.22845132446289063, 0.22803660583496094, 0.22865408325195313, 
0.22791679382324218, 0.22993101501464844, 0.22821376037597657, 0.22777548217773438, 0.2276874237060547, 0.22872781372070314, 0.22810418701171875, 0.22863462829589845, 0.2276290588378906, 0.22807244873046875, 0.2276546630859375, 0.22767514038085937, 0.22787481689453126, 0.22811648559570313, 0.22791270446777342, 0.22781336975097657, 0.22764236450195313, 0.2276822967529297, 0.22771916198730469, 0.22807347106933593, 0.22767718505859375, 0.22773248291015624, 0.2274396209716797, 0.2276177978515625, 0.2276126708984375, 0.22771405029296876, 0.2276884460449219, 0.22757273864746094, 0.22881689453125, 0.22877593994140624, 0.22816461181640624, 0.22797311401367187, 0.22859878540039064, 0.22825677490234375, 0.22843699645996093, 0.22875750732421876, 0.22935040283203126, 0.22820352172851563, 0.22832333374023436, 0.23120999145507812, 0.22912101745605468, 0.22907904052734376, 0.22970060729980468, 0.22873907470703125, 0.22823014831542968, 0.22959414672851564, 0.22853219604492186, 0.22784101867675782, 0.22805708312988282, 0.22899507141113282, 0.2288885803222656, 0.227852294921875, 0.2276669464111328, 0.2279219207763672, 0.22771916198730469, 0.22784921264648436, 0.2281492462158203, 0.22833561706542968, 0.47811685180664065, 0.22757478332519532, 0.2276433868408203, 0.227662841796875, 0.22771510314941407, 0.22770684814453124, 0.22767001342773438, 0.2279884796142578, 0.22770176696777344, 0.2278707275390625, 0.22784512329101564, 0.22757171630859374, 0.22767922973632812, 0.22853836059570312, 0.22817181396484376, 0.22842568969726562, 0.22801408386230468, 0.2303057861328125, 0.22843084716796874, 0.22846669006347656, 0.22829670715332032, 0.227631103515625, 0.22766796875, 0.22799462890625, 0.22806732177734376, 0.22791270446777342, 0.22781747436523436, 0.22777548217773438, 0.22781234741210937, 0.22849331665039063, 0.22844009399414061, 0.22815536499023437, 0.22805509948730468, 0.22854751586914063, 0.22936679077148436, 0.2276741180419922, 0.22772940063476563, 0.22759014892578125, 0.22804173278808593, 0.22774169921875, 0.22859373474121095, 0.22844819641113281, 0.22795266723632812, 0.22765052795410157, 0.22817893981933593, 0.22788096618652343, 0.2275768280029297, 0.227915771484375, 0.22789631652832032, 0.22774783325195314, 0.22783692932128907, 0.22787890625, 0.22841958618164063, 0.22780006408691406, 0.22799154663085938, 0.2284862060546875, 0.22839187622070312, 0.23194111633300782, 0.22911180114746094, 0.22951731872558595, 0.22850355529785157, 0.22849740600585938, 0.22812570190429687, 0.47752294921875, 0.2276556854248047, 0.22764544677734375, 0.22774169921875, 0.22828953552246095, 0.22794444274902342, 0.22778880310058594, 0.2285332489013672, 0.22908927917480468, 0.2287073211669922, 0.2291087341308594, 0.22870118713378906, 0.22815948486328125, 0.22893466186523437, 0.2281328582763672, 0.22798233032226561, 0.22857215881347656, 0.22788812255859375, 0.2276177978515625, 0.22820658874511718, 0.22790963745117188, 0.2278912353515625, 0.22779286193847656, 0.22789631652832032, 0.2286622772216797, 0.2280079345703125, 0.22793728637695312, 0.22807142639160155, 0.22804888916015625, 0.2280335388183594, 0.22812159729003906, 0.2278830108642578, 0.2276884460449219, 0.22792807006835938, 0.22778675842285157, 0.22810009765625, 0.22793215942382813, 0.22936473083496095, 0.23167283630371094, 0.22877081298828125, 0.22863258361816408, 0.22877593994140624, 0.22783282470703126, 0.2277181396484375, 0.22791270446777342, 0.22783183288574219, 0.22780720520019532, 0.22802841186523437, 0.2279536590576172, 0.2279619140625, 0.22795872497558595, 
0.22792909240722656, 0.22810009765625, 0.22938214111328126, 0.22828031921386718, 0.228242431640625, 0.2287636413574219, 0.22942311096191406, 0.22908114624023437, 0.22857618713378905, 0.22855372619628905, 0.22922035217285155, 0.2285117492675781, 0.4789893188476563, 0.22813900756835936, 0.22828134155273438, 0.22873292541503906, 0.2284707794189453, 0.22896640014648437, 0.22858956909179687, 0.2285096893310547, 0.22846669006347656, 0.22824858093261718, 0.2286878662109375, 0.2290083770751953, 0.22912716674804687, 0.22828851318359375, 0.22853836059570312, 0.228384765625, 0.22901145935058595, 0.2284523468017578, 0.22929306030273439, 0.22884352111816406, 0.2288916473388672, 0.22799874877929688, 0.22763005065917968, 0.2294476776123047, 0.22789324951171874, 0.22849740600585938, 0.2280437774658203, 0.22870835876464843, 0.228890625, 0.228600830078125, 0.22893466186523437, 0.22901248168945312, 0.22887936401367187, 0.22905445861816406, 0.2291025848388672, 0.22861824035644532, 0.2289040069580078, 0.22949267578125, 0.22833255004882813, 0.22794650268554686, 0.2277928924560547, 0.2285117492675781, 0.2278656005859375, 0.22826905822753907, 0.22805914306640626, 0.22932992553710937, 0.2291025848388672, 0.2288046112060547, 0.22843597412109376, 0.22823526000976563, 0.2282977294921875, 0.22911488342285155, 0.22893772888183594, 0.2283100128173828, 0.22832127380371095, 0.22796287536621093, 0.22796389770507813, 0.22786151123046874, 0.22802024841308594, 0.22792703247070312, 0.2290247344970703, 0.2290872344970703, 0.2293729248046875, 0.48112127685546874, 0.22774476623535156, 0.22767205810546876, 0.22760140991210936, 0.22770790100097657, 0.228706298828125, 0.22839602661132813, 0.22828953552246095, 0.22751437377929687, 0.22827008056640624, 0.22855577087402343, 0.22760858154296876, 0.22853733825683595, 0.2290882568359375, 0.22853427124023437, 0.22787174987792969, 0.22959922790527343, 0.22843597412109376, 0.22793830871582033, 0.22853427124023437, 0.22780621337890625, 0.22782566833496093, 0.2280755157470703, 0.22808781433105468, 0.22815948486328125, 0.22787686157226564, 0.22864691162109374, 0.2278481903076172, 0.22766387939453125, 0.22787174987792969, 0.22767514038085937, 0.22795161437988282, 0.22794650268554686, 0.22880563354492187, 0.22775808715820312, 0.22767514038085937, 0.22767718505859375, 0.2277734375, 0.22772122192382813, 0.22761984252929687, 0.2289653778076172, 0.22932582092285156, 0.2285506591796875, 0.22796083068847656, 0.2276986846923828, 0.22787583923339844, 0.2282608642578125, 0.2280263671875, 0.22794137573242187, 0.2278461456298828, 0.22779391479492186, 0.22773248291015624, 0.2276259765625, 0.22793215942382813, 0.22887014770507813, 0.2283756103515625, 0.22759724426269531, 0.22863462829589845, 0.22802841186523437, 0.228279296875, 0.22952755737304686, 0.22824755859375, 0.2283223114013672, 0.4810905456542969, 0.22825372314453124, 0.22767202758789062, 0.22804582214355468, 0.22862745666503906, 0.227810302734375, 0.22774989318847655, 0.22795980834960938, 0.22772940063476563, 0.22866943359375, 0.22906982421875, 0.2284390411376953, 0.22901657104492187, 0.22875852966308594, 0.22855270385742188, 0.22873805236816405, 0.22831513977050782, 0.22964530944824219, 0.2282782745361328, 0.228890625, 0.22893772888183594, 0.22941389465332032, 0.22863871765136717, 0.22856192016601562, 0.2286868438720703, 0.22785536193847655, 0.22773554992675782, 0.22780723571777345, 0.22774887084960938, 0.22783795166015625, 0.2286028747558594, 0.2292316131591797, 0.22855474853515625, 0.2284277801513672, 0.22815129089355468, 0.22762086486816407, 
0.22760345458984374, 0.2294599609375, 0.228251708984375, 0.22796998596191406, 0.22916300964355468, 0.22799667358398437, 0.22786355590820312, 0.2276986846923828, 0.22783282470703126, 0.22780621337890625, 0.22774681091308593, 0.22811955261230468, 0.22787890625, 0.22774374389648439, 0.2275000305175781, 0.22767001342773438, 0.2276884460449219, 0.22773554992675782, 0.2278778839111328, 0.22794650268554686, 0.22774476623535156, 0.22773452758789062, 0.22781234741210937, 0.22782975769042968, 0.22782054138183594, 0.22786151123046874, 0.22886604309082031, 0.4816783447265625, 0.22901145935058595, 0.22866943359375, 0.2290145263671875, 0.22938316345214843, 0.22859571838378906, 0.22785023498535156, 0.22841139221191406, 0.22864895629882812, 0.2287615966796875, 0.22899302673339844, 0.2279720916748047, 0.2287073211669922, 0.22850355529785157, 0.22853631591796875, 0.22866021728515626, 0.22911077880859376, 0.22943026733398436, 0.22803558349609376, 0.22808781433105468, 0.2292725830078125, 0.22914457702636717, 0.22839295959472655, 0.22797314453125, 0.22791267395019532, 0.22795161437988282, 0.22988902282714843, 0.2280447998046875, 0.22778163146972658, 0.2276444091796875, 0.2277928924560547, 0.2285015106201172, 0.22870835876464843, 0.22830181884765624, 0.22769766235351563, 0.22775193786621092, 0.22775091552734375, 0.22768333435058594, 0.22769664001464843, 0.22772633361816405, 0.22788607788085938, 0.22782054138183594, 0.22759837341308595, 0.22872572326660157, 0.2291568603515625, 0.22867762756347657, 0.228094970703125, 0.2280990753173828, 0.22783795166015625, 0.2277724151611328, 0.22763827514648438, 0.2278164520263672, 0.22774681091308593, 0.2276433868408203, 0.2277181396484375, 0.22805708312988282, 0.2277232666015625, 0.228068359375, 0.2278154296875, 0.2276864013671875, 0.22811546325683593, 0.22927462768554688, 0.2289459228515625]",tokens/s,4.311005041891343,,,main,False,False,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4144.504832,15760.621568,0.0,15114.174464,14045.205504,s,10,15.963228881835938,1.5963228881835938,0.0014829914244840832,1.5958744506835938,1.5984191040039064,1.5985000183105469,1.5985647497558595,"[1.595896728515625, 1.598401123046875, 1.5945810546875, 1.5952257080078125, 1.5950601806640625, 1.5945120849609375, 1.5958521728515624, 1.5975928955078125, 1.5975260009765626, 1.5985809326171876]",tokens/s,160.36855820021125,kWh,1.8825581471125285e-05,1.0316417645935872e-05,8.981001629239093e-05,0.00011895201540945208,tokens/kWh,2152128.310889114,MB,4144.504832,15760.621568,0.0,15114.174464,14169.857024,s,10,928.8058671874999,92.88058671875,0.004527086516632578,92.879125,92.88628203124999,92.886953515625,92.887490703125,"[92.8758671875, 92.88065625, 92.887625, 92.8848203125, 92.8846328125, 92.875515625, 92.87759375, 92.8768203125, 92.876203125, 92.8861328125]",tokens/s,0.6782902889144009,kWh,0.0010964920105205644,0.0006009745227833628,0.005288325397323612,0.006985791930627541,tokens/kWh,9018.304671198623,,s,629,941.6992608642577,1.4971371396888042,0.1896008283998211,1.4742733154296874,1.4748571533203125,1.4750364013671875,3.06951080078125,"[1.473944580078125, 1.47329638671875, 1.4738431396484375, 
1.47358203125, 1.47386572265625, 1.4748426513671875, 1.47376123046875, 1.4739620361328125, 1.47388720703125, 1.47437158203125, 1.474060302734375, 1.4738883056640626, 1.4746326904296876, 1.473596435546875, 1.4739219970703126, 1.474044921875, 1.474198486328125, 1.4741043701171874, 1.4745589599609374, 1.4744586181640624, 1.4741165771484375, 1.4748856201171876, 1.474040771484375, 1.473933349609375, 1.474250732421875, 1.4747626953125, 1.4741483154296875, 1.4741063232421876, 1.47391796875, 1.47371826171875, 1.4743121337890626, 1.4736240234375, 1.474345947265625, 1.4741422119140626, 1.47420263671875, 1.473943603515625, 1.474134033203125, 1.474208740234375, 1.473671142578125, 1.474809814453125, 1.473850341796875, 1.47422314453125, 1.4740899658203126, 1.4746153564453126, 1.474150390625, 1.474164794921875, 1.474740234375, 1.4742220458984374, 1.474397216796875, 1.47470947265625, 1.4741094970703126, 1.474333740234375, 1.4749337158203124, 1.4750013427734374, 1.4743634033203126, 1.47471875, 1.4743265380859376, 1.4740859375, 1.4744422607421874, 1.4750106201171875, 1.4745364990234375, 1.4744473876953126, 3.072501708984375, 1.4742149658203125, 1.47382275390625, 1.4743377685546875, 1.4739732666015626, 1.4742138671875, 1.473607666015625, 1.473517578125, 1.4735635986328126, 1.4740213623046876, 1.47407568359375, 1.474303955078125, 1.474218017578125, 1.4742906494140624, 1.4739681396484374, 1.4741094970703126, 1.47432958984375, 1.47487744140625, 1.474449462890625, 1.474229248046875, 1.474135009765625, 1.474888671875, 1.4737879638671876, 1.4744893798828125, 1.4746142578125, 1.474745361328125, 1.4743009033203125, 1.4740203857421874, 1.4740869140625, 1.4739189453125, 1.474797607421875, 1.4739200439453124, 1.474556884765625, 1.4741739501953126, 1.4747054443359375, 1.4741309814453125, 1.4740633544921875, 1.4746910400390625, 1.4741361083984375, 1.4743233642578124, 1.474419677734375, 1.4740213623046876, 1.47390673828125, 1.4743306884765626, 1.4746715087890625, 1.474198486328125, 1.474620361328125, 1.47437255859375, 1.4746910400390625, 1.474514892578125, 1.4745426025390624, 1.47401220703125, 1.4740531005859374, 1.47437158203125, 1.475441650390625, 1.4746439208984374, 1.474841552734375, 1.47451904296875, 1.474598876953125, 1.474634765625, 1.4747381591796875, 1.4739384765625, 1.473933349609375, 3.0693232421875, 1.473892333984375, 1.474171875, 1.4738431396484375, 1.474007080078125, 1.4742384033203124, 1.47473095703125, 1.4749234619140625, 1.473754150390625, 1.4738544921875, 1.474039794921875, 1.47367529296875, 1.47413916015625, 1.47485693359375, 1.4745743408203125, 1.474783203125, 1.475303466796875, 1.474924560546875, 1.4741279296875, 1.4751273193359375, 1.4750792236328125, 1.474249755859375, 1.4747965087890624, 1.474628662109375, 1.4746798095703124, 1.47464599609375, 1.475072021484375, 1.4749490966796874, 1.474650146484375, 1.474839599609375, 1.4745999755859376, 1.47433984375, 1.474587646484375, 1.4739671630859374, 1.47449853515625, 1.47369580078125, 1.4738216552734376, 1.4743746337890624, 1.474156494140625, 1.47430712890625, 1.4742518310546875, 1.474651123046875, 1.4738739013671875, 1.474051025390625, 1.4744114990234376, 1.474198486328125, 1.474423828125, 1.4742803955078125, 1.4740162353515625, 1.473574951171875, 1.4743848876953125, 1.4738145751953124, 1.47405419921875, 1.4739793701171875, 1.475199951171875, 1.4745067138671875, 1.4744473876953126, 1.4743388671875, 1.474714599609375, 1.4745006103515625, 1.4744422607421874, 1.4752808837890625, 1.474572265625, 3.06950341796875, 1.4739578857421876, 1.4735145263671876, 
1.4739886474609376, 1.4746470947265624, 1.4735728759765625, 1.4737674560546874, 1.4742681884765625, 1.4737213134765625, 1.47318994140625, 1.47401318359375, 1.47386474609375, 1.47394970703125, 1.4739599609375, 1.4740623779296875, 1.474145263671875, 1.4742425537109376, 1.4741483154296875, 1.4742333984375, 1.4738831787109374, 1.474155517578125, 1.4743111572265626, 1.474144287109375, 1.4746112060546874, 1.4750372314453124, 1.4743541259765625, 1.4747115478515624, 1.4738729248046876, 1.474272216796875, 1.47419140625, 1.4745753173828124, 1.4750064697265626, 1.47466650390625, 1.4748436279296875, 1.474193359375, 1.4741329345703125, 1.4744525146484375, 1.4744913330078124, 1.4748323974609374, 1.4744207763671875, 1.4753935546875, 1.4751805419921875, 1.474482177734375, 1.4740582275390626, 1.4748548583984376, 1.474802734375, 1.4744371337890625, 1.4744453125, 1.4744443359375, 1.4745057373046875, 1.4743941650390624, 1.4749481201171875, 1.47416162109375, 1.47437255859375, 1.4748753662109375, 1.474218994140625, 1.47468798828125, 1.4740306396484375, 1.4754232177734374, 1.474871337890625, 1.4750535888671874, 1.474193359375, 1.47433984375, 3.0700166015625, 1.474471923828125, 1.4742957763671876, 1.474418701171875, 1.4738094482421875, 1.473839111328125, 1.4741832275390625, 1.47508935546875, 1.474249755859375, 1.474166748046875, 1.4742117919921875, 1.47376953125, 1.474102294921875, 1.4737879638671876, 1.4748671875, 1.4747493896484376, 1.474883544921875, 1.4746419677734375, 1.4745159912109376, 1.4746552734375, 1.474293701171875, 1.4748323974609374, 1.4744105224609374, 1.474787353515625, 1.47443408203125, 1.4740316162109375, 1.4738883056640626, 1.47376953125, 1.47512939453125, 1.47419140625, 1.4745528564453125, 1.4741678466796875, 1.47439208984375, 1.4739793701171875, 1.4745281982421874, 1.47403466796875, 1.4748272705078125, 1.474185302734375, 1.4739229736328125, 1.4739056396484376, 1.473829833984375, 1.474093017578125, 1.4740869140625, 1.474112548828125, 1.4749757080078125, 1.4745753173828124, 1.4741094970703126, 1.4738267822265625, 1.4748529052734376, 1.4741513671875, 1.473798095703125, 1.47447607421875, 1.474387939453125, 1.4741156005859375, 1.4746163330078126, 1.474620361328125, 1.4744791259765626, 1.4744678955078125, 1.4747166748046876, 1.4750802001953125, 1.4745999755859376, 1.4739814453125, 1.4747626953125, 3.069513671875, 1.47412890625, 1.4743746337890624, 1.4740838623046875, 1.4746378173828125, 1.4743603515625, 1.4743265380859376, 1.4740521240234374, 1.474298828125, 1.474333740234375, 1.47361279296875, 1.4742005615234375, 1.4735267333984374, 1.473796142578125, 1.4742364501953125, 1.47390869140625, 1.473765380859375, 1.4739835205078125, 1.4745712890625, 1.4740101318359375, 1.4739835205078125, 1.4743961181640626, 1.474408447265625, 1.4742752685546876, 1.4744114990234376, 1.4744586181640624, 1.4740623779296875, 1.4737520751953126, 1.47413916015625, 1.4744801025390626, 1.4740203857421874, 1.474050048828125, 1.47420361328125, 1.4737581787109375, 1.4745753173828124, 1.474017333984375, 1.474734130859375, 1.47439208984375, 1.4745015869140625, 1.4748160400390624, 1.474017333984375, 1.473838134765625, 1.4741207275390624, 1.473882080078125, 1.4738841552734374, 1.47435107421875, 1.4739732666015626, 1.4739046630859376, 1.4738043212890626, 1.4740582275390626, 1.4742333984375, 1.4741370849609374, 1.4745313720703126, 1.47392822265625, 1.4746859130859375, 1.4744012451171875, 1.4736475830078124, 1.4745435791015624, 1.4739937744140625, 1.4749501953125, 1.4744842529296875, 1.474662353515625, 1.474905029296875, 
3.070246826171875, 1.4750576171875, 1.4738739013671875, 1.4734581298828124, 1.4737919921875, 1.474293701171875, 1.4743428955078124, 1.47382275390625, 1.47458154296875, 1.47357080078125, 1.474060302734375, 1.4740726318359374, 1.4753177490234375, 1.473629150390625, 1.4743223876953124, 1.4743489990234375, 1.473955810546875, 1.4739876708984374, 1.4747186279296876, 1.4738134765625, 1.4743214111328125, 1.4742047119140624, 1.4742978515625, 1.4737418212890625, 1.4734571533203125, 1.473523681640625, 1.473606689453125, 1.473850341796875, 1.4743746337890624, 1.4742579345703124, 1.4741832275390625, 1.474040771484375, 1.4740684814453124, 1.4739732666015626, 1.4740592041015625, 1.474809814453125, 1.4747279052734374, 1.4743153076171875, 1.4744268798828124, 1.474154541015625, 1.4741319580078125, 1.473996826171875, 1.4746644287109374, 1.4744012451171875, 1.4747647705078124, 1.474788330078125, 1.4744166259765625, 1.474029541015625, 1.4741083984375, 1.4744698486328125, 1.47449853515625, 1.473933349609375, 1.475099609375, 1.4742476806640625, 1.474344970703125, 1.47441455078125, 1.47498388671875, 1.4740675048828125, 1.4742537841796874, 1.4744873046875, 1.474292724609375, 1.47422509765625, 1.4751826171875, 3.072003173828125, 1.47430908203125, 1.4742056884765624, 1.474298828125, 1.4740029296875, 1.47441357421875, 1.4743634033203126, 1.473666015625, 1.4739056396484376, 1.4738585205078125, 1.4737633056640624, 1.47390771484375, 1.474503662109375, 1.4742467041015626, 1.47406640625, 1.4739844970703124, 1.474124755859375, 1.4739844970703124, 1.47361279296875, 1.474107421875, 1.473881103515625, 1.47378076171875, 1.474008056640625, 1.4745169677734375, 1.474460693359375, 1.474466796875, 1.4740469970703125, 1.4737305908203124, 1.47407666015625, 1.4743223876953124, 1.4743223876953124, 1.4740029296875, 1.4749521484375, 1.474461669921875, 1.4740694580078124, 1.4741329345703125, 1.47427734375, 1.474609130859375, 1.4740633544921875, 1.474460693359375, 1.474523193359375, 1.4746634521484374, 1.4743521728515625, 1.4738759765625, 1.4742733154296874, 1.4747269287109375, 1.474681884765625, 1.474302001953125, 1.4744627685546876, 1.4744678955078125, 1.4744381103515625, 1.474345947265625, 1.4741944580078126, 1.4742548828125, 1.4745078125, 1.4744801025390626, 1.47441455078125, 1.47418115234375, 1.4743223876953124, 1.4743634033203126, 1.4744627685546876, 1.4740469970703125, 1.4741370849609374, 3.0712216796875, 1.4734315185546876, 1.4745855712890625, 1.47361279296875, 1.473840087890625, 1.4741380615234374, 1.4739364013671874, 1.4736414794921875, 1.473987548828125, 1.4741053466796874, 1.4739149169921875, 1.47364453125, 1.4739620361328125, 1.47382373046875, 1.473976318359375, 1.474334716796875, 1.4739261474609375, 1.473628173828125, 1.4737264404296875, 1.4739302978515625, 1.4734212646484375, 1.4736036376953126, 1.4741319580078125, 1.47426513671875, 1.474302978515625, 1.4739517822265624, 1.4746705322265625, 1.47380126953125, 1.4740101318359375, 1.4745528564453125, 1.4744248046875, 1.47426513671875, 1.4744791259765626, 1.4744033203125, 1.4740244140625, 1.474103271484375, 1.4746705322265625, 1.4742518310546875, 1.474093017578125, 1.474193359375, 1.4737838134765624, 1.473976318359375, 1.474071533203125, 1.47503515625, 1.4746429443359375, 1.4743326416015625, 1.4748948974609375, 1.473871826171875, 1.4741412353515626, 1.474756591796875, 1.474630615234375, 1.474577392578125, 1.474472900390625, 1.474883544921875, 1.4748580322265625, 1.47447705078125, 1.4747811279296874, 1.47460400390625, 1.4745589599609374, 1.4746265869140625, 1.4749808349609375, 
1.4749000244140624, 1.474957275390625, 3.07293701171875, 1.4741534423828124, 1.473850341796875, 1.474186279296875, 1.4741903076171874, 1.47452001953125, 1.4740736083984376, 1.474038818359375, 1.474935791015625, 1.4743348388671875, 1.473924072265625, 1.4737950439453125, 1.474008056640625, 1.4743111572265626, 1.474093017578125, 1.4743223876953124, 1.4741851806640625, 1.4741422119140626, 1.4739793701171875, 1.474102294921875, 1.4738380126953126, 1.4738585205078125, 1.4744945068359374, 1.4743634033203126, 1.474292724609375, 1.4741781005859376, 1.4750125732421875, 1.4740316162109375, 1.4740899658203126, 1.473870849609375, 1.47460302734375, 1.4747423095703125, 1.4751129150390625, 1.4748599853515625, 1.474618408203125, 1.475168212890625, 1.4755379638671875, 1.4745087890625, 1.474472900390625, 1.474514892578125, 1.474840576171875, 1.47443603515625, 1.4743101806640626, 1.4746552734375, 1.4748037109375, 1.4745220947265625, 1.4747740478515625, 1.4746317138671876, 1.4742313232421875, 1.4737684326171876, 1.4743951416015626, 1.474081787109375, 1.4740859375, 1.47426611328125, 1.474807861328125, 1.47405615234375, 1.4741063232421876, 1.474302978515625, 1.4747801513671874, 1.4749112548828125, 1.4753310546875, 1.4741790771484375, 1.4742220458984374]",tokens/s,0.6679414821062154,,,,,,,, 4bit-awq-gemm-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1368.223744,6256.328704,0.0,5609.8816,5292.617728,s,10,5.657606628417969,0.5657606628417968,0.0009498936514595362,0.5654808349609375,0.5660143920898437,0.5672829833984374,0.5682978564453125,"[0.5685515747070312, 0.5654931030273438, 0.565177734375, 0.5654685668945313, 0.5652743530273437, 0.5651702880859375, 0.5656128540039063, 0.5657227172851562, 0.5657324829101562, 0.5654029541015625]",tokens/s,452.48815764977473,kWh,6.682694372203615e-06,3.660675940348786e-06,3.1253065125888854e-05,4.159643543844126e-05,tokens/kWh,6154373.50103846,MB,1368.551424,6256.328704,0.0,5609.8816,5503.949312,s,10,330.81899609375,33.081899609375,0.006904428548072782,33.079876953125,33.091131640625,33.0930072265625,33.0945076953125,"[33.07798046875, 33.078234375, 33.07119921875, 33.0948828125, 33.09071484375, 33.08729296875, 33.08378515625, 33.07924609375, 33.0805078125, 33.07515234375]",tokens/s,1.9043646448327465,kWh,0.0003903993130889204,0.00021397087349180462,0.001808246989806311,0.0024126171763870358,tokens/kWh,26112.721328770578,,s,629,335.3824949340819,0.5331995149985406,0.06720644230645373,0.5250816040039062,0.52554873046875,0.52574228515625,1.0904419140625001,"[0.5248307495117187, 0.5246986083984375, 0.5245040893554688, 0.524674072265625, 0.52523828125, 0.5248563232421875, 0.5253027954101562, 0.5246597290039062, 0.5248173828125, 0.5247999877929688, 0.5255188598632813, 0.5246453247070313, 0.5245347900390624, 0.5246586303710937, 0.525032470703125, 0.5246986083984375, 0.5246525268554687, 0.5253273315429687, 0.5247651977539063, 0.5249915161132812, 0.5252413330078125, 0.5252423706054687, 
0.5247785034179687, 0.5247273559570312, 0.5247057495117188, 0.5248440551757813, 0.5247232055664063, 0.5247467651367187, 0.5255465087890625, 0.5256212768554688, 0.5258025512695312, 0.5255167846679687, 0.5248409423828125, 0.5254369506835938, 0.5253928833007813, 0.5247928466796875, 0.5255188598632813, 0.5249791870117188, 0.5249310913085937, 0.5250969848632813, 0.5247672119140625, 0.5248972778320312, 0.5248655395507813, 0.5252689819335937, 0.5248040771484375, 0.525211669921875, 0.5255167846679687, 0.525169677734375, 0.5251573486328125, 0.524832763671875, 0.5248081665039063, 0.5248256225585938, 0.5252925415039063, 0.5251204833984375, 0.525022216796875, 0.5254482421875, 0.525276123046875, 0.5254348754882813, 0.5250242309570312, 0.5254471435546875, 0.5251328125, 0.5251553344726563, 1.0913822021484374, 0.5247088623046875, 0.52463818359375, 0.5248921508789063, 0.5250447387695313, 0.52459423828125, 0.5247610473632812, 0.524621826171875, 0.5250303955078125, 0.5247098999023437, 0.5248706665039062, 0.52496484375, 0.524769287109375, 0.525318115234375, 0.5248880615234375, 0.5248450317382812, 0.5251092529296875, 0.5249443969726563, 0.5248491821289063, 0.5246842651367187, 0.524643310546875, 0.5247713012695312, 0.5255167846679687, 0.5258618774414062, 0.5249320678710937, 0.5256673583984375, 0.5253673095703125, 0.525570068359375, 0.5251849975585937, 0.5251604614257812, 0.5255874633789063, 0.5250549926757813, 0.5249617309570312, 0.5247528686523437, 0.5250518798828125, 0.525127685546875, 0.5249607543945313, 0.5249474487304687, 0.525033447265625, 0.5248983154296875, 0.5250068359375, 0.52510205078125, 0.5252761840820312, 0.5249771728515625, 0.5250181274414063, 0.5249228515625, 0.5253406982421875, 0.5251481323242188, 0.5250140380859375, 0.5252628784179687, 0.525149169921875, 0.5251195068359376, 0.5251184692382812, 0.5249095458984375, 0.5248972778320312, 0.5250303955078125, 0.525391845703125, 0.524969970703125, 0.5247293701171875, 0.5249556274414062, 0.5251163940429687, 0.5255157470703125, 0.5251348266601562, 1.090335693359375, 0.524874755859375, 0.5245787963867188, 0.524506103515625, 0.5248809204101562, 0.5247160034179688, 0.5246280517578125, 0.5246934204101562, 0.5246658325195312, 0.5245573120117187, 0.5246976928710938, 0.5247764282226562, 0.524717041015625, 0.5248132934570312, 0.52461669921875, 0.52514306640625, 0.525487060546875, 0.5247119140625, 0.524663818359375, 0.524600341796875, 0.524516357421875, 0.524632080078125, 0.5244651489257812, 0.5252577514648438, 0.5246760864257812, 0.524959716796875, 0.5246556396484375, 0.5247354736328125, 0.5250734252929687, 0.5248143310546876, 0.5247682495117187, 0.5251287231445313, 0.5246996459960938, 0.5248102416992187, 0.5250416870117187, 0.5254154052734376, 0.5251204833984375, 0.524943359375, 0.5247672119140625, 0.5249054565429687, 0.5247723388671875, 0.5249924926757813, 0.5250969848632813, 0.5248839721679688, 0.524780517578125, 0.5248870239257812, 0.5249146728515625, 0.525117431640625, 0.5246310424804688, 0.5249474487304687, 0.5247897338867188, 0.52493310546875, 0.5250109252929688, 0.5253570556640625, 0.5257471923828125, 0.5256693725585937, 0.52505908203125, 0.5255249633789062, 0.5257697143554687, 0.5254031372070312, 0.5251993408203125, 0.5257103271484375, 0.5251307373046875, 1.090680908203125, 0.525180908203125, 0.524788818359375, 0.5252505493164062, 0.5254584350585938, 0.5254379272460937, 0.52552294921875, 0.5255587768554687, 0.525601806640625, 0.5258516235351562, 0.5258157958984375, 0.5254614868164063, 0.5251604614257812, 0.5255485229492187, 0.5253007202148438, 
0.5253621826171875, 0.5256417236328125, 0.525749267578125, 0.5251840209960937, 0.5252259521484375, 0.5253324584960938, 0.5253058471679688, 0.52491162109375, 0.5250846557617187, 0.525254638671875, 0.525391845703125, 0.52520654296875, 0.5254430541992188, 0.5258055419921875, 0.525138916015625, 0.5251840209960937, 0.5249403076171875, 0.525707275390625, 0.5251296997070313, 0.52486962890625, 0.5257564086914063, 0.5252894897460938, 0.5254993896484375, 0.5254983520507812, 0.5253345336914063, 0.5253857421875, 0.5251768188476562, 0.52502734375, 0.5253447875976562, 0.5251942138671875, 0.52514306640625, 0.5253836669921875, 0.5265366821289063, 0.5247119140625, 0.5249392700195312, 0.5253396606445313, 0.5246771240234375, 0.5253150634765625, 0.5247682495117187, 0.5253621826171875, 0.5255966796875, 0.5253765258789063, 0.5253641967773437, 0.5249669189453126, 0.5252526245117187, 0.5254635009765625, 0.5250498657226562, 0.5251553344726563, 1.090404296875, 0.524969970703125, 0.5259376831054687, 0.5252003784179687, 0.5258106689453125, 0.5252955932617187, 0.5250447387695313, 0.525154296875, 0.5248737182617188, 0.5253058471679688, 0.5248440551757813, 0.524874755859375, 0.5246546020507813, 0.5248102416992187, 0.5245665283203125, 0.5248348388671875, 0.5247498168945313, 0.5249761352539063, 0.5246576538085937, 0.5247181396484375, 0.5246954956054688, 0.5256632080078125, 0.5248604125976563, 0.524747802734375, 0.5248604125976563, 0.5250211791992188, 0.5254573974609374, 0.5250242309570312, 0.52548095703125, 0.5251154174804687, 0.5251461181640625, 0.5252608032226562, 0.52522607421875, 0.5251276245117188, 0.5253990478515626, 0.5256478881835938, 0.525464599609375, 0.5254779052734375, 0.5252710571289062, 0.5255720825195312, 0.5254932250976563, 0.52564892578125, 0.5251912231445313, 0.5251798095703125, 0.5252771606445312, 0.5259796752929687, 0.5252730712890625, 0.5259386596679687, 0.5251870727539063, 0.525286376953125, 0.52527001953125, 0.5250816040039062, 0.525275146484375, 0.52520654296875, 0.5252454833984375, 0.5256109619140625, 0.5253765258789063, 0.52531201171875, 0.5252474975585938, 0.5265930786132812, 0.525812744140625, 0.5261884765625, 0.5251481323242188, 1.09045654296875, 0.5245020141601563, 0.5246781616210937, 0.5249658813476562, 0.5249915161132812, 0.5245040893554688, 0.52481640625, 0.5248297119140625, 0.5247836303710938, 0.524885986328125, 0.5248921508789063, 0.52484814453125, 0.5248286743164062, 0.524632080078125, 0.5257512817382812, 0.524990478515625, 0.5250908203125, 0.5249392700195312, 0.5247129516601563, 0.5252608032226562, 0.5249863891601563, 0.5254144287109375, 0.524600341796875, 0.5249567260742187, 0.5247467041015625, 0.5249075317382812, 0.5252260131835937, 0.5248870239257812, 0.5250662231445312, 0.5250048828125, 0.5253692626953125, 0.5249249267578125, 0.524853271484375, 0.5260421142578126, 0.5258383178710937, 0.5250867309570313, 0.5253775634765625, 0.524843017578125, 0.525390869140625, 0.5253294067382812, 0.5256530151367188, 0.5254686889648438, 0.525365234375, 0.525201416015625, 0.5253119506835937, 0.5253365478515625, 0.5253560180664063, 0.5255802612304687, 0.5252782592773437, 0.5253252563476563, 0.5253990478515626, 0.52508056640625, 0.5262468872070313, 0.5258137817382813, 0.525549560546875, 0.5252843627929688, 0.5254717407226562, 0.525453369140625, 0.5256119384765625, 0.5255628662109375, 0.5259458618164062, 0.5253795776367187, 0.52560693359375, 1.0914058837890626, 0.5248624877929687, 0.5247764282226562, 0.5250928344726562, 0.5249915161132812, 0.5249924926757813, 0.524921875, 0.5255986938476562, 
0.525233154296875, 0.5249976196289062, 0.525106201171875, 0.525254638671875, 0.5254993896484375, 0.52495361328125, 0.5254133911132812, 0.5256365966796875, 0.5252393188476563, 0.5251287231445313, 0.5250734252929687, 0.5250263061523438, 0.5252188110351562, 0.5251512451171875, 0.5254318237304687, 0.5252034301757813, 0.5248993530273437, 0.5250560302734375, 0.5257349243164062, 0.5247313842773438, 0.5247518920898437, 0.5246392211914063, 0.5248256225585938, 0.5250816040039062, 0.5247979736328126, 0.5251195068359376, 0.5250089111328125, 0.525053955078125, 0.5249034423828125, 0.5249392700195312, 0.525000732421875, 0.5251604614257812, 0.5249515380859375, 0.5250344848632813, 0.5250201416015625, 0.524843017578125, 0.5250570068359375, 0.525391845703125, 0.5252781982421875, 0.5252321166992188, 0.5249515380859375, 0.5251287231445313, 0.5253140258789063, 0.524959716796875, 0.5254113159179687, 0.5252976684570313, 0.5251635131835938, 0.525433837890625, 0.5251604614257812, 0.5252894897460938, 0.5252474975585938, 0.5253939208984375, 0.5253867797851562, 0.5253079223632813, 0.525212646484375, 1.0918123779296875, 0.5249392700195312, 0.5248511962890625, 0.5250714111328125, 0.525445068359375, 0.5252464599609376, 0.5249423217773438, 0.5251881103515625, 0.5252290649414062, 0.5253109741210937, 0.5252495727539063, 0.524802001953125, 0.52481640625, 0.5247365112304687, 0.5250303955078125, 0.5251235961914062, 0.5250078735351562, 0.5248880615234375, 0.5249197998046875, 0.5250938720703126, 0.525497314453125, 0.5250078735351562, 0.5247528686523437, 0.5247733764648438, 0.524821533203125, 0.524938232421875, 0.5251942138671875, 0.5249832763671874, 0.5250949096679688, 0.5249238891601562, 0.524906494140625, 0.5249658813476562, 0.5247979736328126, 0.5248522338867188, 0.52481640625, 0.52491162109375, 0.5247928466796875, 0.5249320678710937, 0.5248573608398438, 0.5247938842773437, 0.5249791870117188, 0.52478466796875, 0.5247897338867188, 0.5247600708007812, 0.52487890625, 0.5249884033203125, 0.5252925415039063, 0.5250416870117187, 0.5249464111328125, 0.5251000366210937, 0.5250816040039062, 0.5249525756835938, 0.5252413940429688, 0.5255003662109375, 0.5252474975585938, 0.5254031372070312, 0.5251502075195312, 0.5256693725585937, 0.5256007690429687, 0.5253478393554688, 0.5253816528320312, 0.525201416015625, 0.5253990478515626, 1.0916168212890625, 0.52502734375, 0.5249310913085937, 0.524822509765625, 0.5248081665039063, 0.5246361694335937, 0.5247744140625, 0.5250693359375, 0.5248952026367187, 0.5249024047851563, 0.5249915161132812, 0.5248173828125, 0.525412353515625, 0.5252208862304687, 0.5250160522460937, 0.524969970703125, 0.525085693359375, 0.5248706665039062, 0.5251307373046875, 0.5251522827148437, 0.5256038208007813, 0.5248409423828125, 0.5249832763671874, 0.5250211791992188, 0.524906494140625, 0.524864501953125, 0.52510107421875, 0.5247754516601563, 0.5251942138671875, 0.5250908203125, 0.5250857543945312, 0.5254880981445312, 0.5257154541015625, 0.5253683471679688, 0.5251204833984375, 0.5250826416015625, 0.525117431640625, 0.525169677734375, 0.5254266967773438, 0.525581298828125, 0.5249771728515625, 0.5250396118164062, 0.5251522827148437, 0.5250263061523438, 0.5251163940429687, 0.5250775146484375, 0.5251696166992188, 0.5251348266601562, 0.5248726806640625, 0.5248040771484375, 0.525322265625, 0.5251829833984375, 0.5251051635742188, 0.5248706665039062, 0.5250303955078125, 0.5250109252929688, 0.5251963500976562, 0.5250313720703125, 0.5255403442382812, 0.5248737182617188, 0.5250242309570312, 0.5249843139648438, 0.5250836181640625, 
1.0909224853515624, 0.5246965942382813, 0.5248829345703125, 0.5249362182617188, 0.5247160034179688, 0.5250303955078125, 0.5250089111328125, 0.5251287231445313, 0.5249238891601562, 0.5249658813476562, 0.5249515380859375, 0.5246607055664062, 0.52463720703125, 0.525317138671875, 0.5247047729492188, 0.5247109375, 0.5245911254882812, 0.5245593872070312, 0.5245286254882813, 0.5245368041992188, 0.5244630737304687, 0.5245419311523437, 0.5247344360351562, 0.5246781616210937, 0.5250242309570312, 0.525285400390625, 0.5248081665039063, 0.5248297119140625, 0.5248153686523438, 0.5249310913085937, 0.5255239868164062, 0.5253621826171875, 0.5255106811523438, 0.5247969360351562, 0.52493115234375, 0.5251900634765625, 0.525365234375, 0.52534375, 0.52518603515625, 0.5249095458984375, 0.524705810546875, 0.5250477905273437, 0.5248071899414063, 0.5250426635742188, 0.5253519287109375, 0.52510205078125, 0.5249894409179687, 0.5251450805664063, 0.52478466796875, 0.5255065307617187, 0.5247191162109375, 0.5256325073242187, 0.5251266479492187, 0.5251030883789063, 0.5249208374023437, 0.5255485229492187, 0.525053955078125, 0.5254256591796875, 0.525159423828125, 0.525106201171875, 0.5251512451171875, 0.5250416870117187, 0.5253683471679688]",tokens/s,1.8754705731544732,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpodtbpat2/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( 
File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa7b-26f026ea38819f413564c63e;fe8b807e-5250-4567-a8b9-8ed4e2fe38a5) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 976, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 866, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 583, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 411, in forward query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata 
r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa29-787e900806192c8213850fc6;4f3bbb2a-de72-43cb-a202-0733e6f310a8) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 1034, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 274, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 672, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 84, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 1124, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 950, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 578, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 317, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpcdzp_90u/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = 
target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa545-705ec7dc4cd12f24336f4d9d;5c1cee1f-944f-427e-a8bb-3306cbc75d56) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpmd0d_7j8/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return 
self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in 
get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9d2-6cea5b9e6e7329f8696f71af;249e196c-75ac-467f-9a04-33b9b349438e) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaace-4d4258fb7c9f944a6686853f;8c2985fb-9dcf-4258-a94c-a87ee9f4c7d4) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 
2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpwm_obutw/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 344, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in 
from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpe7602us0/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = 
self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 835, in forward inputs_embeds = self.project_in(inputs_embeds) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl 
return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 291, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 416, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) 
File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) 
File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 319, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 1047, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 932, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 639, in forward hidden_states, self_attn_weights, present_key_value = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 446, in forward qkv_states = self.wqkv(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 327, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpek2o7wtm/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa89-09800c3f4c244d442c6f81a5;e15a8c85-5c56-4b39-8c50-d268f7196ce2) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa37-6311646a5c47c7bb5c64afcd;74ae4787-636d-4952-ab78-1b5535e8a752) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmppfgzvkn7/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa553-0c59137e0a74ecdf71618267;6116998d-12de-4f25-a1ad-ace342cb13c6) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp2q60bee7/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9e0-7cb1c6095de40d4d74f70121;53231438-7253-4643-8b86-7c0a4575b07d) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaadb-0269a15f0fb061c510273b88;4bed6e2f-8ace-40f2-a799-ba33dce191eb) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpbvzim6x7/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpz6s3yav_/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa4b-2763b8c34bf14a5178850de7;021cff04-caef-48f5-aafd-5c2a0aebe482) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9f7-0ecdb001106b067c7d916e76;5d387675-6b80-4a7c-b00d-fa0710b78b97) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa515-4cd9ee526e393e0b2c53f940;1c287a92-4eed-4b60-b1ed-c821656f6375) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa99c-1760a7a77cd8ee007be0f51e;b3893274-4098-4455-b131-b1af67f7075e) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaa9d-18c73fe40319df131108d89a;65834509-e2ea-4a4a-8802-05955ae3eca9) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-exllama-v1-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 461, in post_init_awq_exllama_modules model = exllama_post_init(model) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 144, in exllama_post_init submodule.post_init() File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllama.py"", line 77, in post_init self.q4 = exl_ext.make_q4( NameError: name 'exl_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,1,64,1 
4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, 
**kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 667, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 536, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 272, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 163, in forward qkv = self.qkv_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, 
timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa44-5f302e853db5bb8f135149b5;bf92de86-fc4a-494b-9935-69d15aad2191) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 976, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 866, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 583, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 339, in forward query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = 
_request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9ef-446c812a6bb84e200264b4e9;60b8f8a0-ab75-49fe-a5fa-0a14c6516974) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in 
forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in 
get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 1034, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 274, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl 
return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/transformers_v4_35_2__modeling_llama.py"", line 672, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/Deci/DeciLM-7B/c3c9f4226801dc0433f32aebffe0aac68ee2f051/modeling_decilm.py"", line 84, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 1124, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 950, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 578, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gptj/modeling_gptj.py"", line 224, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", 
line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 667, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 536, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 272, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/codegen/modeling_codegen.py"", line 163, in forward qkv = self.qkv_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs 
= self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in 
_sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", 
line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa50f-438d9f71258d9d1c056536e1;fab07f65-199e-4628-99dc-2a71d05c15a8) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward 
hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample 
outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa993-7bd2fc9d24fc39b05f415c50;6b5b352e-ab93-4509-8ee1-d7bf66379068) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaa97-73d091fc4f369e11602676a9;cabb7c40-abc4-4bdb-8451-8159f0672eeb) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 
687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in 
_wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 249, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File 
""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 251, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 761, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 647, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 414, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/xglm/modeling_xglm.py"", line 244, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", 
line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 835, in forward inputs_embeds = self.project_in(inputs_embeds) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 1117, in forward outputs = self.model.decoder( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 883, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 524, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/opt/modeling_opt.py"", line 154, in forward query_states = self.q_proj(hidden_states) * self.scaling File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 327, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 971, in forward transformer_outputs = self.transformer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 839, in forward outputs = block( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 566, in forward attn_outputs = self.attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 518, in forward return self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neo/modeling_gpt_neo.py"", line 260, in forward query = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 1047, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 932, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, 
**kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 639, in forward hidden_states, self_attn_weights, present_key_value = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 344, in forward qkv_states = self.wqkv(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 
1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 1031, in forward outputs = self.gpt_neox( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 922, in forward outputs = layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 687, in forward attention_layer_outputs = self.attention( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/gpt_neox/modeling_gpt_neox.py"", line 172, in forward qkv = self.query_key_value(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1625.034752,2254.962688,0.0,1608.515584,1463.6928,s,10,1.2100343170166015,0.12100343170166014,0.001022141281647672,0.12073452758789063,0.12208388671874999,0.1227883918762207,0.12335199600219726,"[0.1234928970336914, 0.12192733001708984, 0.12002118682861328, 0.12029216003417968, 0.12038220977783202, 0.1198985595703125, 0.1206871337890625, 0.12078192138671875, 0.12138086700439453, 0.12117005157470703]",tokens/s,2115.6424772413106,kWh,1.4169729851856437e-06,7.764314618920512e-07,6.3009111253815796e-06,8.494315572459274e-06,tokens/kWh,30137801.90013389,MB,1625.034752,2254.962688,0.0,1608.515584,1560.974848,s,10,70.266875,7.0266874999999995,0.015802312461115554,7.021189208984375,7.04873154296875,7.0518982421875,7.0544316015624995,"[7.05506494140625, 7.04802783203125, 7.0200771484375, 7.04010400390625, 7.03416455078125, 7.01334814453125, 7.0082919921875, 7.02230126953125, 7.0167861328125, 7.008708984375]",tokens/s,8.965817819562917,kWh,8.271868514320838e-05,4.533574606695635e-05,0.00036366391262401533,0.0004917183438341799,tokens/kWh,128122.12680282927,,s,629,71.24009573364258,0.11325929369418533,0.01441184935436087,0.111351806640625,0.11215667266845704,0.11239075622558593,0.2323562939453125,"[0.11357389068603516, 0.11349612426757813, 0.11200812530517579, 0.11255910491943359, 0.1121167984008789, 0.11198560333251953, 0.11219558715820313, 0.11200819396972657, 0.11196825408935547, 0.11275468444824219, 0.11222630310058594, 0.11180441284179687, 0.11134464263916016, 0.11147980499267578, 0.11213721466064454, 0.11178495788574219, 0.11166719818115234, 0.11174092864990234, 0.11117977905273438, 0.1110456314086914, 0.11137638092041016, 0.11204710388183593, 0.11143679809570313, 0.11129449462890625, 0.11121353912353515, 0.1112074203491211, 0.11121561431884766, 0.11156582641601563, 0.11124736022949219, 0.1111910400390625, 0.1112965087890625, 0.11185664367675781, 0.11158732604980469, 0.1118187484741211, 0.11172659301757812, 0.11204096221923829, 0.11242803192138671, 0.11206655883789063, 0.11170611572265625, 0.1124925765991211, 0.11240444946289062, 0.11224883270263672, 0.11220172882080078, 0.11224473571777344, 0.11245168304443359, 0.11209001922607421, 0.11190579223632813, 0.11188838195800781, 0.11227750396728516, 0.11232051086425782, 0.11256114959716797, 0.11237478637695313, 0.11200921630859376, 0.11215462493896484, 0.1119549789428711, 0.11200611114501953, 0.11207987213134765, 0.11194371032714844, 0.11197846221923828, 0.11205836486816406, 0.11220787048339843, 0.11210034942626954, 0.2345594940185547, 0.1117286376953125, 0.11186585235595703, 0.11204914855957031, 0.11177677154541016, 0.1115832290649414, 0.11116851043701172, 0.1114071044921875, 0.11131391906738282, 0.11137741088867187, 
0.11209728240966797, 0.11197542572021485, 0.11190681457519532, 0.11151052856445312, 0.11113471984863281, 0.11245875549316406, 0.11208191680908203, 0.11218540954589844, 0.11216889953613281, 0.11212287902832031, 0.11191302490234376, 0.11190470123291016, 0.11167948913574219, 0.11192729949951172, 0.11203174591064453, 0.11200819396972657, 0.11200409698486329, 0.11204198455810546, 0.11203584289550782, 0.11207577514648437, 0.11203174591064453, 0.11206861114501954, 0.11190476989746094, 0.11196518707275391, 0.11201945495605468, 0.11220070648193359, 0.1121239013671875, 0.11213619232177735, 0.11214745330810547, 0.11210034942626954, 0.11117158508300781, 0.11123609924316406, 0.11129138946533203, 0.11250688171386719, 0.11131903839111328, 0.11113369750976562, 0.11244338989257813, 0.11134259033203125, 0.11169075012207032, 0.11205427551269531, 0.11214745330810547, 0.11191705322265624, 0.11207884979248046, 0.11197849273681641, 0.11211673736572265, 0.11211161804199218, 0.11191500854492188, 0.11222118377685547, 0.11239730834960937, 0.11132415771484375, 0.11126681518554687, 0.1112985610961914, 0.11182182312011718, 0.23266201782226562, 0.11202662658691406, 0.11120025634765625, 0.11109683227539062, 0.11149005126953125, 0.11119513702392578, 0.11112754821777343, 0.11123916625976563, 0.11105177307128906, 0.1111900177001953, 0.11129036712646484, 0.11116851043701172, 0.11114803314208985, 0.11196927642822266, 0.11116134643554687, 0.11125965118408203, 0.11103539276123046, 0.11127398681640625, 0.11154434967041016, 0.11195798492431641, 0.11191193389892579, 0.1111377944946289, 0.1111562271118164, 0.11188735961914062, 0.11112754821777343, 0.11104972839355469, 0.11122278594970703, 0.11126271820068359, 0.11140402984619141, 0.11229183959960938, 0.11202559661865234, 0.11156588745117188, 0.1111817626953125, 0.11196927642822266, 0.11145932769775391, 0.11130470275878906, 0.11111219024658203, 0.11142041778564453, 0.11112140655517579, 0.11115110778808594, 0.11208601379394531, 0.11188428497314454, 0.11182284545898437, 0.11141017913818359, 0.11125043487548827, 0.11123404693603516, 0.11116646575927734, 0.11186994934082031, 0.11114189147949219, 0.11129446411132812, 0.11111833953857422, 0.1112442855834961, 0.11120845031738281, 0.11120435333251953, 0.11193344116210938, 0.11145116424560547, 0.11117052459716797, 0.11132723236083984, 0.11132109069824218, 0.1112074203491211, 0.11136307525634766, 0.11232870483398437, 0.11170201873779297, 0.2324981689453125, 0.11132723236083984, 0.11152384185791016, 0.11143577575683594, 0.11334963226318359, 0.11229798126220703, 0.11186176300048828, 0.11187916564941407, 0.11197337341308594, 0.11203993225097657, 0.11198976135253906, 0.11205836486816406, 0.11139788818359375, 0.11115929412841796, 0.11112754821777343, 0.11136921691894532, 0.11119513702392578, 0.11117056274414062, 0.11119206237792968, 0.11117465972900391, 0.11126067352294922, 0.11122380828857421, 0.11115827178955077, 0.11134873962402343, 0.1123768310546875, 0.11169586944580077, 0.11113676452636718, 0.11131903839111328, 0.11172249603271485, 0.11171635437011719, 0.11152793884277344, 0.11154022216796874, 0.11121971130371094, 0.11125247955322265, 0.11109174346923828, 0.11109065246582031, 0.111283203125, 0.11159961700439452, 0.11181670379638672, 0.11218841552734375, 0.11207065582275391, 0.11193856048583985, 0.11212083435058594, 0.11199385833740234, 0.11282943725585938, 0.11240140533447265, 0.11203072357177735, 0.11216486358642579, 0.11200102233886719, 0.11202969360351563, 0.112, 0.11213005065917969, 0.11189555358886719, 0.11197647857666015, 
0.11237065887451173, 0.11224269104003906, 0.11207270050048829, 0.11202355194091797, 0.11200204467773438, 0.11213520050048828, 0.1117081298828125, 0.11131187438964844, 0.11138969421386719, 0.23215206909179686, 0.11141426849365234, 0.11262156677246093, 0.11177779388427735, 0.11186585235595703, 0.11191295623779297, 0.11129036712646484, 0.11202355194091797, 0.11225702667236329, 0.11216793823242187, 0.11205120086669922, 0.11202252960205078, 0.11183513641357422, 0.11183513641357422, 0.1118361587524414, 0.11228672027587891, 0.11196723175048828, 0.11265023803710937, 0.11201439666748046, 0.11204192352294921, 0.11198668670654296, 0.11204812622070312, 0.1128499526977539, 0.11216585540771484, 0.11173174285888672, 0.11194057464599609, 0.11185971069335937, 0.11190889739990234, 0.11204195404052734, 0.11242700958251953, 0.1118361587524414, 0.11156070709228516, 0.11167436981201172, 0.11125555419921875, 0.11129446411132812, 0.11137535858154297, 0.1112125473022461, 0.11132723236083984, 0.11112652587890624, 0.11121356964111329, 0.11136511993408203, 0.11127808380126954, 0.11119308471679687, 0.11122278594970703, 0.11130265808105469, 0.11126374053955078, 0.11110604858398437, 0.11118386840820313, 0.11117158508300781, 0.11144499206542968, 0.11132316589355469, 0.11124323272705078, 0.11118899536132812, 0.11127705383300782, 0.11124531555175782, 0.11131084442138672, 0.11114189147949219, 0.11251712036132812, 0.11123916625976563, 0.1111562271118164, 0.11102105712890625, 0.111172607421875, 0.11132518768310547, 0.23168409729003905, 0.11126067352294922, 0.11120333099365234, 0.11120333099365234, 0.11132112121582032, 0.11132003021240235, 0.1112279052734375, 0.11117772674560547, 0.11110399627685547, 0.1111541748046875, 0.1111695327758789, 0.11118796539306641, 0.11124940490722657, 0.11112960052490234, 0.11125452423095702, 0.11125759887695312, 0.11122585296630859, 0.11124224090576172, 0.11116646575927734, 0.11114598083496094, 0.11131494140625, 0.11128832244873046, 0.11116134643554687, 0.11131391906738282, 0.11123404693603516, 0.11126681518554687, 0.11123916625976563, 0.1112985610961914, 0.11178495788574219, 0.11137741088867187, 0.11124121856689453, 0.11152281951904297, 0.11121766662597657, 0.11117362976074219, 0.1112125473022461, 0.11121868896484376, 0.111098876953125, 0.1111900177001953, 0.11227442932128906, 0.11120025634765625, 0.11103846740722656, 0.11157708740234375, 0.11146240234375, 0.11182284545898437, 0.11149517059326172, 0.111499267578125, 0.11131289672851563, 0.11149209594726563, 0.11197853088378906, 0.11136713409423828, 0.11115724945068359, 0.11126067352294922, 0.11121151733398438, 0.11126067352294922, 0.11109478759765624, 0.11183001708984375, 0.11150643157958984, 0.11150745391845703, 0.11111833953857422, 0.11116544342041015, 0.11122994995117187, 0.11125452423095702, 0.1113088607788086, 0.23280429077148437, 0.11111014556884766, 0.11133030700683594, 0.11111119842529296, 0.11112957000732422, 0.11102105712890625, 0.11110809326171875, 0.11106201934814453, 0.11103334045410156, 0.11108870697021485, 0.11110905456542969, 0.11238092803955078, 0.11154227447509765, 0.11123506927490234, 0.11109273529052735, 0.11120333099365234, 0.11103129577636718, 0.11100672149658203, 0.11094937896728516, 0.1110487060546875, 0.11100672149658203, 0.1109534683227539, 0.11100569915771484, 0.11172767639160157, 0.11108550262451172, 0.11122688293457031, 0.11107020568847656, 0.11119721221923828, 0.11113673400878907, 0.11125043487548827, 0.1111562271118164, 0.11116236877441406, 0.1111551971435547, 0.11128012847900391, 0.11118694305419922, 
0.11127091217041016, 0.11107123565673828, 0.11122994995117187, 0.11122380828857421, 0.1111551971435547, 0.11100466918945312, 0.11141836547851562, 0.11136511993408203, 0.11123814392089844, 0.11123302459716797, 0.11117670440673828, 0.11112960052490234, 0.11146444702148438, 0.11135078430175781, 0.11130470275878906, 0.11135897827148437, 0.11127808380126954, 0.11142451477050781, 0.11136716461181641, 0.1120901107788086, 0.11140300750732422, 0.11124326324462891, 0.11130985260009765, 0.111257568359375, 0.1111551971435547, 0.1111756820678711, 0.11127295684814453, 0.11109580993652343, 0.23243571472167968, 0.11107839965820313, 0.11138361358642578, 0.11138758087158203, 0.111172607421875, 0.11116441345214843, 0.11112754821777343, 0.11145728302001953, 0.11120230102539062, 0.11218739318847656, 0.11158425903320313, 0.11142348480224609, 0.11124326324462891, 0.11134566497802735, 0.11134770965576171, 0.11127603149414063, 0.11175628662109376, 0.11245772552490234, 0.11182899475097656, 0.11139379119873047, 0.11116031646728515, 0.11131187438964844, 0.11135897827148437, 0.11138253021240234, 0.11129446411132812, 0.11140914916992188, 0.1112995834350586, 0.11167334747314453, 0.11146444702148438, 0.11127398681640625, 0.1114286117553711, 0.11160883331298828, 0.111388671875, 0.11126783752441406, 0.111283203125, 0.11141529846191406, 0.11138355255126953, 0.11169382476806641, 0.111425537109375, 0.11148297882080078, 0.11134454345703125, 0.11160371398925781, 0.11167334747314453, 0.11133235168457031, 0.11129036712646484, 0.11148089599609375, 0.11127085113525391, 0.11113471984863281, 0.11119821166992187, 0.11146444702148438, 0.1114419174194336, 0.11144499206542968, 0.11137741088867187, 0.11137126159667969, 0.11174604797363281, 0.11149619293212891, 0.11148902130126953, 0.1114419174194336, 0.1115525131225586, 0.11180646514892578, 0.11210854339599609, 0.11200819396972657, 0.11173580932617187, 0.2335253143310547, 0.11135481262207031, 0.11140300750732422, 0.11150540924072265, 0.11128627014160156, 0.11110707092285156, 0.11116851043701172, 0.11114393615722656, 0.1112995834350586, 0.11127808380126954, 0.11109273529052735, 0.11120953369140625, 0.1113087387084961, 0.11121971130371094, 0.11118284606933594, 0.11124736022949219, 0.1122508773803711, 0.11217715454101562, 0.1113733139038086, 0.11129344177246094, 0.11147058868408204, 0.11150335693359376, 0.111351806640625, 0.11135897827148437, 0.11148492431640625, 0.11133952331542969, 0.11130675506591797, 0.11139481353759766, 0.11144703674316406, 0.11154124450683593, 0.11132825469970703, 0.11111219024658203, 0.11122073364257813, 0.11118386840820313, 0.11125971221923828, 0.1125447006225586, 0.11146546936035157, 0.11134361267089844, 0.11123097229003906, 0.11145728302001953, 0.11127091217041016, 0.1112074203491211, 0.1114224624633789, 0.11159859466552734, 0.11146854400634766, 0.11118284606933594, 0.11170816040039062, 0.11130777740478516, 0.11138662719726562, 0.11138253021240234, 0.11114701080322266, 0.11128729248046874, 0.11132109069824218, 0.11136102294921875, 0.11118284606933594, 0.11117881774902344, 0.11109677124023437, 0.11118796539306641, 0.11113267517089843, 0.11119411468505859, 0.111246337890625, 0.1112279052734375, 0.11181465911865235, 0.23307980346679688, 0.11142041778564453, 0.1113538589477539, 0.11121561431884766, 0.11131903839111328, 0.11121561431884766, 0.11105689239501954, 0.111246337890625, 0.11114291381835938, 0.1110835189819336, 0.1110241928100586, 0.11124934387207032, 0.11121561431884766, 0.11143475341796875, 0.11115007781982422, 0.11116031646728515, 0.11110912322998047, 
0.11113881683349609, 0.11112754821777343, 0.11146137237548828, 0.11116646575927734, 0.1111173095703125, 0.11114291381835938, 0.11114495849609375, 0.11101900482177735, 0.11122176361083984, 0.11194265747070313, 0.11108147430419922, 0.11138662719726562, 0.11130368041992188, 0.11116748809814453, 0.11115110778808594, 0.11116649627685547, 0.11112751770019531, 0.1111695327758789, 0.11108454132080078, 0.11120127868652344, 0.11118796539306641, 0.11125555419921875, 0.11124018859863281, 0.11112140655517579, 0.11123712158203125, 0.11195391845703125, 0.1116231689453125, 0.111388671875, 0.11123404693603516, 0.11115110778808594, 0.11121459197998047, 0.1111562271118164, 0.11116748809814453, 0.11117772674560547, 0.11126374053955078, 0.1111910400390625, 0.11120333099365234, 0.11108454132080078, 0.11108761596679688, 0.1111203842163086, 0.1111357421875, 0.11103231811523437, 0.11127603149414063, 0.11121868896484376, 0.11217817687988281, 0.11138457489013671]",tokens/s,8.829297511779728,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpoc0pnttw/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1279.725568,872.93952,0.0,226.492416,184.397824,s,12,0.17399129676818847,0.014499274730682373,0.0004405777377392672,0.014355583667755127,0.01475712022781372,0.015286664152145383,0.01576770728111267,"[0.01588796806335449, 0.014419008255004882, 0.014310144424438476, 0.01430396842956543, 0.014377471923828124, 0.014377087593078612, 0.01433407974243164, 0.014250847816467285, 0.014390144348144531, 0.01479468822479248, 0.014266719818115234, 0.014279168128967285]",tokens/s,17656.055544507362,kWh,1.7155437431226257e-07,9.399407219055296e-08,3.2474264005518264e-07,5.902910865579981e-07,tokens/kWh,433684339.52262825,MB,1280.053248,872.93952,0.0,226.492416,197.932544,s,12,10.420531616210937,0.8683776346842448,0.009197306451406238,0.8653596801757812,0.8716784362792969,0.8835345275878906,0.8948784387207032,"[0.8977144165039063, 0.866240966796875, 0.8642362670898438, 0.8648695068359376, 0.8719328002929687, 0.8635298461914063, 0.862948974609375, 0.866379638671875, 0.86938916015625, 0.865849853515625, 0.8638052368164062, 0.8636349487304688]",tokens/s,72.54908174012076,kWh,1.0699516429705703e-05,5.862862695252688e-06,1.9223555826850592e-05,3.578593495180898e-05,tokens/kWh,1760468.186309475,,s,755,10.563054601669307,0.013990800796912995,0.001781085498877292,0.013702143669128418,0.014027775764465332,0.014339276885986327,0.028389335021972658,"[0.015265791893005372, 0.014698495864868164, 0.014765055656433105, 0.014334976196289062, 0.014128128051757812, 0.014334976196289062, 0.014543871879577636, 0.014234623908996581, 0.0144650239944458, 0.01454694366455078, 0.014418944358825684, 0.01439027214050293, 0.01425100803375244, 0.014261247634887696, 0.014215167999267577, 0.014323712348937988, 0.014708736419677734, 0.014633983612060546, 0.01434931182861328, 0.01467801570892334, 0.014370816230773926, 0.014229503631591797, 0.014525440216064453, 0.014231552124023437, 0.014211071968078613, 0.014244864463806153, 0.014368767738342286, 0.014796799659729003, 0.014531583786010742, 0.014297087669372559, 0.014436384201049805, 0.014063584327697753, 0.014220288276672363, 0.013916159629821777, 0.014133248329162598, 0.014468095779418945, 0.014696479797363281, 0.014910431861877441, 0.014256128311157227, 0.014102527618408203, 0.01407590389251709, 0.014150655746459961, 0.014071807861328126, 0.013922304153442382, 0.013725728034973144, 0.01371132755279541, 0.013733887672424316, 0.013731840133666993, 0.014045184135437011, 0.014156800270080566, 0.014639103889465332, 0.014141440391540527, 0.013728863716125488, 0.013714336395263671, 0.013676544189453126, 
0.013729791641235351, 0.013732864379882812, 0.013691904067993164, 0.013711359977722168, 0.013743103981018067, 0.013743103981018067, 0.013660223960876464, 0.0284968318939209, 0.013691904067993164, 0.01368883228302002, 0.013686783790588379, 0.013670399665832519, 0.01399295997619629, 0.013728768348693847, 0.013667327880859375, 0.013691904067993164, 0.01365503978729248, 0.013710335731506347, 0.013666303634643554, 0.013699071884155273, 0.013669376373291015, 0.013689855575561523, 0.013734911918640137, 0.013744128227233888, 0.01374617576599121, 0.01366323184967041, 0.013693951606750488, 0.013799424171447755, 0.013740032196044923, 0.013728768348693847, 0.013709312438964843, 0.013709312438964843, 0.01379532814025879, 0.013731840133666993, 0.013713408470153808, 0.013707263946533203, 0.013776896476745605, 0.01368883228302002, 0.013706239700317382, 0.013716480255126954, 0.013789183616638183, 0.014757887840270996, 0.013946911811828614, 0.013715423583984376, 0.013734911918640137, 0.013693951606750488, 0.01369600009918213, 0.013839360237121581, 0.013735936164855958, 0.013712384223937989, 0.013700096130371094, 0.013719552040100098, 0.013707263946533203, 0.013677568435668945, 0.013913087844848633, 0.013707263946533203, 0.013698047637939453, 0.013667360305786132, 0.013723615646362305, 0.013713408470153808, 0.013699071884155273, 0.013731840133666993, 0.013686783790588379, 0.013784064292907714, 0.01374828815460205, 0.013744064331054687, 0.013783040046691895, 0.013717503547668456, 0.013740032196044923, 0.013710335731506347, 0.028412927627563478, 0.013706239700317382, 0.013686783790588379, 0.013731840133666993, 0.013669376373291015, 0.013661184310913087, 0.013645824432373046, 0.013658111572265624, 0.013669376373291015, 0.013628416061401367, 0.013694975852966309, 0.013636608123779297, 0.013705216407775878, 0.013678591728210449, 0.013858816146850587, 0.01386086368560791, 0.01368883228302002, 0.013693951606750488, 0.013660160064697266, 0.01374518394470215, 0.013757408142089844, 0.013689855575561523, 0.013717503547668456, 0.01369600009918213, 0.013725695610046386, 0.013678591728210449, 0.013732895851135254, 0.013722592353820801, 0.0136878080368042, 0.013707263946533203, 0.013698047637939453, 0.013684736251831055, 0.013669376373291015, 0.013684736251831055, 0.01375641632080078, 0.013708288192749024, 0.013765631675720215, 0.013703167915344238, 0.013721599578857421, 0.013702143669128418, 0.013694975852966309, 0.013763584136962891, 0.01369600009918213, 0.013714431762695312, 0.013794303894042969, 0.013682687759399414, 0.013697024345397948, 0.013739007949829102, 0.013708288192749024, 0.013678591728210449, 0.013742079734802246, 0.013710335731506347, 0.013716480255126954, 0.013678591728210449, 0.013706239700317382, 0.013776896476745605, 0.013827072143554688, 0.013702143669128418, 0.013737983703613281, 0.013683712005615235, 0.013726719856262207, 0.013710335731506347, 0.013788160324096679, 0.028406784057617186, 0.013711359977722168, 0.013690879821777344, 0.013694975852966309, 0.013714431762695312, 0.013697024345397948, 0.013701120376586913, 0.01367142391204834, 0.013676544189453126, 0.013768704414367675, 0.01366528034210205, 0.013711423873901367, 0.013736895561218262, 0.013741056442260742, 0.01368064022064209, 0.013705216407775878, 0.013719552040100098, 0.013714431762695312, 0.013728768348693847, 0.013700096130371094, 0.01368883228302002, 0.013689855575561523, 0.013683712005615235, 0.013835264205932616, 0.013717503547668456, 0.013843520164489746, 0.013794239997863769, 0.013710335731506347, 0.013718527793884277, 
0.013725695610046386, 0.01368064022064209, 0.013674495697021484, 0.013712384223937989, 0.013718527793884277, 0.013752320289611816, 0.013718527793884277, 0.013736960411071777, 0.01367347240447998, 0.013757439613342285, 0.01370419216156006, 0.013725695610046386, 0.013716480255126954, 0.013707263946533203, 0.013740032196044923, 0.013692928314208984, 0.013726719856262207, 0.013685759544372558, 0.013721664428710938, 0.013703104019165038, 0.01367142391204834, 0.013768704414367675, 0.01369600009918213, 0.013686783790588379, 0.013691904067993164, 0.01368883228302002, 0.013734911918640137, 0.013686783790588379, 0.01375334358215332, 0.013711359977722168, 0.013979647636413574, 0.013771776199340821, 0.013735936164855958, 0.013831232070922852, 0.028631999969482423, 0.013729824066162109, 0.013675488471984864, 0.01368172836303711, 0.013717439651489258, 0.013657088279724122, 0.01366528034210205, 0.013697024345397948, 0.013677568435668945, 0.01365503978729248, 0.013760512351989745, 0.013660160064697266, 0.01366220760345459, 0.013717503547668456, 0.013717503547668456, 0.013676544189453126, 0.013710335731506347, 0.013657088279724122, 0.013947903633117676, 0.013619199752807617, 0.013640704154968262, 0.01366528034210205, 0.013693951606750488, 0.01365401554107666, 0.013627391815185547, 0.013701120376586913, 0.013742079734802246, 0.013648896217346192, 0.013675552368164062, 0.013744095802307128, 0.01440665626525879, 0.01619865608215332, 0.016926719665527345, 0.014181376457214356, 0.01386188793182373, 0.013735936164855958, 0.01376153564453125, 0.013664256095886231, 0.013693951606750488, 0.013689855575561523, 0.013661184310913087, 0.013678591728210449, 0.013649920463562011, 0.014139391899108887, 0.013820927619934082, 0.013752320289611816, 0.013699071884155273, 0.014027775764465332, 0.01368064022064209, 0.013682687759399414, 0.013675519943237305, 0.013695039749145509, 0.01367750358581543, 0.01376460838317871, 0.014016511917114258, 0.0136878080368042, 0.013678591728210449, 0.01369600009918213, 0.013848575592041015, 0.013717503547668456, 0.0136878080368042, 0.013729791641235351, 0.013838335990905762, 0.028457984924316407, 0.013737983703613281, 0.013685759544372558, 0.013691904067993164, 0.013822976112365723, 0.013697024345397948, 0.013604864120483399, 0.013649920463562011, 0.013724672317504882, 0.013774847984313965, 0.013691904067993164, 0.01366528034210205, 0.013736960411071777, 0.013702143669128418, 0.0136878080368042, 0.013701120376586913, 0.013822976112365723, 0.013692928314208984, 0.013674495697021484, 0.013799424171447755, 0.013674495697021484, 0.01368166446685791, 0.013725695610046386, 0.013814784049987794, 0.013684736251831055, 0.01368883228302002, 0.013848608016967773, 0.013685728073120117, 0.013669376373291015, 0.013674495697021484, 0.01367961597442627, 0.01367244815826416, 0.013739007949829102, 0.01367961597442627, 0.013712384223937989, 0.013697024345397948, 0.013697024345397948, 0.013740032196044923, 0.01376460838317871, 0.013657088279724122, 0.013855744361877441, 0.013742079734802246, 0.013683712005615235, 0.013691904067993164, 0.013676544189453126, 0.013682687759399414, 0.013661184310913087, 0.013661184310913087, 0.013647904396057129, 0.013673439979553222, 0.013668352127075196, 0.013643775939941406, 0.013797375679016113, 0.013658111572265624, 0.013664256095886231, 0.013652031898498534, 0.01367033576965332, 0.013643775939941406, 0.013650943756103515, 0.013666303634643554, 0.013641728401184081, 0.01367142391204834, 0.013726719856262207, 0.028403711318969727, 0.013668352127075196, 0.013674495697021484, 
0.01363046360015869, 0.013641728401184081, 0.013643775939941406, 0.01365503978729248, 0.01366220760345459, 0.01366220760345459, 0.01368166446685791, 0.013640704154968262, 0.013685759544372558, 0.013637632369995116, 0.013725695610046386, 0.013702143669128418, 0.013674495697021484, 0.013714431762695312, 0.013770751953125, 0.013796352386474609, 0.013739007949829102, 0.013711359977722168, 0.01367244815826416, 0.013684736251831055, 0.013736960411071777, 0.01367244815826416, 0.013675519943237305, 0.01368992042541504, 0.01368569564819336, 0.01368166446685791, 0.01367961597442627, 0.0136878080368042, 0.01367961597442627, 0.013676544189453126, 0.013953023910522461, 0.013818880081176758, 0.01379532814025879, 0.013685759544372558, 0.01367347240447998, 0.013747200012207032, 0.013718527793884277, 0.013677568435668945, 0.013668352127075196, 0.013685759544372558, 0.01365503978729248, 0.013649920463562011, 0.013637632369995116, 0.013633536338806153, 0.013768704414367675, 0.01365401554107666, 0.013760543823242188, 0.013644767761230468, 0.013698047637939453, 0.013644800186157227, 0.013639679908752441, 0.013675519943237305, 0.013670399665832519, 0.013651968002319336, 0.013715456008911133, 0.01376972770690918, 0.013659135818481445, 0.01366528034210205, 0.013699071884155273, 0.013642751693725585, 0.02831974411010742, 0.013700096130371094, 0.013639679908752441, 0.013622271537780761, 0.013687840461730957, 0.013710304260253907, 0.013718527793884277, 0.01368166446685791, 0.01366323184967041, 0.013705216407775878, 0.01368064022064209, 0.013686783790588379, 0.013666303634643554, 0.013668352127075196, 0.013682687759399414, 0.013727744102478028, 0.013724672317504882, 0.013721599578857421, 0.013724672317504882, 0.014102527618408203, 0.01380352020263672, 0.013740032196044923, 0.013737983703613281, 0.013920255661010742, 0.013740032196044923, 0.013717503547668456, 0.013843456268310546, 0.01380352020263672, 0.013684736251831055, 0.013757439613342285, 0.013758463859558106, 0.013974528312683105, 0.013858816146850587, 0.01417420768737793, 0.013920255661010742, 0.013936639785766602, 0.01377280044555664, 0.013703167915344238, 0.01368883228302002, 0.013677568435668945, 0.013684736251831055, 0.013694975852966309, 0.013688863754272461, 0.013667296409606934, 0.013786111831665039, 0.013828096389770507, 0.013668352127075196, 0.013664256095886231, 0.013733887672424316, 0.013702143669128418, 0.013656064033508301, 0.013676544189453126, 0.01377894401550293, 0.013708288192749024, 0.0136878080368042, 0.013683712005615235, 0.013735936164855958, 0.01395404815673828, 0.013817855834960938, 0.013650943756103515, 0.013703167915344238, 0.013696063995361328, 0.013751232147216796, 0.028461055755615236, 0.013700096130371094, 0.013645824432373046, 0.014017536163330077, 0.013718527793884277, 0.015047679901123047, 0.014234623908996581, 0.014027775764465332, 0.01408512020111084, 0.014048255920410157, 0.014173184394836426, 0.013982720375061035, 0.014036992073059081, 0.013888511657714844, 0.013721599578857421, 0.01374617576599121, 0.013685759544372558, 0.013708288192749024, 0.013652992248535157, 0.013644800186157227, 0.013687871932983398, 0.013667263984680177, 0.01366220760345459, 0.01367347240447998, 0.01367961597442627, 0.013639679908752441, 0.01368166446685791, 0.013686783790588379, 0.013645824432373046, 0.013686783790588379, 0.01366220760345459, 0.013650943756103515, 0.013632512092590332, 0.013693951606750488, 0.013690879821777344, 0.013845503807067871, 0.013686783790588379, 0.013660160064697266, 0.013715456008911133, 0.01387724781036377, 
0.014060544013977052, 0.01405951976776123, 0.013849599838256836, 0.013645824432373046, 0.013682687759399414, 0.013699071884155273, 0.013699071884155273, 0.013707263946533203, 0.013682687759399414, 0.013713408470153808, 0.013652992248535157, 0.013708288192749024, 0.01368166446685791, 0.01368064022064209, 0.013719552040100098, 0.013873151779174805, 0.01370419216156006, 0.013788224220275879, 0.013749183654785157, 0.013675519943237305, 0.01366329574584961, 0.01380140781402588, 0.014252032279968262, 0.029146112442016602, 0.013622271537780761, 0.013830143928527832, 0.013846528053283692, 0.013924351692199707, 0.013684736251831055, 0.013667327880859375, 0.01367347240447998, 0.01363865566253662, 0.01366323184967041, 0.013697024345397948, 0.013644800186157227, 0.013669376373291015, 0.01386086368560791, 0.013948927879333496, 0.013683712005615235, 0.013719552040100098, 0.013699071884155273, 0.013691904067993164, 0.013702143669128418, 0.013697024345397948, 0.013853728294372558, 0.013726688385009766, 0.013720576286315917, 0.013678591728210449, 0.01369600009918213, 0.013735936164855958, 0.013697024345397948, 0.013715456008911133, 0.013755392074584961, 0.013735936164855958, 0.013721599578857421, 0.013707263946533203, 0.013883392333984374, 0.013845503807067871, 0.013723648071289063, 0.013712384223937989, 0.013705216407775878, 0.01368883228302002, 0.013685759544372558, 0.013721599578857421, 0.013728768348693847, 0.013697024345397948, 0.013751296043395997, 0.013793279647827148, 0.013677568435668945, 0.01369600009918213, 0.013702143669128418, 0.013920255661010742, 0.013789183616638183, 0.013728768348693847, 0.013712384223937989, 0.01368166446685791, 0.013733920097351074, 0.013666272163391113, 0.01387724781036377, 0.013720576286315917, 0.013702143669128418, 0.013740032196044923, 0.013721599578857421, 0.013722623825073242, 0.013712384223937989, 0.01376153564453125, 0.02837708854675293, 0.01365401554107666, 0.01364684772491455, 0.013660160064697266, 0.013664256095886231, 0.013692928314208984, 0.013957119941711426, 0.01405951976776123, 0.013983743667602539, 0.01367961597442627, 0.013649920463562011, 0.013858880043029785, 0.013722559928894044, 0.013641728401184081, 0.013668352127075196, 0.013664256095886231, 0.013723648071289063, 0.01365503978729248, 0.013669376373291015, 0.01367142391204834, 0.01370419216156006, 0.01368166446685791, 0.01367142391204834, 0.013675519943237305, 0.013628416061401367, 0.013627391815185547, 0.013661184310913087, 0.013818880081176758, 0.013710335731506347, 0.013647871971130371, 0.013670399665832519, 0.01368064022064209, 0.01367347240447998, 0.013683712005615235, 0.01368883228302002, 0.013701120376586913, 0.013677568435668945, 0.013657088279724122, 0.013692928314208984, 0.013644800186157227, 0.013676544189453126, 0.013677568435668945, 0.01380352020263672, 0.013749247550964355, 0.013674495697021484, 0.013695039749145509, 0.013658047676086426, 0.013690879821777344, 0.01367244815826416, 0.013657088279724122, 0.013640704154968262, 0.013657088279724122, 0.013793279647827148, 0.013695008277893066, 0.013856736183166504, 0.013647871971130371, 0.013661215782165528, 0.013750240325927735, 0.013722623825073242, 0.013768704414367675, 0.013710335731506347, 0.013717503547668456, 0.01366220760345459, 0.028322816848754883, 0.013648896217346192, 0.013686783790588379, 0.01369600009918213, 0.013699071884155273, 0.0136878080368042, 0.013691904067993164, 0.013702143669128418, 0.013697024345397948, 0.013683712005615235, 0.013700096130371094, 0.013693951606750488, 0.013722623825073242, 0.013705216407775878, 
0.013691935539245605, 0.013717472076416016, 0.013724672317504882, 0.013700096130371094, 0.013664256095886231, 0.013639679908752441, 0.013757439613342285, 0.013656064033508301, 0.013667327880859375, 0.01366431999206543, 0.013849535942077636, 0.013924351692199707, 0.013633567810058594, 0.013722592353820801, 0.013668352127075196, 0.013906944274902343, 0.013789183616638183, 0.013662240028381348, 0.013687775611877442, 0.013670399665832519, 0.01367347240447998, 0.01366528034210205, 0.013657088279724122, 0.013661184310913087, 0.013678591728210449, 0.013684736251831055, 0.013658143997192384, 0.013722592353820801, 0.01366528034210205, 0.013637632369995116, 0.01367347240447998, 0.01365503978729248, 0.01367347240447998, 0.013744128227233888, 0.01370419216156006, 0.013693951606750488, 0.013711359977722168, 0.01368064022064209, 0.01368064022064209, 0.013691904067993164, 0.01369600009918213, 0.013724672317504882, 0.013697024345397948, 0.01367142391204834, 0.013682687759399414, 0.01365503978729248, 0.013683712005615235, 0.013806591987609864, 0.013955072402954101]",tokens/s,71.4755369986145,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = 
self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa75-6f2c86ba78417c5c3491bdce;17ad7322-4be7-4fd8-9b87-0741881cc338) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4499.935232,14621.868032,0.0,13975.420928,13365.937152,s,10,16.410234252929687,1.6410234252929687,0.0022480805464608366,1.6403901367187501,1.6418083374023436,1.6447224060058594,1.6470536608886719,"[1.647636474609375, 1.63996875, 1.6411607666015624, 1.640802978515625, 1.64035791015625, 1.6397760009765625, 1.6397884521484376, 1.6398060302734374, 1.6405145263671874, 1.64042236328125]",tokens/s,156.00021063337155,kWh,1.9377978377872043e-05,1.0617021217130968e-05,9.108710064739678e-05,0.00012108210024239978,tokens/kWh,2114267.9181109504,MB,4499.935232,14621.868032,0.0,13975.420928,13814.413824,s,10,975.3211015625,97.53211015625,0.011202982505412533,97.52955078125,97.54984375,97.55100390625,97.55193203125,"[97.540578125, 97.52715625, 97.522640625, 97.5219296875, 97.5304609375, 97.51653125, 97.5521640625, 97.5495859375, 97.5314140625, 97.528640625]",tokens/s,0.6459411151780855,kWh,0.0011513337257504463,0.0006310327948099438,0.005346427777138807,0.007128794297699197,tokens/kWh,8837.399056434146,,s,629,988.5350541992196,1.5715978604121124,0.19496209506894618,1.548072998046875,1.5490094970703125,1.549420166015625,3.189316650390625,"[1.547826171875, 1.5488624267578126, 1.5484969482421875, 1.5483443603515625, 1.5481630859375, 1.5493089599609375, 1.548452880859375, 1.5482459716796875, 1.5481619873046875, 1.5497840576171875, 1.54772998046875, 1.5493929443359375, 1.547630615234375, 1.5480760498046875, 1.547937744140625, 1.5482552490234376, 1.5470531005859376, 1.5489443359375, 1.5480074462890625, 1.548041259765625, 1.5472659912109374, 1.549042724609375, 1.5482132568359375, 1.5479869384765625, 1.54851123046875, 1.5473919677734376, 1.5474892578125, 1.547378662109375, 1.548716064453125, 1.548441650390625, 1.5491409912109375, 1.5472435302734375, 1.5476378173828125, 1.5480125732421874, 1.5484580078125, 1.5489659423828126, 1.5496949462890626, 1.5483074951171876, 1.5473623046875, 1.547552734375, 1.547135009765625, 1.548559326171875, 1.5482398681640626, 1.5498638916015626, 1.54827880859375, 1.5483668212890624, 1.5489197998046875, 1.548549072265625, 1.54787939453125, 1.547683837890625, 1.549091796875, 1.5491134033203124, 1.548537841796875, 1.548310546875, 1.547894775390625, 1.54776171875, 1.5491932373046875, 1.5472711181640626, 1.547672607421875, 1.5473602294921875, 1.5482440185546875, 1.548180419921875, 3.190286376953125, 1.5480146484375, 1.5486883544921874, 1.5482091064453125, 1.54775244140625, 1.5473919677734376, 1.547894775390625, 1.5473858642578124, 1.548252197265625, 1.5479337158203126, 1.54754052734375, 1.5482122802734375, 1.54810986328125, 1.5476787109375, 1.5477442626953124, 1.54800634765625, 1.5478446044921874, 1.5483709716796874, 1.5485460205078125, 1.5486474609375, 1.5467080078125, 1.546982421875, 1.5480340576171876, 1.5470633544921875, 1.5475240478515624, 
1.5482020263671874, 1.5471483154296874, 1.5476029052734375, 1.548863525390625, 1.5478087158203124, 1.5482347412109374, 1.54766845703125, 1.54899658203125, 1.5479388427734375, 1.547747314453125, 1.547916259765625, 1.5490928955078125, 1.548327880859375, 1.548775390625, 1.5474073486328126, 1.547236328125, 1.5476397705078124, 1.5474749755859376, 1.5477001953125, 1.5486075439453124, 1.54838427734375, 1.5483780517578125, 1.5483616943359375, 1.5482193603515626, 1.548074951171875, 1.547906005859375, 1.549001708984375, 1.5483565673828126, 1.5475179443359375, 1.5475548095703124, 1.5477432861328124, 1.54893310546875, 1.54842724609375, 1.5487344970703125, 1.5480279541015625, 1.5475323486328125, 1.5480032958984375, 1.5488101806640624, 3.191152587890625, 1.5476182861328125, 1.548537841796875, 1.5480074462890625, 1.54785791015625, 1.548368896484375, 1.5487181396484375, 1.548142578125, 1.5480279541015625, 1.547535400390625, 1.5482972412109375, 1.54859521484375, 1.5482921142578125, 1.5488890380859375, 1.5477381591796875, 1.5481619873046875, 1.54773193359375, 1.54867919921875, 1.547737060546875, 1.5486033935546875, 1.5482552490234376, 1.5477698974609375, 1.5484517822265624, 1.5468912353515625, 1.5473387451171876, 1.5476397705078124, 1.5484375, 1.547588623046875, 1.5468543701171875, 1.54747802734375, 1.547462646484375, 1.5476142578125, 1.5486832275390625, 1.5478487548828126, 1.54682373046875, 1.547431884765625, 1.54697216796875, 1.5476439208984376, 1.547420654296875, 1.547546630859375, 1.5470274658203125, 1.5474892578125, 1.547757568359375, 1.547947021484375, 1.5482930908203125, 1.549538330078125, 1.547505615234375, 1.548253173828125, 1.549297607421875, 1.547484130859375, 1.548220458984375, 1.548078125, 1.547826171875, 1.54688916015625, 1.5470919189453125, 1.5475538330078125, 1.54765625, 1.5476009521484375, 1.5491522216796876, 1.5480196533203125, 1.5481773681640625, 1.5486719970703124, 1.54946044921875, 3.19001904296875, 1.5467274169921874, 1.5485091552734376, 1.54712060546875, 1.5472425537109376, 1.548200927734375, 1.5474647216796875, 1.5484794921875, 1.5470428466796875, 1.5474134521484375, 1.5471728515625, 1.54741455078125, 1.547334716796875, 1.5476378173828125, 1.5483443603515625, 1.5476234130859374, 1.5473817138671875, 1.548241943359375, 1.548495849609375, 1.5476910400390624, 1.5492137451171875, 1.5486300048828125, 1.547925537109375, 1.5481497802734374, 1.5477288818359376, 1.547779052734375, 1.5494564208984376, 1.5475230712890624, 1.547504638671875, 1.5472803955078125, 1.5474217529296874, 1.54842724609375, 1.547715576171875, 1.54804736328125, 1.54720458984375, 1.5474442138671876, 1.5468114013671874, 1.548011474609375, 1.5478446044921874, 1.5482470703125, 1.548304443359375, 1.5482255859375, 1.549054931640625, 1.548583984375, 1.5483463134765625, 1.5490682373046876, 1.5485091552734376, 1.5480699462890626, 1.5479337158203126, 1.5475302734375, 1.5479091796875, 1.548273681640625, 1.5492208251953126, 1.547388916015625, 1.54747802734375, 1.5476285400390626, 1.54806787109375, 1.548291015625, 1.5489659423828126, 1.5477606201171874, 1.5477279052734374, 1.5477841796875, 1.5487047119140624, 3.189357666015625, 1.54832177734375, 1.54777294921875, 1.5475640869140626, 1.548190673828125, 1.54800439453125, 1.54889111328125, 1.548938232421875, 1.547832275390625, 1.5472568359375, 1.5471380615234376, 1.5473438720703125, 1.5473499755859375, 1.548291015625, 1.5484302978515625, 1.547525146484375, 1.54766748046875, 1.54760400390625, 1.54752001953125, 1.547726806640625, 1.54796435546875, 1.5488388671875, 1.5483873291015624, 
1.54832177734375, 1.54785791015625, 1.548337158203125, 1.548981201171875, 1.5488040771484375, 1.5480023193359376, 1.547864013671875, 1.5482235107421876, 1.54768994140625, 1.5485205078125, 1.5483873291015624, 1.5479071044921875, 1.54815380859375, 1.5473756103515626, 1.5484302978515625, 1.5488572998046874, 1.54754248046875, 1.548105712890625, 1.5480648193359374, 1.548222412109375, 1.5480238037109375, 1.547357177734375, 1.5477821044921876, 1.5485142822265625, 1.54811181640625, 1.5482706298828126, 1.547442138671875, 1.547783203125, 1.548359619140625, 1.54857373046875, 1.5488572998046874, 1.5484447021484375, 1.5478978271484376, 1.5483546142578124, 1.5478446044921874, 1.54789990234375, 1.548568603515625, 1.5481129150390625, 1.5489515380859376, 1.5481129150390625, 3.18936376953125, 1.5473162841796875, 1.548347412109375, 1.5468809814453126, 1.5473695068359374, 1.5474237060546876, 1.5484447021484375, 1.548760009765625, 1.5489996337890626, 1.547483154296875, 1.547210693359375, 1.5470223388671875, 1.5469271240234375, 1.5475926513671876, 1.54830029296875, 1.5476080322265624, 1.547652099609375, 1.5482685546875, 1.547872314453125, 1.5475855712890625, 1.5479234619140625, 1.5485009765625, 1.54821630859375, 1.54749853515625, 1.54756298828125, 1.5474810791015625, 1.5483934326171875, 1.5483914794921876, 1.5475958251953126, 1.5481610107421875, 1.5478609619140624, 1.5482193603515626, 1.5480648193359374, 1.5481988525390624, 1.5481087646484375, 1.5474114990234376, 1.54781494140625, 1.5477237548828124, 1.5477821044921876, 1.5474083251953126, 1.5495556640625, 1.5478026123046875, 1.54785693359375, 1.547255859375, 1.54735302734375, 1.5477073974609374, 1.547969482421875, 1.5477288818359376, 1.5476070556640624, 1.5480606689453125, 1.548168212890625, 1.5479029541015625, 1.5480186767578126, 1.5485255126953126, 1.5468739013671875, 1.546857421875, 1.54753125, 1.5483514404296874, 1.54785888671875, 1.548099609375, 1.54840478515625, 1.54806884765625, 1.5480648193359374, 3.188536376953125, 1.547326416015625, 1.5486146240234375, 1.5495721435546874, 1.5473060302734376, 1.5487816162109376, 1.549453369140625, 1.5489608154296874, 1.5496785888671876, 1.5480616455078124, 1.54798486328125, 1.5475947265625, 1.549253662109375, 1.5481968994140625, 1.548053466796875, 1.549401123046875, 1.5481220703125, 1.547989990234375, 1.548347412109375, 1.5480863037109376, 1.548441650390625, 1.5491942138671875, 1.5479951171875, 1.5477596435546874, 1.5479132080078124, 1.5479019775390626, 1.5476695556640625, 1.5486505126953125, 1.548642333984375, 1.5483924560546876, 1.548464111328125, 1.5485399169921874, 1.5489935302734374, 1.5492823486328124, 1.549126708984375, 1.5488082275390624, 1.5485020751953125, 1.5481436767578125, 1.5480247802734375, 1.5473438720703125, 1.5484078369140626, 1.5497154541015625, 1.548389404296875, 1.548622802734375, 1.5483453369140625, 1.5479080810546875, 1.548316650390625, 1.548980224609375, 1.5480084228515625, 1.5479080810546875, 1.5483177490234374, 1.54800634765625, 1.5485972900390625, 1.5479234619140625, 1.5486607666015626, 1.548347412109375, 1.5482081298828125, 1.547925537109375, 1.548674072265625, 1.548291015625, 1.55034423828125, 1.54886865234375, 1.5481036376953126, 3.189357666015625, 1.5474586181640626, 1.5491717529296876, 1.5481129150390625, 1.548072998046875, 1.5476490478515625, 1.5485419921875, 1.5480074462890625, 1.54784765625, 1.5483238525390626, 1.547341796875, 1.547925537109375, 1.547810791015625, 1.5478056640625, 1.5477412109375, 1.54859521484375, 1.5481068115234375, 1.548200927734375, 1.547937744140625, 
1.547672607421875, 1.547968505859375, 1.5481712646484376, 1.5496058349609374, 1.54743603515625, 1.5484375, 1.548396484375, 1.5478035888671875, 1.5489986572265626, 1.54951171875, 1.547946044921875, 1.5478077392578125, 1.5477493896484376, 1.5488133544921876, 1.5480872802734376, 1.5481773681640625, 1.54931103515625, 1.5479542236328125, 1.5490611572265625, 1.547883544921875, 1.547809814453125, 1.5479930419921875, 1.549116455078125, 1.5483084716796875, 1.54863720703125, 1.54834130859375, 1.548610595703125, 1.5488941650390624, 1.5497103271484376, 1.5490672607421876, 1.5479500732421876, 1.5488173828125, 1.5487139892578126, 1.5484384765625, 1.5487242431640624, 1.5489197998046875, 1.5499970703125, 1.5479736328125, 1.549170654296875, 1.548205078125, 1.5482030029296876, 1.54952294921875, 1.548652587890625, 1.5488470458984376, 3.190570068359375, 1.5475732421875, 1.54899560546875, 1.5490723876953125, 1.548262451171875, 1.5477442626953124, 1.5488890380859375, 1.5484302978515625, 1.5486300048828125, 1.548895263671875, 1.5487139892578126, 1.54817333984375, 1.548970947265625, 1.5481988525390624, 1.5474852294921875, 1.548536865234375, 1.5487886962890625, 1.548294189453125, 1.54817333984375, 1.547431884765625, 1.54737353515625, 1.5477801513671876, 1.5483740234375, 1.547725830078125, 1.547284423828125, 1.5479593505859375, 1.5474329833984375, 1.5482255859375, 1.54836376953125, 1.54859521484375, 1.5475343017578125, 1.5487181396484375, 1.548205078125, 1.5478056640625, 1.5484302978515625, 1.54993359375, 1.5491451416015625, 1.548178466796875, 1.5477718505859375, 1.5477421875, 1.5473060302734376, 1.548078125, 1.548304443359375, 1.5474052734375, 1.5479808349609374, 1.547030517578125, 1.547672607421875, 1.5480975341796874, 1.5481231689453125, 1.547429931640625, 1.5478343505859375, 1.5476173095703125, 1.5473643798828125, 1.547039794921875, 1.5472691650390624, 1.5484384765625, 1.5475506591796875, 1.548432373046875, 1.54777294921875, 1.5470867919921876, 1.54768896484375, 1.5496007080078125, 1.548421142578125, 3.189211181640625, 1.5468963623046874, 1.547869140625, 1.54876416015625, 1.5484989013671875, 1.5492301025390625, 1.5475916748046874, 1.546978271484375, 1.5469158935546874, 1.54728759765625, 1.5486658935546875, 1.5481077880859375, 1.5487529296875, 1.54709912109375, 1.547663330078125, 1.5471912841796875, 1.54923828125, 1.54927001953125, 1.54796435546875, 1.5475865478515625, 1.5476448974609376, 1.547410400390625, 1.548801025390625, 1.5483770751953125, 1.547400146484375, 1.5478026123046875, 1.54826953125, 1.547557861328125, 1.547953125, 1.549897705078125, 1.5482357177734376, 1.5481138916015624, 1.547904052734375, 1.5478404541015625, 1.5473480224609375, 1.54821630859375, 1.5495045166015624, 1.547509765625, 1.5476746826171874, 1.546893310546875, 1.54714013671875, 1.5480177001953126, 1.548304443359375, 1.547969482421875, 1.547404296875, 1.5485450439453126, 1.547778076171875, 1.5482039794921876, 1.549432861328125, 1.54890234375, 1.5484302978515625, 1.5484652099609375, 1.547947998046875, 1.5474166259765625, 1.547483154296875, 1.5496939697265626, 1.5482757568359375, 1.547537353515625, 1.5478927001953124, 1.54752001953125, 1.547925537109375, 1.5486146240234375, 1.5490406494140625]",tokens/s,0.6362950887053097,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2109.853696,2844.2624,0.0,2197.815296,1927.351296,s,10,2.3800795135498047,0.23800795135498043,0.0010254107368052306,0.23830428314208985,0.23930529022216795,0.23934926986694335,0.23938445358276367,"[0.2383564453125, 0.23939324951171875, 0.23619731140136718, 0.23845465087890624, 0.23723452758789063, 0.23663900756835937, 0.23750480651855468, 0.2382521209716797, 0.2387518768310547, 0.23929551696777343]",tokens/s,1075.594317511624,kWh,2.793223439723023e-06,1.5305452026781454e-06,1.2162554949675677e-05,1.6486323592076845e-05,tokens/kWh,15528022.276781643,MB,2109.853696,2844.2624,0.0,2197.815296,2031.97184,s,10,139.4725029296875,13.94725029296875,0.016539028053013265,13.941937500000002,13.97250703125,13.977812109375,13.982056171875,"[13.953560546875, 13.945388671875, 13.94201171875, 13.9831171875, 13.9309609375, 13.9364580078125, 13.92993359375, 13.94186328125, 13.971328125, 13.937880859375]",tokens/s,4.517019389245513,kWh,0.000164597820511391,9.021296607492203e-05,0.0007178143901425277,0.0009726251767288406,tokens/kWh,64773.153633430826,,s,629,141.39150625610358,0.2247877682926924,0.028297149074530483,0.2211778564453125,0.22228438720703125,0.22277959289550783,0.45843198486328124,"[0.22260429382324218, 0.22169497680664063, 0.2212351989746094, 0.22115533447265626, 0.22176870727539064, 0.22137344360351563, 0.22118502807617188, 0.22141644287109374, 0.22102528381347655, 0.22195199584960937, 0.2219018249511719, 0.22150553894042968, 0.22119935607910157, 0.2213509063720703, 0.22125465393066407, 0.22120550537109376, 0.22150962829589843, 0.2213017578125, 0.22201548767089843, 0.2213632049560547, 0.22165196228027345, 0.22151373291015625, 0.22148095703125, 0.22154649353027345, 0.22147174072265624, 0.22180044555664064, 0.22122802734375, 0.22110105895996093, 0.22116249084472656, 0.221444091796875, 0.22163456726074218, 0.22287872314453125, 0.2216417236328125, 0.222202880859375, 0.2216407012939453, 0.22104165649414062, 0.22102323913574218, 0.22114508056640625, 0.22116044616699218, 0.22107955932617188, 0.2210508728027344, 0.22112460327148437, 0.22108979797363282, 0.2215557098388672, 0.22159564208984375, 0.22146662902832032, 0.2216785888671875, 0.22263194274902343, 0.22135398864746095, 0.22125056457519532, 0.22187930297851563, 0.22126797485351563, 0.2213396453857422, 0.22115225219726561, 0.22118502807617188, 0.22127410888671875, 0.22127104187011717, 0.22109901428222656, 0.22111744689941407, 0.22118911743164063, 0.2214871063232422, 0.22157005310058595, 0.46144613647460936, 0.22118707275390626, 0.2218403778076172, 0.2209945526123047, 0.22095564270019533, 0.22111538696289063, 0.22113792419433595, 0.22112973022460938, 0.22129766845703125, 0.22095974731445311, 0.22107244873046875, 0.2212720031738281, 0.22100991821289062, 0.22110617065429689, 0.22098739624023436, 0.22138470458984374, 0.22130073547363283, 0.22108773803710938, 0.22093721008300782, 
0.2209566650390625, 0.2209310760498047, 0.2208204803466797, 0.22089932250976563, 0.22095155334472658, 0.22101708984375, 0.22186495971679687, 0.22131610107421876, 0.22127513122558592, 0.22142361450195314, 0.22218751525878908, 0.2213519287109375, 0.2209669189453125, 0.22220083618164063, 0.2210744323730469, 0.22159461975097655, 0.22113792419433595, 0.22117990112304686, 0.2222161865234375, 0.22108262634277343, 0.2210293731689453, 0.22102117919921874, 0.22107955932617188, 0.22112562561035157, 0.22216499328613282, 0.22140518188476563, 0.22183526611328125, 0.22283879089355468, 0.22166937255859376, 0.2215045166015625, 0.22186904907226562, 0.22122700500488282, 0.2214256591796875, 0.22139903259277344, 0.2215004119873047, 0.22137753295898438, 0.22115737915039063, 0.22106419372558594, 0.22163967895507813, 0.22111334228515625, 0.22137753295898438, 0.2212833251953125, 0.2219683837890625, 0.22127923583984374, 0.45813043212890625, 0.22103347778320312, 0.2213949432373047, 0.22112973022460938, 0.22099250793457031, 0.22118502807617188, 0.2218219451904297, 0.22100376892089843, 0.221154296875, 0.22091162109375, 0.22253260803222658, 0.22119935607910157, 0.2210529327392578, 0.22278041076660157, 0.22130482482910158, 0.22150553894042968, 0.2213519287109375, 0.22121983337402343, 0.2217584686279297, 0.22111436462402342, 0.22102015686035156, 0.22090444946289062, 0.22078054809570313, 0.2211778564453125, 0.22104576110839844, 0.2211420135498047, 0.22199909973144533, 0.22127308654785155, 0.22181170654296875, 0.2217164764404297, 0.2210918426513672, 0.2213201904296875, 0.22124339294433593, 0.2210150451660156, 0.22111231994628905, 0.22106008911132813, 0.22112051391601562, 0.22103245544433595, 0.22106419372558594, 0.2211778564453125, 0.22102323913574218, 0.22111846923828124, 0.22116146850585938, 0.2212884521484375, 0.22095872497558594, 0.22103347778320312, 0.221048828125, 0.2216294403076172, 0.22115122985839844, 0.22118400573730468, 0.221127685546875, 0.22102117919921874, 0.22113177490234376, 0.22128025817871094, 0.2211031036376953, 0.22143180847167968, 0.22129254150390626, 0.22108773803710938, 0.22222642517089844, 0.22198886108398438, 0.2217205810546875, 0.2217574462890625, 0.22159461975097655, 0.4614256591796875, 0.22283775329589844, 0.22295756530761718, 0.22274969482421875, 0.2225858612060547, 0.22346035766601563, 0.22301695251464843, 0.22341127014160156, 0.22270252990722655, 0.22250291442871092, 0.22281216430664064, 0.22286746215820313, 0.22281011962890626, 0.22280703735351562, 0.22268620300292968, 0.22271487426757813, 0.22270976257324218, 0.22246092224121095, 0.222814208984375, 0.22279373168945313, 0.22285516357421875, 0.222635009765625, 0.2227783660888672, 0.2227271728515625, 0.22253773498535157, 0.22261862182617187, 0.223025146484375, 0.22222540283203124, 0.22242611694335937, 0.22123930358886718, 0.22103450012207032, 0.2209105987548828, 0.22106008911132813, 0.22086451721191405, 0.2209740753173828, 0.22110208129882813, 0.2218260498046875, 0.2210365447998047, 0.2210713653564453, 0.22134988403320313, 0.22149017333984375, 0.22106419372558594, 0.22114309692382814, 0.22111225891113281, 0.2225971221923828, 0.22118400573730468, 0.22139187622070314, 0.22111538696289063, 0.22112153625488282, 0.22092594909667967, 0.22134783935546876, 0.221127685546875, 0.22192127990722657, 0.2215854034423828, 0.22127308654785155, 0.22121881103515625, 0.2214686737060547, 0.2213396453857422, 0.22123008728027344, 0.22115327453613282, 0.22119218444824218, 0.22103347778320312, 0.22114303588867187, 0.45854925537109376, 0.22104678344726564, 
0.22133247375488282, 0.22091571044921876, 0.22102630615234375, 0.2210508728027344, 0.22108364868164063, 0.22100274658203126, 0.22103141784667968, 0.22081741333007812, 0.22105906677246093, 0.22115737915039063, 0.22105702209472655, 0.22100991821289062, 0.22107749938964844, 0.22084402465820313, 0.22094540405273438, 0.22115327453613282, 0.22110719299316406, 0.220980224609375, 0.22154444885253907, 0.22112460327148437, 0.2211031036376953, 0.22097817993164062, 0.2210498504638672, 0.22134375, 0.2211266632080078, 0.22089112854003906, 0.2209566650390625, 0.22103756713867187, 0.2209976348876953, 0.22094540405273438, 0.22101708984375, 0.2214256591796875, 0.22106623840332032, 0.22102528381347655, 0.2213017578125, 0.2209976348876953, 0.221233154296875, 0.22100274658203126, 0.22102220153808594, 0.22098329162597657, 0.2211584014892578, 0.2211092529296875, 0.22123417663574219, 0.22111744689941407, 0.2211235809326172, 0.22127001953125, 0.22116761779785157, 0.22121881103515625, 0.2211031036376953, 0.2210498504638672, 0.221085693359375, 0.22113587951660157, 0.22125978088378906, 0.2211041259765625, 0.221154296875, 0.22228172302246094, 0.22109901428222656, 0.22121267700195313, 0.22124339294433593, 0.2211584014892578, 0.2214686737060547, 0.457933837890625, 0.2211962890625, 0.22194586181640624, 0.22120755004882814, 0.22100889587402345, 0.2212843475341797, 0.22189056396484375, 0.2212884521484375, 0.2211727294921875, 0.22092083740234375, 0.22090956115722657, 0.2211768341064453, 0.2210160675048828, 0.22107545471191406, 0.22154035949707032, 0.22111436462402342, 0.22106521606445312, 0.22105599975585938, 0.22096998596191406, 0.22127615356445313, 0.22098329162597657, 0.22110617065429689, 0.2209003448486328, 0.22102117919921874, 0.22100376892089843, 0.2210365447998047, 0.22115635681152343, 0.2209239044189453, 0.22113996887207032, 0.22109490966796874, 0.22142259216308594, 0.22121983337402343, 0.22105599975585938, 0.2209105987548828, 0.221159423828125, 0.22130482482910158, 0.22137651062011718, 0.22106521606445312, 0.22213119506835938, 0.22088088989257812, 0.2209105987548828, 0.2210150451660156, 0.22098739624023436, 0.22129458618164063, 0.22125363159179687, 0.22113894653320312, 0.22119833374023437, 0.2218956756591797, 0.22099250793457031, 0.22140518188476563, 0.22130892944335936, 0.22184857177734374, 0.22106930541992187, 0.22177484130859376, 0.22129254150390626, 0.22115635681152343, 0.22096485900878907, 0.22116864013671875, 0.22148915100097658, 0.22126797485351563, 0.221384765625, 0.2214911346435547, 0.22094540405273438, 0.45877044677734374, 0.2209187774658203, 0.22111744689941407, 0.22087065124511718, 0.22097509765625, 0.22129971313476562, 0.2210744323730469, 0.2212843475341797, 0.22100991821289062, 0.22094744873046876, 0.2208368682861328, 0.22104473876953126, 0.22097305297851563, 0.2210682830810547, 0.2209914855957031, 0.2208757781982422, 0.2209628143310547, 0.2210426940917969, 0.22097509765625, 0.22097203063964843, 0.22091468811035156, 0.22095872497558594, 0.22083993530273438, 0.22126591491699218, 0.2211420135498047, 0.22217318725585938, 0.22125978088378906, 0.22093618774414062, 0.2212351989746094, 0.22171852111816406, 0.22144717407226563, 0.22102323913574218, 0.221486083984375, 0.220980224609375, 0.2209935302734375, 0.2213939208984375, 0.22103858947753907, 0.22095462036132812, 0.22107647705078126, 0.22094744873046876, 0.22103858947753907, 0.22127206420898438, 0.22109286499023437, 0.2211461181640625, 0.22218240356445312, 0.22110719299316406, 0.2209812469482422, 0.22119833374023437, 0.22108876037597655, 
0.22112870788574218, 0.22101913452148436, 0.22104678344726564, 0.2209669189453125, 0.22112870788574218, 0.2210682830810547, 0.22112051391601562, 0.22107034301757814, 0.22104473876953126, 0.22089421081542968, 0.221011962890625, 0.2210160675048828, 0.2210846710205078, 0.2209628143310547, 0.45941656494140626, 0.2209495086669922, 0.221739013671875, 0.2209863739013672, 0.22102117919921874, 0.221048828125, 0.22226022338867188, 0.22110617065429689, 0.2219622344970703, 0.22180557250976562, 0.22233804321289063, 0.2211041259765625, 0.22118911743164063, 0.22098329162597657, 0.2222192687988281, 0.22136012268066407, 0.22214041137695312, 0.2213079071044922, 0.22128947448730468, 0.22100787353515625, 0.221127685546875, 0.22152703857421874, 0.2220943298339844, 0.2211031036376953, 0.22113792419433595, 0.22100889587402345, 0.22107034301757814, 0.22086758422851563, 0.2210048065185547, 0.22096588134765624, 0.2210846710205078, 0.22087680053710937, 0.221154296875, 0.22088607788085937, 0.22132730102539064, 0.2210529327392578, 0.22137548828125, 0.22103450012207032, 0.22103450012207032, 0.22075392150878906, 0.22111949157714844, 0.2211420135498047, 0.2211461181640625, 0.2211041259765625, 0.22117990112304686, 0.2209812469482422, 0.22081843566894532, 0.22104371643066406, 0.22111949157714844, 0.22109286499023437, 0.22118502807617188, 0.22213119506835938, 0.22129049682617188, 0.22118092346191406, 0.22175027465820313, 0.2221670379638672, 0.22145330810546876, 0.221517822265625, 0.22109490966796874, 0.22186087036132812, 0.2214615020751953, 0.22115122985839844, 0.22144102478027344, 0.46024600219726564, 0.2209976348876953, 0.22151475524902345, 0.22144717407226563, 0.2214246368408203, 0.22156083679199218, 0.2218076171875, 0.22113690185546875, 0.2215905303955078, 0.22111949157714844, 0.2214993896484375, 0.22210354614257813, 0.221412353515625, 0.22127104187011717, 0.22124339294433593, 0.22104165649414062, 0.22167552185058595, 0.22163148498535157, 0.2216048583984375, 0.22217523193359376, 0.22170419311523437, 0.22118092346191406, 0.22107034301757814, 0.22108876037597655, 0.22134988403320313, 0.22161715698242188, 0.22139698791503906, 0.22093209838867187, 0.22118707275390626, 0.22141542053222657, 0.22160383605957032, 0.2212833251953125, 0.22284288024902343, 0.2227220458984375, 0.22272000122070312, 0.22289511108398438, 0.22466764831542968, 0.2222950439453125, 0.22271078491210938, 0.22245887756347657, 0.22309580993652345, 0.2229698486328125, 0.22274867248535157, 0.22293504333496095, 0.22252748107910156, 0.22249267578125, 0.22271795654296875, 0.22294834899902344, 0.2227640380859375, 0.22101913452148436, 0.2211420135498047, 0.22163967895507813, 0.22111949157714844, 0.22116761779785157, 0.2211420135498047, 0.22103347778320312, 0.22134066772460936, 0.22165606689453124, 0.22108979797363282, 0.22110823059082033, 0.2213621826171875, 0.22120448303222656, 0.22122802734375, 0.46055218505859374, 0.22127308654785155, 0.22119218444824218, 0.22088088989257812, 0.22082456970214845, 0.22128128051757812, 0.22141439819335937, 0.22119935607910157, 0.22096383666992186, 0.22105804443359375, 0.22094137573242187, 0.2210590057373047, 0.22101094055175782, 0.22181581115722657, 0.22244248962402344, 0.22087271118164062, 0.22137753295898438, 0.22102732849121093, 0.2222520294189453, 0.22111129760742188, 0.22082662963867186, 0.22154853820800782, 0.22087986755371095, 0.2212034606933594, 0.22097920227050782, 0.22116659545898437, 0.22161100769042968, 0.22113075256347656, 0.2215413818359375, 0.22253465270996095, 0.22111949157714844, 0.2211102752685547, 
0.2211788787841797, 0.22141952514648439, 0.22129766845703125, 0.22112562561035157, 0.22104063415527345, 0.22099046325683594, 0.22102835083007813, 0.22090444946289062, 0.22094438171386718, 0.2210181121826172, 0.22109907531738282, 0.22128121948242188, 0.22107647705078126, 0.22143795776367187, 0.2210846710205078, 0.22113792419433595, 0.22100274658203126, 0.22100889587402345, 0.2211266632080078, 0.22113587951660157, 0.2213580780029297, 0.2212833251953125, 0.221154296875, 0.22113587951660157, 0.2210672607421875, 0.22181581115722657, 0.22194586181640624, 0.22132325744628906, 0.22108572387695313, 0.22134576416015625, 0.22117170715332032]",tokens/s,4.448640633764008,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1207.41888,879.230976,0.0,232.783872,169.719808,s,10,0.3329092750549316,0.03329092750549316,0.001090057705435725,0.0329870719909668,0.033882015228271486,0.0350985279083252,0.036071738052368164,"[0.03631504058837891, 0.03281526565551758, 0.03245974349975586, 0.03361167907714844, 0.03344601440429688, 0.033440895080566406, 0.032671966552734376, 0.033158878326416015, 0.03245904159545898, 0.0325307502746582]",tokens/s,7689.782748100329,kWh,3.9167167718924527e-07,2.1461758953218489e-07,8.304311279209363e-07,1.4367203946423665e-06,tokens/kWh,178183591.57052574,MB,1207.41888,879.230976,0.0,232.783872,199.792128,s,10,20.506330566406252,2.050633056640625,0.01795433438952847,2.0542513427734375,2.07115517578125,2.0720841796875,2.0728273828125,"[2.05074072265625, 2.0363714599609377, 2.0684541015625, 2.054240966796875, 2.07094873046875, 2.07301318359375, 2.05426171875, 2.0559150390625, 2.0203577880859376, 2.02202685546875]",tokens/s,30.72222004613905,kWh,2.4533342500768937e-05,1.3444361220557862e-05,5.009960961606961e-05,8.80773133373964e-05,tokens/kWh,715280.6734540922,,s,629,20.768719875335666,0.03301863255220301,0.003999927492502233,0.03273830413818359,0.033139096832275394,0.033337548065185546,0.06495629302978514,"[0.033740798950195314, 0.034253822326660154, 0.0341104621887207, 0.03412275314331055, 0.0335206413269043, 0.03323699188232422, 0.03351859283447266, 0.03322982406616211, 0.033159168243408206, 0.03280998229980469, 0.03333222579956055, 0.033258495330810545, 0.032756736755371094, 0.03297075271606445, 0.03364352035522461, 0.03362918472290039, 0.033584129333496096, 0.03300249481201172, 0.03202867126464844, 0.031971328735351565, 0.031821823120117186, 0.031848447799682614, 0.03192934417724609, 0.03202560043334961, 0.03206041717529297, 0.032089088439941404, 0.03197644805908203, 0.03203379058837891, 0.03201638412475586, 0.031941631317138675, 0.03214438247680664, 0.03217407989501953, 0.03201638412475586, 0.031987712860107424, 0.03198054313659668, 0.03194572830200195, 0.03189145660400391, 0.03199590492248535, 0.03189043235778809, 0.03177676773071289, 0.032366592407226565, 0.03329228973388672, 0.0331960334777832, 0.0321341438293457, 0.032966655731201173, 0.03283865737915039, 
0.03283456039428711, 0.032866302490234374, 0.032574462890625, 0.03194470405578613, 0.032132095336914065, 0.032039936065673826, 0.03172454452514648, 0.03170099258422852, 0.03186278343200684, 0.03212492752075195, 0.031920127868652344, 0.032054271697998044, 0.03213926315307617, 0.03209830474853516, 0.03199795150756836, 0.03198873519897461, 0.06604799652099609, 0.031955968856811526, 0.03163852882385254, 0.03178700828552246, 0.03185971260070801, 0.03197337532043457, 0.03201945495605469, 0.031987712860107424, 0.032059391021728514, 0.03199795150756836, 0.03196211242675781, 0.03191500854492187, 0.03182694435119629, 0.031764480590820314, 0.031714303970336914, 0.0317573127746582, 0.031927295684814457, 0.03189145660400391, 0.03197235107421875, 0.03200921630859375, 0.03191500854492187, 0.03215462493896484, 0.03199795150756836, 0.03201126480102539, 0.031936511993408204, 0.03203071975708008, 0.03208294296264649, 0.031936511993408204, 0.032161792755126956, 0.03201331329345703, 0.031562751770019534, 0.03183923149108887, 0.0325928955078125, 0.03195187187194824, 0.03276287841796875, 0.03203276824951172, 0.03358924865722656, 0.032881664276123046, 0.033119232177734374, 0.03294617462158203, 0.03283251190185547, 0.03209011077880859, 0.032072704315185545, 0.03290934371948242, 0.03280073547363281, 0.032763904571533206, 0.03282329559326172, 0.03290419387817383, 0.032740352630615234, 0.032478206634521486, 0.03298611068725586, 0.03275059127807617, 0.033102848052978515, 0.03289395141601562, 0.03265331268310547, 0.032115745544433597, 0.03197641563415527, 0.03259494400024414, 0.03297894287109375, 0.03300044631958008, 0.03283865737915039, 0.03288780975341797, 0.03242803192138672, 0.06496460723876953, 0.03309568023681641, 0.03194470405578613, 0.03273830413818359, 0.03288678359985352, 0.03281919860839844, 0.03297689437866211, 0.032912384033203124, 0.032787487030029296, 0.033037311553955076, 0.03294307327270508, 0.032863231658935545, 0.03278847885131836, 0.03306496047973633, 0.032745471954345705, 0.03307212829589844, 0.03291852951049805, 0.03285094451904297, 0.03298406219482422, 0.03316223907470703, 0.03301683044433594, 0.0328611831665039, 0.03299532699584961, 0.03296051025390625, 0.03287449645996094, 0.0329615364074707, 0.03278745651245117, 0.032949249267578126, 0.033023998260498046, 0.03287449645996094, 0.032909313201904294, 0.03280179214477539, 0.0328089599609375, 0.03198464012145996, 0.032054271697998044, 0.03240140914916992, 0.03320729446411133, 0.033165313720703124, 0.03290726470947265, 0.032584705352783204, 0.032707584381103515, 0.032249855041503905, 0.03290521621704102, 0.0328458251953125, 0.032791553497314455, 0.03254579162597656, 0.03258879852294922, 0.03239424133300781, 0.03282227325439453, 0.032894977569580076, 0.03285811233520508, 0.03289708709716797, 0.032930816650390625, 0.03297683334350586, 0.033140735626220705, 0.032868350982666016, 0.03292364883422851, 0.032985088348388675, 0.033018878936767575, 0.0329431037902832, 0.032927745819091796, 0.032976993560791014, 0.03305363082885742, 0.06707913970947266, 0.03323494338989258, 0.03279257583618164, 0.033137664794921876, 0.0331776008605957, 0.03303014373779297, 0.033274879455566404, 0.0330967025756836, 0.033137664794921876, 0.03282534408569336, 0.03309056091308594, 0.033081344604492184, 0.03303424072265625, 0.033309696197509765, 0.03319500732421875, 0.03301171112060547, 0.033345535278320314, 0.03285094451904297, 0.03292876815795898, 0.03282124710083008, 0.03200307083129883, 0.0319866886138916, 0.032074752807617186, 0.031974399566650394, 0.03197644805908203, 
0.03283967971801758, 0.03339263916015625, 0.033058815002441407, 0.032753662109375, 0.03252326583862305, 0.03210553741455078, 0.031921152114868165, 0.03210540771484375, 0.0323768310546875, 0.03219968032836914, 0.0321976318359375, 0.03212799835205078, 0.03213516616821289, 0.03212799835205078, 0.03211775970458984, 0.032985088348388675, 0.034108417510986325, 0.033040382385253905, 0.03294617462158203, 0.03294105529785156, 0.032906238555908206, 0.032069633483886716, 0.031744064331054686, 0.03175724792480469, 0.031899648666381834, 0.03206655883789063, 0.03191296005249023, 0.03192934417724609, 0.032126976013183595, 0.03269740676879883, 0.03293075180053711, 0.03260313415527344, 0.03196211242675781, 0.031904767990112305, 0.03212287902832031, 0.03203788757324219, 0.03282944107055664, 0.03291033554077148, 0.06675456237792969, 0.033050624847412106, 0.0328007698059082, 0.03285299301147461, 0.0328089599609375, 0.03288883209228516, 0.033067008972167966, 0.03282022476196289, 0.03274649429321289, 0.03173785591125488, 0.032020481109619144, 0.03192428779602051, 0.03253753662109375, 0.03285606384277344, 0.033326080322265625, 0.03281919860839844, 0.033073150634765625, 0.03273830413818359, 0.03295129776000977, 0.03292364883422851, 0.03303628921508789, 0.03282841491699219, 0.032919551849365236, 0.03291961669921875, 0.032923583984375, 0.032797695159912106, 0.03295948791503906, 0.03298303985595703, 0.032982078552246094, 0.03352467346191406, 0.032589824676513675, 0.033331199645996096, 0.03296255874633789, 0.032672767639160154, 0.032740352630615234, 0.03300864028930664, 0.03226521682739258, 0.03242803192138672, 0.03284275054931641, 0.03410636901855469, 0.03410943984985351, 0.032530433654785154, 0.032048126220703126, 0.032909313201904294, 0.03273625564575195, 0.03282022476196289, 0.033040382385253905, 0.03312844848632813, 0.033032222747802736, 0.03294512176513672, 0.032824321746826174, 0.032772159576416014, 0.032817089080810546, 0.03304959869384766, 0.032717823028564456, 0.0328007698059082, 0.03302195358276367, 0.03298713684082031, 0.03286937713623047, 0.032884735107421875, 0.032917503356933595, 0.03312947082519531, 0.0328243522644043, 0.06682825469970703, 0.032939006805419925, 0.03280998229980469, 0.0324136962890625, 0.032930816650390625, 0.03313868713378906, 0.03297382354736328, 0.032863231658935545, 0.03299327850341797, 0.03307212829589844, 0.03317145538330078, 0.033142784118652346, 0.032942081451416014, 0.033363967895507815, 0.03327897644042969, 0.03276595306396484, 0.03296255874633789, 0.033056766510009765, 0.03283967971801758, 0.03291648101806641, 0.03299225616455078, 0.032946239471435546, 0.032924606323242185, 0.03313868713378906, 0.03290521621704102, 0.03295641708374023, 0.03341516876220703, 0.0333199348449707, 0.03459379196166992, 0.03333631896972656, 0.032895999908447264, 0.03294412612915039, 0.03291648101806641, 0.03218227386474609, 0.032626686096191404, 0.0323061752319336, 0.03225804901123047, 0.03283148956298828, 0.033329151153564454, 0.03292879867553711, 0.03304956817626953, 0.03294822311401367, 0.03261030578613281, 0.03294105529785156, 0.032903167724609376, 0.03311206436157227, 0.03312025451660156, 0.03289190292358399, 0.03233280181884766, 0.032524288177490236, 0.03305779266357422, 0.03231129455566406, 0.0324505615234375, 0.03278950500488281, 0.03302707290649414, 0.03288678359985352, 0.032996353149414064, 0.03286937713623047, 0.03303014373779297, 0.03281203079223633, 0.03252633666992188, 0.032178176879882815, 0.03199084854125977, 0.065176513671875, 0.03205017471313477, 0.03209011077880859, 
0.03212287902832031, 0.03207987213134766, 0.03216588973999023, 0.03170816040039062, 0.03165286445617676, 0.03220684814453125, 0.03201228713989258, 0.03200614547729492, 0.03260927963256836, 0.03219046401977539, 0.03249868774414062, 0.03227340698242188, 0.03234201431274414, 0.03262464141845703, 0.03326873779296875, 0.03293286514282227, 0.032851966857910156, 0.032830463409423825, 0.03336908721923828, 0.03311824035644531, 0.03295331192016602, 0.032176128387451174, 0.03207680130004883, 0.032168991088867185, 0.032793567657470706, 0.03295641708374023, 0.032803871154785155, 0.03245257568359375, 0.03248025512695313, 0.03281612777709961, 0.03295129776000977, 0.033175552368164066, 0.03283359909057617, 0.03288671875, 0.03277721786499024, 0.033051647186279294, 0.033258495330810545, 0.03284275054931641, 0.03292364883422851, 0.03303014373779297, 0.03285504150390625, 0.03174399948120117, 0.03240857696533203, 0.033037311553955076, 0.03285299301147461, 0.032894977569580076, 0.032917503356933595, 0.0321341438293457, 0.03308031845092774, 0.032146430969238284, 0.03266559982299805, 0.03310899353027344, 0.032740352630615234, 0.03243212890625, 0.03290828704833984, 0.032982017517089846, 0.032979969024658204, 0.032942081451416014, 0.032345088958740234, 0.03220172882080078, 0.06660710144042968, 0.032917537689208985, 0.032905185699462894, 0.032873470306396486, 0.033800193786621094, 0.0329615364074707, 0.03261542510986328, 0.03257753753662109, 0.0329881591796875, 0.03291545486450195, 0.032912384033203124, 0.032797695159912106, 0.03293183898925781, 0.033181697845458984, 0.03299020767211914, 0.03279564666748047, 0.03290726470947265, 0.03288883209228516, 0.03297177505493164, 0.03243929672241211, 0.032846847534179685, 0.03225600051879883, 0.03281510543823242, 0.033083393096923826, 0.0330332145690918, 0.032736320495605466, 0.032817089080810546, 0.03186278343200684, 0.03185766410827637, 0.03318483352661133, 0.03217606353759766, 0.03176038360595703, 0.032323585510253904, 0.03279052734375, 0.0322949104309082, 0.032471038818359374, 0.03288780975341797, 0.03274140930175781, 0.03304035186767578, 0.03282841491699219, 0.031971328735351565, 0.03230003356933594, 0.03287551879882813, 0.032753662109375, 0.03289907073974609, 0.03249356842041016, 0.03194367980957031, 0.0321710090637207, 0.03300352096557617, 0.03274649429321289, 0.0328540153503418, 0.03268710327148437, 0.03202764892578125, 0.032950271606445314, 0.03294515228271484, 0.03292671966552734, 0.03265740966796875, 0.03194675254821777, 0.03184127998352051, 0.03206860733032227, 0.03196108818054199, 0.03163750457763672, 0.031665151596069335, 0.0648058853149414, 0.03185663986206055, 0.031904767990112305, 0.031893503189086916, 0.032007167816162106, 0.0315729923248291, 0.031927295684814457, 0.03198259162902832, 0.0319682559967041, 0.03189967918395996, 0.03205014419555664, 0.031955968856811526, 0.03200614547729492, 0.03189657592773437, 0.03188121604919433, 0.03202150344848633, 0.0321638412475586, 0.031991840362548825, 0.03199894332885742, 0.03193343925476074, 0.03197542381286621, 0.0318525447845459, 0.03199078369140625, 0.03202252960205078, 0.03216281509399414, 0.03228876876831055, 0.032105472564697264, 0.0329697265625, 0.03331584167480469, 0.031987712860107424, 0.03201638412475586, 0.03187820816040039, 0.03171219253540039, 0.032074752807617186, 0.03211372756958008, 0.03188115119934082, 0.031936511993408204, 0.032328704833984374, 0.032024574279785153, 0.03211980819702148, 0.031898624420166014, 0.03198566436767578, 0.03184332847595215, 0.032132095336914065, 0.031905792236328126, 
0.031921152114868165, 0.03194777679443359, 0.03189145660400391, 0.03162112045288086, 0.03145011138916016, 0.03202867126464844, 0.03241164779663086, 0.03273932647705078, 0.03275980758666992, 0.0327086067199707, 0.032747520446777346, 0.032508926391601564, 0.031954944610595705, 0.03173785591125488, 0.03213926315307617, 0.03191910362243652, 0.032194561004638675, 0.03189452743530274, 0.0649349136352539, 0.032023551940917966, 0.03198566436767578, 0.03202560043334961, 0.03201126480102539, 0.03183616065979004, 0.031854591369628905, 0.0317573127746582, 0.031936511993408204, 0.031903743743896484, 0.03194675254821777, 0.03200511932373047, 0.031974399566650394, 0.03211161422729492, 0.031942655563354495, 0.0321638412475586, 0.03193446350097656, 0.03189145660400391, 0.03212799835205078, 0.032, 0.03198361587524414, 0.031937536239624024, 0.032105472564697264, 0.03191193580627441, 0.031916032791137694, 0.03271680068969727, 0.0333383674621582, 0.03406950378417969, 0.03317657470703125, 0.03253247833251953, 0.03198975944519043, 0.03277926254272461, 0.03240140914916992, 0.03290521621704102, 0.03309465789794922, 0.032075775146484374, 0.03243110275268555, 0.03205017471313477, 0.03197747230529785, 0.03197235107421875, 0.031908863067626955, 0.03199283218383789, 0.03201126480102539, 0.03172659111022949, 0.03145113563537598, 0.031459327697753905, 0.031643648147583005, 0.03180441665649414, 0.032189441680908204, 0.031916032791137694, 0.031955968856811526, 0.031971328735351565, 0.03196723175048828, 0.03189657592773437, 0.031959039688110355, 0.03171327972412109, 0.031732736587524416, 0.03177369689941406, 0.032056320190429685, 0.03193343925476074, 0.03195084762573242, 0.03191910362243652, 0.03195084762573242]",tokens/s,30.28593017651422,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in 
_get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa23-5541cdc42e463ac8623afa38;c5f6519e-d1d1-4373-8dd2-26816c758a44) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1662.808064,5516.034048,0.0,4869.586944,4743.593472,s,10,6.1237835083007806,0.6123783508300781,0.001253925145814982,0.6120383911132812,0.6140244384765625,0.6141366088867187,0.6142263452148438,"[0.6138787841796876, 0.614248779296875, 0.6107066650390625, 0.61129736328125, 0.6111465454101562, 0.6114146728515625, 0.6116507568359375, 0.612426025390625, 0.61399951171875, 0.613014404296875]",tokens/s,418.0422114089963,kWh,7.21943411562178e-06,3.95425479798766e-06,3.477342651153299e-05,4.594711542514243e-05,tokens/kWh,5571622.889299289,MB,1662.808064,5516.034048,0.0,4869.586944,4769.651712,s,10,360.87087890624997,36.087087890625,0.01748383820965051,36.082986328125,36.11191953125,36.119573046875,36.125695859375,"[36.11021875, 36.1272265625, 36.08849609375, 36.07920703125, 36.0845625, 36.08481640625, 36.08141015625, 36.0650625, 36.07158203125, 36.078296875]",tokens/s,1.7457767773045123,kWh,0.00042595356252458363,0.00023346088450391426,0.0019820382702698735,0.0026414527172983716,tokens/kWh,23850.51210170259,,s,629,365.80599053955063,0.5815675525271077,0.07276675093795772,0.5726207885742187,0.5739550537109376,0.5744299926757812,1.1846730029296875,"[0.5736171264648438, 0.573517822265625, 0.5738854370117188, 0.57415576171875, 0.5742704467773437, 0.5731041259765625, 0.57312255859375, 0.5737461547851562, 0.5739632568359375, 0.5725850219726563, 0.5728665161132812, 0.5735147705078125, 0.5726494750976563, 0.573000732421875, 0.57260546875, 0.5726546020507812, 0.572073974609375, 0.5728809204101563, 0.572885986328125, 0.5725767822265625, 0.5731676025390625, 0.5745838012695312, 0.5729105834960937, 0.5730211791992188, 0.5729566650390625, 0.5723678588867187, 0.5724866333007812, 0.5726197509765625, 0.5735526123046875, 0.5740676879882812, 0.5729720458984375, 0.5732095947265625, 0.5726760864257813, 0.5724497680664062, 0.5724518432617187, 0.572790771484375, 0.5723668823242187, 0.5727159423828125, 0.5731563720703124, 0.5741424560546875, 0.5723576049804687, 0.5724334106445312, 0.5723883666992188, 0.5724548950195313, 0.5725368041992187, 0.5722142944335937, 0.5739407348632812, 0.5740206298828125, 0.572927001953125, 0.573212646484375, 0.5730181274414062, 0.5728184204101563, 0.5730816040039063, 0.5726177368164063, 0.572484619140625, 0.5727293701171875, 0.5740543823242188, 0.5732669677734376, 0.57483056640625, 0.574424072265625, 0.5744517211914062, 0.5744885864257813, 1.18734130859375, 0.5729505004882812, 0.5725030517578125, 0.5725654907226563, 0.5738137817382812, 0.572379150390625, 0.5741465454101562, 0.574750732421875, 0.573475830078125, 0.5734666137695312, 0.5736826782226563, 0.5750292358398438, 0.5742151489257813, 0.5739274291992188, 0.5746913452148438, 0.5744281616210938, 0.5742459106445312, 0.5746903076171875, 0.5750845336914062, 0.5739735107421875, 
0.5736888427734375, 0.574476318359375, 0.5740001220703125, 0.573032470703125, 0.5722992553710937, 0.57221630859375, 0.571994140625, 0.573065185546875, 0.573844482421875, 0.5739673461914062, 0.5742745361328125, 0.5743206787109375, 0.5743236694335937, 0.5738577880859375, 0.5742110595703125, 0.5753764038085938, 0.5745797119140625, 0.57402880859375, 0.5725982666015625, 0.5732260131835938, 0.5737594604492188, 0.5739530029296875, 0.5731553344726562, 0.572927978515625, 0.57257470703125, 0.5744312133789062, 0.5755975952148438, 0.5727283325195313, 0.5721149291992188, 0.572199951171875, 0.5723617553710938, 0.572105712890625, 0.57270068359375, 0.5733294067382813, 0.5723873291015625, 0.57191015625, 0.573065185546875, 0.5727866821289063, 0.5724027099609375, 0.5726883544921875, 0.5723258666992187, 0.5720657958984375, 0.5731727294921874, 1.1836395263671875, 0.5721917724609376, 0.5728533325195313, 0.5732484741210937, 0.5725409545898438, 0.5732628784179687, 0.5741773071289062, 0.5730109252929687, 0.5731737670898438, 0.573043701171875, 0.5734686889648437, 0.5732505493164063, 0.5726914672851563, 0.5729924926757812, 0.5729525756835937, 0.5732260131835938, 0.574719970703125, 0.5727836303710937, 0.5730283813476562, 0.5724508056640625, 0.5733519287109375, 0.5728767700195313, 0.5725736694335938, 0.5743431396484375, 0.5745357055664062, 0.573507568359375, 0.572695556640625, 0.5729177856445312, 0.573022216796875, 0.5733980102539062, 0.5731030883789062, 0.5730037841796874, 0.572927978515625, 0.5738895263671875, 0.573137939453125, 0.57310107421875, 0.5718549194335938, 0.5721885986328125, 0.57208935546875, 0.5725255737304688, 0.5722398681640625, 0.5733836669921875, 0.5725736694335938, 0.57238525390625, 0.5722880249023438, 0.5720064086914063, 0.5721456909179687, 0.57385986328125, 0.5721773681640625, 0.5720176391601562, 0.5720484008789063, 0.5730570068359375, 0.5720934448242188, 0.5720852661132813, 0.5719961547851562, 0.5719992065429688, 0.5727928466796876, 0.5721221313476562, 0.5721630859375, 0.572600341796875, 0.5723125610351563, 0.5724631958007812, 0.5727794799804687, 1.1843287353515626, 0.572779541015625, 0.57261669921875, 0.5734041748046875, 0.5739274291992188, 0.57350146484375, 0.57328125, 0.5731553344726562, 0.5729392700195313, 0.5729403076171875, 0.5732833251953126, 0.57289013671875, 0.5743707885742187, 0.5724887084960938, 0.5720043334960937, 0.5718681640625, 0.5722941284179688, 0.5721036987304687, 0.5721763916015625, 0.57200537109375, 0.5717247924804687, 0.572310546875, 0.5729740600585937, 0.5724682006835937, 0.5719664916992188, 0.5723975830078125, 0.5722286376953125, 0.5723289794921875, 0.572221435546875, 0.5728389282226563, 0.5737000732421875, 0.5728737182617187, 0.5730037841796874, 0.572178466796875, 0.5720411376953125, 0.5717974853515625, 0.5721036987304687, 0.5720719604492187, 0.5719695434570312, 0.5730037841796874, 0.5722941284179688, 0.5718763427734375, 0.5725706176757812, 0.5727713012695312, 0.5752719116210937, 0.5729822998046875, 0.5730355224609375, 0.5741107177734375, 0.5727109375, 0.5726207885742187, 0.5727815551757812, 0.5729136352539063, 0.572896240234375, 0.572600341796875, 0.5730191650390625, 0.5724456787109375, 0.573222900390625, 0.57379736328125, 0.5722828979492187, 0.572052490234375, 0.5719766845703125, 0.5719429321289062, 0.5717380981445312, 1.1848990478515624, 0.5739724731445313, 0.5733632202148438, 0.5733693237304688, 0.5734880981445313, 0.5738117065429688, 0.5729403076171875, 0.5730672607421875, 0.5729863891601562, 0.5730140380859375, 0.57394482421875, 0.5726064453125, 0.5722265625, 
0.5720391845703126, 0.5731246337890625, 0.5720433349609375, 0.57220703125, 0.5742510375976563, 0.5736365966796875, 0.572663818359375, 0.5723699340820313, 0.5724794921875, 0.5720340576171875, 0.5722705688476563, 0.5724047241210938, 0.57236376953125, 0.5725368041992187, 0.5725255737304688, 0.5728256225585937, 0.5726392211914062, 0.5721354370117188, 0.5721804809570312, 0.5721774291992188, 0.5723484497070312, 0.5721793823242187, 0.572990478515625, 0.5730048217773438, 0.5721558837890625, 0.572242919921875, 0.5721978759765625, 0.5722470703125, 0.5720596313476562, 0.5724047241210938, 0.5724436645507812, 0.5722009887695313, 0.5737861328125, 0.57619970703125, 0.5726668701171875, 0.5722962036132813, 0.5730099487304687, 0.5725439453125, 0.5726085205078125, 0.5728368530273438, 0.5743206176757812, 0.5726044311523437, 0.5726791381835937, 0.5723678588867187, 0.5721937866210938, 0.572822509765625, 0.5727109375, 0.5722070922851562, 0.5722654418945312, 0.5724968872070313, 1.1851029052734374, 0.5723607177734376, 0.57244775390625, 0.5727620849609375, 0.5731502075195313, 0.5729341430664062, 0.5733734130859375, 0.5732301025390625, 0.572663818359375, 0.5735577392578125, 0.5724036865234375, 0.5722737426757812, 0.5723555297851562, 0.5722726440429687, 0.5723494262695312, 0.5725409545898438, 0.5730109252929687, 0.57223681640625, 0.5723402099609375, 0.5722613525390625, 0.572516357421875, 0.5727579956054687, 0.5724467163085938, 0.572221435546875, 0.5731195068359375, 0.5724169921875, 0.5723781127929688, 0.5720811767578124, 0.5718343505859375, 0.5718425903320312, 0.57302734375, 0.5730027465820312, 0.5725501708984375, 0.5730252685546875, 0.57335498046875, 0.5724067993164063, 0.5720145874023438, 0.5724354858398437, 0.5753599853515625, 0.572169189453125, 0.5720125732421875, 0.5726105346679687, 0.5732147216796875, 0.57269970703125, 0.5723483276367187, 0.572010498046875, 0.5722890014648437, 0.5727958984375, 0.5723873901367188, 0.57226953125, 0.5728460693359375, 0.57364892578125, 0.5736980590820312, 0.5732618408203125, 0.5729075317382812, 0.5730396118164063, 0.5730027465820312, 0.5737277221679687, 0.5737984008789062, 0.574740478515625, 0.5733519287109375, 0.5725675659179688, 0.572537841796875, 1.184806884765625, 0.5725839233398438, 0.5726617431640625, 0.573106201171875, 0.5745018920898437, 0.5735618286132812, 0.573259765625, 0.5744578857421875, 0.5749258422851562, 0.5728265991210938, 0.5725081787109375, 0.5727150268554687, 0.5735505981445312, 0.573075439453125, 0.5722562255859375, 0.57212109375, 0.5720760498046875, 0.5723033447265625, 0.5736908569335938, 0.5737195434570312, 0.5725010375976562, 0.57371337890625, 0.5728123168945313, 0.5738147583007812, 0.571978759765625, 0.5718763427734375, 0.5724252319335937, 0.5723299560546875, 0.5721712646484375, 0.5726679077148438, 0.57254296875, 0.5723995971679687, 0.5719193725585937, 0.5722777709960938, 0.571779052734375, 0.5716900024414062, 0.5719306030273438, 0.5717493896484375, 0.571821044921875, 0.5734307861328125, 0.5732904663085937, 0.5728604125976563, 0.572748779296875, 0.57318603515625, 0.5727365112304688, 0.5732413330078125, 0.57249072265625, 0.5729822998046875, 0.573169677734375, 0.5721641235351562, 0.5722808227539062, 0.5722900390625, 0.57253173828125, 0.5722101440429688, 0.5723197631835938, 0.5721200561523437, 0.5727498168945313, 0.5729525756835937, 0.5725030517578125, 0.5723596801757812, 0.5721190185546875, 0.5728788452148438, 0.57232177734375, 1.18618115234375, 0.5729382934570313, 0.5724763793945312, 0.5728051147460937, 0.572410888671875, 0.5722828979492187, 
0.5726187744140625, 0.57223681640625, 0.5720278930664062, 0.5722142944335937, 0.5735720825195313, 0.5724139404296875, 0.5722695922851563, 0.5720924072265625, 0.572410888671875, 0.5720698852539062, 0.5738741455078125, 0.5721026611328125, 0.5729136352539063, 0.5728265991210938, 0.5724375, 0.5722224731445312, 0.57202587890625, 0.573212646484375, 0.572484619140625, 0.5720780639648437, 0.57230859375, 0.57369384765625, 0.5728798828125, 0.572020751953125, 0.5721231079101563, 0.5717554931640625, 0.572156982421875, 0.5722654418945312, 0.5718435668945312, 0.5726340942382813, 0.573137939453125, 0.5722542114257813, 0.5726156616210938, 0.572095458984375, 0.572031982421875, 0.5723658447265625, 0.5724303588867188, 0.5722726440429687, 0.5725307006835938, 0.5731655883789063, 0.5721272583007813, 0.5720043334960937, 0.572042236328125, 0.572242919921875, 0.5721856079101563, 0.5724548950195313, 0.5724661865234375, 0.5730764770507812, 0.5732301025390625, 0.5725450439453125, 0.5724866333007812, 0.5721734008789062, 0.5719572143554688, 0.5721549072265625, 0.572000244140625, 0.5720064086914063, 0.5725757446289063, 1.1873709716796874, 0.5724487915039063, 0.5732085571289063, 0.572284912109375, 0.5727989501953125, 0.5728409423828125, 0.5729740600585937, 0.573497314453125, 0.572516357421875, 0.572705810546875, 0.5721886596679687, 0.5721958618164062, 0.5724036865234375, 0.5723381958007813, 0.57270068359375, 0.5734564208984375, 0.5733170776367188, 0.5739089965820312, 0.5724610595703125, 0.5728818969726562, 0.5730293579101563, 0.57270068359375, 0.572495849609375, 0.5729935302734375, 0.5724405517578125, 0.572822509765625, 0.5726105346679687, 0.572315673828125, 0.5717718505859375, 0.5719408569335938, 0.5722603759765625, 0.5722347412109375, 0.5724713134765625, 0.5740123901367188, 0.5722296142578125, 0.5719521484375, 0.572314697265625, 0.572317626953125, 0.5720811767578124, 0.5719705810546875, 0.5717545166015625, 0.5723504638671875, 0.5726515502929688, 0.5724016723632812, 0.5723504638671875, 0.572231689453125, 0.572205078125, 0.5721190185546875, 0.572031982421875, 0.5724467163085938, 0.573043701171875, 0.5730468139648438, 0.5726412353515625, 0.5725030517578125, 0.5725399169921875, 0.5724713134765625, 0.5720606689453125, 0.5723914184570312, 0.5728767700195313, 0.573179931640625, 0.5726597290039063, 0.572295166015625, 0.5719317016601563, 1.1857969970703126, 0.5727313842773437, 0.572368896484375, 0.572347412109375, 0.5732669677734376, 0.5725573120117188, 0.5721907348632812, 0.5723095092773437, 0.5727498168945313, 0.5725081787109375, 0.5725491333007813, 0.5726720581054687, 0.5733457641601563, 0.5733345336914063, 0.5721354370117188, 0.5723043823242188, 0.5719183349609375, 0.5717770385742188, 0.5724968872070313, 0.5726105346679687, 0.573053955078125, 0.5741567993164063, 0.5731102905273437, 0.5720698852539062, 0.5720145874023438, 0.57215185546875, 0.574278564453125, 0.5722521362304688, 0.5720453491210937, 0.5730723266601563, 0.5723924560546875, 0.5727354736328125, 0.572337158203125, 0.5723197631835938, 0.5723668212890625, 0.572221435546875, 0.5729075317382812, 0.5726556396484375, 0.5724456787109375, 0.5738014526367188, 0.5731779174804688, 0.572559326171875, 0.5726812133789062, 0.5728235473632812, 0.572885986328125, 0.5726618041992187, 0.5722745971679688, 0.5731082153320313, 0.5732781982421875, 0.5720135498046875, 0.5721507568359375, 0.57227880859375, 0.5728726806640625, 0.572346435546875, 0.5726781005859375, 0.5728286743164063, 0.5733345336914063, 0.5729228515625, 0.572737548828125, 0.5724548950195313, 0.5741793212890625, 
0.5722265625, 0.572494873046875]",tokens/s,1.7194907034525255,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,2054.77888,5566.365696,0.0,4919.918592,4635.537408,s,10,5.159750671386719,0.5159750671386718,0.0024202673095528563,0.5153813171386719,0.5170659973144531,0.5199656463623047,0.5222853656005859,"[0.5228652954101562, 0.5143955078125, 0.5157650146484375, 0.5144519653320313, 0.5155265502929688, 0.5141107177734375, 0.516309814453125, 0.5146680908203125, 0.516421630859375, 0.515236083984375]",tokens/s,496.14800463061573,kWh,6.087385805116759e-06,3.3354087369571064e-06,2.7887036198498415e-05,3.7309830740572285e-05,tokens/kWh,6861462.379179726,MB,2054.77888,5566.365696,0.0,4919.918592,4794.439168,s,10,301.012869140625,30.101286914062502,0.007474756939868433,30.101162109375,30.1092380859375,30.11128505859375,30.11292263671875,"[30.104474609375, 30.0874609375, 30.10366796875, 30.09865625, 30.095248046875, 30.1085703125, 30.108783203125, 30.11333203125, 30.097923828125, 30.094751953125]",tokens/s,2.0929337732257594,kWh,0.00035535723721815483,0.00019476662748246779,0.0016039330748120972,0.00215405693951272,tokens/kWh,29247.13773548231,,s,629,305.1713383178704,0.4851690593288888,0.06123176770881389,0.4777687072753906,0.47843901367187497,0.47872921142578123,0.9921051538085938,"[0.47712460327148437, 0.4769034118652344, 0.47702426147460936, 0.47709390258789064, 0.47730584716796876, 0.4771205139160156, 0.4773304443359375, 0.47765298461914063, 0.4770846862792969, 0.4785274963378906, 0.47798681640625, 0.47815167236328127, 0.4778588256835937, 0.4773570556640625, 0.477907958984375, 0.47804620361328126, 0.47764480590820313, 0.4777400207519531, 0.4774901733398437, 0.4775546875, 0.4790906982421875, 0.47841998291015625, 0.4775454711914062, 0.4774205322265625, 0.4777062377929687, 0.4773007507324219, 0.47782296752929687, 0.477517822265625, 0.47927194213867186, 0.47922592163085936, 0.4796824951171875, 0.4796200866699219, 0.478603271484375, 0.47771649169921876, 0.4775034790039063, 0.47786904907226563, 0.4779376525878906, 0.4778526611328125, 0.47831039428710936, 0.47768167114257815, 0.4777820129394531, 0.47838104248046875, 0.4779438171386719, 0.4779376525878906, 0.47762432861328125, 0.47779022216796874, 0.47766015625, 0.4779346008300781, 0.47730584716796876, 0.47776666259765627, 0.4777215881347656, 0.4779438171386719, 0.4776980590820312, 0.4786534423828125, 0.47755673217773437, 0.4775034790039063, 0.47755059814453127, 0.47741543579101564, 0.4771686401367187, 0.47783526611328125, 0.4779069519042969, 0.4776560668945313, 0.9921392822265624, 0.4773294067382812, 0.4772085876464844, 0.477370361328125, 0.4770396728515625, 0.4769504699707031, 0.47741748046875, 0.47746356201171875, 0.4771512451171875, 0.47819979858398437, 0.4786329650878906, 0.47734783935546876, 0.477412353515625, 0.4770017395019531, 0.47757003784179686, 0.47735910034179685, 0.47719219970703125, 
0.4772116394042969, 0.4775659484863281, 0.47811892700195313, 0.4780257263183594, 0.4779304809570312, 0.4777492370605469, 0.4775045166015625, 0.4776427612304687, 0.4780738525390625, 0.4773498840332031, 0.47781170654296873, 0.4772178039550781, 0.47686654663085937, 0.4779991149902344, 0.47811175537109374, 0.47742669677734373, 0.4774471740722656, 0.47721368408203124, 0.4768757629394531, 0.47745944213867186, 0.4774481811523438, 0.4774799499511719, 0.4782315979003906, 0.4776785583496094, 0.4772024230957031, 0.477707275390625, 0.47777587890625, 0.47741030883789065, 0.47758950805664063, 0.47756494140625, 0.47724337768554687, 0.4777215881347656, 0.47767962646484374, 0.47741543579101564, 0.47842715454101564, 0.4785602111816406, 0.4774819946289062, 0.4774615173339844, 0.4775465087890625, 0.4775475158691406, 0.4778106994628906, 0.4775577697753906, 0.4773294067382812, 0.47762738037109376, 0.47781375122070313, 0.4781895751953125, 0.9939957885742188, 0.47789157104492186, 0.4779029235839844, 0.4775321044921875, 0.4773294067382812, 0.47682968139648435, 0.4777994384765625, 0.478455810546875, 0.4786780090332031, 0.47789056396484375, 0.47756390380859376, 0.4770979919433594, 0.47737344360351563, 0.4772915344238281, 0.4772536315917969, 0.4778649597167969, 0.47748095703125, 0.4771614685058594, 0.47814862060546875, 0.47750143432617187, 0.4773918762207031, 0.47743896484375, 0.4776662902832031, 0.47875582885742185, 0.4773284606933594, 0.4779539794921875, 0.4778567810058594, 0.47754238891601564, 0.47780154418945314, 0.47773382568359374, 0.4781598815917969, 0.4780472412109375, 0.4777594909667969, 0.477765625, 0.47762841796875, 0.47739596557617187, 0.47771444702148436, 0.47763250732421875, 0.4777687072753906, 0.4783943786621094, 0.47779736328125, 0.4777215881347656, 0.47773284912109376, 0.47762841796875, 0.4779315185546875, 0.4779735107421875, 0.4777461853027344, 0.4787445983886719, 0.47787722778320313, 0.4785080261230469, 0.4784373779296875, 0.47873638916015626, 0.47828274536132814, 0.4777001037597656, 0.47793560791015627, 0.4780277709960937, 0.47800115966796874, 0.47787213134765627, 0.47758950805664063, 0.47742156982421874, 0.47888998413085937, 0.478159912109375, 0.47799700927734373, 0.9920173950195312, 0.47721368408203124, 0.4778260498046875, 0.477939697265625, 0.47749530029296877, 0.4772659912109375, 0.4777287292480469, 0.4779949645996094, 0.47825204467773436, 0.4779581298828125, 0.4773775329589844, 0.4774225769042969, 0.47868109130859376, 0.4778076171875, 0.47798785400390625, 0.478139404296875, 0.47792538452148436, 0.4779366455078125, 0.4774615173339844, 0.47746356201171875, 0.477728759765625, 0.47751473999023436, 0.47717376708984377, 0.47695462036132813, 0.4774942626953125, 0.47761920166015626, 0.4774625244140625, 0.4780001220703125, 0.4778526611328125, 0.47788134765625, 0.4775301513671875, 0.47762127685546873, 0.4773498229980469, 0.4779581298828125, 0.4775413818359375, 0.47721881103515623, 0.47748504638671874, 0.4778824462890625, 0.47803488159179686, 0.47796633911132813, 0.47795098876953124, 0.47843328857421874, 0.4778895263671875, 0.47771749877929687, 0.47763766479492187, 0.47745123291015623, 0.47731610107421873, 0.47720550537109374, 0.47810763549804686, 0.47794686889648436, 0.478023681640625, 0.47817214965820315, 0.4775679931640625, 0.4772167663574219, 0.4781783142089844, 0.4780738525390625, 0.4782264404296875, 0.4780769348144531, 0.4779223022460938, 0.4776662902832031, 0.47806668090820315, 0.4780738525390625, 0.47803903198242187, 0.9936957397460937, 0.47736831665039064, 0.4772413330078125, 
0.47760589599609377, 0.47733966064453126, 0.4780892028808594, 0.47794790649414065, 0.4774604797363281, 0.47752294921875, 0.4775792541503906, 0.47779022216796874, 0.4776407165527344, 0.4775096435546875, 0.4774523010253906, 0.4773304443359375, 0.4777021484375, 0.477939697265625, 0.4781055908203125, 0.4771205749511719, 0.477898681640625, 0.47731201171875, 0.47782196044921876, 0.4773775329589844, 0.4771747741699219, 0.4777697143554688, 0.47787213134765627, 0.4778188781738281, 0.4779683837890625, 0.47738983154296877, 0.4772915344238281, 0.4775802917480469, 0.47765914916992186, 0.47731201171875, 0.47755877685546877, 0.4773355407714844, 0.4775382995605469, 0.477918212890625, 0.47747378540039065, 0.4770508728027344, 0.4778793029785156, 0.477528076171875, 0.47719937133789064, 0.4775516052246094, 0.4772341613769531, 0.4773990478515625, 0.47757415771484374, 0.47758746337890623, 0.47739495849609376, 0.47834423828125, 0.4778536682128906, 0.4773058776855469, 0.47772262573242186, 0.4777635498046875, 0.4777185363769531, 0.4785080261230469, 0.4780687255859375, 0.4775168151855469, 0.4807618408203125, 0.47829605102539063, 0.47767245483398435, 0.4783206481933594, 0.4779980773925781, 0.4780625915527344, 0.9916989135742188, 0.47712973022460936, 0.4770652160644531, 0.4772781982421875, 0.47782296752929687, 0.47800833129882814, 0.47793356323242187, 0.47763150024414064, 0.47709390258789064, 0.47777484130859377, 0.4777667236328125, 0.47767550659179686, 0.47757720947265625, 0.4771798400878906, 0.47727410888671873, 0.47779531860351565, 0.4776089477539063, 0.4775792541503906, 0.47766937255859376, 0.47783627319335936, 0.47713690185546875, 0.47779226684570314, 0.4778567810058594, 0.4774615173339844, 0.4775833740234375, 0.47805950927734375, 0.47846502685546877, 0.478561279296875, 0.4782633056640625, 0.4781055908203125, 0.47865753173828124, 0.47780044555664064, 0.47805950927734375, 0.4787906494140625, 0.47858688354492185, 0.47910400390625, 0.47804107666015627, 0.4773498840332031, 0.47763455200195315, 0.4782264404296875, 0.47779736328125, 0.47735809326171874, 0.4779683837890625, 0.4779571228027344, 0.47788134765625, 0.47783627319335936, 0.4788439025878906, 0.47873126220703127, 0.47824075317382814, 0.47784039306640624, 0.47777279663085936, 0.47869952392578125, 0.4775126953125, 0.47748504638671874, 0.4778311767578125, 0.47821005249023435, 0.478708740234375, 0.4786903076171875, 0.4786278381347656, 0.4780533752441406, 0.47773080444335936, 0.47787213134765627, 0.47809127807617186, 0.9952440185546875, 0.4780052490234375, 0.47796734619140624, 0.47861553955078123, 0.47840972900390627, 0.478055419921875, 0.4771358642578125, 0.47734375, 0.4771768188476562, 0.47729766845703125, 0.477949951171875, 0.4772239379882813, 0.4777277526855469, 0.47779531860351565, 0.47714407348632815, 0.47731814575195314, 0.47762841796875, 0.47755877685546877, 0.477528076171875, 0.4772034606933594, 0.47703143310546875, 0.47779531860351565, 0.4779202575683594, 0.4790067138671875, 0.478593017578125, 0.4792412109375, 0.47822540283203124, 0.4779069519042969, 0.4779427795410156, 0.47770932006835937, 0.47803802490234376, 0.4777840576171875, 0.47797247314453123, 0.477939697265625, 0.47781991577148436, 0.4778240051269531, 0.47803289794921877, 0.47818240356445313, 0.47798175048828123, 0.4779959716796875, 0.4778680419921875, 0.4780902404785156, 0.47819674682617186, 0.47809945678710936, 0.4779765625, 0.479388671875, 0.4785356750488281, 0.4774143981933594, 0.4776642456054688, 0.47775238037109374, 0.4779253234863281, 0.4778547058105469, 0.4781363220214844, 
0.47750860595703126, 0.4780830688476562, 0.47821728515625, 0.4779703674316406, 0.47782196044921876, 0.4780851135253906, 0.4781322326660156, 0.47786392211914064, 0.4776509399414062, 0.4776099853515625, 0.9929461669921875, 0.47778509521484375, 0.48122674560546874, 0.47778509521484375, 0.4777943115234375, 0.47731814575195314, 0.4780748901367188, 0.47792538452148436, 0.47784756469726564, 0.4781158447265625, 0.4780155029296875, 0.47748403930664063, 0.4777891845703125, 0.4779100036621094, 0.4776908874511719, 0.47778713989257815, 0.47860736083984373, 0.47720037841796875, 0.4777891845703125, 0.4777216491699219, 0.4775577087402344, 0.47795404052734375, 0.47750143432617187, 0.47806668090820315, 0.4786780090332031, 0.477744140625, 0.47708673095703125, 0.47796429443359373, 0.4774686584472656, 0.4776775817871094, 0.4779796447753906, 0.47754855346679687, 0.4775096435546875, 0.4782417907714844, 0.47757107543945315, 0.4774205322265625, 0.4781158447265625, 0.4779376525878906, 0.47806362915039063, 0.47810763549804686, 0.4782090148925781, 0.478413818359375, 0.47833804321289064, 0.4782417907714844, 0.4790927429199219, 0.4781537170410156, 0.47856845092773437, 0.4775301208496094, 0.4781506652832031, 0.4774615173339844, 0.4780472412109375, 0.4780318603515625, 0.47794891357421876, 0.47838311767578123, 0.47825408935546876, 0.4780349426269531, 0.47770111083984373, 0.4779857788085938, 0.47789266967773436, 0.47788128662109375, 0.4787712097167969, 0.47795306396484377, 0.47804415893554686, 0.9943838500976563, 0.4787189636230469, 0.47804107666015627, 0.47781375122070313, 0.477370361328125, 0.4773304443359375, 0.47816705322265624, 0.47811993408203124, 0.4777738342285156, 0.47763250732421875, 0.4774573974609375, 0.47708673095703125, 0.47772467041015626, 0.47725567626953125, 0.47783627319335936, 0.47736831665039064, 0.4771112976074219, 0.478445556640625, 0.47864935302734374, 0.4780984191894531, 0.47761102294921876, 0.4776365966796875, 0.4789073791503906, 0.4771778564453125, 0.47777484130859377, 0.47784756469726564, 0.4784169006347656, 0.4775045166015625, 0.47752191162109375, 0.47713177490234376, 0.47731610107421873, 0.47756390380859376, 0.4773212280273437, 0.47744512939453126, 0.4777543640136719, 0.477222900390625, 0.477707275390625, 0.47724850463867186, 0.47723724365234377, 0.4778823547363281, 0.477412353515625, 0.4772392883300781, 0.4777994384765625, 0.47849676513671874, 0.4779949951171875, 0.47791104125976563, 0.47725466918945314, 0.47723629760742187, 0.47771026611328127, 0.478139404296875, 0.4780195922851562, 0.478129150390625, 0.47857867431640627, 0.47747174072265625, 0.47793869018554686, 0.47770932006835937, 0.4779571228027344, 0.47765509033203124, 0.4775464172363281, 0.47778713989257815, 0.477918212890625, 0.4777820129394531, 0.47804107666015627, 0.9942221069335937, 0.47767550659179686, 0.4771686401367187, 0.477431884765625, 0.4771603698730469, 0.47666278076171875, 0.4774676513671875, 0.47758746337890623, 0.47736114501953125, 0.4776488952636719, 0.47701708984375, 0.47684402465820314, 0.47745944213867186, 0.477633544921875, 0.4778076171875, 0.4772536315917969, 0.47761715698242185, 0.47721881103515623, 0.4775362548828125, 0.47752294921875, 0.4779653015136719, 0.47805645751953124, 0.47783526611328125, 0.47724032592773435, 0.4773447570800781, 0.4780175476074219, 0.4793231506347656, 0.4781588439941406, 0.47761715698242185, 0.47719937133789064, 0.4781537170410156, 0.47752294921875, 0.4775311279296875, 0.47761920166015626, 0.4774993896484375, 0.4768399353027344, 0.4775946960449219, 0.47729248046875, 0.47762841796875, 
0.4775372924804687, 0.4772720642089844, 0.4776089477539063, 0.4787261352539062, 0.47772262573242186, 0.47747378540039065, 0.47768780517578124, 0.477528076171875, 0.477149169921875, 0.4776355895996094, 0.47729766845703125, 0.4775628662109375, 0.47870361328125, 0.4778188781738281, 0.4776509399414062, 0.4779898986816406, 0.4782561340332031, 0.47833087158203125, 0.47798681640625, 0.4781035461425781, 0.477955078125, 0.47816192626953125, 0.4783472595214844, 0.47877734375]",tokens/s,2.061137207272146,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1440.489472,1709.703168,0.0,1063.256064,942.605312,s,10,0.8710727081298828,0.08710727081298827,0.0020526611434407644,0.0869946403503418,0.08830768203735351,0.09043918418884277,0.09214438591003418,"[0.09257068634033203, 0.08689478302001953, 0.08709449768066406, 0.08734588623046875, 0.0876402587890625, 0.08528950500488282, 0.08547952270507812, 0.08543309020996094, 0.08549046325683594, 0.08783401489257812]",tokens/s,2938.9050719957663,kWh,1.0307077509193605e-06,5.647778186555472e-07,2.6262726324173835e-06,4.221758201992291e-06,tokens/kWh,60638243.06166823,MB,1440.817152,1709.703168,0.0,1063.256064,942.607872,s,10,54.10364501953125,5.410364501953125,0.04442393039305028,5.402393798828125,5.47523671875,5.4771017578125,5.478593789062501,"[5.402685546875, 5.4450302734375, 5.474822265625, 5.478966796875, 5.39001171875, 5.366296875, 5.3479423828125, 5.3592861328125, 5.4365009765625, 5.40210205078125]",tokens/s,11.64431712082563,kWh,6.414297001805283e-05,3.5154539095709204e-05,0.00015373626912418487,0.00025303377823794696,tokens/kWh,248978.61636779693,,s,629,54.788874206542985,0.08710472846827182,0.01042761753839409,0.08542822265625,0.08754749298095703,0.08829050750732421,0.17079922241210937,"[0.08673382568359375, 0.08457215881347656, 0.08464179229736328, 0.08488550567626953, 0.08813362884521485, 0.08856473541259766, 0.08671949005126953, 0.08648601531982422, 0.08687923431396484, 0.08737894439697266, 0.08506470489501954, 0.0844554214477539, 0.0844400634765625, 0.08486911773681641, 0.08474524688720703, 0.08481378936767578, 0.08675328063964843, 0.08531455993652344, 0.08472268676757813, 0.08468172454833985, 0.08468172454833985, 0.0845322265625, 0.08479436492919921, 0.08471552276611329, 0.08475545501708984, 0.08474521636962891, 0.08502886199951172, 0.0846899871826172, 0.0848424301147461, 0.084569091796875, 0.08475955200195312, 0.08495104217529297, 0.08461721801757813, 0.08463571166992187, 0.08458643341064453, 0.08476876831054687, 0.08473804473876953, 0.08473190307617187, 0.0844554214477539, 0.08442777252197266, 0.08484761810302735, 0.08466124725341796, 0.08562483215332031, 0.08599142456054687, 0.08450764465332031, 0.0840970230102539, 0.08538521575927735, 0.08838041687011719, 0.0868485107421875, 0.08694374084472656, 0.08736255645751953, 0.08686080169677735, 0.08700518035888671, 0.08699289703369141, 0.08753254699707032, 
0.0868485107421875, 0.08696627044677735, 0.08728985595703125, 0.08770150756835937, 0.08642969512939454, 0.08510566711425781, 0.08681779479980468, 0.1740953674316406, 0.08652799987792968, 0.08564736175537109, 0.0842209243774414, 0.08706150054931641, 0.08500326538085938, 0.08664473724365235, 0.08596275329589843, 0.0853391342163086, 0.08664268493652344, 0.08708096313476563, 0.08690073394775391, 0.08696934509277343, 0.08694783782958984, 0.08664575958251954, 0.08597196960449219, 0.08684748840332031, 0.08683110046386719, 0.08509439849853516, 0.08638566589355469, 0.08632217407226563, 0.08716287994384765, 0.087119873046875, 0.08713215637207031, 0.08506470489501954, 0.08651168060302734, 0.08632109069824219, 0.08708403015136719, 0.08612662506103516, 0.08650748443603516, 0.08707481384277344, 0.08641535949707031, 0.08600678253173828, 0.08589107513427735, 0.08514252471923828, 0.08699494171142579, 0.08681676483154296, 0.08650956726074219, 0.0871383056640625, 0.08712806701660156, 0.08630374145507813, 0.08593612670898437, 0.08639590454101563, 0.08506681823730469, 0.08687712097167968, 0.08704512023925781, 0.08696115112304688, 0.08838451385498047, 0.08482816314697265, 0.08464895629882813, 0.08431206512451171, 0.08453427124023437, 0.08795238494873046, 0.08794931030273437, 0.08684134674072265, 0.08686592102050782, 0.08699801635742188, 0.08754073333740234, 0.087947265625, 0.08687513732910156, 0.08657100677490234, 0.08599350738525391, 0.08582653045654297, 0.17401548767089844, 0.0865423355102539, 0.08694374084472656, 0.08704204559326172, 0.08744652557373046, 0.08659967803955078, 0.08703897857666015, 0.08700109100341796, 0.08715776062011718, 0.08681574249267578, 0.08705433654785157, 0.08619929504394531, 0.08701548767089844, 0.086857666015625, 0.08725811004638671, 0.08656588745117187, 0.08733184051513672, 0.0871731185913086, 0.08719257354736328, 0.08709120178222657, 0.0869713897705078, 0.0868485107421875, 0.08620236968994141, 0.08700415802001953, 0.08701337432861328, 0.0868331527709961, 0.08713420867919922, 0.08696627044677735, 0.08672972869873047, 0.08645836639404297, 0.08661094665527344, 0.08728678131103515, 0.08693452453613282, 0.08721206665039062, 0.08688022613525391, 0.08789299011230468, 0.08672870635986328, 0.08694477081298828, 0.087731201171875, 0.08747007751464844, 0.08717005157470703, 0.08711270141601563, 0.08729190063476562, 0.08698271942138672, 0.08669484710693359, 0.08654745483398438, 0.08669286346435547, 0.08840806579589844, 0.08711475372314453, 0.08676044464111328, 0.08631705474853515, 0.08654956817626953, 0.08599648284912109, 0.08647679901123047, 0.08694989013671875, 0.08696524810791016, 0.08698060607910156, 0.08548556518554687, 0.08556953430175782, 0.0868823013305664, 0.08658534240722657, 0.08709939575195312, 0.08710451507568359, 0.17466163635253906, 0.08715673828125, 0.08728371429443359, 0.08680345916748047, 0.08702361297607422, 0.0872099838256836, 0.08695299530029296, 0.0865228500366211, 0.08702054595947266, 0.08711270141601563, 0.08681267547607421, 0.0862402572631836, 0.08722022247314454, 0.08714342498779297, 0.08641433715820312, 0.08614297485351563, 0.08621363067626953, 0.08743526458740235, 0.0874260482788086, 0.08645123291015624, 0.08728675079345703, 0.08692940521240235, 0.08707891082763672, 0.08719055938720703, 0.08709014129638672, 0.08714854431152344, 0.08714035034179687, 0.08686592102050782, 0.0871751708984375, 0.08751411437988281, 0.08707379150390625, 0.08701952362060547, 0.08471449279785156, 0.08613683319091797, 0.08813772583007813, 0.08714240264892578, 0.08702566528320313, 
0.0872959976196289, 0.08773222351074218, 0.08883302307128907, 0.08757759857177734, 0.08561151885986328, 0.08676659393310547, 0.08834764862060547, 0.08476467132568359, 0.08778854370117188, 0.08729497528076172, 0.08688441467285156, 0.08712287902832032, 0.0869744644165039, 0.0868136978149414, 0.08646246337890626, 0.08621875, 0.08678707122802734, 0.08692428588867188, 0.08732262420654296, 0.08703692626953125, 0.08800665283203125, 0.08684236907958984, 0.08649215698242188, 0.0869048309326172, 0.08684236907958984, 0.08680857849121094, 0.1740042266845703, 0.08717619323730469, 0.0870635528564453, 0.0868219223022461, 0.08698159790039063, 0.08682291412353516, 0.08556953430175782, 0.08729804992675781, 0.08690278625488282, 0.08721305847167969, 0.08680857849121094, 0.08707686614990234, 0.08708812713623047, 0.0862003173828125, 0.08698265838623047, 0.08703385925292968, 0.08684031677246094, 0.08691814422607422, 0.08724281311035156, 0.08704608154296875, 0.08688742065429687, 0.08603648376464844, 0.08850739288330078, 0.08509849548339844, 0.08658636474609376, 0.08696934509277343, 0.08461007690429688, 0.08453526306152344, 0.08452095794677734, 0.08469811248779296, 0.084959228515625, 0.08464383697509766, 0.08490086364746094, 0.08458649444580078, 0.08620851135253907, 0.08480665588378906, 0.08471449279785156, 0.08450867462158203, 0.08490086364746094, 0.0845486068725586, 0.08454962921142578, 0.08446873474121094, 0.0846909408569336, 0.0845506591796875, 0.08456806182861328, 0.08460185241699218, 0.08472268676757813, 0.08459366607666016, 0.08461414337158203, 0.0848005142211914, 0.08458854675292969, 0.08446669006347657, 0.08458854675292969, 0.08470323181152344, 0.08453427124023437, 0.08449433898925782, 0.08460192108154296, 0.08460384368896484, 0.08456192016601563, 0.08444825744628906, 0.08477184295654297, 0.0844421157836914, 0.08439193725585938, 0.17255935668945313, 0.08706764984130859, 0.0871720962524414, 0.0869908447265625, 0.08572518157958985, 0.08475033569335938, 0.0848353271484375, 0.08454962921142578, 0.08473804473876953, 0.08482406616210937, 0.08465305328369141, 0.08468479919433594, 0.08473395538330078, 0.08460902404785156, 0.08453529357910156, 0.08463053131103515, 0.0845455322265625, 0.08432128143310547, 0.08473190307617187, 0.08471449279785156, 0.084578369140625, 0.08457823944091797, 0.08466944122314453, 0.08652288055419922, 0.08498278045654296, 0.08480255889892578, 0.08473702239990234, 0.08468787384033204, 0.08633650970458985, 0.0850851821899414, 0.08458751678466797, 0.08468991851806641, 0.08474726104736328, 0.0846909408569336, 0.0885032958984375, 0.08480870056152344, 0.0858071060180664, 0.08694374084472656, 0.08694989013671875, 0.08657920074462891, 0.08678604888916015, 0.08691814422607422, 0.084853759765625, 0.08535858917236328, 0.08599654388427734, 0.08458137512207031, 0.08471347045898438, 0.084495361328125, 0.08461516571044922, 0.0845322265625, 0.08443698883056641, 0.0841891860961914, 0.08461619567871094, 0.0845137939453125, 0.0844031982421875, 0.08491827392578125, 0.08468889617919922, 0.08446873474121094, 0.08431718444824218, 0.0844595489501953, 0.08526640319824219, 0.08521421051025391, 0.0846397476196289, 0.1709055938720703, 0.08506777954101563, 0.08515379333496094, 0.08487731170654297, 0.08472576141357421, 0.08464383697509766, 0.08466124725341796, 0.08488448333740234, 0.08489778900146484, 0.08490598297119141, 0.08465203094482422, 0.0848189468383789, 0.08532991790771484, 0.08477286529541016, 0.08459468841552735, 0.08518246459960938, 0.08490803527832032, 0.0846561279296875, 0.08462028503417969, 
0.08444313812255859, 0.0862208023071289, 0.08462643432617188, 0.08517222595214843, 0.08487423706054688, 0.08629145812988281, 0.08506470489501954, 0.0851937255859375, 0.08485785675048828, 0.08452505493164063, 0.0856289291381836, 0.08493059539794921, 0.08512406158447265, 0.08508620452880859, 0.08450457763671874, 0.084515869140625, 0.08459465789794922, 0.08446975708007813, 0.08475852966308593, 0.08493772888183594, 0.08463161468505859, 0.08454649353027344, 0.0845322265625, 0.08463667297363281, 0.08475753784179688, 0.08451376342773438, 0.08450662231445312, 0.08457318115234375, 0.08449842834472657, 0.08456294250488282, 0.08457523345947265, 0.08489676666259766, 0.08472576141357421, 0.087193603515625, 0.08506368255615235, 0.08472064208984376, 0.08463565063476562, 0.08460390472412109, 0.08499712371826172, 0.08570368194580077, 0.08466124725341796, 0.08457625579833984, 0.08450355529785156, 0.08474214172363281, 0.17052569580078125, 0.0847298583984375, 0.08458444976806641, 0.08468275451660157, 0.08454041290283203, 0.08447081756591797, 0.08448406219482422, 0.08457523345947265, 0.0845301742553711, 0.0844247055053711, 0.08477286529541016, 0.08456502532958984, 0.08428745269775391, 0.08627097320556641, 0.08500633239746094, 0.08606105804443359, 0.08684646606445312, 0.08513126373291016, 0.08685874938964844, 0.08501042938232421, 0.08475545501708984, 0.0846346206665039, 0.08462438201904297, 0.08665497589111328, 0.08509542083740235, 0.08484454345703125, 0.08662322998046874, 0.08476467132568359, 0.08460492706298828, 0.0845998077392578, 0.08459878540039062, 0.08473702239990234, 0.08481484985351563, 0.08530022430419922, 0.08468275451660157, 0.08468685150146485, 0.08445645141601563, 0.08475548553466797, 0.08516194915771484, 0.08527359771728515, 0.08474931335449219, 0.08479542541503907, 0.08579888153076172, 0.08668876647949218, 0.08599346923828124, 0.08607129669189453, 0.08541696166992188, 0.08659865570068359, 0.08641024017333984, 0.08462847900390626, 0.08492339324951172, 0.0848875503540039, 0.084674560546875, 0.08466534423828125, 0.08485273742675781, 0.08462643432617188, 0.08465408325195313, 0.08468479919433594, 0.08457011413574218, 0.0847984619140625, 0.08467558288574219, 0.08471142578125, 0.08483334350585937, 0.1702665557861328, 0.08738406372070312, 0.08923648071289063, 0.08826573181152343, 0.08826470184326173, 0.08866099548339844, 0.08817049407958985, 0.08779468536376953, 0.08849612426757812, 0.08879206085205078, 0.08808345794677734, 0.08809164428710937, 0.0881233901977539, 0.08886784362792968, 0.08884019470214843, 0.08862515258789062, 0.08821043395996093, 0.08844905853271484, 0.08820121765136718, 0.08817353820800782, 0.08852275085449218, 0.08854227447509766, 0.08476563262939453, 0.0872243194580078, 0.08712806701660156, 0.08852275085449218, 0.08827391815185547, 0.08493363189697266, 0.08474214172363281, 0.08446873474121094, 0.08490598297119141, 0.08460697937011719, 0.08487014770507813, 0.08471858978271485, 0.08479743957519531, 0.0852490234375, 0.08817971038818359, 0.0883599395751953, 0.08766566467285156, 0.0848189468383789, 0.08454348754882812, 0.08476876831054687, 0.08462950134277344, 0.08455782318115235, 0.0845998077392578, 0.08457523345947265, 0.08428851318359375, 0.08459468841552735, 0.08465920257568359, 0.08506470489501954, 0.08472268676757813, 0.08413491058349609, 0.08476467132568359, 0.08465715026855469, 0.08452915191650391, 0.08448102569580078, 0.0847267837524414, 0.08448614501953125, 0.08460594940185547, 0.08444416046142578, 0.08624127960205077, 0.08508006286621093, 0.08459878540039062, 
0.1760184326171875, 0.08723353576660156, 0.08769331359863282, 0.08806092834472656, 0.08757453155517578, 0.08732978820800781, 0.08919449615478516, 0.08813977813720703, 0.08797286224365235, 0.08746189117431641, 0.08733798217773438, 0.08667750549316407, 0.08574054718017578, 0.08622182464599609, 0.08452095794677734, 0.08488345336914062, 0.08462950134277344, 0.08516403198242188, 0.08760736083984375, 0.08629344177246094, 0.08519782257080079, 0.08494694519042968, 0.08468275451660157, 0.08495308685302734, 0.08589212799072266, 0.08511488342285156, 0.08629449462890625, 0.08508927917480469, 0.08481075286865235, 0.08436326599121094, 0.08498995208740234, 0.08503705596923829, 0.08542822265625, 0.08469503784179687, 0.08443392181396485, 0.08501554870605468, 0.0846376953125, 0.08496742248535157, 0.08612351989746093, 0.08490290832519531, 0.08461516571044922, 0.08477081298828125, 0.0848005142211914, 0.08621977233886718, 0.08505651092529297, 0.08459571075439454, 0.08459878540039062, 0.08486918640136719, 0.08474515533447266, 0.08450969696044922, 0.08503501129150391, 0.0847267837524414, 0.08468889617919922, 0.08508927917480469, 0.0886118392944336, 0.08830156707763671, 0.08738508605957031, 0.08487321472167969, 0.0855920639038086, 0.08490496063232422, 0.08478208160400391, 0.08481689453125, 0.08506368255615235]",tokens/s,11.480433009606973,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1312.858112,1030.22592,0.0,383.778816,312.280064,s,10,0.275944766998291,0.0275944766998291,0.0013350624992567249,0.0271289119720459,0.027794796180725095,0.02969363794326782,0.031212711353302003,"[0.03159247970581055, 0.027166879653930665, 0.027137279510498047, 0.02737283134460449, 0.02710416030883789, 0.02712054443359375, 0.027108831405639647, 0.027081024169921874, 0.027172767639160156, 0.027087968826293947]",tokens/s,9277.218871905097,kWh,3.2455721808658853e-07,1.778424162839056e-07,7.707522954780324e-07,1.2731519298485266e-06,tokens/kWh,201075766.3702066,MB,1312.858112,1030.22592,0.0,383.778816,347.089408,s,10,16.864729003906252,1.6864729003906251,0.020323100201592524,1.6794135742187501,1.6910009521484375,1.7188808471679686,1.7411847631835937,"[1.7467607421875, 1.67984912109375, 1.6774495849609374, 1.678377685546875, 1.67897802734375, 1.6768824462890626, 1.6750291748046875, 1.684479248046875, 1.684805419921875, 1.6821175537109374]",tokens/s,37.356070165970515,kWh,1.9882176955741114e-05,1.0895638286132514e-05,4.512764553432279e-05,7.590546077619641e-05,tokens/kWh,829979.8111462949,,s,629,17.07843275070189,0.027151721384263763,0.003315017386101685,0.026595327377319337,0.0273623046875,0.02783129653930664,0.05417021270751953,"[0.029247488021850586, 0.027447296142578126, 0.028238847732543947, 0.029016063690185546, 0.02889727973937988, 0.028690431594848635, 0.028497919082641602, 0.029652992248535157, 0.028964864730834962, 0.02876518440246582, 0.029016063690185546, 0.02860851287841797, 
0.02900480079650879, 0.029263872146606446, 0.02835353660583496, 0.028241920471191406, 0.02977484893798828, 0.031457279205322264, 0.028201984405517577, 0.02774220848083496, 0.027778047561645508, 0.02750464057922363, 0.027625471115112304, 0.027877376556396483, 0.027576320648193358, 0.027623424530029295, 0.027853824615478515, 0.027623424530029295, 0.02755583953857422, 0.027467775344848632, 0.027467775344848632, 0.027464704513549806, 0.02751692771911621, 0.027671552658081053, 0.028444671630859376, 0.02751081657409668, 0.027527168273925783, 0.02693833541870117, 0.02656768035888672, 0.02656051254272461, 0.026586111068725587, 0.026575904846191406, 0.02656355285644531, 0.026597375869750976, 0.02668339157104492, 0.026562559127807618, 0.0265799674987793, 0.02705510330200195, 0.02689945602416992, 0.02673766326904297, 0.026646528244018555, 0.026635263442993166, 0.02674892807006836, 0.026631168365478516, 0.026613759994506835, 0.026697727203369142, 0.026650623321533205, 0.02710323143005371, 0.026925119400024414, 0.02659014320373535, 0.027423744201660157, 0.027077632904052733, 0.05460070419311523, 0.026619903564453123, 0.026565631866455077, 0.026629119873046874, 0.026554399490356446, 0.02660246467590332, 0.026628095626831053, 0.02656768035888672, 0.026635263442993166, 0.0266527042388916, 0.026569696426391603, 0.026598463058471678, 0.026578880310058593, 0.0265799674987793, 0.02658406448364258, 0.026687488555908204, 0.026688512802124024, 0.02656358337402344, 0.02654003143310547, 0.026614784240722656, 0.026625024795532228, 0.02652672004699707, 0.026619903564453123, 0.028004352569580077, 0.02753945541381836, 0.02736332893371582, 0.027281408309936524, 0.02712678337097168, 0.026574848175048828, 0.026603519439697267, 0.02654310417175293, 0.026565631866455077, 0.0265031681060791, 0.026557439804077147, 0.02653388786315918, 0.026556415557861326, 0.02654412841796875, 0.02655948829650879, 0.02668441581726074, 0.026625024795532228, 0.02655948829650879, 0.026588159561157225, 0.02654310417175293, 0.026572799682617186, 0.02655129623413086, 0.02656768035888672, 0.026604543685913085, 0.02655955123901367, 0.026596288681030273, 0.026562559127807618, 0.026596351623535155, 0.026594303131103517, 0.026558464050292968, 0.02655232048034668, 0.02659328079223633, 0.02651238441467285, 0.026549247741699217, 0.026588159561157225, 0.02657587242126465, 0.02659328079223633, 0.026556415557861326, 0.02650726318359375, 0.02656870460510254, 0.0541736946105957, 0.026597375869750976, 0.026587135314941408, 0.02655232048034668, 0.026550271987915038, 0.026600448608398438, 0.026652671813964843, 0.026689535140991212, 0.02655129623413086, 0.02653081512451172, 0.02658406448364258, 0.026677248001098632, 0.02651548767089844, 0.02671510314941406, 0.027616256713867186, 0.027520000457763674, 0.026843135833740234, 0.026626047134399415, 0.026536991119384765, 0.02662294387817383, 0.026621952056884765, 0.02655129623413086, 0.026534912109375, 0.026634239196777345, 0.026594303131103517, 0.02670182418823242, 0.0265799674987793, 0.02651136016845703, 0.02654003143310547, 0.02653388786315918, 0.02654412841796875, 0.026617855072021485, 0.026968063354492186, 0.02676736068725586, 0.026615808486938477, 0.02655539131164551, 0.02651545524597168, 0.026661888122558593, 0.026573823928833007, 0.026454015731811522, 0.02652470397949219, 0.026513376235961915, 0.0265031681060791, 0.02651852798461914, 0.026694656372070313, 0.026492927551269533, 0.026570751190185548, 0.026505216598510743, 0.026565631866455077, 0.026583040237426758, 0.026574848175048828, 0.02650931167602539, 
0.02651033592224121, 0.026664960861206056, 0.026570751190185548, 0.02651238441467285, 0.026582015991210937, 0.02654515266418457, 0.02649087905883789, 0.02654719924926758, 0.026638336181640625, 0.02657689666748047, 0.026598400115966796, 0.05530112075805664, 0.027283456802368163, 0.027225088119506836, 0.02709503936767578, 0.026944511413574217, 0.02655436706542969, 0.02654310417175293, 0.026695680618286134, 0.026656768798828126, 0.0265850887298584, 0.026588191986083986, 0.02655023956298828, 0.026674175262451173, 0.026564607620239256, 0.026580991744995116, 0.026611711502075194, 0.026570751190185548, 0.0265533447265625, 0.026611711502075194, 0.026570751190185548, 0.026529792785644532, 0.02669977569580078, 0.026549247741699217, 0.02660767936706543, 0.02659424018859863, 0.02671001625061035, 0.02650111961364746, 0.02655539131164551, 0.026587135314941408, 0.02655232048034668, 0.026657791137695314, 0.0265533447265625, 0.026541055679321288, 0.026558464050292968, 0.02651238441467285, 0.026514432907104493, 0.026467327117919923, 0.026565631866455077, 0.02692403221130371, 0.026690559387207033, 0.02655436706542969, 0.026600448608398438, 0.026719232559204102, 0.02657689666748047, 0.026662912368774414, 0.02651136016845703, 0.026564607620239256, 0.026537984848022462, 0.026505216598510743, 0.02651238441467285, 0.02656358337402344, 0.02652262306213379, 0.026663936614990235, 0.026497024536132813, 0.026646528244018555, 0.02652876853942871, 0.026637311935424804, 0.02657587242126465, 0.02652672004699707, 0.02657177543640137, 0.026604543685913085, 0.026583040237426758, 0.026484735488891603, 0.054171646118164066, 0.02660147285461426, 0.026550271987915038, 0.02656972885131836, 0.02654515266418457, 0.02658406448364258, 0.026532863616943358, 0.02650726318359375, 0.02657792091369629, 0.02667622375488281, 0.0265799674987793, 0.02656153678894043, 0.026580991744995116, 0.026556480407714845, 0.02659833526611328, 0.026598400115966796, 0.026611711502075194, 0.026615808486938477, 0.026514432907104493, 0.02654207992553711, 0.02656768035888672, 0.02656870460510254, 0.026633216857910157, 0.026589183807373046, 0.02657177543640137, 0.02655436706542969, 0.026660863876342773, 0.026614784240722656, 0.026558464050292968, 0.02655436706542969, 0.026617855072021485, 0.026604543685913085, 0.026554399490356446, 0.026611679077148436, 0.026650623321533205, 0.026488832473754883, 0.02629631996154785, 0.02631372833251953, 0.0275230712890625, 0.027797504425048827, 0.02753023910522461, 0.02730803108215332, 0.02698854446411133, 0.026604543685913085, 0.02653081512451172, 0.026624000549316407, 0.02660147285461426, 0.026764287948608398, 0.026658815383911134, 0.02656870460510254, 0.026557439804077147, 0.02654617691040039, 0.026639360427856446, 0.026580991744995116, 0.026534912109375, 0.02653593635559082, 0.026594303131103517, 0.026620927810668944, 0.026596351623535155, 0.02657587242126465, 0.026604543685913085, 0.026604543685913085, 0.02653388786315918, 0.05420851135253906, 0.0265677433013916, 0.02660960006713867, 0.02667519950866699, 0.026580991744995116, 0.026594303131103517, 0.026562559127807618, 0.026665983200073243, 0.026590208053588867, 0.02653900718688965, 0.026637311935424804, 0.026638336181640625, 0.02654617691040039, 0.026590208053588867, 0.026562559127807618, 0.026696704864501954, 0.026629119873046874, 0.02652774429321289, 0.02652057647705078, 0.02653081512451172, 0.0265533447265625, 0.02654207992553711, 0.026694656372070313, 0.02658406448364258, 0.026640384674072266, 0.02657792091369629, 0.026534912109375, 0.02651545524597168, 
0.026521600723266602, 0.02656153678894043, 0.02651852798461914, 0.026580991744995116, 0.02653388786315918, 0.026529792785644532, 0.0265799674987793, 0.026592256546020508, 0.026693632125854492, 0.02651545524597168, 0.026639360427856446, 0.027520000457763674, 0.02674380874633789, 0.026613759994506835, 0.026694656372070313, 0.026521600723266602, 0.026641408920288087, 0.026607616424560547, 0.026558464050292968, 0.026610687255859376, 0.026630144119262695, 0.02669260787963867, 0.026634271621704102, 0.02657072067260742, 0.02654003143310547, 0.02657587242126465, 0.026605567932128905, 0.026534912109375, 0.026697727203369142, 0.026587135314941408, 0.02652060890197754, 0.026654687881469727, 0.026598400115966796, 0.026714111328125, 0.02658406448364258, 0.054166526794433595, 0.02671615982055664, 0.026641408920288087, 0.026625024795532228, 0.0265994873046875, 0.026596288681030273, 0.02654719924926758, 0.026572799682617186, 0.026620927810668944, 0.026605567932128905, 0.02669977569580078, 0.02654316711425781, 0.026589120864868164, 0.026582015991210937, 0.026604543685913085, 0.026550271987915038, 0.026619903564453123, 0.02656051254272461, 0.026521600723266602, 0.026631168365478516, 0.02628505516052246, 0.02677350425720215, 0.026702848434448243, 0.02657177543640137, 0.02660966491699219, 0.02656358337402344, 0.02651545524597168, 0.0265533447265625, 0.026504192352294922, 0.02660147285461426, 0.02653900718688965, 0.026639360427856446, 0.0265850887298584, 0.026626047134399415, 0.02654003143310547, 0.026556447982788087, 0.026569696426391603, 0.026558464050292968, 0.02659328079223633, 0.026556415557861326, 0.02654617691040039, 0.02668339157104492, 0.026589183807373046, 0.026498048782348634, 0.026669055938720702, 0.026529792785644532, 0.026572799682617186, 0.02655129623413086, 0.0265164794921875, 0.026590208053588867, 0.02657792091369629, 0.026616832733154298, 0.026598400115966796, 0.026660863876342773, 0.026573823928833007, 0.026595327377319337, 0.026570751190185548, 0.0265164794921875, 0.02639366340637207, 0.026553279876708986, 0.02658406448364258, 0.026657791137695314, 0.02649497604370117, 0.05412044906616211, 0.026598400115966796, 0.026583040237426758, 0.026651647567749022, 0.02652262306213379, 0.026586111068725587, 0.02655129623413086, 0.026594303131103517, 0.02712678337097168, 0.02734489631652832, 0.026644479751586913, 0.026637311935424804, 0.026594303131103517, 0.026646528244018555, 0.026602495193481446, 0.027348991394042968, 0.027696128845214843, 0.027305984497070314, 0.027241472244262696, 0.02698854446411133, 0.026565631866455077, 0.026608640670776368, 0.027296768188476563, 0.027320320129394532, 0.027320320129394532, 0.027268096923828124, 0.02708684730529785, 0.026829824447631836, 0.026608640670776368, 0.02674073600769043, 0.02660147285461426, 0.026590208053588867, 0.02655129623413086, 0.02654207992553711, 0.02661075210571289, 0.0265860481262207, 0.02667622375488281, 0.026572799682617186, 0.026582015991210937, 0.026586111068725587, 0.02653081512451172, 0.026566656112670898, 0.02655232048034668, 0.026702848434448243, 0.026602495193481446, 0.02654617691040039, 0.026550271987915038, 0.026645503997802734, 0.026550271987915038, 0.026616832733154298, 0.02660966491699219, 0.026596351623535155, 0.026595327377319337, 0.026586111068725587, 0.02654617691040039, 0.02654617691040039, 0.026786815643310546, 0.02656153678894043, 0.026608640670776368, 0.026602495193481446, 0.026529792785644532, 0.026586111068725587, 0.026638336181640625, 0.05429862213134766, 0.026634239196777345, 0.026537984848022462, 
0.026639360427856446, 0.02672127914428711, 0.02692915153503418, 0.027014144897460936, 0.026755071640014647, 0.026573823928833007, 0.026562559127807618, 0.026572799682617186, 0.026690559387207033, 0.02657177543640137, 0.026617855072021485, 0.026610687255859376, 0.026606592178344726, 0.026598400115966796, 0.026573823928833007, 0.026597375869750976, 0.026514432907104493, 0.026582015991210937, 0.02659328079223633, 0.026673152923583986, 0.027404319763183593, 0.026896352767944335, 0.026578943252563478, 0.026537984848022462, 0.026552352905273437, 0.02658710479736328, 0.026514432907104493, 0.02651238441467285, 0.026514432907104493, 0.02652262306213379, 0.02651238441467285, 0.02653081512451172, 0.026626047134399415, 0.026565631866455077, 0.026716224670410155, 0.026623935699462892, 0.026580991744995116, 0.02654617691040039, 0.026556415557861326, 0.026479616165161132, 0.02652876853942871, 0.026670080184936523, 0.026643455505371092, 0.026827775955200195, 0.026611711502075194, 0.027028480529785157, 0.027226112365722657, 0.027289600372314454, 0.02755583953857422, 0.0273623046875, 0.0273623046875, 0.02728550338745117, 0.02791116714477539, 0.027173887252807616, 0.0267775993347168, 0.026597375869750976, 0.02654515266418457, 0.02656153678894043, 0.026599424362182617, 0.026617855072021485, 0.05418803024291992, 0.026620927810668944, 0.026647552490234375, 0.02657689666748047, 0.02651238441467285, 0.026595327377319337, 0.026607616424560547, 0.026603519439697267, 0.026594303131103517, 0.026619903564453123, 0.02672230339050293, 0.026630144119262695, 0.026639360427856446, 0.02667622375488281, 0.026677248001098632, 0.026631168365478516, 0.026617855072021485, 0.026565631866455077, 0.026587135314941408, 0.0265164794921875, 0.026616832733154298, 0.026588159561157225, 0.026612735748291014, 0.026612735748291014, 0.02656768035888672, 0.026558464050292968, 0.026629119873046874, 0.026508287429809572, 0.026702848434448243, 0.02671615982055664, 0.0265533447265625, 0.026586111068725587, 0.026626047134399415, 0.02655232048034668, 0.02658406448364258, 0.02654412841796875, 0.02653081512451172, 0.0265799674987793, 0.026612735748291014, 0.026603519439697267, 0.02635366439819336, 0.026727424621582032, 0.02656153678894043, 0.026595327377319337, 0.026583040237426758, 0.026492927551269533, 0.026572799682617186, 0.02653593635559082, 0.026678272247314453, 0.02653900718688965, 0.02690559959411621, 0.02734284782409668, 0.02688102340698242, 0.026624000549316407, 0.02656358337402344, 0.026578943252563478, 0.027030527114868166, 0.027415552139282227, 0.02731724739074707, 0.027236352920532225, 0.027314176559448244, 0.027259904861450194, 0.027412479400634765]",tokens/s,36.83007739537158,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = 
Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: CodeGenForCausalLM does not support Flash Attention 2.0 yet. Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpirdn8wy3/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", 
line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemm.py"", line 102, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1610.354688,2254.962688,0.0,1608.515584,1463.6928,s,10,1.209205665588379,0.1209205665588379,0.0011005379634326184,0.1205662727355957,0.1215480598449707,0.12274063606262207,0.12369469703674317,"[0.12393321228027344, 0.12128304290771484, 0.12008969879150391, 0.12004278564453125, 0.12018284606933594, 0.12025001525878906, 0.12034620666503906, 0.12078633880615235, 0.12113209533691406, 0.121159423828125]",tokens/s,2117.0922969124094,kWh,1.4171419990441157e-06,7.762810317477228e-07,6.305967346357218e-06,8.499390377149056e-06,tokens/kWh,30119807.261502665,MB,1610.952704,2254.962688,0.0,1608.515584,1560.974848,s,10,70.14807763671874,7.0148077636718735,0.00472223733563277,7.012995361328125,7.0220806640625,7.0226084472656245,7.023030673828125,"[7.017875, 7.0128837890625, 7.0119638671875, 7.01192919921875, 7.01310693359375, 7.0105234375, 7.0078984375, 7.02196337890625, 7.01679736328125, 
7.02313623046875]",tokens/s,8.981001635748731,kWh,8.273579535030184e-05,4.5345370896494765e-05,0.0003660393860852451,0.0004941205523320418,tokens/kWh,127499.25034015773,,s,629,71.1210855636597,0.11307008833650187,0.01439647456429658,0.11126681518554687,0.11167272796630859,0.11198136138916015,0.23187720581054685,"[0.11166719818115234, 0.11130470275878906, 0.11149517059326172, 0.11213107299804688, 0.11133849334716797, 0.11115827178955077, 0.11096371459960938, 0.11128832244873046, 0.11117977905273438, 0.11118182373046875, 0.11122688293457031, 0.1112647705078125, 0.11119513702392578, 0.11100774383544922, 0.111087646484375, 0.11108963012695312, 0.11133439636230469, 0.11160678100585937, 0.11127910614013672, 0.1123737564086914, 0.1114972152709961, 0.11121971130371094, 0.11113676452636718, 0.1111009292602539, 0.11132621002197265, 0.11129344177246094, 0.11116851043701172, 0.11124531555175782, 0.1112442855834961, 0.11126681518554687, 0.11126579284667969, 0.11136102294921875, 0.11114291381835938, 0.11164262390136719, 0.1114081268310547, 0.11137843322753906, 0.11116134643554687, 0.11157810974121093, 0.1113333740234375, 0.11127091217041016, 0.11125965118408203, 0.11129241943359375, 0.11120745849609374, 0.11137942504882813, 0.11125247955322265, 0.11137229156494141, 0.11129138946533203, 0.11126988983154297, 0.11214335632324218, 0.11184333038330078, 0.11213414764404298, 0.11166515350341796, 0.11144300842285157, 0.11137529754638673, 0.11148185729980468, 0.11138457489013671, 0.11156582641601563, 0.1114777603149414, 0.1114081268310547, 0.11125965118408203, 0.11127603149414063, 0.11128425598144531, 0.2332733154296875, 0.11126271820068359, 0.11134259033203125, 0.1112136001586914, 0.11113475036621094, 0.1112872314453125, 0.11119516754150391, 0.11118179321289062, 0.11123712158203125, 0.11110195159912109, 0.11118694305419922, 0.11108454132080078, 0.11117158508300781, 0.1111357421875, 0.1112248306274414, 0.11135078430175781, 0.1113016357421875, 0.11121049499511719, 0.11146649932861329, 0.11143270111083985, 0.11139379119873047, 0.11125862121582031, 0.11111116790771484, 0.11113369750976562, 0.11115213012695313, 0.11124736022949219, 0.11156070709228516, 0.11131289672851563, 0.11127193450927735, 0.11115929412841796, 0.11136819458007813, 0.11125145721435546, 0.11122073364257813, 0.11129753875732422, 0.11136614227294922, 0.11128217315673829, 0.11125145721435546, 0.11142047882080078, 0.11116230773925781, 0.11125759887695312, 0.1110487060546875, 0.11122994995117187, 0.11132518768310547, 0.1113026885986328, 0.11126576232910156, 0.11120947265625, 0.11332198333740234, 0.11140608215332032, 0.11135590362548828, 0.11126579284667969, 0.11120845031738281, 0.11143065643310547, 0.1113917465209961, 0.11126988983154297, 0.11125350189208984, 0.11131187438964844, 0.11131903839111328, 0.11127091217041016, 0.11129446411132812, 0.11129446411132812, 0.11138969421386719, 0.1114603500366211, 0.11127808380126954, 0.23181004333496094, 0.11102105712890625, 0.1111910400390625, 0.11175424194335938, 0.11164672088623047, 0.11138969421386719, 0.11125247955322265, 0.11111219024658203, 0.11101798248291016, 0.11089920043945313, 0.11102413177490235, 0.11136723327636719, 0.1116630401611328, 0.11121561431884766, 0.11134259033203125, 0.11122585296630859, 0.11111116790771484, 0.11116339111328125, 0.11112857818603515, 0.11112242889404297, 0.11111014556884766, 0.11112966156005859, 0.11107218933105469, 0.11174297332763672, 0.11127603149414063, 0.11119821166992187, 0.11121663665771485, 0.11118080139160157, 0.11127808380126954, 0.1111583023071289, 
0.11111011505126953, 0.11111014556884766, 0.11128832244873046, 0.11131903839111328, 0.1111562271118164, 0.11147058868408204, 0.11126681518554687, 0.1112279052734375, 0.11108051300048828, 0.11122681427001953, 0.11114086151123047, 0.11119923400878906, 0.11140201568603515, 0.11115004730224609, 0.1115494384765625, 0.11138969421386719, 0.11126886749267578, 0.11192524719238281, 0.111351806640625, 0.11120127868652344, 0.11122994995117187, 0.11163238525390624, 0.11214540863037109, 0.11136921691894532, 0.11118592071533204, 0.11130879974365235, 0.11120845031738281, 0.11130470275878906, 0.1112442855834961, 0.11167743682861328, 0.11148799896240234, 0.11126988983154297, 0.11132012939453124, 0.23181817626953125, 0.11116236877441406, 0.11126585388183594, 0.11114182281494141, 0.11116748809814453, 0.1111695327758789, 0.11111014556884766, 0.11164057922363281, 0.11159449768066407, 0.11120127868652344, 0.11121766662597657, 0.11119206237792968, 0.11146649932861329, 0.11107635498046875, 0.11124121856689453, 0.11109375762939454, 0.11119718170166015, 0.11117465972900391, 0.11109478759765624, 0.11113471984863281, 0.11132415771484375, 0.11108249664306641, 0.11122585296630859, 0.11124838256835938, 0.11113267517089843, 0.11119821166992187, 0.11118386840820313, 0.11122380828857421, 0.11127808380126954, 0.11117056274414062, 0.11127500915527344, 0.111388671875, 0.11147878265380859, 0.111246337890625, 0.11121868896484376, 0.1112616958618164, 0.1112811508178711, 0.11174400329589844, 0.11160575866699218, 0.11130368041992188, 0.11123001861572265, 0.11139884948730469, 0.11166207885742188, 0.11131187438964844, 0.11219455718994141, 0.11126377868652344, 0.11129955291748046, 0.11126886749267578, 0.11122278594970703, 0.11126886749267578, 0.1113169937133789, 0.11135282897949218, 0.11117362976074219, 0.1112442855834961, 0.11130060577392578, 0.11121766662597657, 0.11142655944824219, 0.111246337890625, 0.11133132934570313, 0.11140409851074219, 0.11129542541503906, 0.11128422546386718, 0.11133952331542969, 0.23190016174316405, 0.11121459197998047, 0.11119718170166015, 0.11123302459716797, 0.11124736022949219, 0.1111551971435547, 0.1117624282836914, 0.11134259033203125, 0.11116851043701172, 0.11123609924316406, 0.11112960052490234, 0.11113983917236328, 0.11123097229003906, 0.11135692596435547, 0.11133030700683594, 0.1111756820678711, 0.11099750518798829, 0.11165388488769531, 0.11119923400878906, 0.11110399627685547, 0.11134361267089844, 0.11165695953369141, 0.11130470275878906, 0.11117874908447266, 0.11137843322753906, 0.11113881683349609, 0.11112754821777343, 0.11104460906982422, 0.11110195159912109, 0.1110456314086914, 0.11133542633056641, 0.11121971130371094, 0.11121459197998047, 0.11156275177001954, 0.11139590454101563, 0.11135379028320312, 0.11121868896484376, 0.11118592071533204, 0.11130982208251954, 0.11158528137207031, 0.11178291320800782, 0.11122176361083984, 0.11130470275878906, 0.11126681518554687, 0.111246337890625, 0.11125452423095702, 0.11126374053955078, 0.11133030700683594, 0.11145932769775391, 0.11136000061035156, 0.11132621002197265, 0.11127705383300782, 0.11152998352050782, 0.1112442855834961, 0.11130265808105469, 0.11128729248046874, 0.11133952331542969, 0.11251200103759766, 0.11131084442138672, 0.1113221435546875, 0.11128931427001953, 0.11129036712646484, 0.11130879974365235, 0.2321817626953125, 0.11112652587890624, 0.11125247955322265, 0.11120230102539062, 0.11118284606933594, 0.11110912322998047, 0.11104768371582031, 0.11113471984863281, 0.11123814392089844, 0.11114495849609375, 0.11111321258544922, 
0.11121971130371094, 0.11117874908447266, 0.11118592071533204, 0.11127603149414063, 0.11113369750976562, 0.11118796539306641, 0.11104364776611328, 0.11120121765136719, 0.11109683227539062, 0.11134873962402343, 0.11129241943359375, 0.11124531555175782, 0.11129138946533203, 0.1111695327758789, 0.11111936187744141, 0.11120230102539062, 0.11098214721679688, 0.11109478759765624, 0.11121772766113282, 0.11126573181152344, 0.11151564788818359, 0.11180134582519531, 0.1111551971435547, 0.11124018859863281, 0.11106508636474609, 0.11120845031738281, 0.11125452423095702, 0.11243007659912109, 0.11165286254882813, 0.11120947265625, 0.11119308471679687, 0.11124224090576172, 0.11133747100830078, 0.1112647705078125, 0.11123200225830078, 0.11110912322998047, 0.11123097229003906, 0.11112140655517579, 0.11116134643554687, 0.11120230102539062, 0.11124838256835938, 0.11131597137451171, 0.11115929412841796, 0.11126681518554687, 0.11124326324462891, 0.11124940490722657, 0.11136819458007813, 0.1113364486694336, 0.11219558715820313, 0.11167436981201172, 0.11133542633056641, 0.11132006072998046, 0.23260365295410157, 0.1113016357421875, 0.11109069061279297, 0.11130675506591797, 0.11117772674560547, 0.11111116790771484, 0.11099852752685548, 0.11096575927734376, 0.11118592071533204, 0.11106201934814453, 0.11124940490722657, 0.1109964828491211, 0.11128729248046874, 0.11114086151123047, 0.11112041473388672, 0.11092476654052734, 0.11116646575927734, 0.11122585296630859, 0.11110297393798828, 0.11115929412841796, 0.1110282211303711, 0.11134361267089844, 0.11134054565429688, 0.11161497497558594, 0.11118796539306641, 0.11125145721435546, 0.1112965087890625, 0.11122898864746093, 0.11125344085693359, 0.11107839965820313, 0.1114081268310547, 0.11136511993408203, 0.11113164520263671, 0.11111936187744141, 0.11107635498046875, 0.11111628723144532, 0.1109964828491211, 0.11101900482177735, 0.11115827178955077, 0.11171942138671875, 0.11166413116455078, 0.11109580993652343, 0.11116649627685547, 0.11109782409667969, 0.11119821166992187, 0.11115110778808594, 0.11119206237792968, 0.11109580993652343, 0.1113333740234375, 0.111388671875, 0.11111014556884766, 0.11112242889404297, 0.11119926452636719, 0.11117769622802734, 0.11110707092285156, 0.11125350189208984, 0.111246337890625, 0.11176140594482421, 0.11179315185546874, 0.11137229156494141, 0.1111234588623047, 0.1113917465209961, 0.11129138946533203, 0.23285554504394532, 0.11228876495361328, 0.1119969253540039, 0.11132012939453124, 0.1113087387084961, 0.1110118408203125, 0.11107635498046875, 0.11119821166992187, 0.11114905548095703, 0.11114291381835938, 0.11154227447509765, 0.111283203125, 0.11145011138916015, 0.11127091217041016, 0.11112140655517579, 0.11115110778808594, 0.11166719818115234, 0.11149517059326172, 0.11157708740234375, 0.11116441345214843, 0.11162931060791016, 0.11222220611572266, 0.11127398681640625, 0.11119721221923828, 0.11121660614013672, 0.11111833953857422, 0.11153817749023437, 0.11167436981201172, 0.11123817443847656, 0.11156681823730469, 0.11157299041748046, 0.11123200225830078, 0.11113267517089843, 0.11120339202880859, 0.11119302368164062, 0.11129446411132812, 0.11130265808105469, 0.11117874908447266, 0.11140914916992188, 0.11178803253173829, 0.11143987274169923, 0.111388671875, 0.11134054565429688, 0.11126271820068359, 0.11172557067871093, 0.11178189086914063, 0.11163648223876953, 0.11228876495361328, 0.11165491485595704, 0.11137843322753906, 0.11123712158203125, 0.11235433959960937, 0.11163645172119141, 0.11138969421386719, 0.11162419128417969, 0.11138253021240234, 
0.11193548583984375, 0.11135289764404296, 0.11133433532714844, 0.11140198516845704, 0.11156582641601563, 0.11123814392089844, 0.11139584350585938, 0.23310745239257813, 0.11154329681396484, 0.11122585296630859, 0.11103334045410156, 0.11112242889404297, 0.11111321258544922, 0.11110502624511719, 0.11125759887695312, 0.11132006072998046, 0.11105689239501954, 0.11160371398925781, 0.11119308471679687, 0.11138662719726562, 0.11129039764404297, 0.11160777282714844, 0.1111203842163086, 0.11147058868408204, 0.11210137939453126, 0.11113471984863281, 0.11226214599609376, 0.11134054565429688, 0.11112960052490234, 0.11109683227539062, 0.11101696014404297, 0.11132927703857422, 0.11150438690185546, 0.11192934417724609, 0.11167231750488281, 0.11156684875488282, 0.11118489837646485, 0.11123814392089844, 0.11113683319091797, 0.11117254638671875, 0.11120333099365234, 0.11124940490722657, 0.11127398681640625, 0.11138355255126953, 0.11188531494140624, 0.11153510284423829, 0.11116134643554687, 0.11122892761230468, 0.11117874908447266, 0.11126892852783203, 0.11124422454833985, 0.11132723236083984, 0.111246337890625, 0.1125038070678711, 0.11145523071289062, 0.1111695327758789, 0.11121663665771485, 0.11127193450927735, 0.11120537567138672, 0.11170098876953125, 0.11128729248046874, 0.11114701080322266, 0.112110595703125, 0.11148185729980468, 0.11123916625976563, 0.1111910400390625, 0.11130879974365235, 0.11141222381591796, 0.11127603149414063, 0.11140608215332032, 0.23328665161132814, 0.11162009429931641, 0.11125350189208984, 0.11111116790771484, 0.11114086151123047, 0.1111234588623047, 0.11114803314208985, 0.11112454223632813, 0.11118073272705079, 0.11184435272216797, 0.11149311828613281, 0.1110835189819336, 0.11113676452636718, 0.11192729949951172, 0.11148902130126953, 0.1111900177001953, 0.1112074203491211, 0.11114393615722656, 0.11168256378173828, 0.11129446411132812, 0.11139584350585938, 0.11125043487548827, 0.11120435333251953, 0.11126886749267578, 0.11185561370849609, 0.11130675506591797, 0.11252838134765625, 0.11186688232421875, 0.11209420776367188, 0.11134873962402343, 0.11136819458007813, 0.11132006072998046, 0.11117056274414062, 0.1115688934326172, 0.111425537109375, 0.11140096282958985, 0.11195801544189453, 0.11133439636230469, 0.11169280242919923, 0.11158016204833984, 0.11150745391845703, 0.11149209594726563, 0.11174915313720703, 0.11140297698974609, 0.11168153381347656, 0.11251609802246093, 0.11149823760986328, 0.1112125473022461, 0.11124947357177735, 0.11128211212158202, 0.11113369750976562, 0.11132518768310547, 0.11279666900634766, 0.11159654235839844, 0.11184844970703126, 0.11136102294921875, 0.11125350189208984, 0.11140096282958985, 0.11135078430175781, 0.11131084442138672, 0.11126579284667969, 0.11146348571777344, 0.111246337890625]",tokens/s,8.844071979708314,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA 
A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2221.408256,3330.801664,0.0,2684.35456,2447.595008,s,10,2.311121505737305,0.2311121505737305,0.0008602880707090342,0.23093637084960938,0.23197196044921875,0.23249637908935547,0.23291591400146486,"[0.2330207977294922, 0.23185542297363282, 0.23051023864746092, 0.23036329650878906, 0.2304237823486328, 0.2300004119873047, 0.23067584228515625, 0.2311968994140625, 0.231619384765625, 0.23145542907714844]",tokens/s,1107.6873256749418,kWh,2.719861554979074e-06,1.4903567604051204e-06,1.2427491002590687e-05,1.6637709317974884e-05,tokens/kWh,15386733.54050159,MB,2222.546944,3330.801664,0.0,2684.35456,2597.68064,s,10,135.98273144531248,13.598273144531248,0.005626597165862265,13.59927783203125,13.6021646484375,13.60570927734375,13.60854498046875,"[13.60925390625, 13.5985986328125, 13.5982265625, 13.6012080078125, 13.59995703125, 13.5903544921875, 13.5950361328125, 13.600318359375, 13.601376953125, 13.5884013671875]",tokens/s,4.63294120734267,kWh,0.00016051297066488652,8.797306729544289e-05,0.0007245357930120089,0.0009730218309723382,tokens/kWh,64746.74873126358,,s,629,137.8431466979981,0.21914649713513207,0.027476947243743017,0.21579168701171875,0.2161471496582031,0.2163871795654297,0.4466235400390625,"[0.2167510986328125, 0.21612850952148438, 0.2157936706542969, 0.21595852661132814, 0.21579776000976564, 0.21578341674804688, 0.21598515319824219, 0.21577626037597655, 0.21600563049316407, 0.21667738342285156, 0.2159656982421875, 0.21651968383789064, 0.21600767517089844, 0.21581004333496093, 0.21593600463867188, 0.21574656677246093, 0.2157589111328125, 0.21606707763671876, 0.2157782440185547, 0.21572096252441406, 0.2157322235107422, 0.21579058837890625, 0.21588172912597656, 0.21599232482910155, 0.2161080322265625, 0.21578341674804688, 0.21588172912597656, 0.21578341674804688, 0.21624217224121095, 0.21602406311035155, 0.2159482879638672, 0.21581619262695312, 0.21592169189453125, 0.21585302734375, 0.2157742004394531, 0.21580697631835938, 0.2158233642578125, 0.21585305786132813, 0.216163330078125, 0.2158858184814453, 0.21590425109863282, 0.2159718475341797, 0.2159482879638672, 0.21587660217285157, 0.2160394287109375, 0.21591244506835938, 0.2157660217285156, 0.21589913940429686, 0.21592576599121094, 0.21606399536132812, 0.21596365356445313, 0.2158428192138672, 0.21607833862304687, 0.21651866149902343, 0.21638246154785157, 0.21635379028320312, 0.21680844116210937, 0.21638656616210938, 0.21633331298828126, 0.21572709655761718, 0.21570457458496095, 0.21581517028808594, 0.448078857421875, 0.21560012817382812, 0.21530323791503905, 0.21556626892089845, 0.21550796508789063, 0.2154915771484375, 0.21565951538085937, 0.21555711364746094, 0.21552742004394532, 0.21577728271484375, 0.2157127685546875, 0.21635789489746093, 0.21575782775878907, 0.21568818664550782, 0.2154915771484375, 0.2156810302734375, 0.21565440368652344, 0.21555404663085936, 0.21562060546875, 0.21577728271484375, 0.2157557830810547, 0.21564006042480469, 0.21555815124511718, 0.21571583557128907, 0.2157373504638672, 0.21600973510742189, 0.21567079162597655, 0.2159482879638672, 0.21565235900878907, 0.2156820526123047, 0.21579469299316406, 0.21556941223144532, 0.215583740234375, 0.21569842529296876, 0.21576191711425782, 0.21580294799804686, 0.21571781921386718, 0.215583740234375, 0.21599334716796875, 0.2168739776611328, 0.21618482971191405, 0.21625753784179688, 0.21591142272949218, 0.21590835571289063, 0.21615206909179688, 0.21587557983398437, 0.21748121643066406, 0.21573324584960937, 
0.21585816955566406, 0.21574758911132813, 0.21579161071777345, 0.21602613830566406, 0.21589602661132812, 0.21573837280273436, 0.21600767517089844, 0.2161459197998047, 0.216195068359375, 0.21681561279296874, 0.21595545959472656, 0.2159800262451172, 0.21614183044433594, 0.21579263305664062, 0.21579168701171875, 0.44681414794921875, 0.21564927673339843, 0.21552024841308592, 0.2156093444824219, 0.21581517028808594, 0.21569024658203126, 0.21543324279785156, 0.2153850555419922, 0.21555917358398438, 0.21604454040527343, 0.2156441650390625, 0.21566259765625, 0.2157373504638672, 0.21574348449707031, 0.2156165771484375, 0.2157229461669922, 0.21567692565917967, 0.21569024658203126, 0.2158540802001953, 0.21572198486328126, 0.2158008270263672, 0.21594009399414063, 0.21594009399414063, 0.2173450164794922, 0.21601280212402343, 0.21582643127441406, 0.21562265014648438, 0.216121337890625, 0.21586329650878905, 0.21575065612792968, 0.21581721496582032, 0.21577626037597655, 0.21595135498046875, 0.21587046813964844, 0.2158223419189453, 0.21579263305664062, 0.21578854370117187, 0.21617765808105469, 0.21579571533203126, 0.2159615936279297, 0.2160906219482422, 0.2161459197998047, 0.21604864501953125, 0.2158551025390625, 0.2156943359375, 0.21593498229980468, 0.2158254089355469, 0.21669273376464843, 0.21583258056640625, 0.21568307495117187, 0.21580288696289063, 0.21581210327148437, 0.21588070678710938, 0.21590118408203124, 0.2158018493652344, 0.2157803497314453, 0.2158192596435547, 0.2158192596435547, 0.2157670440673828, 0.21574758911132813, 0.21588890075683595, 0.2157137908935547, 0.21567079162597655, 0.44661248779296875, 0.215657470703125, 0.21567999267578125, 0.21604351806640626, 0.2158858184814453, 0.21600665283203124, 0.21593600463867188, 0.2156145324707031, 0.21560723876953125, 0.21591346740722656, 0.21581619262695312, 0.21606809997558593, 0.216342529296875, 0.21595750427246094, 0.21586636352539063, 0.21600154113769532, 0.2158540802001953, 0.21593907165527343, 0.21594522094726562, 0.21608345031738282, 0.21599334716796875, 0.21589401245117187, 0.21608038330078125, 0.21600665283203124, 0.21591448974609376, 0.21582131958007814, 0.21565542602539062, 0.2157004852294922, 0.21572921752929688, 0.21575570678710937, 0.215878662109375, 0.21577113342285156, 0.2156390380859375, 0.21572709655761718, 0.21576191711425782, 0.215878662109375, 0.21596774291992188, 0.21590835571289063, 0.21587660217285157, 0.21585305786132813, 0.2158356475830078, 0.21571685791015624, 0.21579263305664062, 0.21578956604003907, 0.21685247802734375, 0.2158223419189453, 0.21567692565917967, 0.21570661926269533, 0.21589503479003908, 0.21621554565429688, 0.2161643524169922, 0.21638758850097656, 0.215878662109375, 0.21575885009765625, 0.21584588623046874, 0.21589401245117187, 0.2158745574951172, 0.21597900390625, 0.21583769226074218, 0.21566361999511718, 0.2158182373046875, 0.21573017883300782, 0.21579776000976564, 0.44662783813476564, 0.21568511962890624, 0.2158233642578125, 0.21589605712890625, 0.2156615753173828, 0.21600154113769532, 0.21570355224609375, 0.2157639617919922, 0.21581312561035157, 0.21569024658203126, 0.215552001953125, 0.21569638061523438, 0.21593087768554686, 0.2157076416015625, 0.21581004333496093, 0.21580902099609375, 0.21563699340820314, 0.21653094482421875, 0.21587660217285157, 0.21565440368652344, 0.21584588623046874, 0.2159288330078125, 0.21573426818847657, 0.21571583557128907, 0.21572096252441406, 0.21561036682128906, 0.21595750427246094, 0.21581210327148437, 0.215841796875, 0.21570559692382812, 0.21621452331542967, 
0.2162554931640625, 0.216015869140625, 0.21625958251953126, 0.21600767517089844, 0.2159052734375, 0.2159800262451172, 0.21584077453613282, 0.21585714721679689, 0.2158612518310547, 0.21581517028808594, 0.2157496337890625, 0.21570252990722658, 0.21573837280273436, 0.21577215576171874, 0.21601689147949218, 0.21586534118652342, 0.21566053771972657, 0.21617971801757813, 0.215804931640625, 0.2159964141845703, 0.215910400390625, 0.21567692565917967, 0.21574143981933594, 0.215993408203125, 0.21583967590332032, 0.21577626037597655, 0.21723341369628907, 0.2157936706542969, 0.21567181396484375, 0.21573017883300782, 0.2158602294921875, 0.21591346740722656, 0.44625918579101564, 0.2154598388671875, 0.21547109985351562, 0.215942138671875, 0.2155018310546875, 0.2154967041015625, 0.21560525512695314, 0.21534719848632813, 0.2155335693359375, 0.21555609130859374, 0.21548646545410155, 0.21554893493652344, 0.21556838989257812, 0.2154239959716797, 0.21581210327148437, 0.21597080993652343, 0.2159831085205078, 0.21576191711425782, 0.21572813415527345, 0.21552543640136718, 0.21562054443359374, 0.21571994018554688, 0.21557862854003906, 0.21559500122070313, 0.21570867919921874, 0.21560115051269532, 0.21548851013183593, 0.21564210510253906, 0.21561856079101563, 0.21550079345703124, 0.2157137908935547, 0.21574348449707031, 0.21571481323242186, 0.21626162719726563, 0.21590016174316407, 0.2157127685546875, 0.21570867919921874, 0.21562982177734374, 0.21705215454101562, 0.21569229125976563, 0.21560012817382812, 0.2157373504638672, 0.21579058837890625, 0.21566464233398439, 0.21566361999511718, 0.21576191711425782, 0.21585101318359376, 0.21570867919921874, 0.21573529052734375, 0.21557554626464845, 0.2158305206298828, 0.2157742004394531, 0.2156083221435547, 0.2155704345703125, 0.21572813415527345, 0.21567082214355468, 0.21573321533203124, 0.2156513214111328, 0.21562777709960937, 0.21566265869140624, 0.21600965881347656, 0.21582028198242187, 0.21637837219238282, 0.4470773620605469, 0.2156072998046875, 0.21567079162597655, 0.21566566467285156, 0.21550079345703124, 0.21559091186523438, 0.21565542602539062, 0.21559500122070313, 0.2156697540283203, 0.21570661926269533, 0.21575474548339843, 0.21560426330566407, 0.2156011199951172, 0.21565235900878907, 0.21573939514160156, 0.2161090545654297, 0.2160199737548828, 0.21577523803710938, 0.21591552734375, 0.21555815124511718, 0.2156441650390625, 0.21569842529296876, 0.21553868103027343, 0.21665382385253906, 0.21579571533203126, 0.21555711364746094, 0.2155888671875, 0.21576499938964844, 0.215762939453125, 0.21582028198242187, 0.2158582458496094, 0.2157608337402344, 0.21596368408203126, 0.21594313049316408, 0.2157445068359375, 0.2157639617919922, 0.21581414794921874, 0.21576502990722657, 0.21569635009765625, 0.215762939453125, 0.2158223419189453, 0.21592268371582032, 0.21613157653808593, 0.21586329650878905, 0.21606501770019532, 0.21634661865234375, 0.21581517028808594, 0.21568716430664062, 0.215804931640625, 0.21572096252441406, 0.215689208984375, 0.21578239440917968, 0.21577523803710938, 0.21575167846679688, 0.21579776000976564, 0.21582028198242187, 0.21593600463867188, 0.2157178955078125, 0.21569024658203126, 0.21573324584960937, 0.21574758911132813, 0.21596876525878905, 0.21589605712890625, 0.44737637329101565, 0.21560012817382812, 0.2154035186767578, 0.21556224060058593, 0.21559091186523438, 0.21557760620117186, 0.2155878448486328, 0.2156195831298828, 0.21551922607421875, 0.21568511962890624, 0.21572813415527345, 0.21568818664550782, 0.21565542602539062, 0.21562265014648438, 
0.21565440368652344, 0.21580799865722655, 0.21636402893066406, 0.2157178955078125, 0.2158018493652344, 0.21578341674804688, 0.21576499938964844, 0.21569740295410156, 0.21566464233398439, 0.21570970153808594, 0.2157936706542969, 0.21613772583007812, 0.21595237731933595, 0.21639680480957033, 0.21639474487304688, 0.21601791381835939, 0.2160025634765625, 0.21584999084472656, 0.2159288330078125, 0.21633024597167969, 0.21584793090820312, 0.2159964141845703, 0.21607936096191407, 0.2160148468017578, 0.21588992309570312, 0.21595852661132814, 0.21601689147949218, 0.21620632934570314, 0.21618380737304688, 0.216595458984375, 0.21603021240234374, 0.21584690856933594, 0.21581619262695312, 0.21571994018554688, 0.21570252990722658, 0.21567692565917967, 0.21569638061523438, 0.21571072387695311, 0.2158970947265625, 0.2156810302734375, 0.2158981170654297, 0.2158602294921875, 0.21611932373046874, 0.21608546447753907, 0.21588479614257813, 0.21581004333496093, 0.21603225708007812, 0.21623910522460937, 0.21606809997558593, 0.44815972900390627, 0.21594316101074218, 0.21585305786132813, 0.21599130249023438, 0.21577932739257813, 0.21586534118652342, 0.21586431884765625, 0.21613055419921876, 0.21584895324707032, 0.21639578247070312, 0.21571994018554688, 0.215657470703125, 0.21565951538085937, 0.2156748809814453, 0.21572607421875, 0.21584690856933594, 0.21561138916015626, 0.21668966674804688, 0.2158008270263672, 0.21577113342285156, 0.21580902099609375, 0.2158673858642578, 0.21579673767089844, 0.21592063903808595, 0.21624319458007812, 0.21564927673339843, 0.21612953186035155, 0.21587046813964844, 0.2156810302734375, 0.21577830505371093, 0.2156943359375, 0.2155827178955078, 0.21565542602539062, 0.21575372314453126, 0.21602610778808592, 0.2159052734375, 0.2158039093017578, 0.21560421752929687, 0.21574656677246093, 0.2161326141357422, 0.21603021240234374, 0.21574041748046874, 0.21587251281738282, 0.21601791381835939, 0.21597900390625, 0.2160343017578125, 0.21582028198242187, 0.21569126892089843, 0.2157424621582031, 0.21565235900878907, 0.21565440368652344, 0.2158602294921875, 0.21615206909179688, 0.21636915588378905, 0.2160148468017578, 0.21692825317382813, 0.21595960998535157, 0.21593696594238282, 0.21585101318359376, 0.21585101318359376, 0.21587251281738282, 0.2156134338378906, 0.21572813415527345, 0.44772250366210936, 0.2154598388671875, 0.21554896545410157, 0.21563491821289063, 0.21536154174804686, 0.2154977264404297, 0.21561651611328125, 0.2155335693359375, 0.21565338134765624, 0.21574867248535157, 0.21558367919921875, 0.2155714874267578, 0.21574960327148437, 0.2156390380859375, 0.21553868103027343, 0.21560838317871095, 0.21563385009765626, 0.2156810302734375, 0.2155704345703125, 0.2155704345703125, 0.21559603881835937, 0.21553663635253906, 0.21553152465820313, 0.21560525512695314, 0.21560421752929687, 0.21559397888183593, 0.21667225646972657, 0.21568818664550782, 0.21559910583496095, 0.21570457458496095, 0.2156513214111328, 0.21557554626464845, 0.21559193420410155, 0.21564210510253906, 0.21565338134765624, 0.21612953186035155, 0.2163251190185547, 0.21550592041015626, 0.215910400390625, 0.21579469299316406, 0.2157455291748047, 0.215762939453125, 0.21571891784667968, 0.21559295654296876, 0.21559091186523438, 0.2155847625732422, 0.21548442077636717, 0.21573426818847657, 0.21578341674804688, 0.21575270080566405, 0.21571583557128907, 0.2156134338378906, 0.2155878448486328, 0.21560012817382812, 0.21608242797851562, 0.2159646759033203, 0.2156615753173828, 0.21582438659667968, 0.21574348449707031, 0.215689208984375, 
0.21561856079101563, 0.21556121826171876, 0.2156380157470703]",tokens/s,4.563157582132701,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3927.220224,12732.33408,0.0,12085.886976,11337.364992,s,10,11.005286376953126,1.1005286376953127,0.00219951912246486,1.1001186523437498,1.1034800170898438,1.1035955383300782,1.1036879553222656,"[1.1009342041015624, 1.102259765625, 1.0977261962890625, 1.099001708984375, 1.0989405517578126, 1.0975576171875, 1.0993031005859375, 1.103454345703125, 1.1023978271484376, 1.1037110595703126]",tokens/s,232.615482443152,kWh,1.2971816278166244e-05,7.107321095700172e-06,6.386246775661464e-05,8.394160513048104e-05,tokens/kWh,3049739.156191579,MB,3927.220224,12732.33408,0.0,12085.886976,11686.79936,s,10,645.4452890625,64.54452890625001,0.006590572212624524,64.54356250000001,64.553694140625,64.5538998046875,64.5540643359375,"[64.5536484375, 64.5513828125, 64.538359375, 64.55410546875, 64.5438203125, 64.5392109375, 64.53455859375, 64.5433046875, 64.5480546875, 64.53884375]",tokens/s,0.9760703357445925,kWh,0.0007619009707371396,0.00041758989640207795,0.003727131037257969,0.004906621904397187,tokens/kWh,12839.791047184834,,s,629,654.3247843627931,1.0402619783192257,0.1307385835871094,1.0244464111328124,1.0251733154296874,1.0253723876953125,2.12362228515625,"[1.0239293212890626, 1.0247884521484374, 1.0239590454101561, 1.0238433227539063, 1.0245233154296876, 1.0249482421875, 1.0245938720703125, 1.024648193359375, 1.0243778076171874, 1.0244761962890625, 1.0242918701171875, 1.02411474609375, 1.0238392333984374, 1.02443310546875, 1.0240072021484374, 1.025123291015625, 1.0243983154296874, 1.0239702758789062, 1.0238289794921875, 1.024879638671875, 1.0246707763671874, 1.0245980224609375, 1.0249052734375, 1.0246092529296875, 1.0252646484375, 1.025154052734375, 1.0244208984375, 1.0245867919921876, 1.025153076171875, 1.0249769287109376, 1.025039306640625, 1.0247618408203125, 1.0240020751953125, 1.0246666259765624, 1.024626708984375, 1.024693359375, 1.0244617919921875, 1.024501708984375, 1.02472802734375, 1.0254991455078124, 1.024680908203125, 1.025005615234375, 1.0249237060546874, 1.0251907958984374, 1.0256405029296876, 1.0250250244140624, 1.025184814453125, 1.02483251953125, 1.024786376953125, 1.0252471923828126, 1.0244935302734375, 1.0253773193359375, 1.024436279296875, 1.025090576171875, 1.025364990234375, 1.0249923095703124, 1.0241658935546876, 1.024015380859375, 1.0247249755859376, 1.0251048583984375, 1.0238597412109376, 1.02401123046875, 2.12693603515625, 1.0248826904296875, 1.024394287109375, 1.02513671875, 1.02491650390625, 1.024427978515625, 1.02502294921875, 1.0245028076171876, 1.0245499267578124, 1.024806884765625, 1.024963623046875, 1.024541748046875, 1.02464306640625, 1.0238269653320313, 1.024679931640625, 1.025101806640625, 1.0239959106445313, 1.0239354858398437, 1.024395263671875, 1.0241566162109375, 1.0247188720703124, 1.0248836669921875, 
1.0245191650390626, 1.0240450439453126, 1.024563232421875, 1.0246707763671874, 1.0240665283203125, 1.02398974609375, 1.02415869140625, 1.024153564453125, 1.0244813232421874, 1.0248365478515624, 1.0246881103515626, 1.0250660400390625, 1.024818115234375, 1.02487451171875, 1.0241402587890625, 1.0241505126953125, 1.0248140869140625, 1.0247342529296875, 1.0252052001953125, 1.024384033203125, 1.0249267578125, 1.0247813720703125, 1.0250096435546876, 1.02519091796875, 1.0286304931640624, 1.0247679443359374, 1.0250260009765626, 1.02523291015625, 1.0252830810546876, 1.024384033203125, 1.0238484497070313, 1.0244556884765625, 1.0247547607421874, 1.0238433227539063, 1.0240604248046874, 1.0238505249023437, 1.0239692993164062, 1.02425390625, 1.0245499267578124, 1.0240184326171875, 1.0239478149414063, 2.123869140625, 1.024112548828125, 1.0244178466796876, 1.024606201171875, 1.024331787109375, 1.024970703125, 1.0241341552734375, 1.0240655517578126, 1.0246256103515625, 1.024216064453125, 1.02445361328125, 1.0246973876953125, 1.02462158203125, 1.0241884765625, 1.024816162109375, 1.0244024658203126, 1.0240604248046874, 1.0241331787109376, 1.02439013671875, 1.024101318359375, 1.02460107421875, 1.0241126708984376, 1.0239580078125, 1.0260316162109375, 1.024153564453125, 1.024626708984375, 1.025184814453125, 1.024384033203125, 1.0247044677734376, 1.0242744140625, 1.0245263671875, 1.0240440673828124, 1.0243983154296874, 1.0242017822265626, 1.025048583984375, 1.02445263671875, 1.0241658935546876, 1.024067626953125, 1.0245570068359375, 1.0243236083984375, 1.0241505126953125, 1.0240552978515625, 1.0239324340820313, 1.02377880859375, 1.0250537109375, 1.0241873779296875, 1.0242979736328126, 1.0260521240234375, 1.0243245849609375, 1.0240809326171876, 1.024226318359375, 1.02384228515625, 1.0239518432617187, 1.02460107421875, 1.0242713623046875, 1.023963134765625, 1.0244454345703125, 1.02419970703125, 1.0243572998046875, 1.025553466796875, 1.0245345458984374, 1.0243184814453126, 1.02401123046875, 2.12367041015625, 1.0241259765625, 1.0242652587890626, 1.0244208984375, 1.0240543212890625, 1.0244075927734375, 1.0240450439453126, 1.024143310546875, 1.024236572265625, 1.0248232421875, 1.0240543212890625, 1.0240615234375, 1.0252451171875, 1.0245201416015626, 1.02453857421875, 1.024142333984375, 1.024111572265625, 1.0240758056640624, 1.0245355224609376, 1.0249923095703124, 1.0250772705078126, 1.025503173828125, 1.024585693359375, 1.025353759765625, 1.0251724853515625, 1.025302490234375, 1.0245509033203124, 1.02447412109375, 1.025395751953125, 1.0254765625, 1.025158203125, 1.02477001953125, 1.025292236328125, 1.024942138671875, 1.024954345703125, 1.024868408203125, 1.025138671875, 1.0246287841796875, 1.0256414794921875, 1.025081298828125, 1.0248232421875, 1.023910888671875, 1.0245693359375, 1.0238443603515626, 1.027009521484375, 1.0242816162109376, 1.0239989624023438, 1.0243726806640625, 1.0255267333984375, 1.024779296875, 1.0242899169921875, 1.0245653076171874, 1.0248477783203125, 1.0242303466796876, 1.025016845703125, 1.024173095703125, 1.0242969970703124, 1.024927734375, 1.0247515869140624, 1.02410546875, 1.0243441162109375, 1.024101318359375, 1.024564208984375, 2.12349853515625, 1.0242017822265626, 1.024427978515625, 1.0245703125, 1.0245938720703125, 1.025349609375, 1.02506396484375, 1.02497900390625, 1.0247310791015625, 1.0254193115234376, 1.0243072509765625, 1.02436865234375, 1.02426416015625, 1.024543701171875, 1.0246932373046875, 1.0242508544921876, 1.0247291259765625, 1.0247679443359374, 1.0248099365234375, 
1.0243450927734374, 1.0237440185546876, 1.0239437255859376, 1.02409619140625, 1.0248201904296874, 1.024573486328125, 1.0249298095703125, 1.0247445068359375, 1.024711669921875, 1.0242037353515625, 1.025076171875, 1.0243707275390626, 1.0241719970703125, 1.0248785400390625, 1.0241934814453124, 1.0238074951171876, 1.0240870361328125, 1.0238863525390625, 1.024194580078125, 1.0243511962890626, 1.023699951171875, 1.0239385375976562, 1.0239354858398437, 1.0245355224609376, 1.0244464111328124, 1.02446484375, 1.0242958984375, 1.024153564453125, 1.0246614990234375, 1.0249881591796874, 1.0237716674804687, 1.02404296875, 1.0248468017578125, 1.0250352783203125, 1.024362548828125, 1.0239723510742187, 1.02413623046875, 1.0246624755859375, 1.029676025390625, 1.02432568359375, 1.0238546142578124, 1.0238064575195311, 1.0237429809570313, 1.02468603515625, 2.1217158203125, 1.0237695922851562, 1.024711669921875, 1.02485498046875, 1.0252984619140626, 1.0250035400390625, 1.0246727294921876, 1.024594970703125, 1.0253424072265624, 1.024754638671875, 1.0237081298828126, 1.024232421875, 1.024865234375, 1.0245396728515626, 1.02403173828125, 1.02407275390625, 1.02489599609375, 1.0252052001953125, 1.0250966796875, 1.023773681640625, 1.02441162109375, 1.02495947265625, 1.0239385375976562, 1.0248038330078124, 1.0244832763671874, 1.0240286865234376, 1.0238095092773438, 1.02410546875, 1.024607177734375, 1.0238873901367187, 1.0241719970703125, 1.0245919189453125, 1.0244515380859376, 1.0247874755859374, 1.0245714111328126, 1.02497998046875, 1.0246953125, 1.024564208984375, 1.02820654296875, 1.02466455078125, 1.02426220703125, 1.02436962890625, 1.0242867431640625, 1.0247608642578125, 1.023847412109375, 1.0239989624023438, 1.0239672241210938, 1.024320556640625, 1.0242447509765624, 1.02379931640625, 1.02389453125, 1.024280517578125, 1.0238443603515626, 1.0239979248046875, 1.02392626953125, 1.02389453125, 1.0241024169921875, 1.0241402587890625, 1.024089111328125, 1.0238341064453125, 1.0239702758789062, 1.0240225830078125, 1.02510595703125, 2.124275634765625, 1.02431640625, 1.0250086669921874, 1.0248448486328126, 1.0252420654296874, 1.024489501953125, 1.0244403076171875, 1.0246031494140626, 1.0247711181640624, 1.0237265625, 1.0236641235351562, 1.0239344482421875, 1.0241915283203125, 1.023867919921875, 1.02379931640625, 1.0237245483398438, 1.024006103515625, 1.023836181640625, 1.024227294921875, 1.02468603515625, 1.0239273071289063, 1.0240594482421874, 1.0241719970703125, 1.024067626953125, 1.023867919921875, 1.0238515014648437, 1.025666015625, 1.0245263671875, 1.024973876953125, 1.02382080078125, 1.0239006958007812, 1.0243861083984376, 1.0252000732421875, 1.025047607421875, 1.0245989990234374, 1.0247177734375, 1.023931396484375, 1.0238443603515626, 1.0247506103515625, 1.0237757568359376, 1.0241033935546875, 1.0239365844726562, 1.0244884033203125, 1.0237399291992189, 1.024362548828125, 1.024405517578125, 1.024089111328125, 1.0244864501953126, 1.02445263671875, 1.02447509765625, 1.024986083984375, 1.0248734130859376, 1.0243768310546875, 1.024090087890625, 1.024077880859375, 1.0242088623046874, 1.0244178466796876, 1.0243809814453124, 1.024343017578125, 1.024501708984375, 1.0247445068359375, 1.024690185546875, 1.0249298095703125, 2.128819091796875, 1.02439111328125, 1.0247823486328125, 1.0247977294921875, 1.02436767578125, 1.0251878662109375, 1.025364990234375, 1.0248714599609374, 1.024216064453125, 1.023978515625, 1.024204833984375, 1.024175048828125, 1.0241976318359376, 1.02432568359375, 1.024288818359375, 1.024332763671875, 
1.02521142578125, 1.0247762451171876, 1.0244351806640626, 1.0237880249023437, 1.0244085693359375, 1.024279541015625, 1.024522216796875, 1.0245447998046875, 1.0244136962890624, 1.02485302734375, 1.0252779541015624, 1.0244249267578125, 1.0243072509765625, 1.024364501953125, 1.0249554443359374, 1.02441064453125, 1.02491748046875, 1.025059814453125, 1.0243563232421875, 1.024710693359375, 1.0254488525390626, 1.0243583984375, 1.024216064453125, 1.0244423828125, 1.024673828125, 1.0236497802734374, 1.0240399169921874, 1.02411572265625, 1.0241024169921875, 1.0244669189453126, 1.024669677734375, 1.0245672607421874, 1.0250875244140625, 1.0249993896484375, 1.024100341796875, 1.0239754028320311, 1.0242611083984374, 1.0236723022460938, 1.0240758056640624, 1.025005615234375, 1.0246123046875, 1.024385009765625, 1.024511962890625, 1.0244013671875, 1.0243123779296874, 1.0240665283203125, 1.0242939453125, 2.1268427734375, 1.0253404541015625, 1.0243931884765625, 1.0240972900390626, 1.0241934814453124, 1.0252298583984376, 1.024215087890625, 1.0247833251953125, 1.024521240234375, 1.023973388671875, 1.0242017822265626, 1.0258052978515626, 1.024546875, 1.0249359130859375, 1.025333251953125, 1.0250526123046875, 1.02436962890625, 1.0253035888671875, 1.0246348876953124, 1.0245201416015626, 1.02516015625, 1.0252349853515625, 1.02502294921875, 1.024257080078125, 1.0243369140625, 1.0239395751953124, 1.02425390625, 1.0249779052734376, 1.024067626953125, 1.0241177978515625, 1.0244976806640624, 1.025036376953125, 1.02519189453125, 1.02483349609375, 1.0254356689453126, 1.0247762451171876, 1.024606201171875, 1.0247310791015625, 1.024310302734375, 1.0240450439453126, 1.024733154296875, 1.0244976806640624, 1.0241402587890625, 1.0240235595703124, 1.025154052734375, 1.023847412109375, 1.0244290771484375, 1.0241136474609376, 1.0241822509765626, 1.0238689575195312, 1.0249298095703125, 1.0245478515625, 1.0244771728515625, 1.0245509033203124, 1.024611328125, 1.0240921630859374, 1.024890869140625, 1.0240205078125, 1.024279541015625, 1.0245560302734376, 1.0247720947265626, 1.024206787109375, 1.0244300537109374, 2.128713623046875, 1.024427001953125, 1.0249287109375, 1.025007568359375, 1.0242989501953126, 1.025701904296875, 1.0250526123046875, 1.024421875, 1.02443115234375, 1.0251038818359375, 1.024236572265625, 1.0237388916015624, 1.0245765380859375, 1.023847412109375, 1.0237470703125, 1.0244495849609374, 1.024459716796875, 1.024206787109375, 1.02432666015625, 1.024251953125, 1.0239293212890626, 1.024359375, 1.02411669921875, 1.0238505249023437, 1.024151611328125, 1.025427490234375, 1.0240450439453126, 1.024953369140625, 1.02416796875, 1.0247802734375, 1.0241229248046875, 1.024373779296875, 1.024141357421875, 1.0242396240234375, 1.02449560546875, 1.0251766357421874, 1.024015380859375, 1.0237122802734375, 1.0239969482421876, 1.0238975830078125, 1.0237726440429689, 1.02384130859375, 1.02431640625, 1.0243133544921874, 1.0245919189453125, 1.024943115234375, 1.0246246337890625, 1.024611328125, 1.024716796875, 1.024035888671875, 1.0247413330078126, 1.024973876953125, 1.024796630859375, 1.0246318359375, 1.0259609375, 1.0238064575195311, 1.0238228759765624, 1.02377880859375, 1.02478955078125, 1.02438916015625, 1.0245723876953126, 1.0243604736328125, 1.02432568359375]",tokens/s,0.9612963088545465,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3956.158464,12732.33408,0.0,12085.886976,11337.370624,s,10,10.987366088867189,1.098736608886719,0.001768964710489349,1.0985178833007812,1.1007381103515623,1.101008154296875,1.101224189453125,"[1.1001180419921874, 1.1012781982421875, 1.0972359619140626, 1.0976529541015625, 1.097395263671875, 1.09569873046875, 1.0975172119140626, 1.0993828125, 1.1004088134765626, 1.1006781005859374]",tokens/s,232.99487605076595,kWh,1.295269936323166e-05,7.0975890679255816e-06,6.178085498018682e-05,8.183114341134406e-05,tokens/kWh,3128393.2904756567,MB,3956.158464,12732.33408,0.0,12085.886976,11686.804992,s,10,644.0755078125,64.40755078125,0.02043927244380867,64.406017578125,64.42852109374999,64.437791796875,64.445208359375,"[64.41949609375, 64.4215078125, 64.3984453125, 64.3965, 64.3855703125, 64.37932421875, 64.38755078125, 64.41358984375, 64.4470625, 64.4264609375]",tokens/s,0.9781461837288221,kWh,0.0007606077267395126,0.000416880381825722,0.003642305219397435,0.00481979332796267,tokens/kWh,13071.09988191758,,s,629,652.9415617065439,1.038062896194822,0.13045675837303516,1.0222479248046874,1.02322421875,1.0235658081054688,2.119142734375,"[1.0216539916992187, 1.0224589233398438, 1.0223789672851562, 1.0224598999023438, 1.0228684692382812, 1.0230978393554688, 1.0232852783203126, 1.0228684692382812, 1.0223707885742188, 1.0226544799804687, 1.0221055908203125, 1.022455810546875, 1.0223565063476563, 1.0220953369140624, 1.02287255859375, 1.0226544799804687, 1.0230180053710938, 1.0228029174804687, 1.022360595703125, 1.0219008178710938, 1.02221826171875, 1.0227445678710938, 1.0227865600585937, 1.0227056884765624, 1.0226657104492187, 1.0222479248046874, 1.0227005615234375, 1.0229114990234376, 1.0229288940429688, 1.0229381103515625, 1.0223861694335938, 1.0226493530273437, 1.0219724731445312, 1.02327294921875, 1.0225264892578125, 1.021897705078125, 1.0218536987304687, 1.0230333251953125, 1.0221629638671874, 1.022266357421875, 1.0232105102539062, 1.0222919921875, 1.0218916015625, 1.0232176513671876, 1.0224496459960937, 1.0222622680664062, 1.0223595581054687, 1.0233876342773438, 1.0225960693359375, 1.0226697998046874, 1.0229954833984376, 1.0221025390625, 1.0217092895507813, 1.0235074462890625, 1.0223370361328126, 1.0225868530273436, 1.0227589111328126, 1.0223380737304688, 1.0219468994140626, 1.02196533203125, 1.0226288452148438, 1.0231378173828125, 2.124905517578125, 1.0222356567382813, 1.021960205078125, 1.022509033203125, 1.0222704467773438, 1.0222008056640626, 1.0229309692382813, 1.0228899536132812, 1.0222418212890625, 1.022761962890625, 1.0235012817382811, 1.0224527587890626, 1.0228264770507813, 1.0230067138671874, 1.022603271484375, 1.022150634765625, 1.0228582153320311, 1.0227486572265625, 1.0225458984375, 1.023267822265625, 1.0228500366210938, 1.0230599975585937, 1.02280908203125, 1.0225940551757813, 1.0225387573242188, 1.0220328979492188, 
1.0222684326171876, 1.0227660522460937, 1.0227394409179686, 1.0236610717773438, 1.0233159790039061, 1.02287158203125, 1.0244444580078125, 1.0236958618164063, 1.022750732421875, 1.0225233764648438, 1.0231818237304688, 1.0220595092773437, 1.0220052490234375, 1.0232534790039063, 1.0219489135742188, 1.02171337890625, 1.0222592163085937, 1.0224414672851563, 1.023140869140625, 1.0223493041992187, 1.0228746337890624, 1.0218157958984375, 1.02228173828125, 1.02193359375, 1.0220175170898438, 1.0220697631835938, 1.02232470703125, 1.0216427612304688, 1.0218946533203126, 1.0215690307617187, 1.022439453125, 1.02179736328125, 1.0224251098632813, 1.0220114135742187, 1.0223197021484376, 1.0227127685546875, 1.0225541381835936, 2.119232421875, 1.0225018920898437, 1.022055419921875, 1.0218680419921875, 1.0223175659179689, 1.0221229858398437, 1.0219366455078125, 1.0225018920898437, 1.0220114135742187, 1.0218792724609376, 1.0220257568359374, 1.0222643432617187, 1.0220001220703125, 1.0221516723632813, 1.0221260986328125, 1.0216611938476563, 1.0218567504882812, 1.0222837524414063, 1.022792724609375, 1.0223380737304688, 1.0225889282226563, 1.0225274658203125, 1.0219089965820312, 1.0221567993164062, 1.0223544311523438, 1.0222837524414063, 1.0220943603515624, 1.022055419921875, 1.0220431518554687, 1.0217420654296876, 1.022118896484375, 1.0222120971679687, 1.02186083984375, 1.0227660522460937, 1.0224302368164062, 1.0219202270507812, 1.0222950439453125, 1.022434326171875, 1.0217748413085936, 1.0216837158203125, 1.021929443359375, 1.0220933227539062, 1.0220226440429687, 1.0222172241210938, 1.021971435546875, 1.0217420654296876, 1.0220123901367189, 1.0219243774414062, 1.0223441772460937, 1.022044189453125, 1.022455810546875, 1.0223093872070312, 1.0240828857421875, 1.0233630981445312, 1.0228223876953124, 1.0220809936523438, 1.022814208984375, 1.0219632568359376, 1.0219100341796874, 1.021834228515625, 1.0227333374023437, 1.0218475341796875, 1.0219889526367187, 2.119702392578125, 1.0218157958984375, 1.0222847900390626, 1.0225991821289062, 1.0219386596679687, 1.0222909545898438, 1.0225694580078124, 1.02200732421875, 1.0223073120117188, 1.0221957397460937, 1.0222427978515625, 1.0221383666992188, 1.0221312255859376, 1.022075927734375, 1.0216263427734376, 1.0212208862304688, 1.02229296875, 1.0224076538085938, 1.023636474609375, 1.0234900512695313, 1.023056884765625, 1.0234654541015624, 1.0236641235351562, 1.02160791015625, 1.0215352172851562, 1.0216673583984375, 1.021822998046875, 1.0217000732421875, 1.022213134765625, 1.02185986328125, 1.0224988403320312, 1.022687255859375, 1.0223042602539063, 1.0217113647460938, 1.0225919799804688, 1.022482421875, 1.022055419921875, 1.0219806518554688, 1.023088623046875, 1.0223472900390624, 1.021929443359375, 1.0223493041992187, 1.0222120971679687, 1.0214901733398438, 1.0219417724609374, 1.0221721801757813, 1.0221773071289062, 1.0221752319335937, 1.0220165405273438, 1.0218884887695312, 1.0218833618164063, 1.0219745483398437, 1.0215157470703125, 1.0211686401367188, 1.0218997802734375, 1.0223994750976562, 1.0220114135742187, 1.022298095703125, 1.02169189453125, 1.0218690795898437, 1.021822998046875, 1.0221168823242188, 1.0219120483398438, 2.118912109375, 1.0223964233398437, 1.0216151123046875, 1.022171142578125, 1.0217677001953125, 1.0219642944335938, 1.0219970703125, 1.0222827758789061, 1.0216611938476563, 1.021549560546875, 1.0215291137695313, 1.0223114013671875, 1.022255126953125, 1.0225100708007813, 1.0217799682617188, 1.02200830078125, 1.0224179077148436, 1.0221260986328125, 
1.0218782958984376, 1.0218884887695312, 1.0225499877929687, 1.022076904296875, 1.0219366455078125, 1.0222387084960938, 1.0217011108398437, 1.0217328491210937, 1.02257666015625, 1.0215403442382813, 1.0214144287109375, 1.0214072265625, 1.0221701049804688, 1.0217277221679688, 1.0222387084960938, 1.0218997802734375, 1.0216539916992187, 1.0216837158203125, 1.0219857788085938, 1.0216980590820313, 1.0217205810546874, 1.0214840087890624, 1.02214453125, 1.0218076171875, 1.0222633056640624, 1.0221834106445313, 1.0223790283203125, 1.0221107177734374, 1.022223388671875, 1.0214297485351562, 1.0217717895507812, 1.0216028442382812, 1.0226585693359376, 1.0215946044921875, 1.0224056396484376, 1.0217778930664063, 1.0220472412109376, 1.0222387084960938, 1.0221731567382812, 1.0224568481445313, 1.0223892211914063, 1.021928466796875, 1.0224486694335937, 1.0220421142578124, 1.022455810546875, 2.117295166015625, 1.0219315185546876, 1.0214788818359375, 1.0215782470703125, 1.021528076171875, 1.0218690795898437, 1.0219069213867187, 1.0218506469726563, 1.0224097290039063, 1.0219745483398437, 1.0218424072265626, 1.021686767578125, 1.0215823364257812, 1.0215782470703125, 1.021676513671875, 1.0219089965820312, 1.02179736328125, 1.0221465454101561, 1.0219561157226562, 1.021676513671875, 1.0220635986328126, 1.0227415161132813, 1.0216591186523438, 1.0214799194335937, 1.021432861328125, 1.0216959838867188, 1.021422607421875, 1.0225029296875, 1.0220267333984374, 1.021981689453125, 1.022392333984375, 1.0219192504882812, 1.021675537109375, 1.0215321655273437, 1.0215844116210937, 1.0223329467773437, 1.0216427612304688, 1.0223380737304688, 1.0223411254882813, 1.0224465942382812, 1.0220513305664063, 1.021770751953125, 1.0217267456054688, 1.0211819458007811, 1.0219478759765626, 1.0219786376953126, 1.0217195434570312, 1.02196533203125, 1.0216980590820313, 1.0216949462890625, 1.0220328979492188, 1.02213427734375, 1.0221680908203126, 1.02171337890625, 1.0219458618164063, 1.021823974609375, 1.022129150390625, 1.0220892333984375, 1.0217769165039063, 1.022181396484375, 1.0221486206054688, 1.0219581298828124, 1.0223165283203124, 2.1193359375, 1.021507568359375, 1.0222807006835937, 1.0218096923828126, 1.0217789306640626, 1.0224578857421875, 1.0224793701171875, 1.0222202758789063, 1.0224015502929686, 1.0220513305664063, 1.0227291870117188, 1.0218936157226564, 1.0227210083007812, 1.0215659790039062, 1.021507568359375, 1.0218588256835937, 1.0215946044921875, 1.0212608032226562, 1.0221383666992188, 1.021644775390625, 1.0220400390625, 1.0219151611328126, 1.0219458618164063, 1.0218168334960938, 1.0220155029296876, 1.0220830688476563, 1.0217257080078126, 1.0215249633789063, 1.0215782470703125, 1.0214522705078124, 1.021971435546875, 1.0216908569335938, 1.0222796630859374, 1.0217891845703124, 1.0220093383789062, 1.0221598510742187, 1.0217533569335937, 1.021591552734375, 1.021834228515625, 1.0217297973632813, 1.0220697631835938, 1.0221373291015625, 1.021681640625, 1.021823974609375, 1.022286865234375, 1.0229125366210938, 1.0224752807617188, 1.0218772583007814, 1.0224363403320313, 1.02299853515625, 1.021749267578125, 1.022983154296875, 1.0218731689453124, 1.0215403442382813, 1.022688232421875, 1.0222745361328125, 1.0221066284179687, 1.021812744140625, 1.0228449096679688, 1.0219735107421875, 1.0219089965820312, 1.0222427978515625, 1.0222807006835937, 2.121678955078125, 1.0220349731445313, 1.0220318603515626, 1.022308349609375, 1.0220912475585937, 1.0223062744140625, 1.0220667114257813, 1.0221168823242188, 1.0219243774414062, 1.0236375122070311, 
1.0226729125976564, 1.022350341796875, 1.0221209716796875, 1.0218065795898437, 1.0220042114257812, 1.02285107421875, 1.0233190307617188, 1.0220626220703124, 1.0221178588867188, 1.0219304809570313, 1.0225458984375, 1.0225377197265626, 1.0221055908203125, 1.0222172241210938, 1.0222172241210938, 1.0232381591796875, 1.0236713256835936, 1.02211376953125, 1.0223145141601562, 1.0224046020507813, 1.0224127807617187, 1.0221404418945312, 1.0220615844726562, 1.0223544311523438, 1.0224671020507812, 1.0220564575195312, 1.02242919921875, 1.0219437866210936, 1.0222633056640624, 1.0225182495117187, 1.0220728149414062, 1.0216908569335938, 1.0225111083984375, 1.0221690673828125, 1.0233764038085937, 1.0224005126953124, 1.0228797607421876, 1.022582763671875, 1.0221362915039063, 1.0220574951171875, 1.0226176147460937, 1.023177734375, 1.022688232421875, 1.0220369873046875, 1.0229730224609375, 1.0224833984375, 1.0232719116210938, 1.0233231201171875, 1.0232688598632813, 1.0227711791992187, 1.02242919921875, 1.0218782958984376, 1.0227630004882813, 2.123450439453125, 1.0225910034179688, 1.022224365234375, 1.0221588745117187, 1.0227947387695313, 1.0223759155273437, 1.0220062866210937, 1.021928466796875, 1.0225244140625, 1.0222254028320312, 1.0220328979492188, 1.0240491943359376, 1.0243778076171874, 1.0222274780273437, 1.0225735473632813, 1.02270361328125, 1.0226463012695313, 1.0230241088867187, 1.023151123046875, 1.0230661010742188, 1.022940185546875, 1.0235166625976562, 1.0226575317382813, 1.0224097290039063, 1.02308349609375, 1.0247874755859374, 1.0235719604492188, 1.0233917236328125, 1.023362060546875, 1.0236497802734374, 1.0237828979492187, 1.0238555908203124, 1.024322509765625, 1.0232586059570312, 1.0231644287109376, 1.023604736328125, 1.0232422485351562, 1.0228469848632813, 1.0233712768554688, 1.0233978881835937, 1.0237593383789063, 1.02187109375, 1.0220492553710938, 1.0218803100585938, 1.023657958984375, 1.0220001220703125, 1.0220830688476563, 1.0218035278320312, 1.02250390625, 1.02404296875, 1.0239928588867186, 1.0232227783203125, 1.0231644287109376, 1.0232197265625, 1.0240625, 1.0235238647460938, 1.022983154296875, 1.0219345703125, 1.0223042602539063, 1.022192626953125, 1.0235565795898438, 1.0230742797851562, 1.0222684326171876, 2.122977294921875, 1.021991943359375, 1.0222418212890625, 1.022287841796875, 1.0231869506835938, 1.0230241088867187, 1.022814208984375, 1.022866455078125, 1.0224425048828125, 1.0229033203125, 1.0226390991210939, 1.0222633056640624, 1.0229155883789063, 1.0221915893554687, 1.0231552124023438, 1.0233661499023436, 1.0227711791992187, 1.0222151489257814, 1.0229217529296875, 1.0231818237304688, 1.0223114013671875, 1.0220543823242187, 1.02232373046875, 1.0222356567382813, 1.0230077514648437, 1.0234613647460937, 1.0228264770507813, 1.0229319458007813, 1.024480224609375, 1.0225131225585937, 1.0220369873046875, 1.0220902099609375, 1.0221383666992188, 1.0218895263671874, 1.0225633544921875, 1.0228480224609375, 1.0221834106445313, 1.0225643310546875, 1.0235330810546874, 1.02322998046875, 1.0229186401367187, 1.0226974487304688, 1.0228060302734374, 1.022993408203125, 1.023494140625, 1.0230015869140625, 1.0222899169921875, 1.0222520141601563, 1.0227855224609375, 1.0226903076171876, 1.022087158203125, 1.0217963256835938, 1.0218424072265626, 1.0221260986328125, 1.0221178588867188, 1.0227445678710938, 1.023115234375, 1.0224527587890626, 1.0226483154296875, 1.022645263671875, 1.022792724609375, 1.0225899658203126, 1.0227425537109376]",tokens/s,0.9633327649660273,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,5945.012224,19933.954048,0.0,19287.506944,18376.2688,s,10,24.42857763671875,2.442857763671875,0.001982296286527604,2.4421226806640624,2.4459630615234373,2.446148400878906,2.4462966723632813,"[2.44311767578125, 2.441929443359375, 2.441054443359375, 2.44091455078125, 2.441591064453125, 2.440709716796875, 2.44231591796875, 2.446333740234375, 2.445921875, 2.444689208984375]",tokens/s,104.79529500530754,kWh,2.881985050108698e-05,1.5794228899012522e-05,0.00013673797050139935,0.00018135204990149886,tokens/kWh,1411619.0036950014,MB,5946.937344,19933.954048,0.0,19287.506944,18871.985152,s,10,1453.1662812499999,145.316628125,0.01677016451823821,145.3091796875,145.3392140625,145.34826328125,145.35550265625,"[145.31590625, 145.308921875, 145.3094375, 145.306953125, 145.3573125, 145.337203125, 145.304828125, 145.303359375, 145.319953125, 145.30240625]",tokens/s,0.4335360709430169,kWh,0.0017155244359870752,0.0009402592492990879,0.00808164824309121,0.010737431928377374,tokens/kWh,5867.324740238933,,s,629,1472.8442895507828,2.3415648482524345,0.29007180854476533,2.306472900390625,2.307840771484375,2.308159619140625,4.7478279296874994,"[2.30830078125, 2.30712939453125, 2.305491943359375, 2.305958984375, 2.306070556640625, 2.306164794921875, 2.306714599609375, 2.306167724609375, 2.30651708984375, 2.306343994140625, 2.306281494140625, 2.3058994140625, 2.306171875, 2.305965087890625, 2.3060673828125, 2.305934326171875, 2.306259033203125, 2.30677197265625, 2.30823828125, 2.3065693359375, 2.3062998046875, 2.3065107421875, 2.305712158203125, 2.3060634765625, 2.30664404296875, 2.30611865234375, 2.305919921875, 2.306153564453125, 2.30573876953125, 2.307145751953125, 2.307577880859375, 2.30742529296875, 2.3060234375, 2.306620361328125, 2.30689892578125, 2.306671630859375, 2.30721337890625, 2.306621337890625, 2.30584423828125, 2.306515869140625, 2.306025390625, 2.307158935546875, 2.307966064453125, 2.3080498046875, 2.30725927734375, 2.30751123046875, 2.30584326171875, 2.306826171875, 2.30643505859375, 2.306669677734375, 2.3066572265625, 2.306987060546875, 2.3064228515625, 2.30631640625, 2.3060048828125, 2.3066552734375, 2.306758544921875, 2.306385986328125, 2.306724853515625, 2.30693994140625, 2.3067626953125, 2.306996337890625, 4.747927734375, 2.3060458984375, 2.306315185546875, 2.305916015625, 2.30605615234375, 2.30666455078125, 2.306251708984375, 2.306472900390625, 2.306552734375, 2.30586376953125, 2.3063408203125, 2.306269287109375, 2.306483154296875, 2.3060849609375, 2.306608154296875, 2.306598876953125, 2.306947021484375, 2.306627685546875, 2.30656298828125, 2.306156494140625, 2.305994873046875, 2.305953857421875, 2.3070966796875, 2.307031005859375, 2.306716552734375, 2.30727587890625, 2.305818603515625, 2.30561181640625, 2.306303955078125, 2.30622607421875, 2.30601513671875, 2.306629638671875, 2.306018310546875, 2.307220458984375, 
2.307166259765625, 2.30818310546875, 2.30658251953125, 2.306062255859375, 2.3065908203125, 2.3057265625, 2.306356201171875, 2.30654150390625, 2.306720703125, 2.306249755859375, 2.307949462890625, 2.307349609375, 2.30730859375, 2.306469970703125, 2.306974609375, 2.30639501953125, 2.30609716796875, 2.30603369140625, 2.306250732421875, 2.306716552734375, 2.306428955078125, 2.3060244140625, 2.306080810546875, 2.306798583984375, 2.306280517578125, 2.306587646484375, 2.306314208984375, 2.3067607421875, 2.307210205078125, 4.74750048828125, 2.30618017578125, 2.306073486328125, 2.306304931640625, 2.3058740234375, 2.306080810546875, 2.3063828125, 2.30624560546875, 2.305426513671875, 2.306010009765625, 2.3058330078125, 2.306188232421875, 2.305746826171875, 2.30639501953125, 2.305995849609375, 2.305857421875, 2.305976318359375, 2.306387939453125, 2.307175537109375, 2.3076884765625, 2.30548486328125, 2.305490966796875, 2.3058544921875, 2.30586474609375, 2.306364501953125, 2.30583203125, 2.306301025390625, 2.306060302734375, 2.306914306640625, 2.308274169921875, 2.307685302734375, 2.3084912109375, 2.306903076171875, 2.30611669921875, 2.306157470703125, 2.30603466796875, 2.306026611328125, 2.30809912109375, 2.30824658203125, 2.30763623046875, 2.30674853515625, 2.306740234375, 2.30609521484375, 2.305721435546875, 2.30651806640625, 2.307560546875, 2.30752880859375, 2.306269287109375, 2.30721630859375, 2.306631591796875, 2.30655908203125, 2.3057724609375, 2.30613818359375, 2.306503662109375, 2.306617431640625, 2.307072021484375, 2.30647802734375, 2.305734619140625, 2.3066552734375, 2.306449462890625, 2.30620361328125, 2.30637158203125, 2.306595947265625, 4.74802880859375, 2.306186279296875, 2.305531982421875, 2.305669189453125, 2.306135986328125, 2.3058759765625, 2.306019287109375, 2.306431884765625, 2.306522216796875, 2.30567529296875, 2.3055830078125, 2.306220947265625, 2.30667578125, 2.306291748046875, 2.306946044921875, 2.30719287109375, 2.3062763671875, 2.30605517578125, 2.3061484375, 2.306335693359375, 2.30618115234375, 2.30607666015625, 2.30662646484375, 2.30637158203125, 2.30767724609375, 2.308170654296875, 2.30727978515625, 2.306090087890625, 2.3074775390625, 2.30679248046875, 2.308010009765625, 2.306175048828125, 2.3065908203125, 2.306902099609375, 2.307230712890625, 2.30744677734375, 2.307564453125, 2.306438232421875, 2.306532470703125, 2.306220947265625, 2.306926513671875, 2.30791162109375, 2.30597021484375, 2.30626416015625, 2.306064453125, 2.305848388671875, 2.30631005859375, 2.305681396484375, 2.306680908203125, 2.3066787109375, 2.306912353515625, 2.30727587890625, 2.30600390625, 2.306438232421875, 2.306324462890625, 2.305838134765625, 2.305875, 2.305974365234375, 2.30620166015625, 2.306227294921875, 2.3058759765625, 2.306018310546875, 2.306186279296875, 4.7475712890625, 2.3056015625, 2.305901611328125, 2.30658251953125, 2.307598388671875, 2.307072998046875, 2.30600390625, 2.308192138671875, 2.307829833984375, 2.30786865234375, 2.307883056640625, 2.306185302734375, 2.305741943359375, 2.30852392578125, 2.307919921875, 2.30782763671875, 2.30862841796875, 2.308886474609375, 2.308350830078125, 2.308662353515625, 2.308884521484375, 2.30721337890625, 2.307564453125, 2.307115966796875, 2.307040283203125, 2.30601025390625, 2.307072998046875, 2.305795166015625, 2.3058740234375, 2.306260009765625, 2.3076474609375, 2.3075, 2.305462158203125, 2.305919921875, 2.306641845703125, 2.308442138671875, 2.30645654296875, 2.306227294921875, 2.306621337890625, 2.306598876953125, 2.3067822265625, 2.306314208984375, 
2.307274658203125, 2.306165771484375, 2.30636962890625, 2.306336669921875, 2.3068681640625, 2.307556396484375, 2.307939208984375, 2.306641845703125, 2.308420654296875, 2.30788720703125, 2.30803857421875, 2.307458984375, 2.308116455078125, 2.3085107421875, 2.30814306640625, 2.309021728515625, 2.30691845703125, 2.3075400390625, 2.30881787109375, 2.30786962890625, 2.308601806640625, 4.747927734375, 2.30775, 2.307747802734375, 2.306754638671875, 2.30704638671875, 2.30609521484375, 2.305871826171875, 2.305828857421875, 2.306450439453125, 2.3074384765625, 2.30639404296875, 2.306155517578125, 2.30721240234375, 2.3061943359375, 2.306260986328125, 2.3063408203125, 2.305946533203125, 2.30626708984375, 2.3059384765625, 2.306785400390625, 2.30590576171875, 2.307837890625, 2.308127685546875, 2.307727294921875, 2.30624755859375, 2.30803759765625, 2.308209716796875, 2.306083740234375, 2.308106201171875, 2.30796484375, 2.307409912109375, 2.307322998046875, 2.3070556640625, 2.30601416015625, 2.30601220703125, 2.30685400390625, 2.3067841796875, 2.3062333984375, 2.30788818359375, 2.3078798828125, 2.307981201171875, 2.308041748046875, 2.307943359375, 2.3078779296875, 2.306785400390625, 2.306217041015625, 2.30694189453125, 2.3059833984375, 2.30669921875, 2.30706787109375, 2.307828857421875, 2.3062119140625, 2.30769970703125, 2.30689990234375, 2.306872314453125, 2.30632763671875, 2.307541015625, 2.307304443359375, 2.306280517578125, 2.305987548828125, 2.3064677734375, 2.30799365234375, 2.30702490234375, 4.74871826171875, 2.306419677734375, 2.30594775390625, 2.305490966796875, 2.3053935546875, 2.306144287109375, 2.305890380859375, 2.30618310546875, 2.30565380859375, 2.305838134765625, 2.3064453125, 2.306163818359375, 2.305967041015625, 2.30632958984375, 2.30599365234375, 2.30660205078125, 2.306093017578125, 2.30550927734375, 2.3062958984375, 2.30859375, 2.307629150390625, 2.305786865234375, 2.30675244140625, 2.30698291015625, 2.307533935546875, 2.306155517578125, 2.30588916015625, 2.30687744140625, 2.30660205078125, 2.30624658203125, 2.30635205078125, 2.30655078125, 2.30631005859375, 2.307313720703125, 2.306417724609375, 2.306740234375, 2.306871337890625, 2.30619140625, 2.3060859375, 2.305734619140625, 2.3065068359375, 2.30648828125, 2.306944091796875, 2.30637060546875, 2.3063173828125, 2.305910888671875, 2.306567138671875, 2.305996826171875, 2.306691162109375, 2.306502685546875, 2.306821044921875, 2.306130859375, 2.30672900390625, 2.307013671875, 2.306767822265625, 2.306404296875, 2.30620458984375, 2.306543701171875, 2.30641552734375, 2.30656298828125, 2.306662353515625, 2.306578369140625, 2.30736083984375, 4.75277099609375, 2.306112548828125, 2.305987548828125, 2.306069580078125, 2.305553466796875, 2.305196044921875, 2.306450439453125, 2.306493408203125, 2.306361328125, 2.305670166015625, 2.305954833984375, 2.30636328125, 2.305942626953125, 2.305912841796875, 2.3060244140625, 2.306492431640625, 2.306491455078125, 2.3065087890625, 2.306021484375, 2.30662255859375, 2.306044921875, 2.305946533203125, 2.306109375, 2.3060673828125, 2.30651708984375, 2.306368408203125, 2.306166748046875, 2.305617919921875, 2.3070966796875, 2.30624462890625, 2.30612060546875, 2.30616259765625, 2.306585693359375, 2.3061943359375, 2.305982421875, 2.30618212890625, 2.30691015625, 2.307801025390625, 2.30774169921875, 2.305775634765625, 2.30598974609375, 2.307757080078125, 2.307713134765625, 2.3067412109375, 2.306273193359375, 2.306452392578125, 2.30667578125, 2.306123779296875, 2.307871826171875, 2.3065498046875, 2.306324462890625, 
2.3063818359375, 2.306296875, 2.306356201171875, 2.306536376953125, 2.30616162109375, 2.306343994140625, 2.3068017578125, 2.307167236328125, 2.3066982421875, 2.3068271484375, 2.30626611328125, 2.306765869140625, 4.75146533203125, 2.305406005859375, 2.306103271484375, 2.306335693359375, 2.306105224609375, 2.305650634765625, 2.305935302734375, 2.3065302734375, 2.306123779296875, 2.306839599609375, 2.306525146484375, 2.3065908203125, 2.3057275390625, 2.3065927734375, 2.306326416015625, 2.305699951171875, 2.306766845703125, 2.306298828125, 2.306177978515625, 2.306017333984375, 2.3064453125, 2.307205078125, 2.3069912109375, 2.30652099609375, 2.307852294921875, 2.30791357421875, 2.307249267578125, 2.30723388671875, 2.308533203125, 2.307859375, 2.307365966796875, 2.30736474609375, 2.306230224609375, 2.306595947265625, 2.306797607421875, 2.306817138671875, 2.307945556640625, 2.306934814453125, 2.306532470703125, 2.306154541015625, 2.306238525390625, 2.306093994140625, 2.306994140625, 2.30653125, 2.30648828125, 2.30630810546875, 2.306410400390625, 2.30582177734375, 2.30702392578125, 2.306610107421875, 2.30685888671875, 2.30723388671875, 2.307060791015625, 2.30736083984375, 2.307283935546875, 2.306974609375, 2.307143798828125, 2.30702685546875, 2.307147705078125, 2.306912353515625, 2.306466796875, 2.30637060546875, 2.305808349609375, 4.75029296875, 2.3062138671875, 2.305847412109375, 2.305751953125, 2.306163818359375, 2.305503173828125, 2.306188232421875, 2.305998779296875, 2.306451416015625, 2.305594482421875, 2.30626513671875, 2.30557080078125, 2.306021484375, 2.305986572265625, 2.30660498046875, 2.306206787109375, 2.305995849609375, 2.306838623046875, 2.306428955078125, 2.306533447265625, 2.306038818359375, 2.306441162109375, 2.306575439453125, 2.30774267578125, 2.30721435546875, 2.307527587890625, 2.30657421875, 2.3058974609375, 2.30637255859375, 2.305725341796875, 2.30624560546875, 2.306049072265625, 2.3060224609375, 2.306079833984375, 2.306041748046875, 2.305879150390625, 2.30664404296875, 2.306240478515625, 2.30664697265625, 2.306007080078125, 2.306346923828125, 2.3059384765625, 2.305849365234375, 2.306935791015625, 2.306491455078125, 2.306400146484375, 2.3062744140625, 2.306458740234375, 2.306747314453125, 2.307177490234375, 2.306862060546875, 2.306808837890625, 2.306905029296875, 2.30681298828125, 2.306693115234375, 2.30618017578125, 2.30659375, 2.306906005859375, 2.30719482421875, 2.306628662109375, 2.306923583984375, 2.306654296875, 2.3069306640625]",tokens/s,0.4270648326252095,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1554.096128,1957.167104,0.0,1310.72,1163.82464,s,10,1.286997444152832,0.1286997444152832,0.0010709154419692667,0.12870168304443358,0.13024390106201172,0.13026507034301757,0.13028200576782226,"[0.13028623962402344, 0.13023919677734375, 0.1282391662597656, 0.12805363464355468, 0.1269244155883789, 0.12729046630859375, 
0.12865869140625, 0.1287446746826172, 0.1295413055419922, 0.1290196533203125]",tokens/s,1989.1259393177147,kWh,1.4998026454163673e-06,8.218217431655106e-07,6.2844740852399835e-06,8.60609847382186e-06,tokens/kWh,29746347.985524923,MB,1554.096128,1959.264256,0.0,1312.817152,1232.774656,s,10,75.16664355468751,7.516664355468751,0.019324999879587898,7.517852294921875,7.5454459960937506,7.548604345703125,7.551131025390625,"[7.5517626953125, 7.5180322265625, 7.52255224609375, 7.52390966796875, 7.544744140625, 7.51767236328125, 7.49461669921875, 7.49106689453125, 7.50185205078125, 7.5004345703125]",tokens/s,8.3813772999142,kWh,8.89009176429398e-05,4.872418928564946e-05,0.00036540068437377195,0.0005030257913023612,tokens/kWh,125242.08716393956,,s,629,76.20247959136968,0.12114861620249542,0.01533479707935131,0.1192816619873047,0.12045271148681641,0.12072119903564453,0.24720068908691412,"[0.12236902618408203, 0.12192870330810547, 0.12110336303710938, 0.12021862030029297, 0.12056371307373047, 0.11989708709716797, 0.11929497528076172, 0.11829862213134766, 0.11859661102294922, 0.11861504364013672, 0.11821469116210938, 0.11968099212646484, 0.11959193420410157, 0.12045926666259765, 0.12021043395996094, 0.1199452133178711, 0.11970047760009765, 0.12003225708007813, 0.120163330078125, 0.11992678070068359, 0.11977011108398437, 0.11847372436523437, 0.11856588745117187, 0.11883724975585938, 0.11842662048339844, 0.11851676940917968, 0.11948745727539062, 0.11911475372314453, 0.119552001953125, 0.1215068130493164, 0.120257568359375, 0.1204623031616211, 0.12012236785888672, 0.12004659271240234, 0.12008038330078125, 0.12001689910888672, 0.11995954895019531, 0.12004761505126953, 0.12005683135986328, 0.12010189056396485, 0.12003635406494141, 0.11968307495117188, 0.12004863739013671, 0.12036300659179687, 0.11988992309570312, 0.1200558090209961, 0.1202534408569336, 0.12022169494628906, 0.12032921600341796, 0.12026265716552734, 0.12077056121826171, 0.12018585968017578, 0.11901235198974609, 0.1193861083984375, 0.11910348510742187, 0.11925606536865234, 0.11975270080566407, 0.12020941162109375, 0.11999846649169922, 0.12033843231201172, 0.11914854431152344, 0.12003942108154297, 0.2506158142089844, 0.12027391815185547, 0.11959091186523438, 0.11991551971435546, 0.1192816619873047, 0.11801292419433594, 0.11855052947998047, 0.11803955078125, 0.11798016357421875, 0.11803241729736329, 0.11873174285888671, 0.12019200134277344, 0.11996057891845703, 0.11952947235107422, 0.11857612609863281, 0.11973939514160156, 0.11993702697753907, 0.11889049530029297, 0.11839385223388672, 0.11943730926513672, 0.11985203552246093, 0.11852082824707032, 0.11822898864746094, 0.11862739562988281, 0.11842758178710938, 0.11852185821533204, 0.1185054702758789, 0.11835596466064453, 0.11835699462890625, 0.11841331481933594, 0.11834674835205078, 0.11860070037841797, 0.12062003326416015, 0.11992473602294922, 0.12043059539794922, 0.12157746887207031, 0.12022579193115235, 0.11964927673339844, 0.12050534057617188, 0.12049407958984375, 0.12015615844726563, 0.11910553741455078, 0.12046438598632812, 0.1204510726928711, 0.12023603057861328, 0.11975885009765624, 0.12176998138427735, 0.12023705291748046, 0.12054937744140624, 0.11995750427246094, 0.12132864379882813, 0.11925917053222657, 0.1186283187866211, 0.11835699462890625, 0.11851776123046875, 0.11839794921875, 0.11879730987548828, 0.11951107025146485, 0.11866006469726563, 0.11900109100341796, 0.11836723327636718, 0.11853414154052734, 0.11843583679199218, 0.24882893371582032, 0.11982233428955077, 
0.11939225769042969, 0.12049919891357422, 0.12002508544921875, 0.1198919677734375, 0.12023910522460937, 0.11994217681884765, 0.11870614624023437, 0.11836006164550782, 0.11821260833740234, 0.11851570892333985, 0.11971686553955078, 0.12000460815429688, 0.11984076690673828, 0.11931136322021485, 0.11905126190185547, 0.11986329650878906, 0.11975373077392579, 0.11980595397949219, 0.1196267547607422, 0.11956735992431641, 0.12024422454833984, 0.12106342315673828, 0.11977728271484375, 0.1197127685546875, 0.11865599822998046, 0.11831404876708984, 0.11899692535400391, 0.11852082824707032, 0.11829145812988281, 0.11852595520019531, 0.11846348571777343, 0.11836518096923829, 0.11844812774658203, 0.11833757019042969, 0.11830780792236328, 0.11964620971679688, 0.11897650909423828, 0.11840921783447265, 0.11968511962890625, 0.11841024017333984, 0.11969535827636718, 0.11970559692382812, 0.12029644775390624, 0.12001078033447266, 0.11961955261230468, 0.12051967620849609, 0.11861196899414063, 0.11842150115966797, 0.11837133026123046, 0.12028313446044922, 0.11990322875976563, 0.1188751983642578, 0.12033939361572266, 0.11918643188476563, 0.1187583999633789, 0.12003942108154297, 0.11934515380859376, 0.12007730865478515, 0.12025138854980469, 0.12019916534423829, 0.12015615844726563, 0.24848793029785157, 0.11997798156738282, 0.11991763305664062, 0.12037625885009766, 0.12008345794677734, 0.11944652557373046, 0.12011827087402344, 0.12008448028564453, 0.11987149047851563, 0.12019404602050782, 0.12016851043701172, 0.11977823638916016, 0.11987865447998047, 0.11973426818847656, 0.12008038330078125, 0.11942092895507812, 0.11997491455078126, 0.11880242919921875, 0.11979673767089843, 0.1197127685546875, 0.11939532470703125, 0.11976703643798828, 0.11997494506835937, 0.12019094085693359, 0.12064665222167968, 0.12001996612548828, 0.12060364532470703, 0.12040294647216797, 0.11997388458251954, 0.11948646545410156, 0.11998822021484375, 0.11993907165527344, 0.11955097961425781, 0.11953561401367188, 0.11927756500244141, 0.11897036743164062, 0.11999334716796875, 0.11905126190185547, 0.11846553802490234, 0.11841843414306641, 0.11854847717285157, 0.11835596466064453, 0.11830995178222656, 0.11828428649902344, 0.1203987808227539, 0.11818905639648437, 0.1193707504272461, 0.11814604949951171, 0.11847782135009766, 0.1183078384399414, 0.11799350738525391, 0.11804771423339844, 0.11817372894287109, 0.11848700714111328, 0.11852082824707032, 0.11849932861328125, 0.12020735931396484, 0.120089599609375, 0.12004761505126953, 0.12012134552001953, 0.11869286346435547, 0.11855974578857421, 0.11863142395019531, 0.24572006225585938, 0.1183477783203125, 0.11846041870117187, 0.11811737823486328, 0.1181470718383789, 0.11830067443847657, 0.11928985595703125, 0.11911065673828125, 0.1185771484375, 0.11835187530517578, 0.12018994903564453, 0.12008550262451172, 0.11990534210205078, 0.1200219497680664, 0.12004659271240234, 0.11994931030273437, 0.12035686492919923, 0.12041522979736329, 0.12053298950195312, 0.12031999969482422, 0.12058419036865234, 0.12013158416748047, 0.12075929260253906, 0.11833036804199219, 0.1206794204711914, 0.12085862731933594, 0.12161433410644532, 0.11919667053222656, 0.1187041244506836, 0.11844300842285156, 0.11833650970458984, 0.11833856201171875, 0.1186355209350586, 0.11820236968994141, 0.12034559631347656, 0.12019712066650391, 0.12016947174072265, 0.11850035095214843, 0.12005375671386719, 0.12051763153076171, 0.11887615966796874, 0.11807539367675782, 0.11823411560058594, 0.11905741119384766, 0.12032307434082032, 0.1189775390625, 
0.12029132843017579, 0.12084019470214843, 0.12052992248535156, 0.1207357406616211, 0.12054220581054688, 0.12078284454345703, 0.12055244445800781, 0.12059852600097656, 0.12064358520507812, 0.12082486724853515, 0.1207326431274414, 0.12082278442382813, 0.12053196716308594, 0.12073065948486328, 0.12090262603759766, 0.12064870452880859, 0.12058112335205078, 0.24601292419433593, 0.11817881774902343, 0.11808255767822265, 0.11800780487060547, 0.118255615234375, 0.11830067443847657, 0.11829145812988281, 0.11941478729248046, 0.12005375671386719, 0.1198202896118164, 0.12005785369873047, 0.11806412506103516, 0.11867750549316407, 0.11829145812988281, 0.11833548736572265, 0.11830989074707031, 0.11806822204589844, 0.11833241271972657, 0.11837747192382812, 0.11845120239257813, 0.11834572601318359, 0.11841228485107422, 0.11827609252929687, 0.11805286407470703, 0.11896729278564454, 0.11836313629150391, 0.11981619262695313, 0.12392044830322266, 0.1205656967163086, 0.12015821075439453, 0.11990016174316406, 0.12003533172607422, 0.11934003448486329, 0.1199974365234375, 0.1193154525756836, 0.11959500885009766, 0.11863040161132812, 0.1188106231689453, 0.12067327880859376, 0.11914444732666016, 0.11830477142333984, 0.11853721618652344, 0.1185269775390625, 0.11941990661621094, 0.11858329772949219, 0.1186693115234375, 0.12036608123779297, 0.12004557037353515, 0.1200558090209961, 0.12050841522216797, 0.12022681427001954, 0.12039065551757812, 0.12011212921142578, 0.12010495758056641, 0.12036608123779297, 0.1201817626953125, 0.12043673706054688, 0.12033843231201172, 0.12007526397705078, 0.12000972747802735, 0.12006505584716796, 0.11996975708007812, 0.11998822021484375, 0.2494791717529297, 0.12030156707763671, 0.11901952362060547, 0.11824127960205078, 0.1192816619873047, 0.11887308502197266, 0.11826175689697266, 0.11869388580322265, 0.11826687622070313, 0.11836211395263672, 0.11810918426513672, 0.11887104034423829, 0.11972198486328126, 0.11960012817382812, 0.1192468490600586, 0.118002685546875, 0.11875942230224609, 0.11941478729248046, 0.11890585327148437, 0.11843382263183594, 0.1191526107788086, 0.11959500885009766, 0.11970668792724609, 0.11957036590576171, 0.11933695983886719, 0.11909327697753906, 0.11842966461181641, 0.11905228424072266, 0.11911888122558593, 0.11894780731201172, 0.11848089599609375, 0.1181890869140625, 0.11840406036376953, 0.1190297622680664, 0.11982848358154297, 0.11972096252441407, 0.11864371490478516, 0.11924582672119141, 0.11929804992675781, 0.11825459289550781, 0.11951718139648437, 0.11916287994384765, 0.11947417449951171, 0.11800064086914062, 0.11845017242431641, 0.11818089294433594, 0.11930210876464843, 0.1199810562133789, 0.11853517150878906, 0.11870310211181641, 0.11945164489746093, 0.11844096374511719, 0.1184194564819336, 0.118508544921875, 0.11926220703125, 0.12017356872558593, 0.11892838287353516, 0.1193021469116211, 0.11871849822998047, 0.11844911956787109, 0.11832115173339844, 0.11867545318603516, 0.11844403076171875, 0.24766259765625, 0.11916802978515625, 0.12123133087158203, 0.12028313446044922, 0.11920588684082031, 0.11884953308105468, 0.11862528228759765, 0.11994931030273437, 0.11886182403564453, 0.11885363006591797, 0.12013670349121094, 0.11913011169433593, 0.11992473602294922, 0.11888435363769531, 0.11946598052978516, 0.11882707214355469, 0.1207070083618164, 0.11865702056884765, 0.11839590454101563, 0.11839078521728516, 0.11947724914550781, 0.11919769287109375, 0.11886080169677735, 0.11848601531982422, 0.11876966094970703, 0.11810099029541016, 0.11880960083007812, 
0.11817369842529298, 0.11927142333984375, 0.11965235137939453, 0.118761474609375, 0.11863654327392578, 0.11901439666748047, 0.11827814483642578, 0.11898368072509766, 0.11854233551025391, 0.11844915008544922, 0.11848397064208985, 0.11839078521728516, 0.11844608306884766, 0.11859455871582031, 0.11865190124511718, 0.11820441436767579, 0.11907686614990234, 0.11970867156982422, 0.11921715545654298, 0.11824230194091796, 0.11921817779541016, 0.11916390228271484, 0.11929804992675781, 0.11828326416015625, 0.11838361358642578, 0.11848499298095704, 0.11860991668701172, 0.11808153533935548, 0.11826588439941406, 0.11823715209960937, 0.11888639831542969, 0.11884031677246094, 0.11812454223632812, 0.11833446502685546, 0.11847577667236328, 0.11946905517578126, 0.24866712951660155, 0.11849318695068359, 0.11980188751220704, 0.11915773010253906, 0.11846041870117187, 0.1203589096069336, 0.1205381088256836, 0.11978342437744141, 0.11976601409912109, 0.11947417449951171, 0.11962163543701172, 0.11930316925048828, 0.11854541015625, 0.11897344207763672, 0.11885158538818359, 0.11875020599365234, 0.1181839370727539, 0.11931443023681641, 0.11823616027832032, 0.11900313568115234, 0.11816345977783203, 0.11854847717285157, 0.11989708709716797, 0.11902668762207032, 0.11941683197021484, 0.11941580963134765, 0.11852185821533204, 0.11982540893554687, 0.11974553680419922, 0.11874918365478515, 0.11918950653076171, 0.11970355224609375, 0.11946189117431641, 0.11857100677490234, 0.11914035034179687, 0.11940966033935548, 0.1185689926147461, 0.11831394958496094, 0.11820441436767579, 0.11945574188232422, 0.11997388458251954, 0.1203240966796875, 0.11903180694580077, 0.11818495941162109, 0.11810201263427735, 0.11788288116455078, 0.11807539367675782, 0.11814604949951171, 0.11880038452148438, 0.11919872283935547, 0.11881983947753906, 0.11975987243652343, 0.11975373077392579, 0.11953568267822266, 0.12003424072265625, 0.11991961669921875, 0.12044185638427735, 0.11880550384521485, 0.11889356994628907, 0.11842253112792969, 0.11843379211425781, 0.11815936279296875, 0.11811634826660156, 0.24846543884277345, 0.11927139282226562, 0.11798732757568359, 0.11792998504638671, 0.11840512084960937, 0.11863859558105469, 0.11815321350097656, 0.11805184173583984, 0.11871437072753906, 0.11836109161376954, 0.1180794906616211, 0.11815424346923828, 0.1181317138671875, 0.11853004455566406, 0.11842150115966797, 0.11837542724609375, 0.11854847717285157, 0.11832217407226563, 0.11862220764160156, 0.11864985656738282, 0.11943526458740235, 0.11942912292480469, 0.11993395233154297, 0.12021145629882812, 0.12013568115234376, 0.12069990539550782, 0.12175667572021484, 0.12003942108154297, 0.11987763214111329, 0.11907788848876953, 0.1194076156616211, 0.11910553741455078, 0.11873177337646484, 0.11934413146972657, 0.11961958312988281, 0.11883519744873047, 0.11877273559570313, 0.11876761627197266, 0.12034662628173828, 0.12008140563964843, 0.11964415740966797, 0.12007014465332032, 0.12065280151367187, 0.11880754852294922, 0.11892736053466797, 0.11855974578857421, 0.11855052947998047, 0.11903590393066406, 0.11845938873291016, 0.11849523162841796, 0.1184716796875, 0.11835289764404297, 0.11853619384765625, 0.11834368133544922, 0.11937382507324219, 0.11912806701660156, 0.11848806762695313, 0.11961753845214844, 0.1190113296508789, 0.11965132904052735, 0.11860281372070312, 0.11934611511230468, 0.11991145324707031]",tokens/s,8.254324575433342,,,main,False,False,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last):
  File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch
    benchmark_report = Benchmark.launch(benchmark_config)
  File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch
    report = launcher.launch(worker=cls.run, worker_args=[config])
  File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch
    raise ChildProcessError(response[""traceback""])
ChildProcessError: Traceback (most recent call last):
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status
    response.raise_for_status()
  File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status
    raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error
    metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers)
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata
    r = _request_wrapper(
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper
    response = _request_wrapper(
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper
    hf_raise_for_status(response)
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status
    raise HfHubHTTPError(message, response=response) from e
huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa53e-5dddb95c5609a1d50bf7f90c;1898565b-d7f9-4e0b-af1a-22c8efdbbbeb)

403 Forbidden: Authorization error..
Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json.
If you are trying to create or update content,make sure you have a token with the `write` role.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file
    resolved_file = hf_hub_download(
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn
    return fn(*args, **kwargs)
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download
    return _hf_hub_download_to_cache_dir(
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir
    _raise_on_head_call_error(head_call_error, force_download, local_files_only)
  File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error
    raise LocalEntryNotFoundError(
huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on.

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target
    report = worker(*worker_args)
  File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run
    backend: Backend = backend_factory(backend_config)
  File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__
    super().__init__(config)
  File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__
    self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs)
  File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config
    return AutoConfig.from_pretrained(model, **kwargs)
  File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained
    config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
  File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict
    config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
  File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict
    resolved_config_file = cached_file(
  File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file
    raise EnvironmentError(
OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1979.4944,5480.382464,0.0,4833.93536,4503.282688,s,10,5.706418334960937,0.5706418334960938,0.0011422166985642673,0.5705734558105469,0.5721191223144532,0.5723321746826172,0.5725026165771484,"[0.5707071533203125, 0.5725452270507813, 0.570284912109375, 0.5698196411132812, 0.5708470458984375, 0.569266357421875, 0.5704397583007812, 0.571673828125, 0.57207177734375, 0.5687626342773437]",tokens/s,448.6176529182773,kWh,6.72348948356546e-06,3.6841831159411714e-06,3.142670724010705e-05,4.183437983961368e-05,tokens/kWh,6119368.829691346,MB,1979.4944,5480.382464,0.0,4833.93536,4688.699392,s,10,334.87464062500004,33.487464062499996,0.006284503345517177,33.486009765625,33.4966609375,33.498351953125,33.499704765625005,"[33.50004296875, 33.49628515625, 33.487125, 33.48791796875, 33.48489453125, 33.48313671875, 33.490703125, 33.48450390625, 33.48125, 33.47878125]",tokens/s,1.8813010111013093,kWh,0.0003953183352579305,0.00021666870724086642,0.0018133419907958965,0.002425329033294693,tokens/kWh,25975.85693946752,,s,629,339.4783874511717,0.5397112678079045,0.0678655066405644,0.531504150390625,0.5319763916015625,0.5321570190429687,1.1021465185546875,"[0.531127197265625, 0.5313873901367188, 0.5311314086914063, 0.5313760986328125, 0.5314212036132813, 0.5316044921875, 0.5316137084960938, 0.5314949340820313, 0.5322680053710938, 0.531794921875, 0.5314816284179688, 0.5319925537109375, 0.531863525390625, 0.5316638793945313, 0.5320120239257813, 0.5315921630859375, 0.5318819580078125, 0.5321605224609375, 0.53214208984375, 0.531884033203125, 0.5315399780273438, 0.5322833862304688, 0.532052978515625, 0.5314406127929687, 0.5317867431640625, 0.5322066040039063, 0.531673095703125, 0.53195263671875, 0.5317314453125, 0.5317376098632812, 0.5320519409179687, 0.5320724487304688, 0.531541015625, 0.5316557006835938, 0.53180517578125, 0.5318154296875, 0.5315655517578125, 0.5316137084960938, 0.5321820068359375, 0.531557373046875, 0.5312399291992187, 0.5314426879882812, 0.5312901000976562, 0.53134130859375, 0.531072998046875, 0.5317980346679687, 0.5320304565429688, 0.5319249877929687, 0.5319178466796874, 0.5318133544921875, 0.531989501953125, 0.5314345092773437, 0.531373046875, 0.531641357421875, 0.5318492431640625, 0.5319464721679688, 0.5321226196289063, 0.5319137573242188, 0.5322987670898438, 0.531788818359375, 0.53163623046875, 0.5317109985351562, 1.1043450927734375, 0.531673095703125, 0.5321574096679688, 0.5315389404296875, 0.5321922607421875, 0.531525634765625, 0.5317652587890624, 0.531631103515625, 0.5321697387695312, 0.5315164184570312, 0.5316024169921875, 0.5313341674804688, 0.5321021728515625, 0.5321441040039062, 0.5318215942382812, 0.531609619140625, 0.5315983276367188, 0.5313085327148438, 0.5319906005859375, 0.5317283325195312, 0.5320519409179687, 0.5316751098632813, 
0.5315983276367188, 0.5317775268554688, 0.5322587890625, 0.5316044921875, 0.5319588012695312, 0.5312645263671875, 0.5314641723632813, 0.5313402709960937, 0.531599365234375, 0.5311528930664062, 0.5314417114257812, 0.5315245361328125, 0.5315297241210938, 0.531078125, 0.53165771484375, 0.5314437255859376, 0.5313720092773437, 0.53186767578125, 0.531873779296875, 0.5317017822265625, 0.53191064453125, 0.5315491943359375, 0.531904541015625, 0.53142529296875, 0.532937744140625, 0.5315389404296875, 0.5317601318359375, 0.5315614624023437, 0.5316034545898437, 0.5312798461914062, 0.5315686645507812, 0.53125634765625, 0.5316055297851563, 0.5312440185546875, 0.5317672729492188, 0.5314345092773437, 0.5318533325195313, 0.5322587890625, 0.5322587890625, 0.5315594482421875, 0.5315952758789062, 1.10245166015625, 0.5316915283203125, 0.5318369140625, 0.531167236328125, 0.5314037475585938, 0.5311907958984375, 0.531399658203125, 0.5313310546875, 0.5314713745117188, 0.5310955810546875, 0.5314068603515625, 0.5314180908203125, 0.5313095703125, 0.5320089721679687, 0.532147216796875, 0.5318717041015625, 0.5314703369140625, 0.5313587036132813, 0.5315277099609375, 0.5315932006835937, 0.5320427856445312, 0.5314682006835938, 0.5316864013671875, 0.5312532348632812, 0.531578857421875, 0.5313218383789062, 0.5315369262695312, 0.5313751220703125, 0.5314324340820312, 0.5314744262695312, 0.53136181640625, 0.5313638305664062, 0.5315297241210938, 0.5316331787109375, 0.5315000610351562, 0.5318041381835937, 0.5314805908203125, 0.5310812377929688, 0.5316290283203124, 0.531251220703125, 0.5314805908203125, 0.5312870483398437, 0.5313802490234375, 0.5314498291015625, 0.531378173828125, 0.5312542724609375, 0.531356689453125, 0.5313423461914063, 0.5315717163085938, 0.5312665405273438, 0.5319905395507812, 0.5315286865234375, 0.5317857055664063, 0.5316792602539062, 0.5319916381835937, 0.5315020141601563, 0.5320150756835937, 0.531610595703125, 0.5316116333007812, 0.531620849609375, 0.5315635375976563, 0.53176318359375, 0.5320653076171875, 1.1017093505859374, 0.5309368286132813, 0.5314478149414062, 0.531114990234375, 0.5318041381835937, 0.531794921875, 0.5318615112304688, 0.5316116333007812, 0.5316034545898437, 0.5310453491210938, 0.5313955688476563, 0.5313310546875, 0.5316249389648438, 0.5315194702148438, 0.5316045532226562, 0.5311835327148438, 0.5314447631835938, 0.53148876953125, 0.5315830688476563, 0.5314241943359375, 0.5317130126953125, 0.5312276611328125, 0.5314457397460938, 0.5311815795898438, 0.5318450927734375, 0.5317929077148438, 0.5315983276367188, 0.5316392822265625, 0.5314345092773437, 0.5313597412109375, 0.5314928588867187, 0.53110986328125, 0.5317550048828125, 0.531198974609375, 0.5312501831054688, 0.5313074951171874, 0.5314006958007812, 0.5310812377929688, 0.5314263305664062, 0.53121435546875, 0.5314088745117187, 0.5312266235351563, 0.5319813232421875, 0.5324287719726563, 0.53281494140625, 0.532264892578125, 0.5315143432617188, 0.531272705078125, 0.5317017822265625, 0.5313546142578125, 0.531945556640625, 0.5313545532226562, 0.53178369140625, 0.5314877319335938, 0.5314969482421875, 0.5313597412109375, 0.5315460815429688, 0.5314918212890625, 0.5317130126953125, 0.5316566772460938, 0.5317969970703125, 0.5315768432617187, 0.53257421875, 1.1031951904296875, 0.5310873413085937, 0.531431396484375, 0.5310433349609375, 0.5313966064453125, 0.5312911376953126, 0.5316239624023438, 0.5313822631835937, 0.5315317993164063, 0.53124609375, 0.5313659057617187, 0.5311549682617187, 0.5316239624023438, 0.531420166015625, 0.5314857177734374, 
0.531357666015625, 0.5316966552734375, 0.5313771362304688, 0.5317969970703125, 0.5319751586914062, 0.531504150390625, 0.5317877807617187, 0.53207958984375, 0.531504150390625, 0.5317980346679687, 0.5316649169921875, 0.5317243041992188, 0.5313710327148438, 0.53178369140625, 0.5310208129882813, 0.5313812255859375, 0.5311692504882812, 0.5312440185546875, 0.530914306640625, 0.531140625, 0.5312010498046875, 0.5316300659179688, 0.5319229736328125, 0.5317314453125, 0.5311651611328125, 0.5311211547851562, 0.531178466796875, 0.5316658935546875, 0.5312122802734375, 0.5314109497070313, 0.5314529418945313, 0.5315552978515625, 0.531398681640625, 0.5312880859375, 0.5313443603515625, 0.53163623046875, 0.5312163696289063, 0.5315215454101563, 0.5311047973632812, 0.5318932495117188, 0.531894287109375, 0.5316249389648438, 0.5334948120117188, 0.5316904907226563, 0.5312880859375, 0.5315880737304688, 0.5312071533203125, 0.5317755126953125, 1.1012259521484375, 0.5312655639648437, 0.531800048828125, 0.5310771484375, 0.5316597900390625, 0.5314877319335938, 0.5313556518554687, 0.531009521484375, 0.5315706787109375, 0.5319342041015624, 0.5316925659179688, 0.5315604248046875, 0.5314334716796875, 0.5310894165039063, 0.531968994140625, 0.53129931640625, 0.5314898071289063, 0.5309706420898438, 0.5316198120117187, 0.5311743774414063, 0.53146826171875, 0.5311262817382812, 0.5313914794921875, 0.5310750732421875, 0.5313494873046875, 0.53096142578125, 0.5314263916015625, 0.5310238037109375, 0.5317867431640625, 0.5314058227539062, 0.531926025390625, 0.5319659423828125, 0.5314160766601562, 0.531178466796875, 0.5314283447265625, 0.5311897583007813, 0.5313648681640625, 0.5312819213867187, 0.53264794921875, 0.531478515625, 0.5314662475585937, 0.5313802490234375, 0.53146728515625, 0.531188720703125, 0.531399658203125, 0.5311815795898438, 0.5315440673828125, 0.5322147827148438, 0.531962890625, 0.5316351928710937, 0.5318799438476562, 0.5312614135742187, 0.5315430297851562, 0.5312973022460937, 0.531641357421875, 0.5314744262695312, 0.5315491943359375, 0.5312296752929687, 0.531863525390625, 0.5313689575195313, 0.5315000610351562, 0.5313095703125, 0.5315379028320313, 1.1023165283203125, 0.5312081909179688, 0.5320724487304688, 0.5311488037109375, 0.5318819580078125, 0.5312911376953126, 0.5314949340820313, 0.5315706787109375, 0.5317857055664063, 0.5313474731445312, 0.5318563842773437, 0.5311016845703125, 0.5315194702148438, 0.53150927734375, 0.5318553466796875, 0.5311559448242188, 0.5314652099609375, 0.5313792114257813, 0.5314171142578125, 0.5314180908203125, 0.5321830444335938, 0.5318389892578125, 0.5316331787109375, 0.5316177368164062, 0.5313392944335937, 0.5313668823242188, 0.5315061645507813, 0.5312501831054688, 0.5319915771484375, 0.5316126708984374, 0.5317601318359375, 0.5316557006835938, 0.53146826171875, 0.5313116455078125, 0.5315369262695312, 0.5313966064453125, 0.5317078857421875, 0.5314437255859376, 0.5320560913085938, 0.5326489868164063, 0.5319014282226563, 0.531631103515625, 0.5314703369140625, 0.5317478637695312, 0.5316761474609375, 0.5316065063476563, 0.531609619140625, 0.5315194702148438, 0.5318502197265625, 0.5314826049804687, 0.5319669799804687, 0.5315072021484375, 0.5315419921875, 0.5310453491210938, 0.5313034057617188, 0.5311867065429687, 0.5317181396484375, 0.531683349609375, 0.5318041381835937, 0.5316966552734375, 0.5320130615234375, 0.5314692993164063, 0.5316198120117187, 1.103932373046875, 0.5310914306640625, 0.531578857421875, 0.5308538818359375, 0.5313659057617187, 0.5314221801757812, 0.5314703369140625, 
0.5309716186523438, 0.5315850830078125, 0.5310443115234375, 0.5311928100585938, 0.53098291015625, 0.5311600341796875, 0.5316239624023438, 0.531800048828125, 0.531646484375, 0.5318225708007812, 0.5314242553710937, 0.5319229736328125, 0.53176318359375, 0.5316587524414063, 0.53146826171875, 0.531483642578125, 0.5312266235351563, 0.5317498779296875, 0.5314273071289063, 0.5314631958007813, 0.5311232299804688, 0.5317816162109374, 0.5313494873046875, 0.5318584594726562, 0.5314795532226563, 0.5324308471679687, 0.5313074951171874, 0.5318523559570313, 0.5310309448242188, 0.5311488037109375, 0.5311488037109375, 0.5314488525390625, 0.530966552734375, 0.5313894653320312, 0.5310975952148438, 0.5314426879882812, 0.5313054809570312, 0.5314180908203125, 0.5313187866210938, 0.5313914794921875, 0.5315809326171875, 0.5312921752929688, 0.5312911376953126, 0.531684326171875, 0.5320560913085938, 0.5315133666992188, 0.5317980346679687, 0.5318328247070313, 0.5314559936523438, 0.5321195678710937, 0.5318225708007812, 0.5315963134765626, 0.5315829467773437, 0.5318184814453125, 0.531431396484375, 0.5319127197265625, 1.103847412109375, 0.531304443359375, 0.5315451049804687, 0.531357666015625, 0.5316792602539062, 0.5319639282226563, 0.5315829467773437, 0.5313710327148438, 0.5315205078125, 0.5312491455078125, 0.5314611206054688, 0.5316341552734375, 0.5313760986328125, 0.5311549682617187, 0.531583984375, 0.5310637817382813, 0.5312645263671875, 0.53108837890625, 0.5316741333007813, 0.531162109375, 0.5312429809570313, 0.5314140014648437, 0.531314697265625, 0.5313054809570312, 0.531968017578125, 0.531599365234375, 0.5316085815429688, 0.5312634887695312, 0.5314406127929687, 0.53096240234375, 0.5313423461914063, 0.5313720092773437, 0.5316188354492187, 0.5309050903320313, 0.5311948852539062, 0.5310873413085937, 0.5315645141601563, 0.5312706298828125, 0.5313065185546875, 0.5313433837890625, 0.53119384765625, 0.5309767456054687, 0.5315348510742187, 0.532041748046875, 0.5317161254882813, 0.5314119873046875, 0.5313494873046875, 0.5311979370117188, 0.5316075439453125, 0.5313341674804688, 0.5313423461914063, 0.531177490234375, 0.5312553100585937, 0.531019775390625, 0.5315665893554687, 0.5312440185546875, 0.5314283447265625, 0.5314006958007812, 0.5314877319335938, 0.5311262817382812, 0.5316321411132813, 0.5312686157226563, 0.5320028076171875, 1.103824951171875, 0.5310279541015624, 0.53146826171875, 0.5310341186523437, 0.5313740844726562, 0.5310853271484375, 0.5314447631835938, 0.5311447143554687, 0.5314212036132813, 0.531420166015625, 0.5311948852539062, 0.5308630981445313, 0.5313853149414063, 0.5309020385742188, 0.53150830078125, 0.5310576171875, 0.5312839965820313, 0.5317191772460937, 0.5318482055664062, 0.5312532348632812, 0.5314641723632813, 0.5309531860351563, 0.5312553100585937, 0.5309808349609375, 0.5311590576171875, 0.5310494995117188, 0.531863525390625, 0.5317703857421875, 0.5314180908203125, 0.53118359375, 0.5314396362304687, 0.5311129150390625, 0.5312420043945313, 0.5310259399414062, 0.5311918334960938, 0.5312973022460937, 0.5321564331054688, 0.5315194702148438, 0.5316976928710937, 0.5313034057617188, 0.5316218872070313, 0.531430419921875, 0.5316812744140625, 0.5315625, 0.5316239624023438, 0.5310105590820312, 0.5315389404296875, 0.5312665405273438, 0.5317191772460937, 0.5312142944335938, 0.5314641723632813, 0.53167822265625, 0.5313607788085938, 0.5311918334960938, 0.5314918212890625, 0.53161474609375, 0.5318031616210938, 0.5315338134765625, 0.5315020751953125, 0.5313065185546875, 0.531968994140625, 0.5318994140625, 
0.5317734375]",tokens/s,1.8528425468336207,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,2108.223488,15043.395584,0.0,14396.94848,13898.251776,s,10,16.774060791015625,1.6774060791015626,0.0018297475804955295,1.6770918579101561,1.6789085327148436,1.6804502990722656,1.6816837121582031,"[1.6819920654296876, 1.67540869140625, 1.6774444580078125, 1.676206298828125, 1.6767392578125, 1.6753848876953126, 1.6765943603515625, 1.6776787109375, 1.67856591796875, 1.678046142578125]",tokens/s,152.61659248136053,kWh,1.9787035716904537e-05,1.0841899168262898e-05,9.527879844519888e-05,0.0001259077333303663,tokens/kWh,2033234.919163287,MB,2108.223488,15043.395584,0.0,14396.94848,14315.959808,s,10,982.2773984375,98.22773984375,0.006322715714689567,98.22801562500001,98.23432265625,98.236829296875,98.238834609375,"[98.233765625, 98.224484375, 98.2393359375, 98.2251640625, 98.22903125, 98.2138984375, 98.2302578125, 98.2295859375, 98.224875, 98.227]",tokens/s,0.6413666862356148,kWh,0.001159520428114467,0.0006355184711477705,0.005722492522434801,0.0075175314216970384,tokens/kWh,8380.410598374077,,s,629,995.810533813476,1.5831646006573552,0.19919555203142542,1.559141357421875,1.55962734375,1.5598268310546874,3.23526474609375,"[1.5585750732421875, 1.55860986328125, 1.5594239501953124, 1.5595242919921875, 1.558529052734375, 1.5586417236328125, 1.559109619140625, 1.5591219482421874, 1.5598243408203125, 1.5595867919921875, 1.55911572265625, 1.5588905029296876, 1.5587757568359375, 1.5587236328125, 1.559189453125, 1.558750244140625, 1.5590482177734375, 1.5591536865234374, 1.5591956787109376, 1.55924169921875, 1.558993896484375, 1.559488525390625, 1.559013427734375, 1.5594393310546875, 1.5592908935546874, 1.5589068603515626, 1.55905126953125, 1.559194580078125, 1.5587266845703125, 1.5591065673828124, 1.5589693603515624, 1.558802490234375, 1.558849609375, 1.559245849609375, 1.5592437744140626, 1.559234619140625, 1.55915771484375, 1.5592222900390624, 1.5598233642578125, 1.55966162109375, 1.559911376953125, 1.5597353515625, 1.559793701171875, 1.5596707763671875, 1.55985302734375, 1.55943115234375, 1.5597127685546874, 1.5593175048828125, 1.55945263671875, 1.559732177734375, 1.559920654296875, 1.5596063232421875, 1.5595447998046874, 1.5596646728515624, 1.559162841796875, 1.5595458984375, 1.55953662109375, 1.5590595703125, 1.559057373046875, 1.5596236572265625, 1.5592509765625, 1.559478271484375, 3.234529296875, 1.55871337890625, 1.5585946044921875, 1.5587962646484375, 1.5588270263671875, 1.558576171875, 1.558561767578125, 1.55837744140625, 1.5585587158203125, 1.5589652099609375, 1.5587061767578125, 1.55881787109375, 1.558748046875, 1.5588065185546875, 1.5585740966796875, 1.5593819580078125, 1.5591004638671875, 1.558873046875, 1.5587451171875, 1.5594066162109375, 1.559108642578125, 1.55873583984375, 1.5592396240234374, 1.559013427734375, 1.558961181640625, 1.5588013916015624, 
1.5588310546875, 1.5591044921875, 1.5594649658203126, 1.558951904296875, 1.5591546630859374, 1.55907177734375, 1.5593482666015626, 1.5590062255859376, 1.559110595703125, 1.5595079345703124, 1.55951513671875, 1.5589560546875, 1.5587255859375, 1.5589560546875, 1.559404541015625, 1.5599503173828124, 1.5588074951171875, 1.5591341552734375, 1.5587113037109375, 1.55890380859375, 1.561585693359375, 1.5592509765625, 1.5593123779296876, 1.5594271240234374, 1.55922021484375, 1.5592386474609374, 1.559277587890625, 1.559350341796875, 1.5596614990234374, 1.5596533203125, 1.559194580078125, 1.5592427978515624, 1.5593604736328126, 1.5594066162109375, 1.559331787109375, 1.5592806396484375, 1.559973876953125, 3.23797607421875, 1.5602718505859374, 1.559963623046875, 1.5592427978515624, 1.5589161376953125, 1.5587706298828126, 1.5589017333984374, 1.559024658203125, 1.559089111328125, 1.5593585205078124, 1.5589334716796874, 1.5592069091796874, 1.5589990234375, 1.5592745361328124, 1.5593963623046876, 1.559119873046875, 1.5596800537109374, 1.5589478759765625, 1.5588074951171875, 1.5590042724609374, 1.558916015625, 1.55900927734375, 1.5591424560546876, 1.559677978515625, 1.5591373291015624, 1.559119873046875, 1.5591474609375, 1.5588157958984374, 1.5592601318359376, 1.5591690673828125, 1.559141357421875, 1.5591485595703125, 1.55908203125, 1.559183349609375, 1.55915576171875, 1.559400390625, 1.5592960205078126, 1.5591761474609376, 1.5593154296875, 1.5590482177734375, 1.558973388671875, 1.559626708984375, 1.559330810546875, 1.5593973388671876, 1.5593338623046875, 1.559593994140625, 1.5598807373046875, 1.5612364501953124, 1.55947412109375, 1.5594168701171875, 1.5596483154296874, 1.5595643310546874, 1.559582763671875, 1.55965234375, 1.5598623046875, 1.559635986328125, 1.5595242919921875, 1.5597813720703124, 1.5598643798828125, 1.5591075439453126, 1.5595611572265624, 1.55919873046875, 1.5593585205078124, 3.23542333984375, 1.55846044921875, 1.558307861328125, 1.5589805908203125, 1.558640625, 1.5585218505859375, 1.55879833984375, 1.558877197265625, 1.5586907958984375, 1.5587757568359375, 1.5589539794921874, 1.559329833984375, 1.559342041015625, 1.5588546142578126, 1.5590174560546874, 1.559341064453125, 1.558877197265625, 1.5588382568359376, 1.55911474609375, 1.5592764892578126, 1.5589847412109374, 1.5587337646484376, 1.55911376953125, 1.5587901611328125, 1.5588280029296875, 1.5588690185546874, 1.5593216552734375, 1.559299072265625, 1.559120849609375, 1.558898681640625, 1.5588414306640626, 1.55898876953125, 1.5588966064453125, 1.5587901611328125, 1.558739990234375, 1.5589232177734376, 1.5593912353515624, 1.55947314453125, 1.559403564453125, 1.559525390625, 1.559373779296875, 1.5593902587890625, 1.5591658935546875, 1.559456787109375, 1.5621683349609374, 1.558978515625, 1.5591126708984375, 1.55909326171875, 1.5591383056640624, 1.5590767822265625, 1.55901025390625, 1.5589765625, 1.559456787109375, 1.5590185546875, 1.55949462890625, 1.55913623046875, 1.5593287353515626, 1.559487548828125, 1.559192626953125, 1.55945263671875, 1.5594342041015625, 1.5596298828125, 1.5594813232421876, 3.236599853515625, 1.5593328857421875, 1.5589642333984375, 1.55854541015625, 1.558728759765625, 1.558466552734375, 1.55877783203125, 1.5593133544921876, 1.5591546630859374, 1.5588116455078125, 1.5587174072265626, 1.5586641845703124, 1.558667236328125, 1.558561767578125, 1.5583734130859375, 1.558703125, 1.5588648681640624, 1.55964111328125, 1.5586856689453126, 1.558662109375, 1.5585587158203125, 1.558540283203125, 1.5592540283203125, 
1.558740966796875, 1.559257080078125, 1.5590430908203126, 1.5589775390625, 1.558866943359375, 1.5591044921875, 1.5596021728515626, 1.559546875, 1.55960009765625, 1.55951513671875, 1.5589847412109374, 1.5592540283203125, 1.559413818359375, 1.5594434814453124, 1.5591956787109376, 1.559342041015625, 1.5589847412109374, 1.5589805908203125, 1.5593165283203125, 1.559119873046875, 1.5594117431640624, 1.5589334716796874, 1.5593184814453125, 1.559287841796875, 1.5593902587890625, 1.5601192626953124, 1.5590482177734375, 1.559271484375, 1.559646240234375, 1.5597353515625, 1.5595601806640624, 1.5594761962890624, 1.559477294921875, 1.5593011474609375, 1.5621007080078124, 1.559258056640625, 1.5594239501953124, 1.559357421875, 1.5594639892578126, 1.559361572265625, 3.23485693359375, 1.5584061279296875, 1.5584429931640624, 1.558455322265625, 1.558666259765625, 1.5583436279296874, 1.5583365478515625, 1.55860888671875, 1.55905224609375, 1.558590576171875, 1.558578125, 1.5587183837890626, 1.55867236328125, 1.5587255859375, 1.5585269775390624, 1.5587215576171876, 1.558656982421875, 1.5586744384765625, 1.5588331298828124, 1.559310302734375, 1.558556640625, 1.5583118896484376, 1.5588935546875, 1.5596380615234375, 1.559288818359375, 1.558983642578125, 1.559357421875, 1.559225341796875, 1.558750244140625, 1.558867919921875, 1.5587225341796875, 1.5589283447265625, 1.5586417236328125, 1.5587911376953125, 1.5586907958984375, 1.559267333984375, 1.5594957275390624, 1.558765625, 1.5612979736328125, 1.5589273681640625, 1.5587225341796875, 1.558982666015625, 1.5588209228515626, 1.5590604248046875, 1.558908935546875, 1.5590697021484374, 1.5586539306640625, 1.5587706298828126, 1.5595069580078125, 1.559635986328125, 1.5590185546875, 1.55902978515625, 1.5588546142578126, 1.5590277099609375, 1.5595592041015625, 1.5593226318359374, 1.5589283447265625, 1.5589744873046876, 1.5589171142578124, 1.5588587646484375, 1.5594556884765625, 1.5590963134765625, 1.5595478515625, 3.235958740234375, 1.5587061767578125, 1.5582955322265626, 1.558918212890625, 1.559405517578125, 1.5590072021484376, 1.559066650390625, 1.559258056640625, 1.558982666015625, 1.5591373291015624, 1.5590123291015625, 1.559109619140625, 1.5587010498046876, 1.5590697021484374, 1.558677490234375, 1.558846435546875, 1.559234619140625, 1.55937890625, 1.558992919921875, 1.5595223388671875, 1.559162841796875, 1.5587706298828126, 1.559435302734375, 1.560753173828125, 1.55900830078125, 1.5587052001953126, 1.5587420654296875, 1.55875634765625, 1.5585545654296875, 1.55881884765625, 1.5590809326171875, 1.559193603515625, 1.559277587890625, 1.55932568359375, 1.55913623046875, 1.5592764892578126, 1.559235595703125, 1.5595654296875, 1.559406494140625, 1.5591424560546876, 1.558719482421875, 1.55981103515625, 1.5590491943359375, 1.5598408203125, 1.55958984375, 1.55905029296875, 1.5590296630859375, 1.5592437744140626, 1.559709716796875, 1.559160888671875, 1.5592642822265625, 1.55951513671875, 1.559525390625, 1.55964111328125, 1.559710693359375, 1.5596390380859375, 1.5594691162109375, 1.55936767578125, 1.5593133544921876, 1.5591116943359375, 1.5594761962890624, 1.5594700927734375, 1.5595284423828124, 3.23728076171875, 1.558865966796875, 1.5586630859375, 1.5593482666015626, 1.5598438720703125, 1.559294921875, 1.559099365234375, 1.5591485595703125, 1.558935546875, 1.55944970703125, 1.558636474609375, 1.5588331298828124, 1.5585986328125, 1.558814697265625, 1.55905224609375, 1.5590338134765624, 1.56031591796875, 1.5590655517578125, 1.5592652587890625, 1.5588966064453125, 1.5590400390625, 
1.5587420654296875, 1.5598909912109375, 1.559056396484375, 1.5592652587890625, 1.5594056396484375, 1.5589385986328126, 1.5587860107421876, 1.55886083984375, 1.5590543212890624, 1.559300048828125, 1.5590921630859376, 1.559215087890625, 1.5589744873046876, 1.5589600830078125, 1.5591177978515625, 1.5594957275390624, 1.5592960205078126, 1.558877197265625, 1.5592017822265625, 1.5588270263671875, 1.5589222412109376, 1.5591588134765626, 1.559298095703125, 1.5588055419921876, 1.5590748291015626, 1.559099365234375, 1.5595028076171875, 1.5594915771484374, 1.5594495849609376, 1.559300048828125, 1.55938916015625, 1.5596697998046876, 1.5595755615234375, 1.5595201416015625, 1.5593564453125, 1.5596134033203124, 1.55945263671875, 1.5595294189453126, 1.5592764892578126, 1.559214111328125, 1.55919873046875, 1.5595919189453125, 3.238277099609375, 1.5593380126953125, 1.558845458984375, 1.559089111328125, 1.5596502685546876, 1.559151611328125, 1.55905126953125, 1.5589949951171875, 1.558698974609375, 1.5589119873046875, 1.559118896484375, 1.5591055908203124, 1.5588433837890625, 1.55887109375, 1.559098388671875, 1.559488525390625, 1.5590921630859376, 1.5589212646484376, 1.5586201171875, 1.5587542724609376, 1.5588372802734376, 1.5583734130859375, 1.559034912109375, 1.5590389404296876, 1.5593184814453125, 1.559047119140625, 1.559078857421875, 1.5590113525390625, 1.5589908447265626, 1.5593114013671876, 1.559214111328125, 1.559314453125, 1.5593348388671875, 1.5594014892578125, 1.5593011474609375, 1.55919873046875, 1.5591455078125, 1.559130126953125, 1.5589488525390625, 1.55922021484375, 1.5590164794921875, 1.55928369140625, 1.5593359375, 1.55940869140625, 1.5589334716796874, 1.5590072021484376, 1.5590543212890624, 1.5592960205078126, 1.5591453857421875, 1.5592960205078126, 1.55916796875, 1.55928271484375, 1.559329833984375, 1.5590318603515625, 1.5594649658203126, 1.5592314453125, 1.55926123046875, 1.5591290283203125, 1.5590205078125, 1.5590174560546874, 1.559034912109375, 1.5591658935546875, 1.559373779296875, 3.237329833984375, 1.558613037109375, 1.5583804931640626, 1.5587542724609376, 1.5585423583984375, 1.559004150390625, 1.5588382568359376, 1.55866015625, 1.5590809326171875, 1.558877197265625, 1.558772705078125, 1.558992919921875, 1.558794189453125, 1.559083984375, 1.55869287109375, 1.5590604248046875, 1.55894580078125, 1.5587255859375, 1.5587962646484375, 1.558877197265625, 1.5590615234375, 1.558513671875, 1.5592530517578125, 1.5587318115234374, 1.5593697509765625, 1.559119873046875, 1.5611954345703125, 1.559103515625, 1.5589058837890626, 1.5593779296875, 1.559484375, 1.5593184814453125, 1.5594208984375, 1.5591065673828124, 1.559189453125, 1.5593656005859375, 1.5593656005859375, 1.55962060546875, 1.5591669921875, 1.5591322021484375, 1.559208984375, 1.5593665771484375, 1.559109619140625, 1.5592222900390624, 1.5591434326171876, 1.559194580078125, 1.5592008056640625, 1.55911376953125, 1.55950390625, 1.5590687255859375, 1.559582763671875, 1.5593564453125, 1.5598284912109375, 1.5596851806640626, 1.55951513671875, 1.55947216796875, 1.5594691162109375, 1.5591895751953124, 1.5594127197265626, 1.55911572265625, 1.559299072265625, 1.5592508544921875, 1.5591474609375]",tokens/s,0.6316462606508407,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3193.102336,5128.060928,0.0,4481.613824,4276.256768,s,10,3.2413734130859373,0.32413734130859373,0.0016825840816240875,0.3241157684326172,0.32623468627929686,0.3264099456787109,0.32655015319824215,"[0.326585205078125, 0.3253639221191406, 0.3222522888183594, 0.32284228515625, 0.32339013671875, 0.32129388427734373, 0.32326702880859376, 0.3248414001464844, 0.32534152221679685, 0.3261957397460937]",tokens/s,789.7886709580806,kWh,3.8053463575326738e-06,2.0845240635480878e-06,1.689038677618604e-05,2.27802571972668e-05,tokens/kWh,11237801.126789523,MB,3193.102336,5128.060928,0.0,4481.613824,4465.661952,s,10,189.078173828125,18.9078173828125,0.012273201694566093,18.9097021484375,18.920707812499998,18.9233919921875,18.9255393359375,"[18.91315625, 18.920111328125, 18.9024375, 18.901080078125, 18.88138671875, 18.897064453125, 18.91123828125, 18.908166015625, 18.926076171875, 18.91745703125]",tokens/s,3.331955176236681,kWh,0.00022324078258437416,0.00012235523933478362,0.0009709997090908196,0.0013165957310099774,tokens/kWh,47850.67922988926,,s,629,191.68752508544924,0.3047496424251975,0.038502073286161766,0.300015625,0.3008729064941406,0.3014152465820313,0.6232092919921874,"[0.3012270202636719, 0.30114816284179685, 0.3007774658203125, 0.30110003662109375, 0.29941351318359377, 0.29971661376953124, 0.2996531066894531, 0.2997452697753906, 0.30033203125, 0.3001241455078125, 0.30014566040039065, 0.3000565795898438, 0.3007344665527344, 0.30101809692382814, 0.2998548583984375, 0.2999808044433594, 0.3005767822265625, 0.29991937255859374, 0.3002378234863281, 0.3010672607421875, 0.30078155517578126, 0.30025625610351564, 0.29994802856445313, 0.3000330505371094, 0.3002951354980469, 0.30040884399414064, 0.30003506469726565, 0.30066790771484375, 0.30030438232421874, 0.30015179443359374, 0.30028594970703126, 0.3002818603515625, 0.30056243896484375, 0.3007068786621094, 0.3003309326171875, 0.29966232299804685, 0.2997370910644531, 0.2999521179199219, 0.29981594848632814, 0.2995947570800781, 0.29999102783203124, 0.29974835205078126, 0.299620361328125, 0.2994493408203125, 0.2994759826660156, 0.29976165771484375, 0.30030642700195315, 0.3000166320800781, 0.2999060363769531, 0.30026956176757813, 0.2995128173828125, 0.29971661376953124, 0.2999552001953125, 0.3005082092285156, 0.30011181640625, 0.2995394592285156, 0.3000186767578125, 0.30043032836914063, 0.29998388671875, 0.29997259521484376, 0.299610107421875, 0.29887283325195313, 0.6254059448242187, 0.3006320495605469, 0.3013908386230469, 0.30085733032226564, 0.3013273620605469, 0.29998489379882814, 0.2996244506835937, 0.3003135986328125, 0.299826171875, 0.2998814697265625, 0.300400634765625, 0.29994085693359374, 0.3004631042480469, 0.3001395263671875, 0.2999992370605469, 0.299926513671875, 0.300759033203125, 0.300590087890625, 0.29988455200195313, 0.29991729736328127, 0.30021221923828123, 0.30040472412109376, 
0.3002593383789062, 0.30088909912109374, 0.3011358642578125, 0.2998763427734375, 0.30023577880859376, 0.30020709228515624, 0.3008174133300781, 0.3003627624511719, 0.300759033203125, 0.3007027587890625, 0.30042620849609375, 0.30063821411132813, 0.2999624328613281, 0.29996844482421875, 0.30086349487304687, 0.30146969604492185, 0.29992141723632815, 0.3000361022949219, 0.3000586242675781, 0.29992755126953125, 0.2999654541015625, 0.2998486938476562, 0.30020095825195314, 0.3000299377441406, 0.30309478759765623, 0.3002388610839844, 0.2999859313964844, 0.3003576354980469, 0.30067608642578125, 0.3001446533203125, 0.30003302001953125, 0.3000166320800781, 0.2999244689941406, 0.3001692199707031, 0.29990194702148437, 0.3005317077636719, 0.30013031005859375, 0.2998405151367188, 0.29998797607421873, 0.2999336853027344, 0.30006988525390627, 0.622824462890625, 0.30008013916015625, 0.29997259521484376, 0.2997729187011719, 0.29949029541015626, 0.300337158203125, 0.30040985107421875, 0.30018765258789065, 0.2999029846191406, 0.29995724487304687, 0.30025112915039065, 0.30034637451171875, 0.29992959594726565, 0.30029824829101565, 0.3003873291015625, 0.2998978576660156, 0.30032589721679687, 0.3002255249023438, 0.3004590454101562, 0.3001773681640625, 0.2999449462890625, 0.2996777038574219, 0.2997125244140625, 0.3017687072753906, 0.2998691711425781, 0.29983026123046874, 0.2998783874511719, 0.30037503051757813, 0.3002183532714844, 0.29920254516601563, 0.29975244140625, 0.2997073974609375, 0.29997671508789064, 0.2997862548828125, 0.30004327392578123, 0.2995845031738281, 0.29983743286132813, 0.29989376831054687, 0.3005511779785156, 0.3004989318847656, 0.3004620666503906, 0.2998620300292969, 0.2996592712402344, 0.2998609924316406, 0.29997567749023435, 0.2994964599609375, 0.3000780944824219, 0.30188134765625, 0.29991015625, 0.30026956176757813, 0.30005453491210937, 0.29941964721679687, 0.3000770568847656, 0.2994923400878906, 0.29899981689453126, 0.299219970703125, 0.29973809814453123, 0.29967666625976563, 0.30005966186523436, 0.3000115661621094, 0.3005040283203125, 0.2999951477050781, 0.30062490844726564, 0.6233589477539062, 0.29990194702148437, 0.3002234802246094, 0.2998190002441406, 0.30008734130859377, 0.30004833984375, 0.2998200378417969, 0.3000770568847656, 0.300015625, 0.3000606689453125, 0.29941146850585937, 0.29921484375, 0.299578369140625, 0.2996756591796875, 0.2996449279785156, 0.30001971435546876, 0.3000995788574219, 0.29999002075195313, 0.3003607177734375, 0.30082763671875, 0.3000340576171875, 0.300611572265625, 0.30002584838867186, 0.299694091796875, 0.29959066772460935, 0.30004019165039064, 0.2999818115234375, 0.30008218383789065, 0.29999002075195313, 0.3006935119628906, 0.2997913513183594, 0.29910015869140627, 0.2994124755859375, 0.29980160522460936, 0.2999715881347656, 0.3000504455566406, 0.3000074157714844, 0.2999378051757812, 0.2999920654296875, 0.3000719299316406, 0.2999992370605469, 0.30074981689453123, 0.30103448486328127, 0.2998681640625, 0.3038136291503906, 0.3005091857910156, 0.3002746887207031, 0.2998763427734375, 0.299767822265625, 0.29981594848632814, 0.2999541625976562, 0.29982720947265623, 0.299641845703125, 0.29993267822265623, 0.29993472290039064, 0.2999715881347656, 0.2998343811035156, 0.29893017578125, 0.29962136840820314, 0.30002072143554687, 0.2994237365722656, 0.299831298828125, 0.30021630859375, 0.623921142578125, 0.3000299377441406, 0.30005966186523436, 0.299926513671875, 0.299947021484375, 0.300115966796875, 0.29993572998046875, 0.30002072143554687, 0.30004531860351563, 
0.29974118041992187, 0.30062188720703126, 0.29995513916015626, 0.29942477416992186, 0.2993663940429688, 0.29983026123046874, 0.2999603271484375, 0.2999285888671875, 0.3015209045410156, 0.29998797607421873, 0.30004327392578123, 0.30011700439453126, 0.2999449462890625, 0.30022860717773436, 0.3003965759277344, 0.30081021118164064, 0.30038836669921876, 0.30042520141601564, 0.30013336181640626, 0.3006033935546875, 0.29989376831054687, 0.29989273071289063, 0.30014874267578123, 0.3004436340332031, 0.299747314453125, 0.2998476867675781, 0.3000094604492187, 0.29924453735351564, 0.29881036376953124, 0.29879910278320315, 0.2987581481933594, 0.29875506591796874, 0.29884518432617185, 0.298787841796875, 0.2988482666015625, 0.29977191162109373, 0.3000657958984375, 0.2989967346191406, 0.2990592041015625, 0.2991626281738281, 0.2990673828125, 0.29893017578125, 0.2991349792480469, 0.299323486328125, 0.29853277587890625, 0.29891278076171873, 0.29906942749023435, 0.2987386779785156, 0.3028009033203125, 0.29910748291015626, 0.2988552551269531, 0.2989045715332031, 0.299146240234375, 0.29900595092773435, 0.621075439453125, 0.2991349792480469, 0.2998179931640625, 0.29896807861328123, 0.2991811828613281, 0.2986914672851563, 0.29862911987304686, 0.29889239501953124, 0.2989116516113281, 0.2987960205078125, 0.29937152099609377, 0.29994189453125, 0.29974630737304686, 0.30000537109375, 0.300537841796875, 0.29991015625, 0.30004837036132814, 0.300326904296875, 0.3002030029296875, 0.3000575866699219, 0.3000770568847656, 0.3004405822753906, 0.2999531555175781, 0.30005148315429686, 0.3000484008789063, 0.3000626525878906, 0.30036172485351564, 0.3001272277832031, 0.30000640869140627, 0.2996879272460938, 0.29982925415039063, 0.29986611938476565, 0.3000391540527344, 0.2998128662109375, 0.29970330810546875, 0.30041189575195315, 0.3001200561523438, 0.29997772216796875, 0.3035576171875, 0.30046923828125, 0.29996749877929685, 0.3007201232910156, 0.3004538879394531, 0.29997671508789064, 0.30013543701171874, 0.30009344482421874, 0.3003351135253906, 0.3000340576171875, 0.300626953125, 0.29982925415039063, 0.29992959594726565, 0.299978759765625, 0.3000924072265625, 0.30002484130859375, 0.29995724487304687, 0.2998896789550781, 0.299720703125, 0.2996705322265625, 0.2998804626464844, 0.2997698669433594, 0.3005307006835938, 0.29985382080078127, 0.30008731079101564, 0.6239016723632812, 0.3004989318847656, 0.3000094604492187, 0.3003781127929688, 0.3024650268554688, 0.30008941650390625, 0.30021728515625, 0.29997671508789064, 0.3000985717773437, 0.2997452697753906, 0.3001559143066406, 0.299789306640625, 0.299578369140625, 0.3000637512207031, 0.3000022888183594, 0.30041189575195315, 0.300179443359375, 0.30062490844726564, 0.3001978759765625, 0.29980978393554686, 0.30084915161132814, 0.3001692199707031, 0.30007400512695315, 0.3024322204589844, 0.3004334106445313, 0.29990194702148437, 0.30002584838867186, 0.2999787902832031, 0.3001108093261719, 0.3005962219238281, 0.3003105163574219, 0.30027059936523437, 0.299931640625, 0.2999029846191406, 0.3000309753417969, 0.30007601928710936, 0.2999808044433594, 0.299936767578125, 0.3001610107421875, 0.29992755126953125, 0.29989376831054687, 0.29997567749023435, 0.3000391540527344, 0.3000443115234375, 0.3002992248535156, 0.2999715881347656, 0.2998241577148438, 0.2998548278808594, 0.3000862731933594, 0.300653564453125, 0.3015086059570313, 0.29992242431640626, 0.299863037109375, 0.29971966552734375, 0.29996954345703125, 0.2999613342285156, 0.3001968688964844, 0.2997841796875, 0.2999797668457031, 
0.29949029541015626, 0.2995916748046875, 0.29981695556640625, 0.30072625732421876, 0.6255585327148437, 0.30017230224609376, 0.299831298828125, 0.3000555419921875, 0.30022042846679686, 0.30029925537109375, 0.30140109252929687, 0.30094949340820315, 0.2998886413574219, 0.29979647827148437, 0.30036376953125, 0.29990194702148437, 0.30001458740234377, 0.300010498046875, 0.29990707397460936, 0.29992959594726565, 0.3013775329589844, 0.2998958129882813, 0.3009157104492188, 0.3004344177246094, 0.2997698974609375, 0.299400146484375, 0.3000780944824219, 0.300410888671875, 0.3009054870605469, 0.3005409240722656, 0.301384765625, 0.30065350341796876, 0.30062591552734375, 0.30096075439453124, 0.3004375, 0.30160894775390623, 0.3008819274902344, 0.3004241943359375, 0.29964389038085937, 0.2996091003417969, 0.2996643981933594, 0.29964901733398436, 0.2998384704589844, 0.29929779052734373, 0.29952517700195314, 0.30001556396484375, 0.29967666625976563, 0.29992242431640626, 0.2999613342285156, 0.299926513671875, 0.2998753356933594, 0.2995947570800781, 0.2999183349609375, 0.2996172790527344, 0.29993267822265623, 0.30100991821289064, 0.30017843627929686, 0.30001254272460937, 0.29992141723632815, 0.29997055053710936, 0.300147705078125, 0.3002347412109375, 0.29967462158203123, 0.299504638671875, 0.299514892578125, 0.29878680419921877, 0.2995189819335938, 0.6255062866210938, 0.30063821411132813, 0.30005966186523436, 0.30045697021484374, 0.3000965270996094, 0.2999285888671875, 0.300015625, 0.300263427734375, 0.299894775390625, 0.29981695556640625, 0.30063821411132813, 0.300000244140625, 0.29996954345703125, 0.29998797607421873, 0.30009344482421874, 0.2999808044433594, 0.3002378234863281, 0.30191104125976564, 0.3011993713378906, 0.30066586303710935, 0.3013355407714844, 0.30207794189453124, 0.302529541015625, 0.30110720825195314, 0.3007958984375, 0.3010785217285156, 0.30087783813476565, 0.2998056945800781, 0.30067608642578125, 0.3011614685058594, 0.3007979431152344, 0.30112152099609374, 0.3007201232910156, 0.3007068176269531, 0.30179122924804686, 0.3001978759765625, 0.3004272766113281, 0.2999080810546875, 0.3004764099121094, 0.30043954467773437, 0.3001835632324219, 0.3006484375, 0.3004712829589844, 0.3014246826171875, 0.3003248291015625, 0.30087884521484376, 0.3002808227539063, 0.299840576171875, 0.3000575256347656, 0.30002383422851564, 0.2996745910644531, 0.29950567626953123, 0.2997145690917969, 0.29997567749023435, 0.30001458740234377, 0.30037503051757813, 0.30008114624023435, 0.2989066162109375, 0.30013543701171874, 0.2999111633300781, 0.29975347900390625, 0.2997350463867188, 0.30005453491210937, 0.6268078002929688, 0.30009548950195314, 0.29986407470703125, 0.30065869140625, 0.3011307373046875, 0.30074368286132813, 0.3008604125976562, 0.30035250854492185, 0.30087167358398437, 0.29988760375976564, 0.30013644409179685, 0.3006300048828125, 0.3005399169921875, 0.30181991577148437, 0.30090756225585935, 0.29990191650390624, 0.30007601928710936, 0.30002688598632815, 0.2999869384765625, 0.3001077880859375, 0.30047845458984374, 0.29997567749023435, 0.3000227966308594, 0.3018362731933594, 0.30386279296875, 0.30291455078125, 0.303236083984375, 0.3023329162597656, 0.30000436401367186, 0.2989014892578125, 0.2988851318359375, 0.29931622314453127, 0.2989588623046875, 0.29878271484375, 0.2992056274414063, 0.30051431274414064, 0.3006761169433594, 0.29950460815429686, 0.29902847290039064, 0.2990940246582031, 0.29902545166015626, 0.29914212036132815, 0.29988455200195313, 0.299652099609375, 0.30047232055664064, 0.2996326293945312, 
0.30089727783203124, 0.29991729736328127, 0.3000965270996094, 0.30000234985351565, 0.29993365478515627, 0.3000115051269531, 0.2999603881835938, 0.29999508666992186, 0.29998284912109374, 0.2999869384765625, 0.30106521606445313, 0.3007928466796875, 0.300732421875, 0.29975653076171876, 0.29992755126953125, 0.30012313842773436, 0.29981491088867185]",tokens/s,3.281382029005844,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1866.50624,3328.704512,0.0,2682.257408,2578.238464,s,10,1.4175592651367188,0.14175592651367186,0.0022402045159939975,0.14135794067382812,0.14292337188720702,0.14542544631958007,0.1474271058654785,"[0.14792752075195312, 0.14161701965332033, 0.1391898193359375, 0.1415839385986328, 0.14236735534667969, 0.14101449584960937, 0.13986236572265626, 0.1410111083984375, 0.14185369873046874, 0.14113194274902344]",tokens/s,1805.9209677932563,kWh,1.6547061226986076e-06,9.066846465884736e-07,6.786722250360737e-06,9.348113019647817e-06,tokens/kWh,27385205.919305906,MB,1866.50624,3328.704512,0.0,2682.257408,2667.0976,s,10,83.8020732421875,8.38020732421875,0.04772557025936409,8.37658544921875,8.433103320312501,8.4430912109375,8.4510815234375,"[8.4288447265625, 8.39970703125, 8.4144833984375, 8.4530791015625, 8.4308837890625, 8.3407841796875, 8.3356474609375, 8.31612109375, 8.32905859375, 8.3534638671875]",tokens/s,7.51771377038971,kWh,9.806880771561905e-05,5.3749043913185225e-05,0.00039216448579924195,0.0005439823374280462,tokens/kWh,115812.5837281126,,s,629,84.93340989685055,0.1350292685164556,0.016826908436194207,0.13322035217285155,0.13436354370117187,0.1346594787597656,0.27325439575195315,"[0.1383321533203125, 0.13691903686523438, 0.13555609130859375, 0.1336924133300781, 0.1340948486328125, 0.13436006164550782, 0.1343426513671875, 0.1342003173828125, 0.13426687622070313, 0.13448089599609375, 0.13400473022460938, 0.13385011291503907, 0.13449215698242187, 0.13411634826660157, 0.13182975769042968, 0.13270323181152344, 0.13469491577148437, 0.13397605895996093, 0.13359616088867188, 0.13410610961914063, 0.13234483337402345, 0.13145497131347655, 0.13187686157226564, 0.1315246124267578, 0.13338418579101563, 0.13427098083496095, 0.13404261779785157, 0.13425152587890626, 0.13394534301757813, 0.13425050354003906, 0.1328660430908203, 0.1342433319091797, 0.13415116882324218, 0.13333401489257812, 0.1342013397216797, 0.13399346923828126, 0.1340968933105469, 0.134508544921875, 0.13404876708984376, 0.1326940155029297, 0.1316812744140625, 0.13177658081054688, 0.13164845275878906, 0.13489971923828126, 0.13331251525878907, 0.1342248992919922, 0.13452799987792968, 0.13407334899902343, 0.13159321594238282, 0.13165260314941407, 0.13322035217285155, 0.13399244689941406, 0.13447474670410156, 0.13405696105957032, 0.13407948303222655, 0.13395046997070312, 0.13439283752441405, 0.1342300109863281, 0.13573222351074218, 0.1321625671386719, 0.13170687866210937, 0.13169970703125, 
0.2737838134765625, 0.13152255249023437, 0.13275955200195313, 0.13433351135253907, 0.1345811767578125, 0.13403852844238281, 0.13368319702148437, 0.13302169799804686, 0.13411737060546874, 0.13280256652832031, 0.13474508666992188, 0.13335859680175782, 0.1340590057373047, 0.13169459533691405, 0.13173248291015624, 0.13171405029296876, 0.13407743835449218, 0.1342689208984375, 0.13365248107910158, 0.13403340148925783, 0.1339299774169922, 0.13434367370605468, 0.1315061798095703, 0.13148466491699218, 0.13182156372070314, 0.1337518005371094, 0.1333534698486328, 0.1340999755859375, 0.13400575256347655, 0.13407852172851562, 0.1340497283935547, 0.13159117126464845, 0.13308108520507814, 0.13406515502929686, 0.13437747192382812, 0.1340518341064453, 0.1342433319091797, 0.13421466064453125, 0.13415834045410155, 0.1342986297607422, 0.13406617736816406, 0.13197926330566406, 0.1316177978515625, 0.13170687866210937, 0.13170687866210937, 0.13363917541503906, 0.13486598205566405, 0.13352549743652345, 0.13383468627929687, 0.13421055603027343, 0.13174989318847657, 0.13169973754882813, 0.13152662658691405, 0.13283226013183594, 0.13399142456054688, 0.13478297424316407, 0.13450035095214843, 0.1341460418701172, 0.1339115447998047, 0.13337496948242186, 0.134002685546875, 0.13234687805175782, 0.13289779663085938, 0.2724710388183594, 0.13155020141601562, 0.13163827514648438, 0.131810302734375, 0.1316618194580078, 0.13126144409179688, 0.13352549743652345, 0.13428326416015626, 0.13351628112792968, 0.13398326110839845, 0.1340497589111328, 0.1338419189453125, 0.13386444091796876, 0.13370777893066407, 0.13431910705566405, 0.1338470458984375, 0.13402931213378907, 0.13414399719238282, 0.13386752319335937, 0.13391973876953125, 0.13366067504882811, 0.13545677185058594, 0.13440921020507812, 0.13534104919433593, 0.13244415283203126, 0.134466552734375, 0.13180108642578126, 0.131599365234375, 0.13178880310058594, 0.13154098510742188, 0.13177754211425782, 0.13156147766113283, 0.13368832397460936, 0.134044677734375, 0.13403347778320313, 0.13381727600097656, 0.13421772766113282, 0.1335265350341797, 0.13435289001464842, 0.133959716796875, 0.13362889099121095, 0.1339842529296875, 0.13384602355957032, 0.13403443908691406, 0.13420748901367188, 0.13402316284179688, 0.1340590057373047, 0.13474713134765626, 0.13409791564941406, 0.13352960205078124, 0.13388697814941405, 0.1340088348388672, 0.13404978942871093, 0.13377023315429687, 0.13412864685058593, 0.13397708129882813, 0.13398016357421874, 0.13395968627929689, 0.1340712890625, 0.13423004150390624, 0.13353570556640626, 0.13389414978027345, 0.13325209045410155, 0.27623629760742185, 0.134403076171875, 0.13430271911621094, 0.13406105041503907, 0.1344286651611328, 0.1346447296142578, 0.13446553039550782, 0.13406207275390625, 0.13451161193847655, 0.13428941345214843, 0.1343856658935547, 0.13401190185546874, 0.134466552734375, 0.13412249755859376, 0.13459762573242187, 0.1339463653564453, 0.13446553039550782, 0.13476864624023438, 0.1342044219970703, 0.13403443908691406, 0.13416653442382812, 0.134276123046875, 0.13387055969238282, 0.13414707946777343, 0.13453106689453126, 0.13373338317871095, 0.13383680725097657, 0.13395046997070312, 0.133791748046875, 0.13404570007324218, 0.1341265869140625, 0.13388697814941405, 0.1346693115234375, 0.1340518341064453, 0.1335562286376953, 0.1343969268798828, 0.13446348571777345, 0.1351065673828125, 0.1340641326904297, 0.13445733642578125, 0.13435903930664062, 0.1340631103515625, 0.1340518341064453, 0.13444816589355468, 0.13536051940917967, 0.13402621459960937, 
0.13327052307128906, 0.13432012939453125, 0.1344593963623047, 0.13412249755859376, 0.1338419189453125, 0.13387469482421874, 0.13338726806640624, 0.13416653442382812, 0.13386854553222657, 0.13441127014160156, 0.13412351989746094, 0.13347328186035157, 0.13382246398925782, 0.13420338439941407, 0.1338275909423828, 0.13340467834472655, 0.1342750701904297, 0.27683428955078127, 0.13397196960449217, 0.1343129577636719, 0.1338665008544922, 0.13388082885742186, 0.1340958709716797, 0.1340712890625, 0.13391871643066405, 0.13356646728515625, 0.1333104705810547, 0.13409178161621094, 0.133396484375, 0.1351403503417969, 0.13372825622558593, 0.13417575073242188, 0.13519052124023437, 0.1343938903808594, 0.1338654327392578, 0.133359619140625, 0.1340712890625, 0.13396890258789063, 0.1339894104003906, 0.1338838653564453, 0.13374668884277344, 0.133538818359375, 0.13404876708984376, 0.13366169738769532, 0.1339351043701172, 0.13478195190429687, 0.13399859619140625, 0.13399449157714843, 0.13394432067871093, 0.13413682556152343, 0.1340262451171875, 0.13399449157714843, 0.13403546142578124, 0.13435289001464842, 0.1340712890625, 0.13406112670898437, 0.1335233917236328, 0.13358387756347656, 0.13382553100585937, 0.13504620361328126, 0.13462828063964843, 0.13439897155761718, 0.13393516540527345, 0.13389511108398439, 0.1334097900390625, 0.13408767700195312, 0.13458636474609376, 0.13346713256835938, 0.13414297485351562, 0.13160243225097656, 0.1317795867919922, 0.13214413452148438, 0.13376716613769532, 0.133718017578125, 0.13606605529785157, 0.13217485046386718, 0.13167718505859374, 0.13171302795410156, 0.13281280517578126, 0.13389926147460937, 0.27561163330078126, 0.1342044219970703, 0.13399655151367187, 0.13381837463378907, 0.13696818542480468, 0.13187277221679689, 0.13215437316894532, 0.13239193725585938, 0.1329521026611328, 0.13212156677246092, 0.13276876831054688, 0.132463623046875, 0.13214002990722656, 0.13234072875976563, 0.13270118713378906, 0.13262847900390626, 0.1321871337890625, 0.1320509490966797, 0.13238067626953126, 0.13276876831054688, 0.1321564178466797, 0.1327431640625, 0.1328158721923828, 0.13263360595703125, 0.1317580871582031, 0.1315215301513672, 0.13196083068847655, 0.1318225860595703, 0.13159117126464845, 0.13150003051757814, 0.13156454467773437, 0.13174887084960937, 0.1325496368408203, 0.1326755828857422, 0.13302169799804686, 0.13294898986816406, 0.1324881896972656, 0.13196800231933595, 0.13392588806152345, 0.1337159729003906, 0.13316915893554687, 0.1326755828857422, 0.13233255004882813, 0.13182054138183594, 0.13182566833496093, 0.13186253356933594, 0.13180519104003907, 0.1316822967529297, 0.13177548217773438, 0.13161984252929687, 0.1317611541748047, 0.131852294921875, 0.13162086486816407, 0.1321553955078125, 0.13262950134277343, 0.13191372680664062, 0.13178265380859375, 0.13150515747070313, 0.13170176696777344, 0.1315205078125, 0.13172735595703125, 0.13178778076171874, 0.13167103576660155, 0.2728120422363281, 0.13346815490722655, 0.1335142364501953, 0.1333534698486328, 0.13239910888671874, 0.13167616271972657, 0.13235302734375, 0.1338050537109375, 0.13347225952148437, 0.1334886474609375, 0.1320120391845703, 0.1320816650390625, 0.13306573486328124, 0.13341183471679688, 0.13176934814453126, 0.13158195495605468, 0.13284147644042968, 0.13212979125976562, 0.1316290588378906, 0.1317058563232422, 0.13171098327636718, 0.13157887268066407, 0.13160960388183593, 0.13298486328125, 0.13236732482910157, 0.13162188720703125, 0.13165260314941407, 0.13161062622070313, 0.13162495422363282, 0.13157376098632811, 
0.13175910949707031, 0.13201100158691406, 0.13279539489746095, 0.13196493530273437, 0.13224140930175782, 0.13323365783691407, 0.13173248291015624, 0.13189529418945312, 0.133254150390625, 0.13235711669921876, 0.1316505584716797, 0.13167922973632812, 0.13156556701660158, 0.13257522583007814, 0.1328158721923828, 0.13173350524902344, 0.13212261962890626, 0.13244415283203126, 0.13331968688964843, 0.1319720916748047, 0.13162188720703125, 0.1338275909423828, 0.13188710021972655, 0.13169049072265626, 0.1329100799560547, 0.13289677429199218, 0.13157273864746094, 0.13191987609863282, 0.1330063934326172, 0.13177952575683594, 0.1324830780029297, 0.1317232666015625, 0.132210693359375, 0.2737438659667969, 0.13398220825195312, 0.13256195068359375, 0.13281686401367188, 0.13316409301757812, 0.13165664672851562, 0.131557373046875, 0.1315246124267578, 0.1316116485595703, 0.13375692749023438, 0.13275033569335937, 0.13160755920410155, 0.13167514038085937, 0.1316546630859375, 0.13151744079589844, 0.1325260772705078, 0.13415525817871093, 0.13211955261230468, 0.13158604431152343, 0.13158706665039063, 0.131599365234375, 0.13191885375976561, 0.13164959716796876, 0.13163615417480468, 0.131557373046875, 0.1315635223388672, 0.13156454467773437, 0.13150822448730468, 0.13155328369140626, 0.131778564453125, 0.13244825744628907, 0.13156761169433595, 0.13152870178222656, 0.13237452697753907, 0.13321420288085936, 0.1316188507080078, 0.13167100524902345, 0.13171507263183593, 0.13173043823242186, 0.13229055786132812, 0.13326028442382812, 0.13208883666992188, 0.13159730529785157, 0.13276570129394533, 0.13298074340820312, 0.13150210571289062, 0.13231517028808593, 0.13168019104003906, 0.1316177978515625, 0.13166490173339843, 0.13228031921386718, 0.131704833984375, 0.13154815673828124, 0.1315246124267578, 0.13159117126464845, 0.13162803649902344, 0.13144985961914063, 0.13153074645996093, 0.13159423828125, 0.13154304504394532, 0.1314959411621094, 0.13150822448730468, 0.13215335083007812, 0.2743265380859375, 0.13167922973632812, 0.13160243225097656, 0.13170278930664062, 0.13161068725585937, 0.13170375061035156, 0.13167820739746094, 0.13153996276855467, 0.13159628295898437, 0.13154917907714844, 0.13170994567871094, 0.13149491882324219, 0.1311682586669922, 0.1336678466796875, 0.13168333435058593, 0.1315020751953125, 0.13163827514648438, 0.1326510009765625, 0.13166490173339843, 0.131599365234375, 0.1316054992675781, 0.1315574035644531, 0.1316433563232422, 0.13159321594238282, 0.13151744079589844, 0.13169664001464843, 0.13158604431152343, 0.13171609497070313, 0.13180723571777345, 0.13306982421875, 0.1319505920410156, 0.13152665710449218, 0.1316188201904297, 0.13160755920410155, 0.13153485107421875, 0.1316321258544922, 0.13152255249023437, 0.13150413513183593, 0.13162393188476562, 0.13158604431152343, 0.13178880310058594, 0.13175398254394532, 0.13282815551757812, 0.13279335021972657, 0.1332623291015625, 0.13260493469238283, 0.1335029754638672, 0.1333217315673828, 0.13349990844726561, 0.13284454345703126, 0.13330943298339842, 0.13328793334960937, 0.13312818908691407, 0.13345382690429688, 0.13288447570800782, 0.13335142517089843, 0.1332725830078125, 0.1326561279296875, 0.13411943054199219, 0.1333104705810547, 0.13202330017089844, 0.1328404541015625, 0.13351219177246093, 0.27342642211914064, 0.1317181396484375, 0.131778564453125, 0.1316300811767578, 0.13164851379394532, 0.13157376098632811, 0.13182464599609375, 0.1316505584716797, 0.1314580535888672, 0.1316853790283203, 0.13220352172851563, 0.13342617797851564, 0.13168435668945314, 
0.1316259765625, 0.13177754211425782, 0.13159423828125, 0.13173356628417968, 0.1318133087158203, 0.1316444091796875, 0.1315082550048828, 0.13163209533691406, 0.13170994567871094, 0.13161062622070313, 0.13163929748535155, 0.13251788330078124, 0.13330738830566408, 0.13480653381347657, 0.13196902465820312, 0.1324451904296875, 0.13310054016113282, 0.13354290771484376, 0.13340882873535156, 0.1336657257080078, 0.13366886901855468, 0.133396484375, 0.13281893920898438, 0.13362892150878905, 0.13327769470214842, 0.1322239990234375, 0.13191474914550783, 0.13346099853515625, 0.1332162628173828, 0.13334835815429688, 0.13336679077148436, 0.13285171508789062, 0.13309132385253905, 0.13161369323730468, 0.13310464477539063, 0.13307391357421874, 0.13365248107910158, 0.13310464477539063, 0.13244313049316406, 0.1326376953125, 0.13354908752441405, 0.13300936889648438, 0.13332179260253907, 0.13388691711425782, 0.13313023376464844, 0.1333780517578125, 0.13311077880859376, 0.13335551452636718, 0.13207347106933592, 0.1331865539550781]",tokens/s,7.405801801245284,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise 
ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmp2h78w9mi/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1531.768832,9676.783616,0.0,9030.336512,8583.572992,s,10,9.471071899414062,0.9471071899414062,0.0007865321197485112,0.9470617980957031,0.9481417907714844,0.9482434417724609,0.9483247625732422,"[0.9481192016601563, 0.9483450927734375, 0.9464827270507813, 0.9463027954101563, 0.9464989624023438, 0.9459486694335938, 0.9466808471679687, 0.9474427490234375, 0.9474686889648437, 0.9477821655273437]",tokens/s,270.2967549172948,kWh,1.1175276083175583e-05,6.1235719211254035e-06,5.30962293456365e-05,7.039507734993748e-05,tokens/kWh,3636617.9232591945,MB,1531.768832,9676.783616,0.0,9030.336512,8872.966144,s,10,564.2868125,56.42868125,0.005902619636999738,56.430064453125,56.434701171875,56.4354833984375,56.4361091796875,"[56.42066796875, 56.42100390625, 56.427828125, 56.43380078125, 56.436265625, 56.4306328125, 56.43452734375, 56.42949609375, 56.41955859375, 56.43303125]",tokens/s,1.116453523357858,kWh,0.0006659982151861745,0.00036502579988969956,0.003165337650954964,0.004196361666030838,tokens/kWh,15013.005316004865,,s,629,571.914466552734,0.9092439849804999,0.11250283527003516,0.8956805419921875,0.8961118286132812,0.8962535400390624,1.84229025390625,"[0.8955299682617187, 0.8956344604492188, 0.8952893676757813, 0.8954654541015625, 0.8952473754882813, 0.8954511108398437, 0.8954111938476562, 0.895388671875, 0.8953507690429687, 0.8954296264648437, 0.8954921264648438, 0.8958034057617188, 0.8957071533203125, 0.895331298828125, 0.89537841796875, 0.8954317016601563, 0.895688720703125, 0.8951869506835938, 0.89523095703125, 0.895341552734375, 0.8953385009765625, 0.89526171875, 0.8952105712890625, 0.8952554931640625, 0.8955637817382812, 0.8952135620117188, 0.895278076171875, 0.89569384765625, 0.8954439697265625, 0.8954368286132812, 0.8953395385742188, 0.895320068359375, 0.895224853515625, 0.8956580200195312, 0.8956251220703125, 0.895593505859375, 0.8955872802734375, 0.895393798828125, 0.89556591796875, 0.8955975341796875, 0.8953139038085938, 0.8952350463867188, 0.8954644775390626, 0.8952453002929688, 0.8950845336914063, 0.89560888671875, 0.8954244384765625, 0.8954317016601563, 0.8957972412109375, 0.8959805297851563, 0.8959723510742188, 0.896089111328125, 0.8958925170898437, 0.8958883666992188, 0.8961648559570312, 0.8958853149414062, 0.896374755859375, 0.896205810546875, 0.8961065063476562, 0.89630517578125, 0.89601025390625, 0.8961126098632812, 1.844611083984375, 0.8954337158203125, 0.89558837890625, 0.8955914306640625, 0.8959385375976563, 0.8956160278320312, 0.8950343627929688, 0.8949083862304688, 0.8952064208984375, 
0.895373291015625, 0.8951787719726563, 0.895162353515625, 0.8951439208984375, 0.895182861328125, 0.895109130859375, 0.8953907470703125, 0.8957255859375, 0.8957091674804688, 0.8956385498046875, 0.8958136596679688, 0.8958320922851563, 0.8952658081054687, 0.8952637329101563, 0.8951807861328125, 0.8952391967773438, 0.8951900024414062, 0.8952842407226562, 0.8955750122070313, 0.89556787109375, 0.8954613647460937, 0.895562744140625, 0.8955679321289063, 0.8955903930664062, 0.8953548583984375, 0.8952647705078125, 0.8954501342773438, 0.8954869995117187, 0.89586181640625, 0.8957409057617187, 0.8956713256835938, 0.8955576171875, 0.8954132690429687, 0.8955822143554687, 0.89568359375, 0.8954531860351562, 0.8953220825195313, 0.89735986328125, 0.8953026733398437, 0.8954552612304687, 0.8961781616210938, 0.8958126220703125, 0.8957644653320312, 0.8956436767578125, 0.8958248901367187, 0.896047119140625, 0.8958515014648437, 0.8958351440429687, 0.8958453979492188, 0.895657958984375, 0.8957132568359375, 0.8961444091796875, 0.8957071533203125, 0.8954951782226562, 1.842377685546875, 0.895404052734375, 0.8950650634765625, 0.8951746826171875, 0.8952135620117188, 0.895152099609375, 0.8950364379882813, 0.8954869995117187, 0.8951756591796876, 0.8951070556640625, 0.89539794921875, 0.8955360717773437, 0.8956303100585937, 0.895278076171875, 0.8953108520507812, 0.895466552734375, 0.8955247802734375, 0.8954808349609376, 0.89577880859375, 0.8958197631835938, 0.8954214477539062, 0.8957296752929688, 0.8957849731445312, 0.8969154663085938, 0.8955699462890625, 0.8955740356445312, 0.8955402221679688, 0.8957010498046875, 0.8956046752929687, 0.8959457397460937, 0.89588427734375, 0.895657958984375, 0.8959969482421875, 0.8955279541015625, 0.89569384765625, 0.8956375122070312, 0.8958167114257812, 0.89558837890625, 0.895529052734375, 0.89546337890625, 0.8958197631835938, 0.8958023681640624, 0.895668212890625, 0.895762451171875, 0.8955596923828125, 0.89588427734375, 0.8957982788085938, 0.896742431640625, 0.8959180297851562, 0.895889404296875, 0.8958699340820313, 0.8959774780273437, 0.8961423950195313, 0.8960029907226562, 0.895941650390625, 0.8961116333007813, 0.8960184326171875, 0.8960419921875, 0.8958904418945313, 0.8957849731445312, 0.8956774291992188, 0.8956559448242187, 0.8954009399414062, 1.84203369140625, 0.8953282470703126, 0.8955074462890625, 0.8958883666992188, 0.895805419921875, 0.895224853515625, 0.8955054321289062, 0.89569482421875, 0.8954593505859375, 0.895592529296875, 0.89609619140625, 0.89571533203125, 0.89652734375, 0.8958136596679688, 0.8957890625, 0.8957183837890625, 0.8957440185546875, 0.8959334106445312, 0.895752197265625, 0.8961761474609375, 0.8955248413085938, 0.896421875, 0.895963134765625, 0.895330322265625, 0.8955340576171875, 0.8957603759765626, 0.8954306640625, 0.8958146362304688, 0.895805419921875, 0.8954982299804688, 0.8955299682617187, 0.8953681640625, 0.8958136596679688, 0.8957706298828125, 0.89615869140625, 0.8956036987304687, 0.8954429321289062, 0.895578125, 0.8957962036132813, 0.8954183959960937, 0.8954849243164062, 0.89569384765625, 0.8957583618164062, 0.8959283447265625, 0.8975103759765625, 0.8956825561523437, 0.8957440185546875, 0.895594482421875, 0.8960829467773438, 0.8958392333984375, 0.8954531860351562, 0.8957625122070313, 0.8960316772460938, 0.8954849243164062, 0.8954644775390626, 0.8962181396484376, 0.895805419921875, 0.8961618041992188, 0.8958760986328125, 0.8960430297851563, 0.89603173828125, 0.8957348022460937, 0.8958709716796875, 1.8425006103515624, 0.8954132690429687, 0.8952770385742187, 
0.8951552124023437, 0.8951265258789063, 0.8949452514648437, 0.8951756591796876, 0.8958238525390625, 0.895236083984375, 0.895140869140625, 0.8951900024414062, 0.8952207641601563, 0.895283203125, 0.8956876831054688, 0.8953272094726562, 0.89558837890625, 0.8958331298828125, 0.896849853515625, 0.8957357788085938, 0.8959354858398437, 0.8956190795898438, 0.8957183837890625, 0.89544189453125, 0.8957081298828125, 0.8961085205078125, 0.8960030517578125, 0.895741943359375, 0.8960706787109375, 0.8960758056640625, 0.8956846313476563, 0.8959539794921875, 0.8957930908203126, 0.8956805419921875, 0.8956405639648437, 0.8957777709960938, 0.895805419921875, 0.8960061645507813, 0.8956876831054688, 0.8957982788085938, 0.8960235595703125, 0.896294921875, 0.8963491821289062, 0.8959744262695313, 0.8960379028320312, 0.8958555908203125, 0.8957880249023438, 0.8960327758789063, 0.8959774780273437, 0.8963082275390625, 0.8961085205078125, 0.8957849731445312, 0.8961433715820313, 0.896015380859375, 0.8959569702148438, 0.89592626953125, 0.895857666015625, 0.89588427734375, 0.8977633056640625, 0.8960972900390625, 0.8961024169921875, 0.8960040893554687, 0.8959642333984374, 0.895754150390625, 1.8420654296875, 0.895541259765625, 0.8955330810546875, 0.895551513671875, 0.8954542846679687, 0.89588525390625, 0.8957655639648437, 0.8955913696289063, 0.8954900512695313, 0.895973388671875, 0.8957081298828125, 0.895446044921875, 0.8953692016601562, 0.8954695434570312, 0.8955350952148438, 0.8955064086914063, 0.8957685546875, 0.895510498046875, 0.8955617065429687, 0.89569384765625, 0.8958258666992187, 0.8956436767578125, 0.8955709228515625, 0.8953487548828125, 0.895425537109375, 0.89541015625, 0.8953661499023438, 0.8959344482421875, 0.8957112426757813, 0.8955719604492187, 0.8957173461914063, 0.8952852783203125, 0.895425537109375, 0.8956323852539062, 0.8953405151367188, 0.8953170166015625, 0.8956170043945313, 0.89562109375, 0.8976466064453125, 0.8961249389648438, 0.8960225830078125, 0.8955401611328125, 0.8958443603515625, 0.8955822143554687, 0.8957726440429687, 0.8958177490234375, 0.8958822631835938, 0.8957255859375, 0.89603173828125, 0.8960040893554687, 0.8958167114257812, 0.8959232177734375, 0.8961556396484375, 0.895921142578125, 0.8957132568359375, 0.8960726928710937, 0.8958699340820313, 0.8956088256835938, 0.8956508178710938, 0.8958750610351562, 0.8959140014648438, 0.896184326171875, 0.8957747192382812, 1.8425538330078124, 0.8953139038085938, 0.8954296264648437, 0.8954317016601563, 0.8954019775390625, 0.8954296264648437, 0.895804443359375, 0.89565185546875, 0.8956016845703125, 0.8954736938476563, 0.8951910400390625, 0.8951807861328125, 0.8956160278320312, 0.8953865966796875, 0.895224853515625, 0.8951572265625, 0.895447021484375, 0.8951286010742188, 0.8952534790039063, 0.8959088745117187, 0.8953425903320312, 0.8954378051757812, 0.8953170166015625, 0.8966533203125, 0.8956589965820313, 0.8957174072265625, 0.8958781127929687, 0.8958924560546875, 0.89592626953125, 0.895657958984375, 0.8963993530273437, 0.8960133056640625, 0.8955350952148438, 0.895636474609375, 0.8958023681640624, 0.8959241943359375, 0.8960481567382812, 0.8959610595703125, 0.8962498779296875, 0.8960297241210937, 0.8959395751953125, 0.8961136474609375, 0.896173095703125, 0.8961300048828125, 0.8959856567382812, 0.8956856079101563, 0.8959078369140625, 0.8958648071289063, 0.8959365234375, 0.8959959106445312, 0.89580029296875, 0.8960338134765625, 0.8962744140625, 0.8961085205078125, 0.896195556640625, 0.8962867431640625, 0.8956589965820313, 0.896268310546875, 
0.8962437133789063, 0.896083984375, 0.895952880859375, 0.8960859985351562, 0.8958607177734375, 1.8435286865234375, 0.8953579711914063, 0.8959723510742188, 0.895552490234375, 0.8962406616210937, 0.895710205078125, 0.8959600830078125, 0.895594482421875, 0.8955453491210937, 0.8953630981445313, 0.8957020263671875, 0.8955637817382812, 0.895478759765625, 0.8956026611328125, 0.89554736328125, 0.8955330810546875, 0.8961474609375, 0.895688720703125, 0.8956405639648437, 0.89556787109375, 0.8954163208007813, 0.8957962036132813, 0.8957061157226562, 0.8955453491210937, 0.895515625, 0.8955586547851563, 0.8955166625976563, 0.8954368286132812, 0.8956876831054688, 0.8957532348632813, 0.8957552490234375, 0.89554638671875, 0.8954439697265625, 0.8958248901367187, 0.8957378540039063, 0.8956109008789063, 0.895552490234375, 0.8955668334960938, 0.8956426391601563, 0.8957552490234375, 0.8958330688476562, 0.8957849731445312, 0.8957276000976563, 0.895825927734375, 0.8963133544921875, 0.8962559814453125, 0.89609423828125, 0.8956958618164063, 0.8961679077148438, 0.8956907348632812, 0.8954450073242187, 0.8954439697265625, 0.8957511596679687, 0.8955791625976562, 0.8954685668945312, 0.8961618041992188, 0.8959201049804687, 0.8955299682617187, 0.8956661987304687, 0.8956989135742187, 0.8956763916015625, 0.895662109375, 0.8958924560546875, 1.8434007568359374, 0.8952268676757813, 0.8956057739257812, 0.8958668823242187, 0.8953005981445312, 0.8950661010742188, 0.8951449584960938, 0.8954685668945312, 0.8951981811523437, 0.8952791137695313, 0.8951910400390625, 0.895224853515625, 0.8951367797851563, 0.8953661499023438, 0.8954818725585938, 0.8955637817382812, 0.8956846313476563, 0.8963553466796875, 0.8955187377929688, 0.89537841796875, 0.8953743286132813, 0.895267822265625, 0.8952647705078125, 0.8953159790039062, 0.895520751953125, 0.8957470703125, 0.8953702392578125, 0.8953262329101562, 0.8954111938476562, 0.8956661987304687, 0.8953436279296875, 0.8954030151367187, 0.895283203125, 0.8952268676757813, 0.89514599609375, 0.8952166137695312, 0.8958832397460937, 0.89552587890625, 0.8954111938476562, 0.895561767578125, 0.8955135498046874, 0.895520751953125, 0.8956928100585938, 0.8956426391601563, 0.8954716186523437, 0.8955125732421875, 0.8955330810546875, 0.8961566772460937, 0.8961351928710938, 0.8959539184570312, 0.8957091674804688, 0.89584228515625, 0.8958197631835938, 0.8957470703125, 0.8957593383789062, 0.8957214965820313, 0.8955811767578125, 0.8955985717773437, 0.8961331176757813, 0.8957828979492187, 0.8960563354492187, 0.8956763916015625, 0.8957511596679687, 1.8439761962890624, 0.8954634399414062, 0.89548388671875, 0.8955084838867188, 0.89558837890625, 0.895562744140625, 0.8955801391601562, 0.895515625, 0.8953907470703125, 0.8954337158203125, 0.8955177001953125, 0.8955903930664062, 0.8959283447265625, 0.8955166625976563, 0.89547265625, 0.8954685668945312, 0.895752197265625, 0.895425537109375, 0.8954317016601563, 0.8954531860351562, 0.8953671875, 0.89560986328125, 0.8953211059570313, 0.8955269165039063, 0.8957849731445312, 0.8958924560546875, 0.8968284301757813, 0.8958668823242187, 0.8959989624023438, 0.8959027099609375, 0.8958955688476562, 0.8957982788085938, 0.8957511596679687, 0.895730712890625, 0.8959959106445312, 0.8958034057617188, 0.895724609375, 0.8958135375976563, 0.8960409545898438, 0.8961474609375, 0.8957860107421876, 0.8955709228515625, 0.8958484497070313, 0.89590576171875, 0.895847412109375, 0.895763427734375, 0.8960266723632813, 0.8961084594726563, 0.8961710205078125, 0.89615869140625, 0.8962139892578125, 
0.895963134765625, 0.8957614135742188, 0.8957449951171875, 0.8958822631835938, 0.8960726928710937, 0.8957849731445312, 0.8957849731445312, 0.895599609375, 0.895626220703125, 0.8956661987304687, 0.8960829467773438, 0.8956692504882813]",tokens/s,1.09981480935664,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2832.146432,8389.132288,0.0,7742.685184,7007.0144,s,10,5.830655212402344,0.5830655212402344,0.0011864011955084602,0.5831084289550781,0.5846742309570313,0.5847881042480468,0.5848792028808594,"[0.5832706909179688, 0.5849019775390625, 0.5821006469726563, 0.5817232055664062, 0.582320068359375, 0.5811737670898437, 0.5829461669921875, 0.5834404907226562, 0.5841292724609375, 0.58464892578125]",tokens/s,439.05871754423805,kWh,6.867315049892591e-06,3.7630052303029514e-06,3.309072091699999e-05,4.372104119719553e-05,tokens/kWh,5855304.288051151,MB,2832.146432,8389.132288,0.0,7742.685184,7283.984384,s,10,342.646671875,34.264667187499995,0.008129823876006116,34.26347265625,34.275117968749996,34.278303125,34.28085125,"[34.26221484375, 34.28148828125, 34.27441015625, 34.256734375, 34.26859765625, 34.26473046875, 34.2578125, 34.267296875, 34.25966015625, 34.2537265625]",tokens/s,1.8386286857904421,kWh,0.0004044825890237167,0.00022169153025011835,0.0019209350367467965,0.002547109156020632,tokens/kWh,24733.92231781122,,s,629,347.34827374267576,0.5522230107196754,0.06935218607240677,0.5438034057617187,0.5445187377929688,0.5449166748046874,1.1266134716796876,"[0.54485400390625, 0.5438034057617187, 0.543246337890625, 0.5442498779296875, 0.5438187255859375, 0.544405517578125, 0.5433927612304688, 0.5439907836914063, 0.5433046875, 0.5437378540039063, 0.5438269653320312, 0.5440072021484375, 0.543795166015625, 0.544226318359375, 0.5433211059570312, 0.5441300659179688, 0.54451708984375, 0.5443307495117188, 0.5436416015625, 0.5442396240234375, 0.5436170043945312, 0.545089599609375, 0.543943603515625, 0.5440245971679688, 0.5435012817382813, 0.54389453125, 0.543447021484375, 0.544269287109375, 0.5434480590820312, 0.5442416381835937, 0.5436282958984375, 0.5436856079101563, 0.5433425903320312, 0.5441137084960938, 0.5437623901367188, 0.5438228759765625, 0.54314599609375, 0.543266845703125, 0.5434480590820312, 0.54394677734375, 0.543478759765625, 0.544342041015625, 0.5433477172851563, 0.5437071533203125, 0.5434163208007813, 0.5436990356445313, 0.5434121704101562, 0.5436589965820312, 0.5431746826171875, 0.5436907348632812, 0.5431572265625, 0.5437921142578125, 0.5443922119140625, 0.5438034057617187, 0.5433538818359375, 0.5437992553710937, 0.54312548828125, 0.5436221313476562, 0.5442550048828125, 0.5447546997070313, 0.5435003051757813, 0.544110595703125, 1.129606201171875, 0.5438555908203125, 0.5446000366210938, 0.5434962158203125, 0.5441279907226563, 0.5435760498046875, 0.5443614501953125, 0.5440512084960938, 0.5445314331054687, 0.544, 0.544395263671875, 0.5442437133789062, 0.5449338989257813, 
0.5445150756835937, 0.5455985107421875, 0.5442651977539062, 0.5440481567382812, 0.5437183837890625, 0.5441484985351562, 0.5433558959960938, 0.544912353515625, 0.5442252807617187, 0.544775146484375, 0.5442785034179688, 0.5442467651367188, 0.5443768310546875, 0.544701416015625, 0.5438463745117188, 0.5441720581054688, 0.543763427734375, 0.5439447021484375, 0.5434183959960938, 0.5442539672851563, 0.543873046875, 0.5442447509765626, 0.5435484008789062, 0.544189453125, 0.5435852661132813, 0.5441228637695312, 0.5439774780273438, 0.5444413452148438, 0.5440932006835938, 0.5443020629882812, 0.5437276000976563, 0.5443696899414062, 0.5436436767578126, 0.5473484497070312, 0.5439129638671875, 0.5444976806640625, 0.54388427734375, 0.5441033935546875, 0.5436795043945313, 0.5439283447265625, 0.5437163696289062, 0.5439866943359375, 0.5433651123046875, 0.5438566284179688, 0.5434593505859375, 0.5443276977539062, 0.5436395263671875, 0.54405224609375, 0.5434828491210938, 0.5446829833984375, 1.1269549560546874, 0.5436589965820312, 0.5440983276367187, 0.5438801879882813, 0.5444874267578125, 0.544152587890625, 0.5444679565429688, 0.5439887084960937, 0.5441515502929688, 0.5438320922851563, 0.54458984375, 0.5438555908203125, 0.544068603515625, 0.5435648193359375, 0.5439713134765625, 0.5431132202148438, 0.5440512084960938, 0.54389453125, 0.5443778686523437, 0.543446044921875, 0.5438597412109375, 0.5438648071289063, 0.5439907836914063, 0.5458063354492187, 0.5444915161132813, 0.543572998046875, 0.5443184814453125, 0.5437357788085937, 0.543878173828125, 0.5435125732421875, 0.5439928588867188, 0.5434962158203125, 0.544068603515625, 0.5433712768554687, 0.5437890625, 0.5446963500976563, 0.5459650268554688, 0.5440255737304688, 0.5438883666992187, 0.5443931884765625, 0.5446287231445313, 0.5444362182617187, 0.5449840698242188, 0.5441444091796875, 0.5444034423828125, 0.543510498046875, 0.5440491333007812, 0.5454448852539062, 0.5443717041015625, 0.5432412109375, 0.5437112426757813, 0.5432013549804687, 0.5434971313476562, 0.5437726440429688, 0.5439866943359375, 0.5433477172851563, 0.5437194213867188, 0.5431173095703125, 0.5434132690429687, 0.5431746826171875, 0.544648193359375, 0.5442304077148438, 0.5442119750976563, 1.1263170166015626, 0.5434583129882813, 0.5442969360351563, 0.543373291015625, 0.5437562866210938, 0.543373291015625, 0.5437644653320313, 0.5435750122070313, 0.5440972900390625, 0.5432647705078125, 0.5439580078125, 0.5433252563476563, 0.5438104858398437, 0.5432924194335937, 0.5439324340820313, 0.5438750610351563, 0.5440696411132813, 0.543283203125, 0.543783935546875, 0.54326171875, 0.5440430297851563, 0.543267822265625, 0.5438822631835938, 0.54329443359375, 0.5437296752929688, 0.5433179931640625, 0.5443358764648437, 0.5437020263671875, 0.5440706787109375, 0.5436016845703125, 0.5435985717773437, 0.54325244140625, 0.5440829467773437, 0.5445253295898438, 0.5435402221679687, 0.54327294921875, 0.5437296752929688, 0.5436436767578126, 0.54380029296875, 0.5434234619140625, 0.5437747192382812, 0.5434429321289063, 0.5436068115234375, 0.543056884765625, 0.5473935546875, 0.54410546875, 0.5440604248046875, 0.5434931030273438, 0.5439365234375, 0.5433773803710937, 0.5438289794921874, 0.5436354370117188, 0.5443809204101563, 0.5432913818359375, 0.5437869873046876, 0.5433190307617187, 0.5438975830078125, 0.54358837890625, 0.543973388671875, 0.5433487548828125, 0.5437777709960937, 0.5435279541015625, 0.5438392333984375, 1.126728759765625, 0.5433446655273437, 0.544162841796875, 0.5432760620117187, 0.5441720581054688, 
0.5443881225585937, 0.54490625, 0.54394677734375, 0.5442590942382812, 0.5439252319335938, 0.544701416015625, 0.5439826049804688, 0.5446226196289062, 0.5433385009765626, 0.5438064575195313, 0.5434009399414063, 0.5440983276367187, 0.5450567626953124, 0.5439652099609374, 0.5432319946289063, 0.5438095092773437, 0.5432586059570312, 0.5437614135742187, 0.543435791015625, 0.544484375, 0.5434317016601562, 0.5437009887695312, 0.5431490478515625, 0.5438167114257813, 0.543298583984375, 0.5435801391601562, 0.5435494995117187, 0.5441719970703125, 0.5433681640625, 0.5436928100585937, 0.5437245483398437, 0.5445037841796875, 0.5434193725585937, 0.5443307495117188, 0.543531005859375, 0.5440133056640625, 0.5436139526367187, 0.5439487915039063, 0.54411572265625, 0.5449932861328125, 0.5437962036132813, 0.5439794921875, 0.543466552734375, 0.5439456787109375, 0.543562744140625, 0.5440255737304688, 0.5437542114257813, 0.5439508666992188, 0.5435565795898437, 0.5450424194335938, 0.5437081298828125, 0.5441198120117188, 0.5460613403320312, 0.5440328369140625, 0.5436876220703125, 0.5439970092773437, 0.5443685913085937, 0.5439518432617187, 1.1256268310546875, 0.5433876342773437, 0.5439027099609375, 0.5437491455078125, 0.5440143432617187, 0.5435873413085938, 0.5440634765625, 0.5435217895507812, 0.544837646484375, 0.5441290283203125, 0.5442662353515625, 0.5436077880859375, 0.5440040893554687, 0.5432811279296875, 0.5437716674804688, 0.5439754028320313, 0.543921142578125, 0.5436795043945313, 0.5448601684570312, 0.543963134765625, 0.5444270629882813, 0.5435064086914062, 0.5439723510742187, 0.5439713134765625, 0.5442775268554687, 0.5437449951171875, 0.544595947265625, 0.5438966064453125, 0.5437860107421875, 0.5432279052734375, 0.5435350952148438, 0.5433651123046875, 0.5438668823242188, 0.5433426513671875, 0.5449727783203125, 0.5435340576171875, 0.5439334106445313, 0.54326171875, 0.5467739868164062, 0.5434767456054688, 0.5440921630859376, 0.5435054931640625, 0.544133056640625, 0.5435484008789062, 0.5440020751953125, 0.5437808837890625, 0.5440993041992187, 0.5436477661132812, 0.5438197631835937, 0.5434757690429688, 0.54377978515625, 0.5431531372070313, 0.5442908325195313, 0.5436088256835937, 0.5439539184570312, 0.54348291015625, 0.54430517578125, 0.5432504272460937, 0.5439170532226563, 0.5432053833007813, 0.5438597412109375, 0.5436006469726562, 0.543825927734375, 1.1284039306640625, 0.5435811767578125, 0.54392626953125, 0.5432975463867188, 0.5440030517578125, 0.5434276123046875, 0.5444700317382812, 0.5436016845703125, 0.5438228759765625, 0.5433507690429688, 0.5437460327148438, 0.5433599853515625, 0.5435975952148437, 0.5432709350585937, 0.54407373046875, 0.5433487548828125, 0.5436477661132812, 0.5439201049804687, 0.544205810546875, 0.5434214477539062, 0.543847412109375, 0.5432391967773438, 0.5437501220703125, 0.54501171875, 0.543856689453125, 0.5445160522460938, 0.544321533203125, 0.5439661865234375, 0.5444495849609375, 0.5435463256835937, 0.543562744140625, 0.5435699462890625, 0.5438617553710937, 0.5433108520507812, 0.5436497802734375, 0.5432432861328125, 0.5446655883789062, 0.5437214965820313, 0.5437655029296875, 0.543151123046875, 0.5436876831054688, 0.54325146484375, 0.5437511596679687, 0.5447864379882813, 0.5438218994140624, 0.5431879272460938, 0.543825927734375, 0.543182861328125, 0.5435903930664062, 0.54308349609375, 0.5436334228515625, 0.5431613159179688, 0.5436016845703125, 0.5431839599609375, 0.5440931396484375, 0.544869384765625, 0.543978515625, 0.5436641235351563, 0.5438402709960938, 0.54329443359375, 
0.5435975952148437, 0.5432872924804687, 0.5449031982421875, 1.12785302734375, 0.5432197265625, 0.5443031005859374, 0.543805419921875, 0.5440696411132813, 0.543562744140625, 0.5437880249023438, 0.5434265747070313, 0.54437890625, 0.5443410034179688, 0.5438085327148438, 0.5433548583984374, 0.5437552490234375, 0.5441167602539062, 0.5438873901367187, 0.5431286010742188, 0.5460684814453125, 0.5433446655273437, 0.54401123046875, 0.5432525024414062, 0.5443113403320312, 0.5433211059570312, 0.5440449829101562, 0.54329443359375, 0.5437286376953125, 0.5433364868164062, 0.5437962036132813, 0.5443225708007813, 0.544879638671875, 0.543446044921875, 0.543447021484375, 0.5436170043945312, 0.5437798461914063, 0.543077392578125, 0.5447864379882813, 0.5435914306640625, 0.5437706298828126, 0.5436293334960938, 0.5449195556640625, 0.5433282470703125, 0.544373779296875, 0.543984619140625, 0.54413720703125, 0.5432801513671875, 0.5436323852539062, 0.5439119262695312, 0.5450363159179688, 0.543909912109375, 0.5446492309570312, 0.5441402587890625, 0.5445682983398438, 0.5445365600585937, 0.5437798461914063, 0.5442201538085938, 0.5439365234375, 0.5434225463867187, 0.5440900268554687, 0.5439365844726562, 0.5439845581054688, 0.5436405639648437, 0.5438587036132813, 0.5438146362304688, 0.5440071411132813, 1.1298836669921875, 0.5435586547851563, 0.5440675659179688, 0.5432913818359375, 0.5438289794921874, 0.5433005981445312, 0.5437767944335937, 0.544300048828125, 0.54409521484375, 0.543405029296875, 0.54519091796875, 0.5432658081054688, 0.5438023681640625, 0.5436221313476562, 0.5439180908203125, 0.5432565307617188, 0.5435023193359375, 0.5447024536132813, 0.54401953125, 0.5435442504882813, 0.5441484985351562, 0.5433333740234375, 0.5439180908203125, 0.5433262329101562, 0.5437921142578125, 0.5436846313476562, 0.5451581420898437, 0.54331494140625, 0.5439129638671875, 0.5431747436523438, 0.5436364135742188, 0.543467529296875, 0.5444925537109375, 0.5434685668945313, 0.5439539184570312, 0.543331298828125, 0.5441863403320313, 0.5441474609375, 0.5438177490234375, 0.5437880249023438, 0.5436282958984375, 0.5433865966796875, 0.5437327270507812, 0.5433026733398437, 0.5443440551757812, 0.5432484130859375, 0.5437440185546875, 0.5434849243164063, 0.5438463745117188, 0.5431654663085937, 0.5436897583007813, 0.5432340698242187, 0.5437265625, 0.5432975463867188, 0.543541259765625, 0.5436201171875, 0.5445703735351562, 0.543752197265625, 0.5440389404296875, 0.5439641723632812, 0.5437122802734375, 0.543457275390625, 0.5449390258789063, 1.1290306396484375, 0.5434531860351562, 0.5440993041992187, 0.5435391845703125, 0.5443635864257812, 0.54331591796875, 0.5436958618164063, 0.54315625, 0.54371533203125, 0.5437439575195312, 0.544100341796875, 0.5435381469726562, 0.5437388916015625, 0.5432217407226563, 0.5436754150390625, 0.54318896484375, 0.5440829467773437, 0.544564208984375, 0.543515625, 0.5431910400390625, 0.5438494873046875, 0.54326171875, 0.5435248413085938, 0.5433456420898437, 0.5437112426757813, 0.5435238647460937, 0.5456578369140626, 0.5435504760742188, 0.5442672729492187, 0.543446044921875, 0.54443212890625, 0.5432156372070313, 0.5440870971679688, 0.5433077392578125, 0.5435750122070313, 0.5440040893554687, 0.5438587036132813, 0.5430794067382813, 0.5436282958984375, 0.5436846313476562, 0.5436190795898438, 0.5430947875976563, 0.5436282958984375, 0.5432340698242187, 0.5435166625976563, 0.5432954711914062, 0.5439907836914063, 0.5439354858398437, 0.5442140502929688, 0.543595458984375, 0.544216064453125, 0.5438013305664062, 0.5442723999023438, 
0.5444034423828125, 0.5439794921875, 0.54333642578125, 0.5436118774414063, 0.5431787719726563, 0.5439519653320313, 0.5432032470703125, 0.5435668334960938, 0.5430866088867188, 0.5438269653320312]",tokens/s,1.810862605483909,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1743.122432,22129.672192,0.0,21483.225088,20799.036928,s,10,27.909502197265624,2.7909502197265623,0.0030225550178342967,2.7914050292968753,2.79439267578125,2.794928149414062,2.7953565283203123,"[2.794273681640625, 2.795463623046875, 2.7873525390625, 2.789448486328125, 2.787631103515625, 2.78628466796875, 2.790373291015625, 2.792436767578125, 2.79304931640625, 2.793188720703125]",tokens/s,91.72503263962948,kWh,3.290233454770512e-05,1.803171549396211e-05,0.0001583880155991868,0.00020932206564085407,tokens/kWh,1222995.765956342,MB,1743.720448,22129.672192,0.0,21483.225088,20902.142976,s,10,1661.799328125,166.1799328125,0.024199823300055656,166.1772734375,166.2117125,166.21745,166.22204,"[166.17496875, 166.167359375, 166.14703125, 166.2104375, 166.2231875, 166.181296875, 166.179578125, 166.204078125, 166.159546875, 166.15184375]",tokens/s,0.37910714569299164,kWh,0.0019616428730719624,0.001075153456676562,0.009382404450362024,0.012419200780110549,tokens/kWh,5072.790199261051,,s,629,1684.2762666015624,2.677704716377683,0.33156351074934576,2.63752294921875,2.6395720703125,2.6405790039062498,5.428127246093751,"[2.63648046875, 2.637477783203125, 2.636610595703125, 2.63773388671875, 2.63674365234375, 2.6374482421875, 2.63684716796875, 2.637740966796875, 2.636241943359375, 2.6376162109375, 2.636966796875, 2.6365615234375, 2.639180908203125, 2.637003662109375, 2.637414306640625, 2.638074951171875, 2.638488525390625, 2.637603759765625, 2.6381865234375, 2.6367314453125, 2.63737646484375, 2.636360595703125, 2.63697314453125, 2.6366044921875, 2.6370693359375, 2.636568603515625, 2.636980224609375, 2.636353515625, 2.63706103515625, 2.636314697265625, 2.637646728515625, 2.63864208984375, 2.637822998046875, 2.63769091796875, 2.636580810546875, 2.636631103515625, 2.6366474609375, 2.636380126953125, 2.635778076171875, 2.639795166015625, 2.637285400390625, 2.636735595703125, 2.63680419921875, 2.636505126953125, 2.636515380859375, 2.637959228515625, 2.6366884765625, 2.638927001953125, 2.6396015625, 2.6393056640625, 2.640069580078125, 2.638898193359375, 2.6396181640625, 2.639129638671875, 2.6404248046875, 2.63950439453125, 2.6398291015625, 2.639203369140625, 2.639447021484375, 2.638824462890625, 2.638972900390625, 2.637701171875, 5.43399951171875, 2.63682861328125, 2.637526123046875, 2.637077392578125, 2.636991455078125, 2.636851318359375, 2.63690966796875, 2.636982177734375, 2.638088134765625, 2.63716455078125, 2.63853662109375, 2.638309326171875, 2.63889111328125, 2.64060107421875, 2.637055908203125, 2.63606884765625, 2.63802880859375, 2.637928466796875, 2.6390927734375, 2.6384384765625, 2.637506591796875, 
2.63756298828125, 2.63665869140625, 2.636583984375, 2.63687060546875, 2.6370908203125, 2.637446044921875, 2.63802685546875, 2.6375556640625, 2.637516845703125, 2.63621728515625, 2.637552734375, 2.637285400390625, 2.637663330078125, 2.6371767578125, 2.637763671875, 2.63699560546875, 2.63773583984375, 2.637076416015625, 2.637444091796875, 2.636642333984375, 2.636317626953125, 2.63705078125, 2.636695556640625, 2.637413330078125, 2.63615478515625, 2.6434365234375, 2.637322265625, 2.638057373046875, 2.63786181640625, 2.638676025390625, 2.637177734375, 2.63716455078125, 2.637470703125, 2.63781689453125, 2.636496826171875, 2.63722802734375, 2.637602783203125, 2.638507080078125, 2.637663330078125, 2.636621826171875, 2.63727001953125, 2.6375107421875, 5.42925830078125, 2.638592041015625, 2.6379560546875, 2.637390869140625, 2.636768310546875, 2.63889501953125, 2.638235595703125, 2.638288818359375, 2.637874267578125, 2.638148681640625, 2.637728759765625, 2.637327392578125, 2.63690869140625, 2.637347900390625, 2.6369453125, 2.637433837890625, 2.63707958984375, 2.636822509765625, 2.637656982421875, 2.63617529296875, 2.636454833984375, 2.6358466796875, 2.636423095703125, 2.63856640625, 2.63604833984375, 2.63657470703125, 2.637000732421875, 2.637095947265625, 2.636664794921875, 2.6381435546875, 2.640372802734375, 2.638454833984375, 2.636282958984375, 2.636675048828125, 2.635629638671875, 2.636507080078125, 2.63680615234375, 2.63701611328125, 2.636884033203125, 2.63752294921875, 2.6366669921875, 2.63699462890625, 2.63780859375, 2.6374072265625, 2.636739501953125, 2.6374296875, 2.637625244140625, 2.639678466796875, 2.63699658203125, 2.63729052734375, 2.637656982421875, 2.636547119140625, 2.6364794921875, 2.636135498046875, 2.636613525390625, 2.636599365234375, 2.63747265625, 2.6357861328125, 2.63600439453125, 2.6359716796875, 2.636907470703125, 2.636198974609375, 2.639195068359375, 5.42974560546875, 2.637918212890625, 2.638065673828125, 2.6382080078125, 2.639459228515625, 2.638834716796875, 2.640337890625, 2.639097900390625, 2.638284912109375, 2.639035400390625, 2.636672119140625, 2.63703955078125, 2.63772265625, 2.639283203125, 2.6380830078125, 2.639520751953125, 2.63948291015625, 2.637526123046875, 2.638147705078125, 2.637602783203125, 2.63681640625, 2.63762841796875, 2.638529541015625, 2.63696484375, 2.63714599609375, 2.637178955078125, 2.6376796875, 2.63855810546875, 2.641314697265625, 2.637876220703125, 2.63853369140625, 2.63912548828125, 2.637345703125, 2.636560302734375, 2.636971923828125, 2.636669921875, 2.636619873046875, 2.6392392578125, 2.63902099609375, 2.638465087890625, 2.63775341796875, 2.638636962890625, 2.637673583984375, 2.6372373046875, 2.64167431640625, 2.6387353515625, 2.637962158203125, 2.639416259765625, 2.637681640625, 2.639564697265625, 2.6377236328125, 2.638668701171875, 2.638095458984375, 2.63796728515625, 2.637655029296875, 2.637210693359375, 2.63780859375, 2.638158935546875, 2.637846435546875, 2.63988232421875, 2.641334228515625, 2.636745849609375, 2.63619482421875, 5.42609521484375, 2.63847412109375, 2.64098095703125, 2.6405458984375, 2.64087451171875, 2.64148388671875, 2.64074755859375, 2.64026318359375, 2.640604248046875, 2.640962646484375, 2.638622802734375, 2.63895458984375, 2.636958740234375, 2.636030029296875, 2.636072021484375, 2.637117431640625, 2.636577880859375, 2.642241455078125, 2.64078857421875, 2.641033203125, 2.641383544921875, 2.64137841796875, 2.639701904296875, 2.640848876953125, 2.640034912109375, 2.6397451171875, 2.640384033203125, 2.637106201171875, 
2.636432373046875, 2.636267578125, 2.637424560546875, 2.636958740234375, 2.63680712890625, 2.637274169921875, 2.638011474609375, 2.63804931640625, 2.63707861328125, 2.636590087890625, 2.63739794921875, 2.63689013671875, 2.63980029296875, 2.639002685546875, 2.638836669921875, 2.63720849609375, 2.637477783203125, 2.637814697265625, 2.637075439453125, 2.637104248046875, 2.638521240234375, 2.63874755859375, 2.63686962890625, 2.63682763671875, 2.637802490234375, 2.63747998046875, 2.6362255859375, 2.63657568359375, 2.636940185546875, 2.642130859375, 2.6372998046875, 2.63684912109375, 2.63803271484375, 2.63796533203125, 2.637073486328125, 5.4244033203125, 2.636745849609375, 2.6364814453125, 2.6371123046875, 2.637408203125, 2.636239990234375, 2.63697607421875, 2.638215087890625, 2.639066162109375, 2.63918994140625, 2.639048583984375, 2.63975634765625, 2.639510498046875, 2.63817626953125, 2.637822021484375, 2.6385029296875, 2.637655029296875, 2.637551513671875, 2.636876708984375, 2.63750048828125, 2.63714599609375, 2.63798779296875, 2.63727099609375, 2.637442138671875, 2.636712890625, 2.637854736328125, 2.638200927734375, 2.6394736328125, 2.63754541015625, 2.636971923828125, 2.637844482421875, 2.63727197265625, 2.6361865234375, 2.636801025390625, 2.637619140625, 2.637947998046875, 2.63809228515625, 2.637560791015625, 2.6417490234375, 2.63729052734375, 2.63712158203125, 2.63689111328125, 2.63716259765625, 2.635864990234375, 2.6373251953125, 2.63663623046875, 2.63742578125, 2.637765625, 2.63775341796875, 2.638487548828125, 2.638257080078125, 2.63924951171875, 2.639295654296875, 2.639500244140625, 2.63644970703125, 2.637189208984375, 2.637624267578125, 2.639413330078125, 2.639233154296875, 2.63777587890625, 2.637401123046875, 2.637765625, 2.63680712890625, 5.42891748046875, 2.637751220703125, 2.6369423828125, 2.637287353515625, 2.6384208984375, 2.6376162109375, 2.637705078125, 2.6376396484375, 2.636984375, 2.636735595703125, 2.637454345703125, 2.63906005859375, 2.641005615234375, 2.639869873046875, 2.638488525390625, 2.638159912109375, 2.638043212890625, 2.6367958984375, 2.6363330078125, 2.637013916015625, 2.63684716796875, 2.63693115234375, 2.636872802734375, 2.64043212890625, 2.636421142578125, 2.63861962890625, 2.63849169921875, 2.638710693359375, 2.637824951171875, 2.6379111328125, 2.636610595703125, 2.6375966796875, 2.637263916015625, 2.63708984375, 2.637641845703125, 2.637759521484375, 2.6372197265625, 2.63781591796875, 2.63773583984375, 2.638950439453125, 2.637347900390625, 2.63666796875, 2.638234619140625, 2.638180419921875, 2.637719482421875, 2.63872216796875, 2.639690673828125, 2.63703662109375, 2.636780517578125, 2.636748779296875, 2.637224853515625, 2.636233642578125, 2.636378173828125, 2.636863525390625, 2.63923193359375, 2.638593994140625, 2.638180419921875, 2.63895556640625, 2.638899169921875, 2.637263916015625, 2.63600537109375, 2.637727783203125, 2.63825, 5.43155078125, 2.637918212890625, 2.63883056640625, 2.63889208984375, 2.63790185546875, 2.637695068359375, 2.636559326171875, 2.636966796875, 2.63714208984375, 2.639033447265625, 2.6404404296875, 2.640530517578125, 2.64198046875, 2.640649169921875, 2.637720458984375, 2.637758544921875, 2.639655029296875, 2.63736328125, 2.63708251953125, 2.636771240234375, 2.636508056640625, 2.63598583984375, 2.6370068359375, 2.636777587890625, 2.636590087890625, 2.63606884765625, 2.63714111328125, 2.63913671875, 2.637537353515625, 2.63705712890625, 2.636992431640625, 2.638615478515625, 2.636080078125, 2.637097900390625, 2.636854248046875, 
2.640509033203125, 2.63925146484375, 2.6389052734375, 2.6383544921875, 2.63872705078125, 2.638085205078125, 2.6382275390625, 2.637551513671875, 2.637571044921875, 2.637200439453125, 2.63806982421875, 2.637642822265625, 2.637718505859375, 2.6373642578125, 2.6366044921875, 2.638297119140625, 2.63918798828125, 2.63943896484375, 2.6392646484375, 2.639478759765625, 2.637856689453125, 2.638085205078125, 2.638904296875, 2.6383564453125, 2.639803466796875, 2.640150634765625, 2.637856689453125, 2.640256103515625, 5.4306796875, 2.637127685546875, 2.6382705078125, 2.640280517578125, 2.639151123046875, 2.639075439453125, 2.63817529296875, 2.638035888671875, 2.638784423828125, 2.639287353515625, 2.637106201171875, 2.637173828125, 2.6368388671875, 2.6376591796875, 2.635720703125, 2.63617333984375, 2.636442626953125, 2.63904150390625, 2.6368818359375, 2.635509765625, 2.636674072265625, 2.6366484375, 2.637701171875, 2.638306396484375, 2.64076806640625, 2.63686865234375, 2.6378896484375, 2.639107177734375, 2.6401259765625, 2.6394111328125, 2.63695166015625, 2.63650927734375, 2.636439453125, 2.63674267578125, 2.637043701171875, 2.637275146484375, 2.637382568359375, 2.63693115234375, 2.636669921875, 2.637486083984375, 2.636729248046875, 2.636916748046875, 2.6372802734375, 2.638066650390625, 2.637075439453125, 2.637048828125, 2.636240966796875, 2.637787109375, 2.636506103515625, 2.636660888671875, 2.63712548828125, 2.637421630859375, 2.637157470703125, 2.636517333984375, 2.637552734375, 2.636777587890625, 2.63666064453125, 2.6360966796875, 2.637487060546875, 2.636655517578125, 2.6372158203125, 2.637021240234375, 2.63826025390625, 5.43210595703125, 2.637992919921875, 2.6383369140625, 2.636921875, 2.637557861328125, 2.637305908203125, 2.63912646484375, 2.63752099609375, 2.6370693359375, 2.638551025390625, 2.6377646484375, 2.638067626953125, 2.637068359375, 2.63798876953125, 2.6374892578125, 2.63644775390625, 2.636535888671875, 2.637727783203125, 2.63798876953125, 2.637382568359375, 2.637177734375, 2.637806640625, 2.63628173828125, 2.6376171875, 2.63735302734375, 2.63743896484375, 2.6397236328125, 2.637740966796875, 2.637970458984375, 2.6363720703125, 2.636199951171875, 2.6363935546875, 2.638331787109375, 2.636755859375, 2.636908447265625, 2.636675048828125, 2.63809033203125, 2.636525634765625, 2.63657373046875, 2.63596435546875, 2.63809130859375, 2.6373671875, 2.636396484375, 2.637010009765625, 2.637177734375, 2.636538818359375, 2.637276123046875, 2.638035888671875, 2.636796875, 2.636739501953125, 2.636539794921875, 2.63828076171875, 2.63714404296875, 2.637462646484375, 2.637360107421875, 2.638277587890625, 2.636430419921875, 2.6370283203125, 2.6365234375, 2.638316650390625, 2.636391357421875, 2.636579833984375, 2.636466064453125]",tokens/s,0.3734541728532224,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File 
""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9ca-1d834c9f323b0b817d31ef84;ee948139-9732-450c-a061-6b0b9ca9c9c7) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.0,,,,MB,1723.056128,9941.024768,0.0,9294.577664,8910.495232,s,10,10.523069458007814,1.0523069458007812,0.0008028232056597039,1.0523904418945311,1.0530852783203124,1.05345859375,1.05375724609375,"[1.0527669677734375, 1.0538319091796875, 1.05145703125, 1.0517464599609374, 1.052252197265625, 1.0511856689453125, 1.0525286865234376, 1.0530023193359375, 1.0528663330078125, 1.051431884765625]",tokens/s,243.27502638043498,kWh,1.2425229681862726e-05,6.805541535795782e-06,6.044174279779546e-05,7.967251401545396e-05,tokens/kWh,3213153.283331113,MB,1723.056128,9941.024768,0.0,9294.577664,9220.876288,s,10,626.3534921875,62.63534921875,0.004239119732727424,62.635099609375004,62.63956796875,62.641526171875,62.643092734375,"[62.6319921875, 62.6329375, 62.6391328125, 62.643484375, 62.63139453125, 62.6282421875, 62.63577734375, 62.634421875, 62.63843359375, 62.63767578125]",tokens/s,1.0058218048721415,kWh,0.0007394197557369868,0.0004052695339850652,0.003591932262432407,0.004736621552154459,tokens/kWh,13300.619292952455,,s,629,634.8295839233394,1.009268018956025,0.1250286498200473,0.9942149047851563,0.9945757568359375,0.99473427734375,2.0461909814453128,"[0.9937213745117187, 0.9941636962890625, 0.9939281616210938, 0.9936691284179687, 0.9940469970703125, 0.9936640014648438, 0.99386572265625, 0.9940029296875, 0.9938134765625, 0.9938718872070312, 0.9940316162109375, 0.9940879516601563, 0.9947422485351562, 0.9942005615234375, 0.994029541015625, 0.993860595703125, 0.9938984985351562, 0.994188232421875, 0.9941381225585938, 0.9940950927734375, 0.9939527587890625, 0.9939947509765625, 0.99481396484375, 0.994408447265625, 0.994361328125, 0.9940614013671875, 0.99426806640625, 0.9940326538085937, 0.9939323120117187, 0.993944580078125, 0.9940070190429687, 0.994107421875, 0.994229248046875, 0.9940459594726563, 0.9943674926757813, 0.9942251586914063, 0.99422412109375, 0.9942691650390625, 0.9943838500976563, 0.994155517578125, 0.994197509765625, 0.9944873046875, 0.9943777465820313, 0.994418701171875, 0.994387939453125, 0.9942783813476562, 0.9941801147460938, 0.9943582763671875, 0.9945180053710938, 0.9940408325195312, 0.9938134765625, 0.9940326538085937, 0.9944760131835938, 0.9945057373046875, 0.9942804565429687, 0.99428662109375, 0.9941749877929688, 0.994018310546875, 0.9942005615234375, 0.994018310546875, 0.9942138671875, 0.99439306640625, 2.048400390625, 0.9937254638671875, 0.9939384155273437, 0.993649658203125, 0.993681396484375, 0.993491943359375, 0.9935380249023438, 0.993723388671875, 0.9939885864257813, 0.993955810546875, 0.9941115112304687, 0.9941227416992188, 0.9936732177734375, 0.9938882446289062, 0.993818603515625, 0.9939773559570313, 0.9937152099609375, 0.9938954467773438, 0.994029541015625, 0.9940746459960937, 0.9940223999023438, 0.9943040161132812, 
0.9939876098632813, 0.9940018920898438, 0.993870849609375, 0.9941780395507812, 0.9940029296875, 0.9939568481445312, 0.9940223999023438, 0.9943408813476563, 0.9943121948242187, 0.9944913940429687, 0.994107421875, 0.9944105224609375, 0.9942425537109375, 0.9945681762695312, 0.9944022827148438, 0.9939404907226562, 0.994323486328125, 0.9944729614257812, 0.9945559692382813, 0.9941841430664062, 0.9939691772460938, 0.9944022827148438, 0.9942015991210937, 0.9941749877929688, 0.996316162109375, 0.994234375, 0.9943941040039063, 0.9945528564453125, 0.9943756713867188, 0.9942384643554687, 0.99406640625, 0.9946685180664062, 0.9946071166992188, 0.9947156372070313, 0.9943460083007812, 0.9942159423828125, 0.9944033203125, 0.994250732421875, 0.994207763671875, 0.9942732543945313, 0.9941258544921875, 2.0457523193359375, 0.9937326049804688, 0.993923095703125, 0.9941954345703125, 0.9940490112304687, 0.9940633544921875, 0.9942056884765625, 0.9942149047851563, 0.9938370361328125, 0.9937274780273437, 0.9940162353515625, 0.9940469970703125, 0.99371826171875, 0.9940367431640625, 0.9938534545898438, 0.9942271728515625, 0.9941575927734375, 0.9941626586914063, 0.9939773559570313, 0.9939947509765625, 0.993728515625, 0.993818603515625, 0.99376025390625, 0.9947381591796876, 0.9940807495117188, 0.9942988891601563, 0.9943746337890625, 0.9940838623046875, 0.9939465942382812, 0.9941636962890625, 0.9940090942382812, 0.99420263671875, 0.9942149047851563, 0.9942138671875, 0.9943306274414062, 0.9944053955078125, 0.9946675415039062, 0.9946388549804688, 0.9943070678710938, 0.9942528076171875, 0.9945692138671876, 0.9944391479492187, 0.9944893188476562, 0.9945906982421875, 0.9944791259765625, 0.99445556640625, 0.9944688720703125, 0.9956966552734375, 0.9946471557617188, 0.9943448486328125, 0.99428662109375, 0.9943275756835938, 0.9942988891601563, 0.9942845458984375, 0.99435107421875, 0.9946736450195313, 0.9949030151367187, 0.99470849609375, 0.9946101684570312, 0.99445556640625, 0.9944708862304688, 0.9945303344726563, 0.9944258422851563, 2.046361572265625, 0.9943121948242187, 0.993902587890625, 0.9938494262695312, 0.9939803466796875, 0.993817626953125, 0.99403369140625, 0.9938892822265625, 0.993890380859375, 0.9941790161132813, 0.9943521118164063, 0.994566162109375, 0.9950842895507812, 0.9945149536132812, 0.9943418579101563, 0.994255859375, 0.9942322998046875, 0.9944514770507813, 0.9944678344726563, 0.9941493530273438, 0.99386572265625, 0.994466796875, 0.994135009765625, 0.9944791259765625, 0.993997802734375, 0.9939465942382812, 0.993850341796875, 0.994050048828125, 0.994281494140625, 0.9941104736328125, 0.9941370849609374, 0.99445556640625, 0.9942169799804688, 0.9945046997070313, 0.9944360961914063, 0.994355224609375, 0.9945057373046875, 0.9943777465820313, 0.9944063720703125, 0.9943849487304688, 0.9944248046875, 0.9944248046875, 0.9944043579101562, 0.9945538330078125, 0.9957181396484375, 0.9942650756835938, 0.9944627075195313, 0.9945119018554688, 0.9939578857421875, 0.9942630615234375, 0.9940695190429687, 0.994323486328125, 0.9945487060546875, 0.9944248046875, 0.9942774047851562, 0.9945712890625, 0.9945681762695312, 0.994572265625, 0.9946071166992188, 0.9948231811523438, 0.994545654296875, 0.9945897216796875, 0.994703369140625, 2.0469268798828124, 0.9937100830078125, 0.99370703125, 0.9939507446289062, 0.9940214233398438, 0.993982421875, 0.9939998779296875, 0.994187255859375, 0.9942363891601562, 0.994165771484375, 0.9937623291015625, 0.9936098022460937, 0.9937017822265625, 0.99392919921875, 0.9935421142578125, 0.9941094360351562, 
0.9936434936523437, 0.9947597045898438, 0.9939476318359375, 0.9940572509765625, 0.993876953125, 0.9940459594726563, 0.9939384155273437, 0.9936783447265625, 0.9934899291992187, 0.9937745971679688, 0.9935821533203125, 0.9939537353515625, 0.993924072265625, 0.9939793701171875, 0.9942067260742188, 0.9939005737304687, 0.9939332885742187, 0.9940510864257812, 0.9938903198242187, 0.9940438842773438, 0.9939251098632812, 0.9943121948242187, 0.9945211181640625, 0.9941473388671875, 0.9944022827148438, 0.9940101318359374, 0.9942968139648437, 0.9944330444335937, 0.994639892578125, 0.9945938110351562, 0.9943070678710938, 0.99437158203125, 0.9943593139648438, 0.9942528076171875, 0.9945589599609375, 0.994482177734375, 0.9939876098632813, 0.9945620727539063, 0.9944330444335937, 0.9943951416015625, 0.9943367919921875, 0.9962434692382812, 0.9943162841796875, 0.994186279296875, 0.9945906372070312, 0.9944248657226562, 0.9944125366210937, 2.04558642578125, 0.993912841796875, 0.9936803588867188, 0.9937315673828125, 0.9937705688476562, 0.9936362915039062, 0.9941083984375, 0.9939169311523437, 0.993712158203125, 0.9938135375976562, 0.9937837524414063, 0.9940541381835938, 0.9938093872070313, 0.9940275268554688, 0.9939824829101562, 0.9938954467773438, 0.9938401489257812, 0.9937479858398437, 0.9936363525390625, 0.9938779907226563, 0.9936895751953125, 0.9938565063476562, 0.9942282104492187, 0.9945733032226562, 0.9943101196289063, 0.994271240234375, 0.9942517700195312, 0.9937531127929687, 0.9937540893554687, 0.9940654296875, 0.9936906127929688, 0.9937561645507812, 0.9941299438476563, 0.994255859375, 0.9943173217773438, 0.99435107421875, 0.9943480224609375, 0.9938595581054688, 0.9955552978515625, 0.9940582275390625, 0.994145263671875, 0.9937991943359376, 0.9941483764648438, 0.994471923828125, 0.9942374267578125, 0.9941565551757813, 0.994735107421875, 0.99420263671875, 0.9940562133789063, 0.994302978515625, 0.9941094360351562, 0.9940899658203125, 0.9943009033203125, 0.9945149536132812, 0.9943971557617187, 0.9942517700195312, 0.9943009033203125, 0.9943582763671875, 0.9941473388671875, 0.9944699096679688, 0.9942067260742188, 0.9939999389648437, 0.994462646484375, 2.0474276123046873, 0.9942875366210937, 0.9945169677734375, 0.9941104736328125, 0.99388623046875, 0.99411865234375, 0.9940193481445313, 0.9938565063476562, 0.9939681396484376, 0.9939568481445312, 0.9942251586914063, 0.9939885864257813, 0.9937736206054687, 0.9940674438476562, 0.9934888916015625, 0.9936240844726563, 0.9936025390625, 0.9939957885742188, 0.9941309204101563, 0.9939783935546875, 0.9941585693359375, 0.9935872192382813, 0.9937633056640625, 0.99496142578125, 0.9941268310546875, 0.9944862670898438, 0.993723388671875, 0.9938739013671875, 0.9938882446289062, 0.9944063720703125, 0.9942282104492187, 0.9941043090820313, 0.9945272216796875, 0.9945446166992188, 0.9939815063476563, 0.9940366821289063, 0.9942149047851563, 0.9942435913085937, 0.9943531494140625, 0.9939906616210937, 0.994450439453125, 0.9943357543945313, 0.9943040161132812, 0.9945149536132812, 0.9941370849609374, 0.9942384643554687, 0.99449853515625, 0.9946624145507813, 0.9942548217773437, 0.9945487060546875, 0.9945855712890626, 0.9943418579101563, 0.9944760131835938, 0.9945589599609375, 0.9944545288085938, 0.9943593139648438, 0.9944330444335937, 0.9942958374023437, 0.9943746337890625, 0.9943889770507812, 0.9947330322265625, 0.9947310180664063, 0.9944688720703125, 2.047277099609375, 0.9937192993164062, 0.9936015625, 0.993828857421875, 0.9938534545898438, 0.9940787353515625, 0.9938462524414062, 
0.9938677978515625, 0.9937264404296875, 0.9940316162109375, 0.9937715454101562, 0.9938134765625, 0.9937797241210937, 0.9938841552734375, 0.9937469482421875, 0.9938042602539062, 0.9948897094726562, 0.99428759765625, 0.99430810546875, 0.9943807983398437, 0.9940869140625, 0.994150390625, 0.9942937622070313, 0.994207763671875, 0.9942947998046875, 0.9942835083007813, 0.9943101196289063, 0.994366455078125, 0.9940070190429687, 0.9944422607421874, 0.9940172729492187, 0.9939844970703124, 0.9939251098632812, 0.9939476318359375, 0.9940101318359374, 0.9939158935546875, 0.9941954345703125, 0.9944647827148437, 0.994376708984375, 0.9946132202148438, 0.9943787231445312, 0.9942466430664062, 0.994313232421875, 0.9944729614257812, 0.9945261840820312, 0.9942916870117188, 0.9944391479492187, 0.9942671508789063, 0.994545654296875, 0.994798583984375, 0.9944873046875, 0.9943193359375, 0.994164794921875, 0.9943551635742187, 0.9943255004882813, 0.99406640625, 0.9942916870117188, 0.994587646484375, 0.9942916870117188, 0.9949767456054688, 0.9941903076171875, 0.9943889770507812, 0.9943162841796875, 2.047072265625, 0.9936138305664063, 0.9937510375976563, 0.9939783935546875, 0.9938709106445313, 0.9944656982421874, 0.993997802734375, 0.99403369140625, 0.993713134765625, 0.9936732177734375, 0.9935554809570313, 0.9939323120117187, 0.9941073608398437, 0.994260986328125, 0.9937418212890625, 0.9940961303710938, 0.9939844970703124, 0.9946102294921875, 0.993966064453125, 0.9940264892578125, 0.9938206787109375, 0.9939199829101563, 0.9943121948242187, 0.9942937622070313, 0.9941575927734375, 0.9946552124023438, 0.9939906616210937, 0.9938114624023437, 0.9936793823242187, 0.9941647338867188, 0.9941442260742187, 0.994471923828125, 0.9945323486328125, 0.99468798828125, 0.9943142700195312, 0.994466796875, 0.9942467041015625, 0.9940725708007813, 0.9942282104492187, 0.994255859375, 0.9944166259765626, 0.9944391479492187, 0.9945436401367187, 0.9948764038085938, 0.99462451171875, 0.9944658203125, 0.994460693359375, 0.994155517578125, 0.9942097778320312, 0.9942763671875, 0.9942702026367187, 0.994260986328125, 0.9944309692382812, 0.9945098266601563, 0.9942968139648437, 0.9946491088867188, 0.9945579223632812, 0.99460302734375, 0.994281494140625, 0.9945108642578125, 0.99460302734375, 0.9943797607421875, 0.9946961669921875, 2.04813623046875, 0.99420263671875, 0.99418115234375, 0.9938411254882813, 0.9938524169921875, 0.9939671020507812, 0.9941248168945312, 0.994044921875, 0.9940910034179687, 0.9938984985351562, 0.9937213745117187, 0.9939066772460937, 0.9943142700195312, 0.9939824829101562, 0.9937520751953125, 0.9941227416992188, 0.9939323120117187, 0.9941923828125, 0.9938759765625, 0.9943316650390625, 0.994208740234375, 0.99416064453125, 0.9944412231445312, 0.99399169921875, 0.9939773559570313, 0.9942149658203125, 0.9955389404296875, 0.9943828735351562, 0.9939456176757813, 0.9944596557617188, 0.9941442260742187, 0.9940377807617188, 0.9945589599609375, 0.994135009765625, 0.9938985595703125, 0.9942609252929687, 0.994212890625, 0.9942732543945313, 0.9943726196289062, 0.994460693359375, 0.9944248657226562, 0.9942844848632812, 0.9949808349609375, 0.9943121948242187, 0.9940910034179687, 0.994150390625, 0.9942640380859376, 0.9941688232421875, 0.9940387573242188, 0.9946736450195313, 0.994753662109375, 0.99428857421875, 0.994555908203125, 0.9943367919921875, 0.9942702026367187, 0.994302978515625, 0.994234375, 0.9944371337890625, 0.9942466430664062, 0.9943602905273438, 0.9947944946289062, 0.99437158203125, 
0.9947586669921875]",tokens/s,0.9908170884423626,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1273.409536,921.174016,0.0,274.726912,220.646912,s,10,0.326115104675293,0.032611510467529296,0.0012484582231843272,0.03218409538269043,0.032784641265869136,0.034564048767089836,0.035987574768066403,"[0.036343456268310544, 0.03226867294311524, 0.03213779067993164, 0.032141822814941406, 0.03203395080566406, 0.03214742279052735, 0.032220767974853515, 0.03238921737670898, 0.03208832168579102, 0.032343681335449216]",tokens/s,7849.989047728858,kWh,3.834838485734976e-07,2.1012961289151069e-07,8.25654520552228e-07,1.419267982017236e-06,tokens/kWh,180374674.2994524,MB,1273.409536,921.174016,0.0,274.726912,250.722304,s,10,20.085640136718748,2.008564013671875,0.006065788108395492,2.006720520019531,2.0181213745117184,2.0185785705566404,2.018944327392578,"[2.018019775390625, 2.006443115234375, 2.0069979248046876, 2.0103148193359375, 2.0040799560546874, 2.00180322265625, 2.00428125, 2.0133179931640623, 2.0013463134765623, 2.0190357666015624]",tokens/s,31.36569189290069,kWh,2.3731767884623614e-05,1.3005590929520364e-05,4.874369069043466e-05,8.548104950457861e-05,tokens/kWh,737005.457526882,,s,629,20.34066634368894,0.03233810229521298,0.0038819833531807996,0.03179007911682129,0.0323544822692871,0.032856268310546874,0.06427897552490235,"[0.03209625625610352, 0.031882240295410154, 0.031893503189086916, 0.032045055389404296, 0.03220787048339844, 0.031922176361083986, 0.031958015441894534, 0.03196518325805664, 0.031784959793090824, 0.03223756790161133, 0.031974399566650394, 0.031955968856811526, 0.03202969741821289, 0.03302809524536133, 0.03192835235595703, 0.031831008911132816, 0.03254272079467774, 0.033226783752441404, 0.032270305633544924, 0.032140289306640625, 0.031940607070922854, 0.032451583862304685, 0.03203788757324219, 0.03188531112670898, 0.032231422424316404, 0.031936511993408204, 0.03298099136352539, 0.03213516616821289, 0.03167948722839355, 0.031526912689208986, 0.031693824768066405, 0.03172966384887695, 0.03170918464660644, 0.03180953598022461, 0.03179929542541504, 0.031628288269042966, 0.031508480072021484, 0.03155660820007324, 0.03158937644958496, 0.03198259162902832, 0.03151974487304687, 0.03146342468261719, 0.031752191543579104, 0.03203481674194336, 0.03182796859741211, 0.03228160095214844, 0.03201331329345703, 0.03220377731323242, 0.03181056022644043, 0.031865856170654294, 0.031473663330078124, 0.031682559967041016, 0.031628288269042966, 0.03277414321899414, 0.03190790367126465, 0.0320777587890625, 0.03253350448608398, 0.032473087310791016, 0.032159744262695314, 0.03166924858093262, 0.03159654426574707, 0.031492095947265625, 0.06451507568359376, 0.032, 0.03342745590209961, 0.032282623291015625, 0.03176243209838867, 0.0318525447845459, 0.03180646324157715, 0.03143065643310547, 0.03154841613769531, 0.03168870353698731, 0.031514623641967776, 
0.03150643157958984, 0.03214438247680664, 0.033160190582275394, 0.03213312149047851, 0.03156684875488281, 0.031958015441894534, 0.03179110336303711, 0.03184332847595215, 0.03142963218688965, 0.03191705513000488, 0.031768575668334964, 0.03164057540893555, 0.0317573127746582, 0.03179827117919922, 0.031731712341308595, 0.03301375961303711, 0.031920127868652344, 0.03172352027893066, 0.03289395141601562, 0.031927295684814457, 0.03162521553039551, 0.03168358421325684, 0.03179110336303711, 0.031666175842285156, 0.03177369689941406, 0.031678464889526366, 0.03179315185546875, 0.031765504837036135, 0.031699968338012696, 0.03142860794067383, 0.03162419128417969, 0.031615999221801756, 0.03140505599975586, 0.03147776031494141, 0.031458303451538085, 0.03143987274169922, 0.03174399948120117, 0.0317255687713623, 0.03142758369445801, 0.03156991958618164, 0.031783935546875, 0.03183616065979004, 0.0317890567779541, 0.03189248085021973, 0.03191910362243652, 0.031932416915893554, 0.032277503967285154, 0.032382976531982424, 0.0317706241607666, 0.03181260871887207, 0.031749120712280275, 0.03174502372741699, 0.06404198455810547, 0.03170611190795898, 0.031704063415527346, 0.031628288269042966, 0.03138764762878418, 0.031698944091796875, 0.03154022407531738, 0.03150540733337402, 0.03196416091918945, 0.03177881622314453, 0.032020481109619144, 0.03214233779907227, 0.03199692726135254, 0.031780864715576174, 0.03226009750366211, 0.032115711212158206, 0.03199180793762207, 0.03156787109375, 0.031453184127807614, 0.03144908714294434, 0.0314839038848877, 0.03146649551391602, 0.031458303451538085, 0.03172147178649903, 0.031893503189086916, 0.0328007698059082, 0.03156787109375, 0.031731712341308595, 0.031849472045898435, 0.03165388870239258, 0.03178700828552246, 0.03168259239196777, 0.03179311943054199, 0.031783935546875, 0.03212083053588867, 0.03178188705444336, 0.03179724884033203, 0.033549312591552735, 0.03284889602661133, 0.031834112167358396, 0.03212492752075195, 0.03198259162902832, 0.03174297523498535, 0.031731712341308595, 0.031753215789794925, 0.03182387161254883, 0.031473663330078124, 0.03165388870239258, 0.03157196807861328, 0.031453184127807614, 0.031734783172607424, 0.03165695953369141, 0.03180953598022461, 0.03184537506103516, 0.03207372665405273, 0.032285694122314454, 0.032556095123291016, 0.0321484146118164, 0.0324956169128418, 0.03182592010498047, 0.03148287963867188, 0.03179110336303711, 0.03181056022644043, 0.06443007659912109, 0.03183001518249512, 0.03196928024291992, 0.03182694435119629, 0.03170099258422852, 0.03181056022644043, 0.0315043830871582, 0.03177881622314453, 0.03177574348449707, 0.03191910362243652, 0.0321710090637207, 0.03204403305053711, 0.03181875228881836, 0.031703039169311525, 0.03174604797363281, 0.03175526428222656, 0.031764480590820314, 0.031731712341308595, 0.03183923149108887, 0.03340390396118164, 0.032922622680664065, 0.032161792755126956, 0.03223756790161133, 0.031734783172607424, 0.03291340637207031, 0.032074752807617186, 0.03202150344848633, 0.031716352462768556, 0.03155046463012695, 0.031692800521850584, 0.03226828765869141, 0.03171737670898438, 0.03140505599975586, 0.03134976005554199, 0.03197952079772949, 0.03200307083129883, 0.031696895599365234, 0.03169484710693359, 0.03176755142211914, 0.0321710090637207, 0.03153919982910156, 0.03143987274169922, 0.03146444892883301, 0.03160371208190918, 0.03245568084716797, 0.0314337272644043, 0.03163545608520508, 0.03231846237182617, 0.03222323226928711, 0.031942655563354495, 0.03174399948120117, 0.03162112045288086, 0.03139788818359375, 
0.032302078247070314, 0.03218841552734375, 0.03275161743164062, 0.0322529296875, 0.03198361587524414, 0.03178803253173828, 0.031665151596069335, 0.03129241561889649, 0.03186073684692383, 0.031955968856811526, 0.06424063873291015, 0.03157811164855957, 0.03165695953369141, 0.03165081596374512, 0.03146854400634766, 0.031693824768066405, 0.03204095840454101, 0.03246182250976563, 0.03183103942871094, 0.03218636703491211, 0.03204095840454101, 0.031784959793090824, 0.03218431854248047, 0.03177779197692871, 0.031698944091796875, 0.03156684875488281, 0.031731712341308595, 0.03239424133300781, 0.03142758369445801, 0.03172966384887695, 0.03203379058837891, 0.03207167816162109, 0.03187302398681641, 0.033006591796875, 0.03175526428222656, 0.0316180477142334, 0.031959039688110355, 0.031514623641967776, 0.031884288787841795, 0.03161497688293457, 0.031440895080566404, 0.031442943572998046, 0.03174092864990234, 0.03172966384887695, 0.03197747230529785, 0.03176755142211914, 0.03205222320556641, 0.03165695953369141, 0.03194777679443359, 0.03171327972412109, 0.031562751770019534, 0.03168972778320313, 0.031731712341308595, 0.03172966384887695, 0.03186483192443847, 0.032143360137939454, 0.03146240043640137, 0.03147776031494141, 0.03138355255126953, 0.0313753604888916, 0.03180646324157715, 0.0317255687713623, 0.03186380767822266, 0.03160678482055664, 0.032494590759277346, 0.03222528076171875, 0.032045055389404296, 0.03183206367492676, 0.03183206367492676, 0.03180851173400879, 0.03134464073181152, 0.031470592498779294, 0.031731712341308595, 0.06429388427734375, 0.03167436790466309, 0.031923200607299806, 0.03159756851196289, 0.03151667213439941, 0.031647743225097655, 0.031802400588989255, 0.03235427093505859, 0.03198259162902832, 0.03172147178649903, 0.031888383865356446, 0.03179417610168457, 0.031507455825805664, 0.03172761535644531, 0.031579135894775394, 0.031692800521850584, 0.0315156478881836, 0.031440895080566404, 0.03173990440368652, 0.03155356788635254, 0.031624160766601565, 0.03153510475158691, 0.033051647186279294, 0.03177369689941406, 0.031783935546875, 0.03135897636413574, 0.03179007911682129, 0.031714303970336914, 0.031848447799682614, 0.031476736068725586, 0.03139993667602539, 0.03133440017700195, 0.03155763244628906, 0.03146956825256347, 0.03141024017333984, 0.031344575881958006, 0.03177574348449707, 0.03194675254821777, 0.03219046401977539, 0.03170918464660644, 0.031678464889526366, 0.03172249603271484, 0.03172147178649903, 0.031513599395751955, 0.03161497688293457, 0.03177779197692871, 0.03177574348449707, 0.03137228775024414, 0.03147369575500488, 0.031712223052978515, 0.03170918464660644, 0.03205222320556641, 0.03178803253173828, 0.03221299362182617, 0.03220889663696289, 0.03131391906738281, 0.03146956825256347, 0.03166720008850098, 0.031661056518554685, 0.03207376098632812, 0.03320111846923828, 0.032717823028564456, 0.032519168853759765, 0.06434508514404297, 0.031736831665039066, 0.031752191543579104, 0.031851520538330076, 0.03142963218688965, 0.03157196807861328, 0.032121856689453124, 0.03311513519287109, 0.03222534561157227, 0.03178079986572266, 0.03179827117919922, 0.03183206367492676, 0.03181465530395508, 0.03183206367492676, 0.03177267265319824, 0.03184025573730469, 0.031542272567749025, 0.03150540733337402, 0.03178598403930664, 0.03185663986206055, 0.03176038360595703, 0.03312639999389649, 0.03152179145812988, 0.032102401733398435, 0.03188019180297851, 0.03193036842346191, 0.031324159622192385, 0.031851520538330076, 0.03158016014099121, 0.031389696121215824, 0.03189555168151856, 
0.03170099258422852, 0.03173785591125488, 0.031285247802734374, 0.03177068710327149, 0.031356864929199216, 0.03158527946472168, 0.031526912689208986, 0.03160985565185547, 0.03185868835449219, 0.031848447799682614, 0.031437824249267575, 0.0317388801574707, 0.03174297523498535, 0.0319866886138916, 0.03175014305114746, 0.031922176361083986, 0.031817728042602536, 0.03180544090270996, 0.03139072036743164, 0.03181260871887207, 0.03197952079772949, 0.03224166488647461, 0.03209011077880859, 0.03164057540893555, 0.03165184020996094, 0.031438848495483396, 0.03207372665405273, 0.03194675254821777, 0.03155148887634277, 0.03187302398681641, 0.03263488006591797, 0.03184025573730469, 0.06429801940917969, 0.03187299156188965, 0.03203276824951172, 0.032094207763671875, 0.03197849655151367, 0.031958015441894534, 0.031926271438598636, 0.031834112167358396, 0.03189452743530274, 0.03218841552734375, 0.032353279113769534, 0.0318791675567627, 0.03179315185546875, 0.031887359619140625, 0.03185868835449219, 0.03156480026245117, 0.03182796859741211, 0.0322426872253418, 0.032043006896972655, 0.03181670379638672, 0.03299020767211914, 0.032031742095947266, 0.03186892890930176, 0.032304126739501955, 0.03237171173095703, 0.0319866886138916, 0.031454208374023435, 0.031525888442993165, 0.03151155281066895, 0.031492095947265625, 0.03167334365844727, 0.031730688095092774, 0.03897753524780274, 0.032933887481689454, 0.031889408111572266, 0.03211980819702148, 0.031719423294067385, 0.031352832794189454, 0.03140505599975586, 0.031687711715698244, 0.031402975082397463, 0.03169177627563476, 0.031749120712280275, 0.03167948722839355, 0.03165695953369141, 0.0316753921508789, 0.03172659111022949, 0.03139072036743164, 0.03140812873840332, 0.03175526428222656, 0.03156684875488281, 0.03263590240478516, 0.03202252960205078, 0.03183923149108887, 0.0319682559967041, 0.031649791717529296, 0.031784959793090824, 0.03172352027893066, 0.03167129516601563, 0.0314337272644043, 0.03163443183898926, 0.03186483192443847, 0.03146137619018555, 0.06431948852539063, 0.03137843132019043, 0.03158016014099121, 0.031425535202026365, 0.031645696640014646, 0.031730688095092774, 0.03186486434936524, 0.031797216415405276, 0.0318156795501709, 0.03175628852844238, 0.031699968338012696, 0.031410175323486327, 0.03138355255126953, 0.03278540802001953, 0.03300454330444336, 0.032178176879882815, 0.0318156795501709, 0.031855615615844726, 0.031870975494384765, 0.03283967971801758, 0.0314204158782959, 0.0314081916809082, 0.031652799606323244, 0.03211775970458984, 0.031666175842285156, 0.03154022407531738, 0.031373311996459964, 0.03173785591125488, 0.031546367645263675, 0.0316753921508789, 0.0315043830871582, 0.031529983520507815, 0.031764480590820314, 0.03139481544494629, 0.03133440017700195, 0.03138559913635254, 0.03137843132019043, 0.031735807418823245, 0.031543296813964845, 0.03143065643310547, 0.03159859275817871, 0.031936511993408204, 0.031735807418823245, 0.03193548774719238, 0.031458303451538085, 0.031456256866455076, 0.031716352462768556, 0.0317388801574707, 0.032037952423095706, 0.031414207458496095, 0.0326901741027832, 0.03216793441772461, 0.031585344314575194, 0.03156883239746094, 0.03191398429870605, 0.031631359100341795, 0.031903743743896484, 0.03156076812744141, 0.03184736061096191, 0.03181363105773926, 0.03224371337890625, 0.03225702285766602, 0.031955968856811526, 0.06444134521484375, 0.03154431915283203, 0.031665151596069335, 0.03288780975341797, 0.031886335372924804, 0.03204403305053711, 0.03172659111022949, 0.031509504318237305, 0.03173785591125488, 
0.032, 0.032320510864257815, 0.03182387161254883, 0.03230003356933594, 0.03187404823303223, 0.032198688507080075, 0.03247203063964844, 0.03235532760620117, 0.032702465057373044, 0.03311718368530273, 0.03155148887634277, 0.03170201683044434, 0.03225094223022461, 0.03220576095581055, 0.03237580871582031, 0.032230400085449216, 0.03218431854248047, 0.03240447998046875, 0.032449535369873043, 0.03225600051879883, 0.03243622589111328, 0.0319498233795166, 0.03165081596374512, 0.031528959274291994, 0.03161702346801758, 0.0321003532409668, 0.03190784072875977, 0.03203788757324219, 0.032328704833984374, 0.032467967987060545, 0.03184435272216797, 0.031665151596069335, 0.03177369689941406, 0.03174604797363281, 0.03233894348144531, 0.031704063415527346, 0.03172454452514648, 0.032039936065673826, 0.03196211242675781, 0.031926271438598636, 0.0328611831665039, 0.03221200180053711, 0.03217712020874024, 0.03181056022644043, 0.03184332847595215, 0.032064510345458985, 0.03207372665405273, 0.03184339141845703, 0.03191596794128418, 0.031817760467529294, 0.031796192169189455, 0.03153408050537109, 0.032161792755126956, 0.03232460784912109]",tokens/s,30.92327406447812,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,3081.781248,9521.594368,0.0,8875.147264,8264.141824,s,10,10.63840625,1.0638406249999999,0.0011581823391520636,1.0637650756835937,1.0652460571289062,1.0654749572753905,1.0656580773925781,"[1.0649481201171875, 1.065703857421875, 1.0634771728515624, 1.0627923583984376, 1.063096435546875, 1.0621585693359374, 1.064052978515625, 1.0644849853515626, 1.0651951904296875, 1.06249658203125]",tokens/s,240.63754850497463,kWh,1.2549749844604067e-05,6.874677577252441e-06,5.877399146358986e-05,7.819841888544636e-05,tokens/kWh,3273723.479946787,MB,3081.781248,9521.594368,0.0,8875.147264,8556.643328,s,10,631.86266796875,63.186266796874996,0.007118498406389631,63.1850546875,63.19375234375,63.198333203125,63.201997890625,"[63.192734375, 63.2029140625, 63.18565625, 63.17730078125, 63.18101953125, 63.1805546875, 63.18579296875, 63.18146875, 63.1907734375, 63.184453125]",tokens/s,0.9970520999844192,kWh,0.0007460630791551536,0.0004089103833046283,0.003496975492022614,0.004651948954482396,tokens/kWh,13542.710940389017,,s,629,640.4335947265614,1.0181774160994632,0.12635276340148643,1.0029219970703125,1.0034655395507812,1.0036467651367187,2.0656750390625,"[1.00284619140625, 1.002883056640625, 1.0025042114257812, 1.0028574829101562, 1.0028175659179688, 1.002925048828125, 1.0030203247070313, 1.0030990600585938, 1.0027622680664063, 1.0032742309570313, 1.0026455078125, 1.003062255859375, 1.0025420532226563, 1.0029680786132813, 1.0027857666015625, 1.00307763671875, 1.0029732055664062, 1.0030858154296876, 1.002630126953125, 1.0030980834960936, 1.0028267822265624, 1.003293701171875, 1.0026495971679688, 1.0032005004882814, 1.0028339233398438, 1.0035537719726562, 1.0033336181640624, 
1.0031492919921876, 1.0029854736328125, 1.0029179077148438, 1.0027100219726564, 1.0030469360351562, 1.0027202758789062, 1.0029025268554688, 1.002692626953125, 1.003177978515625, 1.0032864990234376, 1.0032291870117187, 1.0031237182617188, 1.0033694458007814, 1.0031052856445313, 1.0034606323242188, 1.0029014892578125, 1.0031063232421875, 1.002977294921875, 1.0030796508789062, 1.0032691040039063, 1.003052001953125, 1.003093994140625, 1.0031728515625, 1.0026219482421874, 1.003230224609375, 1.0031892700195313, 1.003472900390625, 1.0031318969726561, 1.0034933471679688, 1.0038251342773437, 1.0035701904296874, 1.0035916748046876, 1.0035220336914064, 1.0029598999023437, 1.0031124267578124, 2.068518798828125, 1.0033325805664062, 1.0031472778320312, 1.0031912841796875, 1.0029844360351563, 1.0031206665039063, 1.0034401245117188, 1.0029373168945312, 1.0032005004882814, 1.00276123046875, 1.003198486328125, 1.0033233642578125, 1.0032588500976563, 1.0031349487304688, 1.0031708374023438, 1.0031646728515624, 1.0034074096679688, 1.0032445068359375, 1.0034769897460938, 1.0028963623046876, 1.0031943969726562, 1.0029393920898437, 1.0032639770507812, 1.0037217407226562, 1.0034319458007812, 1.0026045532226562, 1.003093017578125, 1.0026946411132813, 1.0035282592773438, 1.0035670166015624, 1.0035159301757812, 1.0033766479492188, 1.0034708251953126, 1.0032630004882812, 1.0034298706054687, 1.002693603515625, 1.0028206176757812, 1.0027888793945312, 1.0029660034179688, 1.0028257446289062, 1.0030294799804687, 1.00284619140625, 1.003236328125, 1.0038517456054687, 1.0034739379882813, 1.0031769409179687, 1.003452392578125, 1.0032343139648439, 1.0034002075195312, 1.0033592529296875, 1.0034892578125, 1.003430908203125, 1.0036060180664061, 1.0037442626953126, 1.0036817626953125, 1.003325439453125, 1.0032271118164062, 1.0029475708007813, 1.0032077026367188, 1.0028257446289062, 1.0033950805664062, 1.0032373657226563, 1.0033449096679687, 2.06620166015625, 1.003240478515625, 1.0030346069335938, 1.0028810424804688, 1.0028892211914062, 1.0025840454101562, 1.0026762084960938, 1.0028451538085938, 1.0030079956054687, 1.0030458984375, 1.0029598999023437, 1.0025256958007813, 1.002809326171875, 1.0025021362304687, 1.002545166015625, 1.002588134765625, 1.0025830688476562, 1.0028103637695311, 1.002876953125, 1.0027571411132812, 1.0027847900390625, 1.0027694091796875, 1.0029219970703125, 1.0027468872070313, 1.0030151977539064, 1.002692626953125, 1.0029998168945313, 1.0028185424804688, 1.002977294921875, 1.0030653686523439, 1.0028308715820313, 1.0027254028320312, 1.0027396850585937, 1.002481689453125, 1.0026741943359374, 1.002397705078125, 1.0026148071289063, 1.0031001586914063, 1.003051025390625, 1.0032445678710937, 1.0031226806640625, 1.00276123046875, 1.0029619140625, 1.0027110595703126, 1.00299365234375, 1.0028328857421875, 1.0028206176757812, 1.0037903442382812, 1.0038609619140626, 1.0040064086914062, 1.0035978393554688, 1.0031810302734374, 1.0034503784179687, 1.0034176025390624, 1.0035722045898436, 1.0029711303710938, 1.0032793579101562, 1.003167724609375, 1.0029445190429687, 1.0031759643554687, 1.0028656616210938, 1.002893310546875, 1.0028052368164062, 2.0654345703125, 1.0026424560546876, 1.0026322021484375, 1.0025789184570313, 1.0027899169921874, 1.0024918823242188, 1.0032875366210938, 1.002587158203125, 1.002587158203125, 1.0025154418945312, 1.0026045532226562, 1.0023638916015625, 1.002629150390625, 1.0024959716796875, 1.0028257446289062, 1.0028626098632814, 1.0027550659179687, 1.0028287963867188, 1.0026536865234374, 
1.0024857788085937, 1.0027151489257813, 1.0024734497070313, 1.0031738891601563, 1.0028011474609375, 1.003345947265625, 1.002503173828125, 1.0029957275390624, 1.0027591552734374, 1.003062255859375, 1.003240478515625, 1.0029967651367186, 1.002982421875, 1.0035916748046876, 1.0024970092773438, 1.0031472778320312, 1.0028124389648438, 1.0027591552734374, 1.0026690673828125, 1.0027110595703126, 1.0026762084960938, 1.0028493041992188, 1.0025471801757813, 1.0032220458984376, 1.0027745361328124, 1.0030745849609375, 1.0030172119140626, 1.0028216552734375, 1.002661865234375, 1.0027734985351562, 1.00288818359375, 1.0029906005859375, 1.00273046875, 1.0033080444335938, 1.0029434814453124, 1.0028789672851564, 1.0029700927734375, 1.0029117431640624, 1.0027509765625, 1.002767333984375, 1.0027171630859375, 1.0029578247070312, 1.002545166015625, 1.003062255859375, 2.0657685546875, 1.0028359375, 1.0028124389648438, 1.0024847412109374, 1.0028124389648438, 1.0025441284179688, 1.0027601928710939, 1.0023495483398437, 1.0028533935546875, 1.0025604858398438, 1.0030325927734376, 1.0025574340820314, 1.0029946899414062, 1.0025062255859376, 1.0027683715820312, 1.002640380859375, 1.0027284545898438, 1.0027683715820312, 1.0030612182617187, 1.002598388671875, 1.0030366821289063, 1.0028635864257813, 1.0032056274414063, 1.0027100219726564, 1.0028585205078124, 1.0025328369140625, 1.0029660034179688, 1.0023075561523438, 1.0032752685546875, 1.002946533203125, 1.0032271118164062, 1.0027387084960937, 1.0030264282226562, 1.0025748291015626, 1.0029188842773438, 1.0027284545898438, 1.002767333984375, 1.0028052368164062, 1.0030530395507813, 1.0026967163085938, 1.0031349487304688, 1.0030233764648437, 1.0033059692382813, 1.002514404296875, 1.0027438354492189, 1.0027816772460938, 1.0031032104492188, 1.0027970581054688, 1.0033837890625, 1.0027315063476563, 1.0034063110351563, 1.0029496459960938, 1.0030530395507813, 1.0026383056640624, 1.00305615234375, 1.0027919311523437, 1.0027427978515624, 1.0032557983398438, 1.0037903442382812, 1.0031646728515624, 1.003399169921875, 1.0027448120117188, 1.0029649658203126, 2.064649169921875, 1.0027888793945312, 1.0026015014648437, 1.0022328491210937, 1.0030059814453125, 1.0026045532226562, 1.0029281005859374, 1.0032117919921875, 1.0038660888671875, 1.0030458984375, 1.0028523559570313, 1.00279296875, 1.0029219970703125, 1.00295068359375, 1.0030786743164062, 1.00269775390625, 1.0029230346679687, 1.0029240112304687, 1.0026373291015624, 1.0023751220703125, 1.0028328857421875, 1.0023956298828125, 1.002513427734375, 1.002144775390625, 1.003087890625, 1.0024324951171875, 1.0031769409179687, 1.0026843872070312, 1.0027018432617187, 1.0026710815429687, 1.0026076049804689, 1.0026281127929688, 1.00263525390625, 1.0025758666992188, 1.00335107421875, 1.0025952758789063, 1.0027919311523437, 1.0030172119140626, 1.0029312133789063, 1.0026270751953126, 1.0026127319335938, 1.0024099731445313, 1.0028635864257813, 1.0025287475585938, 1.003124755859375, 1.0027868041992187, 1.0029752197265625, 1.0032691040039063, 1.0028994750976563, 1.0029813842773438, 1.0031022338867188, 1.0025799560546875, 1.0030069580078125, 1.0030786743164062, 1.0034902954101563, 1.00322509765625, 1.0034298706054687, 1.0037340087890625, 1.0030960693359374, 1.00282470703125, 1.0031349487304688, 1.0028359375, 1.0032772827148437, 2.06746826171875, 1.0032711791992188, 1.00282470703125, 1.0029578247070312, 1.0026793212890626, 1.0024990844726562, 1.0027325439453125, 1.0022778930664062, 1.0023464965820312, 1.0023444213867188, 1.0030346069335938, 
1.0042449951171875, 1.0037616577148438, 1.0032691040039063, 1.0031185913085938, 1.0024017944335937, 1.0026875, 1.0032916259765625, 1.0031452026367187, 1.0032855224609376, 1.0035599365234376, 1.0035138549804687, 1.0038927612304687, 1.0032435302734375, 1.002841064453125, 1.0024775390625, 1.0027438354492189, 1.0022512817382812, 1.002450927734375, 1.00248779296875, 1.002692626953125, 1.0029946899414062, 1.0030489501953126, 1.0032496337890624, 1.0034002075195312, 1.003052001953125, 1.0032271118164062, 1.0028707885742187, 1.0030796508789062, 1.0030028686523438, 1.0040872802734375, 1.00299365234375, 1.0028626098632814, 1.00265576171875, 1.002756103515625, 1.002808349609375, 1.0027335815429688, 1.0026813354492188, 1.00265673828125, 1.002919921875, 1.0032476196289062, 1.0029752197265625, 1.0029025268554688, 1.00297216796875, 1.0027991333007813, 1.0026178588867187, 1.002767333984375, 1.0026639404296875, 1.0027540283203125, 1.0026741943359374, 1.0029127807617189, 1.002988525390625, 1.0027018432617187, 2.066872314453125, 1.0026639404296875, 1.00259228515625, 1.0022307739257812, 1.0025277709960938, 1.0030377197265625, 1.0026045532226562, 1.002482666015625, 1.0029168701171876, 1.002771484375, 1.0028124389648438, 1.0024642333984375, 1.0027807006835938, 1.0025420532226563, 1.0025379638671874, 1.002556396484375, 1.002534912109375, 1.0028687133789063, 1.002840087890625, 1.0028328857421875, 1.0028451538085938, 1.0025236206054688, 1.0027632446289063, 1.0025379638671874, 1.0027888793945312, 1.0026751708984376, 1.003261962890625, 1.0027161865234375, 1.0028973999023438, 1.0030325927734376, 1.00279296875, 1.0027479248046876, 1.0025471801757813, 1.0023690185546874, 1.0029291381835939, 1.0029486083984376, 1.0027970581054688, 1.0028840942382813, 1.0029475708007813, 1.0030294799804687, 1.0031657104492187, 1.0029014892578125, 1.003304931640625, 1.0028472290039063, 1.0032578735351563, 1.0034647216796875, 1.0028277587890626, 1.0030632934570312, 1.0030980834960936, 1.00314111328125, 1.0028748779296874, 1.002660888671875, 1.0028431396484374, 1.0028472290039063, 1.0029691162109375, 1.0030172119140626, 1.002925048828125, 1.0033325805664062, 1.0035189819335937, 1.0036541137695312, 1.004179443359375, 1.0036951293945313, 1.002919921875, 2.06862841796875, 1.0036357421875, 1.0038538208007812, 1.0032230224609375, 1.0033796997070312, 1.002618896484375, 1.002708984375, 1.0022072143554688, 1.0026813354492188, 1.0022891235351563, 1.002555419921875, 1.0025963745117188, 1.00295166015625, 1.0026690673828125, 1.002914794921875, 1.003019287109375, 1.0031646728515624, 1.002956787109375, 1.0028861694335938, 1.0026751708984376, 1.003303955078125, 1.0028124389648438, 1.0030386962890625, 1.0028052368164062, 1.00309912109375, 1.0027315063476563, 1.0034688110351562, 1.0020556640625, 1.003325439453125, 1.0033530883789064, 1.003283447265625, 1.0034749145507813, 1.0035108032226563, 1.0035435791015626, 1.0037903442382812, 1.0029168701171876, 1.0031943969726562, 1.0024591064453126, 1.0026526489257812, 1.0026332397460938, 1.0027837524414063, 1.0034381103515626, 1.0032527465820313, 1.0027008056640625, 1.0030745849609375, 1.002945556640625, 1.0031943969726562, 1.0027479248046876, 1.0030663452148438, 1.002672119140625, 1.0029179077148438, 1.0026751708984376, 1.002841064453125, 1.0025738525390624, 1.0032977905273437, 1.0027786254882813, 1.0031134643554687, 1.00257177734375, 1.0031124267578124, 1.0026741943359374, 1.0032947387695312, 1.0030386962890625, 1.0032332763671874, 2.068487060546875, 1.0029629516601561, 1.0030684204101563, 1.0028482666015626, 
1.0030899047851562, 1.0025728149414062, 1.0029260864257812, 1.0024949951171875, 1.0026229858398437, 1.0027960205078126, 1.003514892578125, 1.0023598022460938, 1.0027949829101563, 1.0025420532226563, 1.0026260375976563, 1.00259228515625, 1.0028103637695311, 1.002603515625, 1.00250830078125, 1.002608642578125, 1.0032783203125, 1.0025861206054687, 1.0032824096679687, 1.0027950439453126, 1.0030663452148438, 1.0023987426757812, 1.0027417602539062, 1.0023075561523438, 1.0025850830078125, 1.0028851318359375, 1.003109375, 1.0031339721679688, 1.003430908203125, 1.0028308715820313, 1.00331005859375, 1.0026096801757813, 1.0028687133789063, 1.0031749267578125, 1.003378662109375, 1.0034810791015625, 1.0036776733398438, 1.0027991333007813, 1.0026577758789061, 1.0023618774414063, 1.002555419921875, 1.0025308227539063, 1.00274072265625, 1.0025973510742188, 1.0027908935546874, 1.003040771484375, 1.0038589477539062, 1.002471435546875, 1.003072509765625, 1.0029865112304688, 1.0029404296875, 1.0026116943359376, 1.003293701171875, 1.00347802734375, 1.0035588989257813, 1.00347802734375, 1.0037841796875, 1.0034053344726563, 1.0032077026367188]",tokens/s,0.9821471034300691,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File 
""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaac7-230defcf3ca338ce0b586efa;26c9392d-28c6-4de7-aa1c-ac69f7615fa0) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1378.24256,1340.604416,0.0,694.157312,598.498816,s,10,0.5756522903442383,0.05756522903442383,0.0016369727383896268,0.0568012638092041,0.05918429183959961,0.06064101028442383,0.061806385040283206,"[0.06209772872924805, 0.05672822570800781, 0.056688095092773436, 0.05679859161376953, 0.05886057662963867, 0.057347423553466795, 0.05689571380615235, 0.05671388626098633, 0.05680393600463867, 0.05671811294555664]",tokens/s,4447.129009890898,kWh,6.782567046937489e-07,3.716537143845926e-07,1.9805285685486103e-06,3.0304389876269517e-06,tokens/kWh,84476209.89738722,MB,1378.24256,1340.604416,0.0,694.157312,659.031552,s,10,35.66919848632813,3.566919848632813,0.03950000205433062,3.552089111328125,3.624692236328125,3.64104033203125,3.65411880859375,"[3.57450146484375, 3.532454345703125, 3.56766015625, 3.657388427734375, 3.621059326171875, 3.5402509765625, 3.56031982421875, 3.532387939453125, 3.539317626953125, 3.5438583984375]",tokens/s,17.662297633109883,kWh,4.217196008799568e-05,2.3111615147015983e-05,0.00011743306696224983,0.00018271664219726155,tokens/kWh,344796.1786205822,,s,629,36.1255710258484,0.05743334026366993,0.0068485805293125165,0.05611315155029297,0.058175487518310545,0.05834158096313477,0.11305373931884766,"[0.05799008178710938, 0.05708492660522461, 0.05782732772827148, 0.05744332885742188, 0.057527294158935545, 0.05809561538696289, 0.0565401611328125, 0.05911040115356445, 0.05911040115356445, 0.058807296752929686, 0.058791934967041014, 0.05813555145263672, 0.05813248062133789, 0.05811199951171875, 0.058054656982421876, 0.05821952056884765, 0.057041919708251954, 0.056956928253173826, 0.057338878631591796, 0.05677363204956055, 0.05590323257446289, 0.05592268753051758, 0.05590425491333008, 0.05611008071899414, 0.05601484680175781, 0.05587148666381836, 0.055932926177978515, 0.056509441375732425, 0.05619200134277344, 0.056089599609375, 0.05595647811889649, 0.055981056213378906, 0.056079360961914064, 0.05589503860473633, 0.0559554557800293, 0.056065025329589846, 0.0564029426574707, 0.05732863998413086, 0.057398273468017576, 0.05806694412231445, 0.05649203109741211, 0.05601792144775391, 0.05590425491333008, 0.05598822402954102, 0.05598310470581055, 0.05660671997070312, 0.057234432220458986, 0.05611315155029297, 0.05587251281738281, 0.055923713684082034, 0.05598822402954102, 0.056018943786621096, 0.05615820693969727, 0.05591244888305664, 0.05669478225708008, 0.05584281539916992, 0.05593395233154297, 0.05587148666381836, 0.056016895294189455, 0.056000511169433595, 0.05590016174316406, 0.05584896087646484, 0.11391897583007812, 0.056886272430419924, 0.05697945785522461, 0.05592268753051758, 0.0564951057434082, 0.05716787338256836, 0.056594432830810545, 0.05600153732299805, 0.055695358276367186, 0.05583564758300781, 0.05596057510375976, 0.055913471221923826, 0.05584896087646484, 0.05577318572998047, 0.05587148666381836, 
0.056035327911376956, 0.055943168640136716, 0.05580799865722656, 0.05776486587524414, 0.057622528076171874, 0.05631180953979492, 0.055818241119384764, 0.055777278900146485, 0.05587251281738281, 0.0560118408203125, 0.05598918533325195, 0.05593395233154297, 0.05613772964477539, 0.056908798217773435, 0.05586841583251953, 0.055809024810791016, 0.05591961669921875, 0.05592063903808594, 0.05590425491333008, 0.056174591064453126, 0.05596364974975586, 0.05545369720458984, 0.05593804931640625, 0.055806976318359375, 0.05584896087646484, 0.05586431884765625, 0.055890945434570315, 0.055839744567871094, 0.05590630340576172, 0.055741439819335936, 0.05607321548461914, 0.055858177185058595, 0.05598515319824219, 0.05599641418457031, 0.056035327911376956, 0.056120319366455076, 0.05593907165527344, 0.05596364974975586, 0.055932926177978515, 0.055947265625, 0.05584076690673828, 0.05575884628295898, 0.055823360443115234, 0.05576704025268555, 0.05586636734008789, 0.055923713684082034, 0.05593088150024414, 0.0557916145324707, 0.11292671966552735, 0.05593600082397461, 0.056169471740722655, 0.055927806854248044, 0.05587558364868164, 0.05590835189819336, 0.055769088745117185, 0.056676353454589844, 0.05695078277587891, 0.05639987182617188, 0.056005630493164066, 0.05686995315551758, 0.05640902328491211, 0.05631488037109375, 0.05584588623046875, 0.057027584075927736, 0.05787955093383789, 0.05715967941284179, 0.05641113662719727, 0.05674803161621094, 0.057038848876953124, 0.05642956924438477, 0.05662105560302735, 0.05600460815429688, 0.05647359848022461, 0.056395774841308595, 0.05592473602294922, 0.05653299331665039, 0.05728460693359375, 0.056796192169189456, 0.056970207214355466, 0.05783552169799805, 0.056842239379882815, 0.05652377700805664, 0.05740031814575195, 0.056120319366455076, 0.05686272048950195, 0.05625446319580078, 0.05662105560302735, 0.056174591064453126, 0.05706854248046875, 0.056346622467041016, 0.05597798538208008, 0.05667020797729492, 0.05655551910400391, 0.05687705612182617, 0.057256961822509764, 0.05748121643066406, 0.057133056640625, 0.05615923309326172, 0.055962623596191405, 0.05652070236206055, 0.056753150939941405, 0.056771583557128906, 0.056970241546630856, 0.05616844940185547, 0.057388031005859375, 0.05657702255249023, 0.057078784942626956, 0.05769420623779297, 0.05860966491699219, 0.056180736541748044, 0.05587148666381836, 0.11307008361816406, 0.05602406311035156, 0.05604044723510742, 0.056018943786621096, 0.058791934967041014, 0.0583741455078125, 0.05816524887084961, 0.05799321746826172, 0.058156032562255856, 0.058518527984619144, 0.058423297882080075, 0.05813350296020508, 0.058103809356689455, 0.05830451202392578, 0.058180606842041016, 0.058041343688964846, 0.05809254455566406, 0.05821952056884765, 0.05822873687744141, 0.05809151840209961, 0.058120193481445315, 0.05805158233642578, 0.05811609649658203, 0.05830348968505859, 0.058006526947021485, 0.05869670486450195, 0.058369022369384765, 0.058347518920898435, 0.058028030395507815, 0.058055679321289064, 0.058210304260253906, 0.0581058578491211, 0.058054656982421876, 0.058288158416748045, 0.058100704193115235, 0.05831987380981445, 0.058275840759277345, 0.05816524887084961, 0.05805977630615235, 0.05837823867797851, 0.05834444808959961, 0.05804032135009766, 0.05805158233642578, 0.05809971237182617, 0.058052608489990234, 0.05806489562988281, 0.05809254455566406, 0.05809664154052734, 0.05816729736328125, 0.0582042236328125, 0.05809862518310547, 0.05814169692993164, 0.05818675231933594, 0.057608192443847656, 0.05821440124511719, 
0.058124320983886715, 0.058272735595703125, 0.058308609008789064, 0.058169345855712894, 0.058071041107177736, 0.058175487518310545, 0.05823897552490234, 0.0583372802734375, 0.11740262603759766, 0.058298366546630856, 0.0585544319152832, 0.05803107070922851, 0.05821948623657226, 0.05826047897338867, 0.05810793685913086, 0.058076126098632816, 0.0581662712097168, 0.05814988708496094, 0.05825843048095703, 0.05809356689453125, 0.05850931167602539, 0.058336254119873046, 0.058426368713378904, 0.05825331115722656, 0.058418174743652344, 0.0581396484375, 0.05809254455566406, 0.05811097717285156, 0.05806796646118164, 0.05830144119262695, 0.05805977630615235, 0.05824512100219727, 0.058036224365234375, 0.058142719268798826, 0.05810892868041992, 0.05846227264404297, 0.058175487518310545, 0.058110912322998046, 0.05813248062133789, 0.0584089584350586, 0.05830451202392578, 0.0581396484375, 0.058211326599121094, 0.05804851150512695, 0.0581662712097168, 0.05817350387573242, 0.05801055908203125, 0.0581662712097168, 0.05825331115722656, 0.05611929702758789, 0.05594521713256836, 0.055949310302734374, 0.05611008071899414, 0.056019966125488284, 0.055982078552246094, 0.055962623596191405, 0.05608448028564453, 0.05570457458496094, 0.056204288482666016, 0.05575372695922851, 0.055787521362304686, 0.05749350357055664, 0.05674803161621094, 0.055940097808837894, 0.05600460815429688, 0.056019966125488284, 0.05588787078857422, 0.05589913558959961, 0.05583052825927735, 0.055907329559326174, 0.05695283126831055, 0.11374079895019532, 0.055927806854248044, 0.05590323257446289, 0.05608652877807617, 0.05595340728759766, 0.056174591064453126, 0.05607321548461914, 0.05605686569213867, 0.056255455017089846, 0.05570969772338867, 0.05589606475830078, 0.05596063995361328, 0.056186817169189454, 0.05588991928100586, 0.055947265625, 0.055981056213378906, 0.056204288482666016, 0.05590016174316406, 0.055982078552246094, 0.05592575836181641, 0.05591862487792969, 0.055920608520507814, 0.05575475311279297, 0.055944190979003904, 0.05595852661132812, 0.055911422729492184, 0.05676134490966797, 0.05655039978027344, 0.055940097808837894, 0.055934974670410156, 0.05602918243408203, 0.056057857513427733, 0.055940097808837894, 0.05590425491333008, 0.056027137756347656, 0.05611724853515625, 0.055994369506835937, 0.05601484680175781, 0.05608448028564453, 0.055962623596191405, 0.05571177673339844, 0.055808990478515626, 0.05793484878540039, 0.057431041717529295, 0.056551422119140625, 0.05656576156616211, 0.05593600082397461, 0.05665689468383789, 0.05718732833862305, 0.055906368255615235, 0.058727359771728514, 0.05800755310058594, 0.05647257614135742, 0.056513534545898435, 0.05671116638183594, 0.05588172912597656, 0.056030208587646485, 0.05598003387451172, 0.05590528106689453, 0.05593600082397461, 0.05535129547119141, 0.05544755172729492, 0.05648691177368164, 0.11342233276367188, 0.05600358581542969, 0.05600153732299805, 0.05581107330322266, 0.055964672088623046, 0.05609062576293945, 0.05611724853515625, 0.05599846267700195, 0.056147968292236325, 0.05704908752441406, 0.05751091384887695, 0.05676031875610352, 0.05597798538208008, 0.05692620849609375, 0.05621247863769531, 0.05710335922241211, 0.05665280151367187, 0.055989246368408206, 0.0569989128112793, 0.056965118408203126, 0.056476673126220706, 0.05757952117919922, 0.05608243179321289, 0.057240577697753904, 0.05665280151367187, 0.056497150421142575, 0.057215999603271485, 0.0573573112487793, 0.05771366500854492, 0.056035327911376956, 0.05668044662475586, 0.05652479934692383, 0.056048641204833986, 
0.056766464233398435, 0.05603737640380859, 0.05681568145751953, 0.05561439895629883, 0.055951358795166016, 0.05671014404296875, 0.05602099227905273, 0.05684531021118164, 0.05712998580932617, 0.05622579193115235, 0.05718425750732422, 0.05587046432495117, 0.05815193557739258, 0.05669580841064453, 0.056288257598876956, 0.05639168167114258, 0.05692416000366211, 0.05890764617919922, 0.05709619140625, 0.055943168640136716, 0.05594521713256836, 0.05592473602294922, 0.055702529907226565, 0.05582233428955078, 0.05554380798339844, 0.05590425491333008, 0.0567193603515625, 0.05655756759643555, 0.05586636734008789, 0.05595238494873047, 0.11324825286865234, 0.056097793579101565, 0.05586841583251953, 0.0565401611328125, 0.05603737640380859, 0.05586841583251953, 0.05586841583251953, 0.055787582397460934, 0.05582944107055664, 0.05609062576293945, 0.05594524765014648, 0.05589398574829101, 0.05601484680175781, 0.05601587295532227, 0.05612851333618164, 0.056167423248291014, 0.05636508941650391, 0.05600048065185547, 0.05591449737548828, 0.05615820693969727, 0.05590425491333008, 0.056697856903076174, 0.056491008758544924, 0.05597491073608398, 0.0558837776184082, 0.055894016265869144, 0.05606195068359375, 0.056013824462890625, 0.055877632141113284, 0.0558131217956543, 0.0560445442199707, 0.05591756820678711, 0.05575478363037109, 0.05573014450073242, 0.055923713684082034, 0.05588889694213867, 0.05579673767089844, 0.05603123092651367, 0.05595340728759766, 0.05692416000366211, 0.05709721755981445, 0.0572149772644043, 0.05634969711303711, 0.0558919677734375, 0.05590528106689453, 0.0558766098022461, 0.055841793060302736, 0.05595033645629883, 0.05608448028564453, 0.05609267044067383, 0.05588479995727539, 0.05583769607543945, 0.05596160125732422, 0.0559554557800293, 0.05604556655883789, 0.05690572738647461, 0.05594214248657227, 0.055856128692626954, 0.05609267044067383, 0.05597183990478516, 0.0560076789855957, 0.05595340728759766, 0.055993408203125, 0.11313145446777344, 0.05605068969726563, 0.05714432144165039, 0.05607219314575195, 0.055894016265869144, 0.05605990219116211, 0.055856128692626954, 0.055787521362304686, 0.0557916145324707, 0.05595647811889649, 0.05597183990478516, 0.055567359924316405, 0.05594521713256836, 0.05598720169067383, 0.05587251281738281, 0.055973888397216794, 0.05577318572998047, 0.055757823944091796, 0.055972862243652347, 0.05598310470581055, 0.05603839874267578, 0.056036350250244144, 0.05586438369750977, 0.055879615783691404, 0.05582950210571289, 0.056062976837158204, 0.055747615814208985, 0.05714838409423828, 0.05821952056884765, 0.0560711669921875, 0.05586739349365234, 0.05611520004272461, 0.05608857727050781, 0.05595340728759766, 0.0560076789855957, 0.055930912017822264, 0.05865881729125977, 0.05698665618896484, 0.0559431037902832, 0.05590835189819336, 0.05589811325073242, 0.05590323257446289, 0.05572403335571289, 0.05595852661132812, 0.055923713684082034, 0.05552844619750977, 0.05647052764892578, 0.05722623825073242, 0.05685452651977539, 0.05584384155273438, 0.05696409606933594, 0.05613260650634765, 0.05573222351074219, 0.05603123092651367, 0.05594214248657227, 0.05622579193115235, 0.05601587295532227, 0.055932926177978515, 0.05593907165527344, 0.05829119873046875, 0.05608038330078125, 0.056581119537353515, 0.056043521881103515, 0.11301171112060547, 0.05596160125732422, 0.05600460815429688, 0.05594828796386719, 0.0573306884765625, 0.05730099105834961, 0.056136703491210936, 0.05597183990478516, 0.055757823944091796, 0.05651865768432617, 0.05613772964477539, 0.05588684844970703, 
0.05589811325073242, 0.055934974670410156, 0.055951358795166016, 0.05582438278198242, 0.055932926177978515, 0.05587251281738281, 0.05707059097290039, 0.057232383728027345, 0.057132030487060545, 0.056750080108642575, 0.05586227035522461, 0.055964672088623046, 0.05600972747802734, 0.05593088150024414, 0.05628518295288086, 0.05651148986816406, 0.056172542572021485, 0.05634764862060547, 0.055940097808837894, 0.0572149772644043, 0.05617049789428711, 0.05602201461791992, 0.05591654586791992, 0.05619609451293945, 0.0580239372253418, 0.056460289001464846, 0.055949310302734374, 0.056018943786621096, 0.056033279418945314, 0.05574860763549805, 0.0558960952758789, 0.056247264862060546, 0.05595443344116211, 0.05688217544555664, 0.05635276794433594, 0.055982078552246094, 0.055987232208251955, 0.0560035514831543, 0.05590937423706055, 0.05664051055908203, 0.05720883178710937, 0.056853504180908204, 0.056360958099365234, 0.0558919677734375, 0.05595852661132812, 0.05596160125732422, 0.05597491073608398, 0.05603228759765625, 0.0562564811706543, 0.055975936889648435, 0.055949310302734374]",tokens/s,17.41148948344487,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4397.162496,24111.480832,0.0,23465.033728,21690.932224,s,10,26.23831787109375,2.623831787109375,0.003031033246657479,2.6228023681640624,2.627721069335937,2.628540100097656,2.629195324707031,"[2.629359130859375, 2.621765380859375, 2.6223037109375, 2.621092041015625, 2.623301025390625, 2.620621826171875, 2.62457861328125, 2.6205380859375, 2.627218994140625, 2.6275390625]",tokens/s,97.56723020801203,kWh,3.09448529779911e-05,1.6958929474258183e-05,0.00014797261837800056,0.00019587640083024984,tokens/kWh,1306946.619985398,MB,4397.162496,24111.480832,0.0,23465.033728,21890.217984,s,10,1555.758796875,155.5758796875,0.01362531366110068,155.577890625,155.5897375,155.594603125,155.598495625,"[155.55940625, 155.57425, 155.583734375, 155.59946875, 155.58153125, 155.58503125, 155.568875, 155.58865625, 155.564453125, 155.553390625]",tokens/s,0.4049470915835152,kWh,0.0018369476799004608,0.0010068097738273171,0.008895798588855384,0.011739556042583163,tokens/kWh,5366.4721026484,,s,629,1576.8986267089822,2.50699304723209,0.31148250553470674,2.46929931640625,2.47089541015625,2.471475439453125,5.08945892578125,"[2.468798583984375, 2.46915185546875, 2.46936669921875, 2.469295166015625, 2.469130126953125, 2.468601806640625, 2.46879638671875, 2.46974560546875, 2.468950927734375, 2.468516845703125, 2.468890625, 2.469065673828125, 2.468599853515625, 2.46870947265625, 2.468912109375, 2.469274658203125, 2.468938720703125, 2.46896337890625, 2.4693310546875, 2.46970068359375, 2.469392333984375, 2.46949072265625, 2.47088330078125, 2.4697119140625, 2.469168212890625, 2.468552734375, 2.4692490234375, 2.4690791015625, 2.47006005859375, 2.46929931640625, 2.46951123046875, 2.46925, 2.469129150390625, 2.468915283203125, 2.46999560546875, 2.4695, 
2.46917724609375, 2.4696279296875, 2.469708740234375, 2.469084228515625, 2.46938427734375, 2.468663330078125, 2.46932275390625, 2.468631591796875, 2.468556884765625, 2.46858447265625, 2.46909326171875, 2.46888232421875, 2.46854248046875, 2.469202880859375, 2.469517333984375, 2.468483154296875, 2.468864013671875, 2.46881494140625, 2.468998046875, 2.469455810546875, 2.469496826171875, 2.469419921875, 2.4694599609375, 2.469094482421875, 2.47101025390625, 2.468877197265625, 5.09138330078125, 2.468682861328125, 2.468284423828125, 2.471290771484375, 2.471075927734375, 2.47086279296875, 2.470519775390625, 2.471779296875, 2.4712724609375, 2.470220703125, 2.469166015625, 2.47142919921875, 2.469087158203125, 2.470717529296875, 2.469718994140625, 2.4686796875, 2.468135986328125, 2.469396484375, 2.47069482421875, 2.469185546875, 2.46943359375, 2.4694208984375, 2.469435302734375, 2.472340576171875, 2.46964404296875, 2.4692294921875, 2.4692080078125, 2.4687861328125, 2.4688701171875, 2.468831298828125, 2.468864013671875, 2.468359130859375, 2.468737060546875, 2.4681728515625, 2.46879345703125, 2.47047265625, 2.471103515625, 2.4709150390625, 2.469086181640625, 2.467948486328125, 2.46850244140625, 2.46847998046875, 2.468877197265625, 2.47016845703125, 2.472048583984375, 2.471004150390625, 2.46881689453125, 2.46902783203125, 2.469129150390625, 2.468443115234375, 2.468276123046875, 2.46843603515625, 2.468770751953125, 2.46885888671875, 2.468476806640625, 2.46837255859375, 2.46976318359375, 2.4682578125, 2.468187255859375, 2.467946533203125, 2.4700517578125, 2.468588623046875, 2.468370361328125, 5.09084375, 2.46865625, 2.4686162109375, 2.471500732421875, 2.47128173828125, 2.47047265625, 2.470948974609375, 2.46904638671875, 2.46915283203125, 2.468949951171875, 2.469103515625, 2.469283935546875, 2.46955615234375, 2.46936669921875, 2.46929931640625, 2.469792724609375, 2.46934521484375, 2.468339599609375, 2.469074951171875, 2.46917724609375, 2.46955322265625, 2.469060546875, 2.471446533203125, 2.471505859375, 2.471520263671875, 2.46911376953125, 2.470220703125, 2.46964111328125, 2.46999658203125, 2.469699462890625, 2.469473388671875, 2.4692724609375, 2.469899169921875, 2.469782470703125, 2.47012158203125, 2.469866455078125, 2.4691630859375, 2.46879345703125, 2.469590087890625, 2.47096533203125, 2.471996337890625, 2.46934326171875, 2.468957275390625, 2.468894775390625, 2.468242431640625, 2.470201416015625, 2.46946923828125, 2.469783447265625, 2.469652587890625, 2.469385009765625, 2.4691455078125, 2.469442626953125, 2.46881494140625, 2.470005859375, 2.469909423828125, 2.46930224609375, 2.469780517578125, 2.46929296875, 2.468728759765625, 2.46875146484375, 2.468708251953125, 2.469340087890625, 2.468431884765625, 5.08925537109375, 2.469267333984375, 2.4689326171875, 2.468461669921875, 2.470912841796875, 2.47052294921875, 2.47040625, 2.4696259765625, 2.470639404296875, 2.470507568359375, 2.469961669921875, 2.4698583984375, 2.470289306640625, 2.470327392578125, 2.470380615234375, 2.470289306640625, 2.47063037109375, 2.4716962890625, 2.4711904296875, 2.469096435546875, 2.46898583984375, 2.469234619140625, 2.46925927734375, 2.469075927734375, 2.468787109375, 2.470892578125, 2.468845458984375, 2.470190185546875, 2.470021240234375, 2.46980908203125, 2.469390380859375, 2.469427001953125, 2.469474365234375, 2.46984912109375, 2.468821044921875, 2.46932470703125, 2.46972412109375, 2.470828125, 2.470804443359375, 2.47084228515625, 2.471381103515625, 2.471739501953125, 2.4696513671875, 2.469551025390625, 2.46862744140625, 
2.468588623046875, 2.468927490234375, 2.469970947265625, 2.469509033203125, 2.468726806640625, 2.470922119140625, 2.47033642578125, 2.470152099609375, 2.47086181640625, 2.469655517578125, 2.469350341796875, 2.469897216796875, 2.4691435546875, 2.468494384765625, 2.469423095703125, 2.470116455078125, 2.470024169921875, 2.4697353515625, 5.09418994140625, 2.47079833984375, 2.471482421875, 2.470703125, 2.4698798828125, 2.469234619140625, 2.469583984375, 2.469041259765625, 2.469078125, 2.468781982421875, 2.469856201171875, 2.470139892578125, 2.4699013671875, 2.46999853515625, 2.469337158203125, 2.4686396484375, 2.46900732421875, 2.46865625, 2.4688505859375, 2.46949365234375, 2.468842529296875, 2.469308349609375, 2.469699462890625, 2.469474365234375, 2.46980810546875, 2.470485107421875, 2.470642578125, 2.470287353515625, 2.46951220703125, 2.46904931640625, 2.47022900390625, 2.470436767578125, 2.46926025390625, 2.469675048828125, 2.47014306640625, 2.46932275390625, 2.468830322265625, 2.469003173828125, 2.469866455078125, 2.46951123046875, 2.468826171875, 2.469062744140625, 2.469338134765625, 2.469421142578125, 2.468864013671875, 2.4689970703125, 2.469686279296875, 2.468821044921875, 2.468958251953125, 2.46921533203125, 2.4694580078125, 2.468634521484375, 2.46913232421875, 2.4693955078125, 2.469814208984375, 2.468588623046875, 2.469433349609375, 2.470032470703125, 2.470792236328125, 2.469285888671875, 2.469370849609375, 2.469603271484375, 2.46991357421875, 5.0895380859375, 2.472522705078125, 2.472004638671875, 2.469856201171875, 2.468981689453125, 2.469172119140625, 2.46925, 2.4692265625, 2.469295166015625, 2.46936376953125, 2.47034765625, 2.468842529296875, 2.470485107421875, 2.471478271484375, 2.469750732421875, 2.468664306640625, 2.471371826171875, 2.468883544921875, 2.471439453125, 2.469444580078125, 2.46917626953125, 2.469329833984375, 2.469687255859375, 2.46869091796875, 2.469650390625, 2.468767822265625, 2.46934619140625, 2.46850048828125, 2.46826806640625, 2.46904443359375, 2.4693974609375, 2.469318603515625, 2.47453076171875, 2.469267333984375, 2.470331298828125, 2.4707685546875, 2.469550048828125, 2.46943017578125, 2.469969970703125, 2.469962646484375, 2.46923486328125, 2.468137939453125, 2.468229248046875, 2.46898779296875, 2.468890625, 2.468874267578125, 2.469168212890625, 2.470299560546875, 2.47147119140625, 2.469370849609375, 2.469350341796875, 2.470095947265625, 2.469555419921875, 2.46888232421875, 2.4688681640625, 2.469992431640625, 2.468869140625, 2.469044189453125, 2.469062744140625, 2.469525390625, 2.468809814453125, 2.46928076171875, 2.468957275390625, 5.09382470703125, 2.469845947265625, 2.4695244140625, 2.469098388671875, 2.46944677734375, 2.469396484375, 2.470770751953125, 2.469032958984375, 2.469140380859375, 2.468855712890625, 2.469170166015625, 2.469623779296875, 2.469095458984375, 2.4704697265625, 2.47050244140625, 2.4702392578125, 2.469874755859375, 2.4695224609375, 2.469214111328125, 2.469747802734375, 2.46868896484375, 2.469425048828125, 2.468915283203125, 2.4693955078125, 2.468820068359375, 2.46910986328125, 2.468548583984375, 2.4704716796875, 2.469411865234375, 2.4691220703125, 2.468193359375, 2.468788330078125, 2.4683017578125, 2.46884033203125, 2.470220703125, 2.4702197265625, 2.468990966796875, 2.468908935546875, 2.4688486328125, 2.4704501953125, 2.470517822265625, 2.470140869140625, 2.469308349609375, 2.470096923828125, 2.469972900390625, 2.4689111328125, 2.468103271484375, 2.468663330078125, 2.468252685546875, 2.468338623046875, 2.467991455078125, 
2.468906982421875, 2.46822802734375, 2.468630615234375, 2.469042236328125, 2.469347412109375, 2.47062939453125, 2.471343017578125, 2.467967041015625, 2.468644775390625, 2.46843701171875, 2.4686162109375, 2.473323486328125, 5.0889111328125, 2.468672607421875, 2.4683642578125, 2.468341796875, 2.468705322265625, 2.468413330078125, 2.46852197265625, 2.468662353515625, 2.468912109375, 2.46987158203125, 2.470073486328125, 2.46920068359375, 2.469920654296875, 2.46879638671875, 2.4687412109375, 2.4697333984375, 2.4695244140625, 2.4696298828125, 2.4695244140625, 2.470005859375, 2.468862060546875, 2.47003125, 2.4691630859375, 2.468483154296875, 2.4710625, 2.469022705078125, 2.46885888671875, 2.4692705078125, 2.469971923828125, 2.47102783203125, 2.47071435546875, 2.4704482421875, 2.471227294921875, 2.470994873046875, 2.469182373046875, 2.46873095703125, 2.47003125, 2.468622314453125, 2.4721171875, 2.468535400390625, 2.47090673828125, 2.470928466796875, 2.470781982421875, 2.470194091796875, 2.470340576171875, 2.468509765625, 2.468907958984375, 2.469888916015625, 2.469969970703125, 2.47013671875, 2.472005859375, 2.47183740234375, 2.47176708984375, 2.471166015625, 2.471228515625, 2.4702783203125, 2.47012451171875, 2.468314208984375, 2.46890087890625, 2.46814208984375, 2.46915283203125, 2.467857421875, 2.46899609375, 5.0967275390625, 2.468865966796875, 2.47231494140625, 2.4689765625, 2.469561279296875, 2.469706787109375, 2.468801513671875, 2.468005859375, 2.46970068359375, 2.469357666015625, 2.470153076171875, 2.47166455078125, 2.4716962890625, 2.47134716796875, 2.4716728515625, 2.469540771484375, 2.46961865234375, 2.46875439453125, 2.468744140625, 2.469203857421875, 2.46841748046875, 2.469667724609375, 2.4690810546875, 2.46848828125, 2.468600830078125, 2.469570556640625, 2.46897265625, 2.468644775390625, 2.46887109375, 2.46993310546875, 2.469518310546875, 2.468893798828125, 2.469458740234375, 2.4689755859375, 2.469214111328125, 2.468822998046875, 2.468765625, 2.469544921875, 2.46893359375, 2.46794140625, 2.46793115234375, 2.468474853515625, 2.468509765625, 2.469032958984375, 2.468798583984375, 2.46907177734375, 2.469205078125, 2.46849853515625, 2.468729736328125, 2.469506103515625, 2.4690390625, 2.4686376953125, 2.4710419921875, 2.469719970703125, 2.4685322265625, 2.468116455078125, 2.468727783203125, 2.4698369140625, 2.46875244140625, 2.46940869140625, 2.469123046875, 2.47010205078125, 2.468192138671875, 5.09570947265625, 2.469843994140625, 2.469245849609375, 2.46862939453125, 2.469123046875, 2.470595703125, 2.46889990234375, 2.468484130859375, 2.468884521484375, 2.469498779296875, 2.46938720703125, 2.468440185546875, 2.4687841796875, 2.46934326171875, 2.468601806640625, 2.469178466796875, 2.46820654296875, 2.469718017578125, 2.469128173828125, 2.468865966796875, 2.468513671875, 2.46963525390625, 2.468724609375, 2.4697353515625, 2.468211669921875, 2.468957275390625, 2.4694794921875, 2.46911083984375, 2.4683447265625, 2.4698837890625, 2.468577392578125, 2.468810791015625, 2.468404296875, 2.46858251953125, 2.469440673828125, 2.469866455078125, 2.468959228515625, 2.4695625, 2.46875732421875, 2.46845849609375, 2.469051513671875, 2.46925830078125, 2.470119384765625, 2.468737060546875, 2.468697998046875, 2.469822509765625, 2.469697509765625, 2.4686376953125, 2.4682802734375, 2.468843505859375, 2.471318603515625, 2.4700693359375, 2.469341064453125, 2.469017578125, 2.469211181640625, 2.469179443359375, 2.468263916015625, 2.46847998046875, 2.4693955078125, 2.468968505859375, 2.469316650390625, 2.46883740234375, 
2.46984814453125]",tokens/s,0.39888423348603846,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpj_4j1da9/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2454.69184,7298.613248,0.0,6652.166144,6323.221504,s,10,7.735312255859376,0.7735312255859375,0.0028429451285762273,0.7729794311523437,0.7762465698242188,0.7783619323730468,0.7800542224121094,"[0.780477294921875, 0.7757764892578125, 0.7711934204101563, 0.770524658203125, 0.7719502563476562, 0.7706873168945313, 0.7725225219726563, 0.7734363403320312, 0.7744036254882812, 0.77434033203125]",tokens/s,330.9497943099635,kWh,9.102796527565035e-06,4.9879214771863185e-06,4.3937086431680494e-05,5.802780443643185e-05,tokens/kWh,4411678.202997362,MB,2454.69184,7298.613248,0.0,6652.166144,6382.564864,s,10,458.5464296875,45.85464296875,0.0073453057278341505,45.852935546875,45.866698828124996,45.8672068359375,45.8676132421875,"[45.8568515625, 45.857828125, 45.85469140625, 45.8511796875, 45.845234375, 45.8491953125, 45.85046484375, 45.84668359375, 45.8665859375, 45.86771484375]",tokens/s,1.3739066738112995,kWh,0.0005414656486152074,0.00029677022395511814,0.002569976393586324,0.003408212266156649,tokens/kWh,18484.764175514054,,s,629,464.77344433593714,0.7389084965595191,0.09178579233429768,0.727773193359375,0.7285446655273438,0.7288094848632812,1.4987512353515624,"[0.72828515625, 0.7276810302734374, 0.7278029174804688, 0.72793701171875, 0.7272877807617187, 0.727383056640625, 0.7272335205078125, 0.7274444580078125, 0.7278551025390625, 0.727720947265625, 0.7276083374023438, 0.7281663818359375, 0.7275867919921875, 0.72804248046875, 0.72800048828125, 0.7276687622070312, 0.7281151733398438, 0.7279267578125, 0.7276728515625, 0.7276615600585937, 0.7278981323242187, 0.7287255249023438, 0.7286405029296875, 0.7275847778320312, 0.7281510620117188, 0.7281069946289063, 0.7279093627929687, 0.7283015747070313, 0.7283804321289062, 0.7276973876953124, 0.7277127685546875, 0.7276400756835938, 0.7282882690429687, 0.7277936401367188, 0.7284705200195313, 0.727784423828125, 0.727952392578125, 0.727930908203125, 0.728369140625, 0.728827880859375, 0.7278551025390625, 0.72924365234375, 0.7275161743164062, 0.7274977416992188, 0.7274014892578125, 0.727947265625, 0.7273707275390625, 0.7273738403320312, 0.7270891723632813, 0.72770458984375, 0.727604248046875, 0.7274721069335938, 0.728226806640625, 0.7281571655273438, 0.7279820556640625, 0.7282749633789063, 0.7275151977539063, 0.7279564208984375, 0.7273912353515625, 0.7277537231445312, 0.7282349853515625, 0.7278714599609375, 1.503824951171875, 0.72740966796875, 0.7277650146484375, 0.7272868041992188, 0.7274219360351563, 0.7278981323242187, 0.7287039794921875, 0.7276185302734375, 
0.7274321899414062, 0.7271383056640625, 0.72722021484375, 0.7271024780273437, 0.7274373168945313, 0.7272499389648438, 0.7280332641601562, 0.7276113891601562, 0.7276943359375, 0.7272652587890625, 0.7283251342773438, 0.7285104370117188, 0.7281961059570312, 0.7280137939453125, 0.7283834838867187, 0.7276124267578125, 0.7279022216796875, 0.7284910278320312, 0.7285155639648437, 0.728142822265625, 0.7280394287109375, 0.7274547119140625, 0.7279277954101563, 0.7275509643554687, 0.7277987670898437, 0.7278591918945313, 0.728226806640625, 0.7301795654296875, 0.726887451171875, 0.727372802734375, 0.7274393310546875, 0.7276984252929688, 0.72789404296875, 0.7279503173828125, 0.7277659912109375, 0.7282677612304688, 0.7285678100585937, 0.7279226684570312, 0.7286671142578125, 0.7283712158203125, 0.7282360229492187, 0.727741455078125, 0.7277588500976563, 0.7276482543945313, 0.727857177734375, 0.7283681030273438, 0.7278796997070313, 0.7283937377929688, 0.727920654296875, 0.7278295288085938, 0.728015869140625, 0.7276452026367187, 0.7281694946289062, 0.7286343383789062, 0.7274926147460937, 1.4987724609375, 0.7273533325195313, 0.727457763671875, 0.7278960571289063, 0.7272560424804687, 0.7277005004882813, 0.7277772827148438, 0.7281305541992188, 0.7275653076171875, 0.727499755859375, 0.7276851196289063, 0.7280271606445312, 0.7270768432617187, 0.7270553588867188, 0.7272847290039063, 0.7274874877929688, 0.7273748779296875, 0.727520263671875, 0.727920654296875, 0.7277373657226562, 0.7273338623046876, 0.728322021484375, 0.7278253784179688, 0.7282452392578125, 0.72855859375, 0.7283507080078125, 0.7274547119140625, 0.727931884765625, 0.7276277465820312, 0.7273011474609375, 0.7271905517578126, 0.727736328125, 0.7286302490234375, 0.728036376953125, 0.727710693359375, 0.7286405029296875, 0.728036376953125, 0.7283230590820312, 0.72854833984375, 0.7284182739257813, 0.7284367065429688, 0.729618408203125, 0.7277322387695313, 0.7279042358398438, 0.7280332641601562, 0.7283035888671875, 0.7280281372070313, 0.7276656494140625, 0.7277639770507812, 0.7281858520507812, 0.7287337036132813, 0.7282145385742187, 0.7282606201171875, 0.7274495849609375, 0.727203857421875, 0.7272243041992188, 0.727235595703125, 0.7272662963867188, 0.7274495849609375, 0.727531494140625, 0.7289282836914063, 0.7277404174804688, 0.7281520385742187, 1.498346435546875, 0.7277035522460937, 0.7284224243164062, 0.7275243530273438, 0.727741455078125, 0.727731201171875, 0.7279708251953125, 0.7275581665039063, 0.7280773315429687, 0.7281356811523437, 0.7283568725585937, 0.7276912841796875, 0.7285411987304687, 0.7277701416015625, 0.7284715576171875, 0.72749462890625, 0.7280516967773437, 0.7273963623046875, 0.7273779296875, 0.7273543701171875, 0.7274270629882813, 0.7274116821289063, 0.727689208984375, 0.7282974853515625, 0.7277066040039063, 0.7288258666992188, 0.72807421875, 0.727720947265625, 0.728501220703125, 0.7287817993164063, 0.728057861328125, 0.7277598876953125, 0.72751513671875, 0.7276728515625, 0.7286773681640625, 0.7274321899414062, 0.7279042358398438, 0.727920654296875, 0.727573486328125, 0.7276513061523437, 0.7273072509765625, 0.7275028686523437, 0.7276226806640625, 0.727731201171875, 0.7275745239257813, 0.727709716796875, 0.7274833984375, 0.7272919311523437, 0.7273953247070313, 0.7276328735351563, 0.7272529907226563, 0.7279380493164063, 0.7277659912109375, 0.7278837890625, 0.7274598388671875, 0.7278622436523438, 0.727636962890625, 0.7282718505859375, 0.7279830932617187, 0.7275888671875, 0.72747314453125, 0.7274137573242188, 0.7276810302734374, 
1.500190673828125, 0.7274137573242188, 0.727794677734375, 0.72776806640625, 0.7275325317382813, 0.728131591796875, 0.7274035034179688, 0.7273656616210937, 0.727741455078125, 0.7269683227539062, 0.7274475708007813, 0.727498779296875, 0.7274158325195312, 0.7276226806640625, 0.7277803344726562, 0.7272796020507812, 0.7277967529296875, 0.7278120727539062, 0.7281182861328125, 0.7282227172851562, 0.729248779296875, 0.7279073486328125, 0.727394287109375, 0.7274772338867187, 0.7273768920898438, 0.7279462280273438, 0.7280506591796875, 0.7277485961914063, 0.7276800537109375, 0.7273492431640625, 0.7277352905273438, 0.7280148315429688, 0.7277557983398437, 0.7279503173828125, 0.72796875, 0.727677978515625, 0.7273717651367188, 0.7274014892578125, 0.7277393798828125, 0.7273564453125, 0.7273922729492187, 0.7276431274414062, 0.7273267211914063, 0.727183349609375, 0.7276964111328125, 0.7270574340820313, 0.7279892578125, 0.7274506225585937, 0.7283281860351563, 0.7276236572265625, 0.7274864501953126, 0.7277271118164063, 0.7274741821289062, 0.7276431274414062, 0.727984130859375, 0.7277352905273438, 0.727568359375, 0.7290419311523437, 0.7279063110351562, 0.7274669799804687, 0.7279380493164063, 0.7273656616210937, 0.727736328125, 1.4986966552734375, 0.7277783203125, 0.7279697875976563, 0.7273380126953125, 0.7279759521484375, 0.7274772338867187, 0.7276113891601562, 0.72751513671875, 0.727520263671875, 0.7276339111328125, 0.7279185791015625, 0.7278212890625, 0.7281500244140625, 0.7277086791992188, 0.7276032104492187, 0.7277926635742188, 0.7276728515625, 0.7275827026367188, 0.7275140991210938, 0.7280179443359375, 0.7281879272460937, 0.7278212890625, 0.727699462890625, 0.72736767578125, 0.7276728515625, 0.7279083251953125, 0.7273850708007813, 0.7275069580078125, 0.7274024658203125, 0.7273922729492187, 0.7274700927734375, 0.7275847778320312, 0.7282288818359375, 0.7278090209960938, 0.7279667358398437, 0.7283240966796874, 0.7274004516601562, 0.7275899047851563, 0.729038818359375, 0.728173583984375, 0.727530517578125, 0.7272581176757813, 0.727572509765625, 0.7277127685546875, 0.72736767578125, 0.7276503295898438, 0.7283455810546875, 0.72766259765625, 0.7285360717773437, 0.7277281494140625, 0.72745166015625, 0.7276943359375, 0.7278653564453125, 0.7278253784179688, 0.7273615112304688, 0.7273717651367188, 0.7274547119140625, 0.7275899047851563, 0.7274557495117188, 0.7279779663085938, 0.7282565307617187, 0.728158203125, 0.7288606567382813, 1.500291015625, 0.7285186767578125, 0.7282554931640625, 0.727962646484375, 0.72707275390625, 0.7270717163085938, 0.7278837890625, 0.7273738403320312, 0.7277557983398437, 0.7276339111328125, 0.728394775390625, 0.7282606201171875, 0.728369140625, 0.727878662109375, 0.7280169067382812, 0.727794677734375, 0.7282175903320313, 0.7275038452148438, 0.7273502807617187, 0.7271946411132812, 0.7275499267578125, 0.7277998046875, 0.7273502807617187, 0.7283394775390625, 0.7281551513671874, 0.7276728515625, 0.729017333984375, 0.7274383544921875, 0.7284019165039063, 0.7275847778320312, 0.727709716796875, 0.7274024658203125, 0.7274024658203125, 0.7272263793945313, 0.7275448608398437, 0.727203857421875, 0.7272734985351562, 0.7281172485351562, 0.72766259765625, 0.7277854614257813, 0.7278192749023438, 0.7277168579101563, 0.7283128051757812, 0.7274014892578125, 0.7279892578125, 0.7273103637695313, 0.7277905883789062, 0.72785302734375, 0.7278776245117188, 0.727984130859375, 0.7275591430664062, 0.7280732421875, 0.728015869140625, 0.7281817626953125, 0.7277322387695313, 0.72749365234375, 0.7281449584960937, 
0.7275447387695313, 0.7287470092773437, 0.727857177734375, 0.727446533203125, 0.7272703857421875, 0.7274813232421875, 1.5015628662109375, 0.7280670776367187, 0.7274475708007813, 0.7279002075195312, 0.7276564331054688, 0.727930908203125, 0.7274649658203125, 0.7278028564453125, 0.7277875366210937, 0.7275980834960938, 0.7279329223632812, 0.7276553955078126, 0.7276932983398438, 0.7277240600585938, 0.7281275024414062, 0.7278305053710937, 0.7283988647460937, 0.727973876953125, 0.7275120849609376, 0.7283138427734375, 0.7280885620117188, 0.728431640625, 0.7279882202148438, 0.7285555419921875, 0.727773193359375, 0.7274137573242188, 0.727572509765625, 0.7277168579101563, 0.7279124755859375, 0.7280169067382812, 0.7274721069335938, 0.7271577758789063, 0.7273502807617187, 0.7277578125, 0.72791552734375, 0.7280302124023438, 0.7272263793945313, 0.727066650390625, 0.7275479125976563, 0.7274383544921875, 0.727099365234375, 0.7272929077148438, 0.7277578125, 0.728215576171875, 0.727783447265625, 0.7276656494140625, 0.72743115234375, 0.7274690551757812, 0.7273277587890625, 0.7281571655273438, 0.7275397338867188, 0.727414794921875, 0.7275867919921875, 0.7271116943359375, 0.727257080078125, 0.727667724609375, 0.7276851196289063, 0.7272703857421875, 0.7279493408203125, 0.7277250366210938, 0.7286610107421875, 0.7273246459960937, 0.7279380493164063, 1.50266162109375, 0.72736767578125, 0.7276441650390625, 0.727141357421875, 0.7274188842773438, 0.7278018798828125, 0.727625732421875, 0.727541748046875, 0.7280783081054687, 0.7277342529296875, 0.72876953125, 0.728369140625, 0.7288411865234375, 0.7279749145507812, 0.7283046264648437, 0.72797900390625, 0.7282718505859375, 0.7281930541992188, 0.728658935546875, 0.7289108276367188, 0.7287183227539062, 0.7283169555664063, 0.7286814575195313, 0.7278428344726563, 0.7285463256835938, 0.72789404296875, 0.72875830078125, 0.72804248046875, 0.7291146240234375, 0.7289763793945313, 0.7286661376953125, 0.728784912109375, 0.728363037109375, 0.7279595336914062, 0.7274700927734375, 0.7275233154296875, 0.7272816772460937, 0.7279974365234375, 0.7284090576171875, 0.72762060546875, 0.728300537109375, 0.727457763671875, 0.7274711303710938, 0.727183349609375, 0.7278960571289063, 0.7272171630859375, 0.7289682006835938, 0.7285933837890625, 0.7276564331054688, 0.7285442504882812, 0.7269273681640624, 0.72795751953125, 0.7275642700195313, 0.7277936401367188, 0.7290480346679687, 0.72764208984375, 0.727804931640625, 0.7280322265625, 0.7278858032226563, 0.727383056640625, 0.7281940307617187, 0.7273421020507812, 0.72789404296875, 1.502066650390625, 0.7280179443359375, 0.7290101928710937, 0.7292303466796874, 0.7280660400390625, 0.728395751953125, 0.72835791015625, 0.72821142578125, 0.7285330200195312, 0.7277250366210938, 0.728616943359375, 0.7284172973632812, 0.7285718994140625, 0.728131591796875, 0.7278837890625, 0.7286558837890625, 0.7279124755859375, 0.7291678466796875, 0.728326171875, 0.7273584594726562, 0.7271588134765625, 0.7275980834960938, 0.7273738403320312, 0.7279912719726562, 0.72772607421875, 0.7297515258789062, 0.7277824096679687, 0.7272632446289062, 0.7278909301757812, 0.7273318481445312, 0.7274424438476562, 0.7272509155273438, 0.7275796508789063, 0.7279483032226562, 0.7278059692382812, 0.7278919677734375, 0.7276881713867187, 0.7284940795898438, 0.7285985107421875, 0.7283128051757812, 0.7282175903320313, 0.7282974853515625, 0.728426513671875, 0.7279165649414062, 0.7284940795898438, 0.7287070922851563, 0.7288955078125, 0.7288780517578125, 0.7277035522460937, 0.72800048828125, 
0.72765234375, 0.728056884765625, 0.7274495849609375, 0.7280240478515625, 0.727583740234375, 0.727520263671875, 0.7282698364257812, 0.7273543701171875, 0.7287091064453125, 0.7278960571289063, 0.7277035522460937, 0.7276973876953124, 0.727141357421875]",tokens/s,1.3533475452727455,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1469, in _autoset_attn_implementation cls._check_and_enable_flash_attn_2( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1560, in _check_and_enable_flash_attn_2 raise ValueError( ValueError: XGLMForCausalLM does not support Flash Attention 2.0 yet. 
Please request to add support where the model is hosted, on its model hub page: https://huggingface.co//tmp/tmpqd8xlz04/no_weights_model/discussions/new or in the Transformers GitHub repo: https://github.com/huggingface/transformers/issues/new ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1312.935936,1023.934464,0.0,377.48736,290.348032,s,10,0.6976366119384766,0.06976366119384766,0.0018590846735664541,0.0696058578491211,0.07176354598999024,0.07230553245544434,0.07273912162780762,"[0.07284751892089844, 0.06800761413574219, 0.06769139099121094, 0.06779033660888673, 0.07104889678955079, 0.07164310455322266, 0.07125389099121093, 0.06824649810791016, 0.07096521759033203, 0.06814214324951172]",tokens/s,3669.5321836488733,kWh,8.134236630148721e-07,4.4571909736688417e-07,2.1182893658638067e-06,3.377432126245563e-06,tokens/kWh,75797230.09402885,MB,1312.935936,1023.934464,0.0,377.48736,337.281536,s,10,43.73100244140625,4.373100244140625,0.07142248616314475,4.400765380859375,4.4402288085937505,4.445402490234375,4.449541435546875,"[4.35173583984375, 4.25024462890625, 4.26398681640625, 4.30877587890625, 4.41468994140625, 4.4390791015625, 4.43881396484375, 4.42625927734375, 4.3868408203125, 4.450576171875]",tokens/s,14.40625562709468,kWh,5.082961926562783e-05,2.7857636841584544e-05,0.0001254377571233551,0.00020412501323056748,tokens/kWh,308634.39518232364,,s,629,44.28504171752928,0.07040547172898139,0.008395184351578173,0.07026687622070313,0.07091015625,0.07132590332031251,0.13567237548828126,"[0.0723609619140625, 0.07140863800048829, 0.07176195526123047, 0.07163388824462891, 0.07094169616699218, 0.07094783782958984, 0.07148441314697265, 0.07121202850341797, 0.07107174682617187, 0.07111577606201172, 0.07114342498779297, 0.07076761627197266, 0.07108198547363281, 0.07081574249267578, 0.07077072143554687, 0.07092835235595703, 0.07097344207763671, 0.0714598388671875, 0.07095500946044922, 0.07107481384277343, 0.07127347564697266, 0.07087923431396484, 0.07049420928955077, 0.07076659393310547, 0.07170150756835937, 0.071552001953125, 0.0690360336303711, 0.06716006469726563, 0.06747545623779297, 0.0673433609008789, 0.06707405090332032, 0.06747135925292969, 0.06733312225341796, 0.06719999694824219, 0.06728806304931641, 0.0673095703125, 0.06710784149169922, 0.06706175994873047, 0.0676126708984375, 0.06733926391601562, 0.06688256072998047, 0.06733414459228515, 0.06721126556396484, 0.0674703369140625, 0.06733926391601562, 0.06750822448730469, 0.06778880310058594, 0.06814105224609375, 0.0675225601196289, 0.06693376159667969, 0.06731980895996094, 0.06751334381103516, 0.07044198608398437, 0.0684615707397461, 0.0671098861694336, 0.06701465606689454, 0.06718463897705078, 0.06769561767578125, 0.06716422271728516, 0.06652819061279297, 0.06804582214355469, 0.06770995330810547, 0.13573631286621093, 0.0672204818725586, 
0.06833356475830078, 0.06807555389404298, 0.06771196746826172, 0.06805197143554688, 0.06715392303466797, 0.06708428955078125, 0.06746112060546874, 0.06681702423095703, 0.06697062683105469, 0.06744166564941406, 0.06732288360595703, 0.06716006469726563, 0.06725325012207031, 0.06715602874755859, 0.06704019165039063, 0.06741094207763672, 0.06709555053710937, 0.06650572967529297, 0.06745292663574219, 0.06664704132080078, 0.06701261138916016, 0.06722150421142578, 0.06713855743408204, 0.0671098861694336, 0.06744268798828125, 0.06728704071044922, 0.0696258544921875, 0.06788813018798828, 0.06791372680664062, 0.06849024200439453, 0.06736895751953124, 0.06989209747314454, 0.06821990203857421, 0.06771097564697266, 0.06738432312011719, 0.06751948547363282, 0.06726348876953125, 0.06710784149169922, 0.06749183654785157, 0.06747545623779297, 0.06864179229736328, 0.0675563507080078, 0.06683135986328125, 0.06710681915283204, 0.06729011535644532, 0.06674534606933594, 0.06728089904785156, 0.06722354888916016, 0.06724607849121093, 0.06693990325927734, 0.0674703369140625, 0.06723481750488282, 0.06712223815917968, 0.06780818939208984, 0.06794751739501953, 0.06755840301513671, 0.0675758056640625, 0.06719692993164063, 0.0672573471069336, 0.06723481750488282, 0.06736589050292968, 0.13550796508789062, 0.06692147064208985, 0.06734950256347656, 0.07442227172851562, 0.0679557113647461, 0.06727680206298828, 0.06684569549560547, 0.06775091552734375, 0.06815436553955079, 0.06903807830810547, 0.06718975830078125, 0.06693376159667969, 0.06734031677246094, 0.06811030578613281, 0.06972621154785157, 0.06792601776123047, 0.06736486053466798, 0.06915481567382813, 0.06836736297607422, 0.06893875122070313, 0.07033753967285156, 0.06752665710449218, 0.0672194595336914, 0.06725325012207031, 0.06720822143554687, 0.06680572509765625, 0.06687026977539062, 0.0672143325805664, 0.06730445098876953, 0.06715904235839844, 0.06759014129638671, 0.06702899169921875, 0.06747443389892578, 0.06825984191894531, 0.0673280029296875, 0.06705152130126953, 0.06734130859375, 0.06769664001464844, 0.06806425476074218, 0.06706380462646484, 0.06730137634277343, 0.0671825942993164, 0.0676157455444336, 0.06711398315429687, 0.06727577972412109, 0.06933606719970703, 0.06894182586669922, 0.06846259307861328, 0.06729523468017579, 0.06725836944580078, 0.06733312225341796, 0.0672511978149414, 0.0672890853881836, 0.06693376159667969, 0.0676341781616211, 0.06666957092285156, 0.06659993743896485, 0.0665722885131836, 0.0667514877319336, 0.0672368621826172, 0.06741712188720703, 0.06712726593017578, 0.06705049896240234, 0.1353912353515625, 0.0671488037109375, 0.06749900817871093, 0.0674703369140625, 0.06737715148925781, 0.0673064956665039, 0.06735257720947266, 0.06739974212646484, 0.0676075210571289, 0.07006716918945312, 0.06824960327148437, 0.06842777252197266, 0.06737100982666015, 0.06794239807128906, 0.06855276489257812, 0.0674927978515625, 0.06724813079833984, 0.06728294372558594, 0.06814617919921875, 0.06813286590576172, 0.06723583984375, 0.06725222778320313, 0.06753689575195312, 0.06841139221191406, 0.06750617980957031, 0.06819737243652343, 0.06738438415527344, 0.06882195281982421, 0.06757997131347657, 0.06839907073974609, 0.06708633422851562, 0.06791574096679688, 0.06755020904541016, 0.06742527770996094, 0.06748569488525391, 0.066587646484375, 0.06756761932373047, 0.06675154876708984, 0.06733510589599609, 0.06650163269042969, 0.06747647857666016, 0.06739250946044922, 0.06733824157714843, 0.07072870635986328, 0.07021260833740234, 0.07095807647705078, 
0.07044915008544922, 0.07039590454101563, 0.07007437133789063, 0.06753177642822265, 0.06737203216552734, 0.07054438018798828, 0.06973747253417968, 0.07040415954589843, 0.07051052856445313, 0.0710830078125, 0.0708136978149414, 0.06919782257080079, 0.06926131439208984, 0.06997299194335938, 0.07050035095214843, 0.0701286392211914, 0.07048499298095703, 0.14131712341308594, 0.06722457885742188, 0.06749183654785157, 0.06760550689697266, 0.07033446502685547, 0.07015731048583984, 0.07011225891113282, 0.06978765106201172, 0.0705269775390625, 0.06983372497558593, 0.06974156951904296, 0.07031603240966797, 0.07043583679199218, 0.0705802230834961, 0.07040614318847656, 0.07013069152832031, 0.0699504623413086, 0.07051058959960937, 0.07024332427978516, 0.06977126312255859, 0.06987673950195313, 0.07046553802490234, 0.07214806365966797, 0.07040809631347657, 0.06932179260253907, 0.07022994995117188, 0.07043071746826172, 0.07018402862548828, 0.07030262756347656, 0.07083622741699219, 0.07088333129882812, 0.06744166564941406, 0.06735155487060547, 0.06759731292724609, 0.06742733001708984, 0.0693780517578125, 0.07071334075927735, 0.07068876647949218, 0.07128985595703125, 0.07055465698242187, 0.07048291015625, 0.07026483154296875, 0.07063346862792969, 0.07024947357177734, 0.0704563217163086, 0.07028530883789062, 0.0700212173461914, 0.07084226989746094, 0.0705269775390625, 0.07041228485107422, 0.07060889434814453, 0.07039590454101563, 0.07050342559814453, 0.0704686050415039, 0.07056486511230468, 0.07071641540527343, 0.07057920074462891, 0.07043276977539062, 0.07038159942626954, 0.07048802947998047, 0.06970470428466796, 0.07084134674072265, 0.07183257293701172, 0.14264422607421876, 0.07059661102294922, 0.0704337921142578, 0.07088127899169921, 0.07030681610107421, 0.07067340850830078, 0.07019110107421875, 0.06985215759277344, 0.07026483154296875, 0.07035391998291016, 0.0703815689086914, 0.07054847717285156, 0.07035903930664063, 0.07019519805908203, 0.0714567642211914, 0.0703662109375, 0.07029964447021485, 0.07025766754150391, 0.07012351989746093, 0.07053107452392578, 0.07036518096923829, 0.07038873291015625, 0.07040204620361327, 0.07045836639404297, 0.07014093017578125, 0.07181926727294922, 0.07061196899414063, 0.07031705474853515, 0.07053209686279296, 0.07042457580566407, 0.07070719909667969, 0.07003033447265625, 0.0699525146484375, 0.07056486511230468, 0.07099187469482422, 0.07034060668945312, 0.07056281280517578, 0.07041228485107422, 0.070940673828125, 0.07032319641113281, 0.0704901123046875, 0.07044915008544922, 0.07032627105712891, 0.07004057312011719, 0.07051776123046875, 0.07030169677734376, 0.07029043579101563, 0.07063859558105469, 0.07095500946044922, 0.07047993469238281, 0.0705125732421875, 0.07045734405517579, 0.07045222473144531, 0.07037542724609375, 0.07044096374511719, 0.0705638427734375, 0.07061196899414063, 0.07037747192382812, 0.07041024017333984, 0.06980403137207031, 0.07040614318847656, 0.07026585388183594, 0.07064780426025391, 0.14216192626953125, 0.07040102386474609, 0.07040819549560547, 0.07026483154296875, 0.07061504364013672, 0.07052496337890625, 0.07054332733154296, 0.07030989074707031, 0.07011020660400391, 0.07049727630615234, 0.07108812713623047, 0.07056179046630859, 0.07050444793701172, 0.07025971221923828, 0.07032217407226563, 0.07038566589355469, 0.07023513793945313, 0.07034368133544922, 0.07049625396728515, 0.07012556457519531, 0.07023721313476562, 0.06932579040527344, 0.07020543670654297, 0.07085977935791016, 0.07015219116210937, 0.07016242980957031, 0.07007129669189453, 
0.07072870635986328, 0.07037542724609375, 0.07045836639404297, 0.07033446502685547, 0.07036313629150391, 0.07039180755615235, 0.07009587097167969, 0.07063346862792969, 0.0701839370727539, 0.07108812713623047, 0.07090892791748046, 0.07049727630615234, 0.07038259124755859, 0.07044403076171875, 0.0706519012451172, 0.070761474609375, 0.0694466552734375, 0.0697528305053711, 0.07026380920410157, 0.07064268493652344, 0.0705433578491211, 0.0703477783203125, 0.07133491516113281, 0.0704368667602539, 0.07131238555908204, 0.07170355224609375, 0.0705771484375, 0.07050752258300781, 0.07046451568603515, 0.07079840087890625, 0.07022892761230469, 0.07035084533691406, 0.07033344268798829, 0.0702402572631836, 0.07078604888916015, 0.07056896209716797, 0.13596263122558594, 0.06696959686279297, 0.06721331024169921, 0.06699417877197265, 0.06940467071533203, 0.07078604888916015, 0.07069593811035156, 0.07035187530517578, 0.07053107452392578, 0.07046246337890626, 0.0706344985961914, 0.07024642944335938, 0.07162467193603515, 0.07080652618408204, 0.07045529937744141, 0.07055359649658204, 0.0706519012451172, 0.07040102386474609, 0.06990335845947265, 0.07026080322265625, 0.07056377410888671, 0.07085670471191406, 0.06976921844482421, 0.07021260833740234, 0.06979583740234375, 0.07011634826660157, 0.0700426254272461, 0.07040716552734375, 0.07058329772949219, 0.0699658203125, 0.07026892852783204, 0.0703272933959961, 0.07030989074707031, 0.07069593811035156, 0.07077683258056641, 0.07038259124755859, 0.07024230194091798, 0.07033548736572266, 0.07012454223632812, 0.07299378967285156, 0.07067033386230469, 0.07059865570068359, 0.07034368133544922, 0.07035699462890625, 0.07052902221679687, 0.070687744140625, 0.06977433776855468, 0.07029862213134766, 0.0704901123046875, 0.07047270202636718, 0.07048191833496094, 0.07052799987792968, 0.0706170883178711, 0.07022182464599609, 0.0703272933959961, 0.070255615234375, 0.07052082824707032, 0.07059967803955078, 0.07068160247802735, 0.0705054702758789, 0.07004364776611328, 0.07048089599609375, 0.07136255645751953, 0.14220700073242187, 0.07030985260009766, 0.07026892852783204, 0.0702033920288086, 0.07120588684082031, 0.07035289764404297, 0.07038361358642578, 0.07045222473144531, 0.070181884765625, 0.07042969512939454, 0.07047577667236328, 0.07076150512695313, 0.07077168273925781, 0.07013990020751953, 0.07032422637939453, 0.07050752258300781, 0.07027200317382812, 0.07128268432617188, 0.070614013671875, 0.07047475433349609, 0.07060889434814453, 0.07019827270507813, 0.07051264190673828, 0.07065702056884765, 0.07134515380859376, 0.07106253051757813, 0.07054950714111329, 0.07042253112792969, 0.07038873291015625, 0.0702740478515625, 0.07026687622070313, 0.06861004638671875, 0.0671272964477539, 0.06711507415771484, 0.0671866226196289, 0.06729523468017579, 0.06710169219970703, 0.06714166259765625, 0.06752764892578125, 0.06734130859375, 0.06720921325683593, 0.06747135925292969, 0.06729933166503907, 0.0674150390625, 0.06712422180175781, 0.06744882965087891, 0.06737612915039062, 0.06713958740234376, 0.06726656341552735, 0.0685823974609375, 0.07029043579101563, 0.07070515441894532, 0.0720742416381836, 0.07064166259765625, 0.07065087890625, 0.07042559814453125, 0.07116806030273437, 0.07063648223876953, 0.07048703765869141, 0.07075430297851562, 0.07056896209716797, 0.07044608306884766, 0.07033036804199219, 0.13839053344726562, 0.07068978881835937, 0.07101030731201172, 0.07077279663085938, 0.07043782043457031, 0.070614013671875, 0.07037542724609375, 0.07067443084716797, 0.070181884765625, 
0.07043788909912109, 0.07035391998291016, 0.0702003173828125, 0.07099903869628907, 0.07056690979003906, 0.07074406433105469, 0.07023308563232422, 0.07023616027832032, 0.0702208023071289, 0.070150146484375, 0.0707799072265625, 0.07061199951171875, 0.0703927993774414, 0.07016448211669922, 0.07032115173339844, 0.07010406494140625, 0.07089151763916016, 0.07061913299560547, 0.07181926727294922, 0.07085977935791016, 0.07089663696289063, 0.07025459289550781, 0.07071952056884766, 0.07063139343261719, 0.07043276977539062, 0.07035391998291016, 0.07057920074462891, 0.07042867279052735, 0.07073075103759766, 0.07066726684570312, 0.07058124542236328, 0.07049215698242188, 0.07038668823242188, 0.07035903930664063, 0.07047987365722656, 0.07088025665283203, 0.07053414154052734, 0.07050342559814453, 0.07065395355224609, 0.07077375793457032, 0.07083213043212891, 0.070761474609375, 0.07080242919921875, 0.07068364715576173, 0.0717496337890625, 0.07135539245605468, 0.07119155120849609, 0.07112703704833985, 0.07091506958007812, 0.07129190063476562, 0.07068876647949218, 0.07067545318603516, 0.07053619384765625, 0.07090688323974609]",tokens/s,14.20344151445213,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,2034.978816,5539.10272,0.0,4892.655616,4542.610432,s,10,5.622600402832031,0.5622600402832031,0.0013230028627895694,0.5620163879394531,0.5633294921875001,0.564493603515625,0.565424892578125,"[0.56565771484375, 0.5620866088867188, 0.5614910888671875, 0.5609406127929687, 0.5618070678710938, 0.5607069091796875, 0.5619461669921875, 0.5621692504882813, 0.5627241821289063, 0.56307080078125]",tokens/s,455.30534211724546,kWh,6.621943920114895e-06,3.6285156736691226e-06,3.075088879823785e-05,4.100134839202187e-05,tokens/kWh,6243697.098747441,MB,2035.28192,5539.10272,0.0,4892.655616,4726.279168,s,10,331.3280078125,33.13280078125,0.008409894982587748,33.12982421875,33.142355468750004,33.145203125,33.14748125,"[33.14805078125, 33.124625, 33.1231640625, 33.1260859375, 33.14172265625, 33.1400703125, 33.12555859375, 33.12609375, 33.13908203125, 33.1335546875]",tokens/s,1.9014390125344305,kWh,0.00039121345305516403,0.00021441890828907464,0.0018001254370127661,0.0024057577983570048,tokens/kWh,26187.174803309546,,s,629,335.85907128906234,0.5339571880589229,0.06674281532897068,0.5258792724609375,0.5263243408203125,0.5265016845703125,1.0872363134765626,"[0.5258577880859375, 0.5256417236328125, 0.5264219970703125, 0.5263196411132812, 0.5257113647460937, 0.5258464965820312, 0.525486083984375, 0.5262151489257813, 0.5262940063476562, 0.52614453125, 0.5258035278320312, 0.52607080078125, 0.5257379760742188, 0.5260421142578126, 0.5259489135742188, 0.526060546875, 0.5258331909179688, 0.5260062866210937, 0.5258424072265625, 0.52600732421875, 0.5258045654296875, 0.5261404418945312, 0.5261957397460938, 0.5260748901367187, 0.5259468994140625, 0.5261731567382812, 0.5258363037109375, 0.5262284545898438, 
0.5258250122070313, 0.5260421142578126, 0.526023681640625, 0.526244873046875, 0.52615576171875, 0.5261332397460937, 0.52611376953125, 0.5259898681640625, 0.526482421875, 0.526271484375, 0.5264271240234375, 0.5263646850585938, 0.5265366821289063, 0.52632373046875, 0.5263370361328125, 0.5259735107421875, 0.5261414184570312, 0.526107666015625, 0.5262571411132813, 0.5263308715820313, 0.5263267822265625, 0.5261199340820313, 0.52626025390625, 0.5261434936523437, 0.526482421875, 0.5264199829101562, 0.5262622680664063, 0.5264076538085938, 0.5264937133789063, 0.5264998168945313, 0.5264230346679688, 0.5268643798828125, 0.5268623657226562, 0.526497802734375, 1.0878065185546875, 0.5256908569335937, 0.5257564086914063, 0.5259059448242187, 0.5258055419921875, 0.5255567626953125, 0.52566015625, 0.5258875122070312, 0.526002197265625, 0.5259949951171875, 0.5256663208007812, 0.5261107177734375, 0.5259857788085938, 0.5258189086914062, 0.525576171875, 0.525970458984375, 0.5256294555664063, 0.5262294921875, 0.5257728271484375, 0.5257666625976563, 0.525465576171875, 0.5258946533203125, 0.5259468994140625, 0.5260144653320312, 0.5255526123046875, 0.5257932739257812, 0.5255004272460938, 0.5259530029296875, 0.52562841796875, 0.5257584838867188, 0.5257738037109375, 0.525844482421875, 0.5255690307617188, 0.5256724243164063, 0.5258956909179687, 0.5257461547851563, 0.5255751953125, 0.5257482299804688, 0.5255536499023438, 0.5257677001953125, 0.5256314697265625, 0.5257533569335937, 0.5254666137695313, 0.52566015625, 0.5253980102539062, 0.5257482299804688, 0.5259049072265625, 0.5260809936523437, 0.5258383178710937, 0.5262018432617187, 0.5256294555664063, 0.525750244140625, 0.525849609375, 0.5261281127929688, 0.5256796264648438, 0.5259478759765625, 0.5256181640625, 0.5258219604492187, 0.5256539916992188, 0.525886474609375, 0.5259243774414063, 0.5260851440429688, 0.5258331909179688, 1.087382568359375, 0.526060546875, 0.5257789306640624, 0.5258936157226562, 0.5258137817382813, 0.5256263427734374, 0.525675537109375, 0.5257164916992187, 0.525739013671875, 0.525718505859375, 0.5260646362304687, 0.5256406860351562, 0.5256908569335937, 0.5256744995117187, 0.525433837890625, 0.525717529296875, 0.5254287109375, 0.5256908569335937, 0.5254031372070312, 0.5255137329101562, 0.5256263427734374, 0.525770751953125, 0.5254891357421875, 0.5259683837890625, 0.5255454711914063, 0.5256990966796875, 0.5255372924804688, 0.5261035766601563, 0.5256632080078125, 0.5258690795898437, 0.5257625732421874, 0.525728759765625, 0.5260155029296875, 0.5258189086914062, 0.5256539916992188, 0.5256417236328125, 0.5256273803710938, 0.5258055419921875, 0.5257083129882812, 0.525971435546875, 0.5258916015625, 0.52600830078125, 0.5256673583984375, 0.5257686767578125, 0.5254942626953125, 0.52560791015625, 0.5257482299804688, 0.5258803100585937, 0.5259755249023438, 0.5260676879882813, 0.5256151123046875, 0.5258536987304687, 0.5258526611328125, 0.5258741455078125, 0.5255966796875, 0.5259386596679687, 0.5257666625976563, 0.5259059448242187, 0.5259223022460937, 0.5260523681640625, 0.52583935546875, 0.5260933227539063, 0.5256048583984375, 1.0868602294921874, 0.5259120483398437, 0.5255465087890625, 0.5258884887695312, 0.5255741577148437, 0.525613037109375, 0.5256007690429687, 0.5258076171875, 0.5256734619140625, 0.5257328491210937, 0.5254573974609374, 0.525791259765625, 0.525454345703125, 0.5258527221679687, 0.5255577392578125, 0.525912109375, 0.5255772094726563, 0.525727783203125, 0.5255833129882812, 0.5263431396484375, 0.5257984008789063, 0.525792236328125, 
0.525896728515625, 0.52569189453125, 0.5255977172851563, 0.52600732421875, 0.525549560546875, 0.5257738037109375, 0.5256058959960938, 0.5258147583007813, 0.52556494140625, 0.5260534057617188, 0.5259837646484375, 0.5260646362304687, 0.5258803100585937, 0.5260369873046875, 0.5255034790039063, 0.5258485717773438, 0.5255608520507813, 0.52600830078125, 0.52562841796875, 0.52608203125, 0.5257349243164062, 0.5257574462890625, 0.5256406860351562, 0.5257717895507813, 0.5257625732421874, 0.5258875122070312, 0.525686767578125, 0.5260492553710937, 0.5258639526367187, 0.5261209716796875, 0.5259990844726562, 0.5260728149414062, 0.5258884887695312, 0.5258946533203125, 0.52564990234375, 0.5258731689453126, 0.5256837158203125, 0.5259202270507812, 0.5259827270507812, 0.5264097290039063, 0.526213134765625, 1.0886195068359374, 0.5266964721679688, 0.5265223388671875, 0.52691455078125, 0.5266104125976563, 0.5267548217773438, 0.5260175170898438, 0.5263544311523437, 0.5261270751953125, 0.5263964233398437, 0.5262694702148437, 0.5260482788085937, 0.5259909057617187, 0.5260492553710937, 0.5259929809570313, 0.5264066772460938, 0.525787109375, 0.5257963256835938, 0.5255126953125, 0.5257431030273437, 0.5255669555664062, 0.5256345825195312, 0.5254779052734375, 0.5258198852539062, 0.5258219604492187, 0.5260687255859375, 0.5260800170898438, 0.525802490234375, 0.5258506469726563, 0.525781005859375, 0.5256857299804687, 0.5258485717773438, 0.52577587890625, 0.5258373413085937, 0.5261895751953125, 0.52682958984375, 0.526614501953125, 0.52650390625, 0.5255485229492187, 0.5258424072265625, 0.525549560546875, 0.5258352661132812, 0.5255639038085937, 0.525822998046875, 0.5257195434570312, 0.525739013671875, 0.5257267456054687, 0.5263533935546875, 0.5261547241210938, 0.5263810424804688, 0.5262264404296875, 0.526551025390625, 0.5261895751953125, 0.5266165771484375, 0.5261998291015625, 0.5261404418945312, 0.5261486206054687, 0.5264189453125, 0.52577587890625, 0.5259304809570312, 0.5256898803710938, 0.5258782958984375, 0.5257636108398438, 1.08657666015625, 0.5256980590820313, 0.52586083984375, 0.5259304809570312, 0.5256939697265625, 0.5258782958984375, 0.5258782958984375, 0.52608203125, 0.5260462036132812, 0.5260513305664063, 0.525549560546875, 0.5258875122070312, 0.5257984008789063, 0.5262018432617187, 0.5257328491210937, 0.5260298461914062, 0.5255608520507813, 0.5259683837890625, 0.52613427734375, 0.52609228515625, 0.5256028442382813, 0.52607080078125, 0.5256304931640625, 0.5257891845703125, 0.525643798828125, 0.5260062866210937, 0.5256611938476563, 0.5261844482421875, 0.5262510375976562, 0.5261025390625, 0.5262673950195312, 0.5262622680664063, 0.5260779418945313, 0.5261721801757813, 0.5261353149414062, 0.5261588745117187, 0.5259642944335937, 0.52661865234375, 0.52625, 0.5263790283203125, 0.5262643432617188, 0.5265131225585937, 0.5259069213867188, 0.526286865234375, 0.5260626220703125, 0.5262888793945313, 0.52619775390625, 0.526224365234375, 0.5260155029296875, 0.526613525390625, 0.526581787109375, 0.5259366455078125, 0.5256642456054688, 0.5257686767578125, 0.5256632080078125, 0.5258198852539062, 0.5258588256835938, 0.5261209716796875, 0.525928466796875, 0.5265029296875, 0.5259878540039062, 0.5262069702148438, 0.5264834594726563, 1.088301025390625, 0.5262653198242188, 0.5256724243164063, 0.5255659790039062, 0.525475830078125, 0.5257636108398438, 0.5255300903320312, 0.5257246704101562, 0.5256058959960938, 0.5256345825195312, 0.5254676513671875, 0.5258137817382813, 0.5256581420898437, 0.5258731689453126, 0.526055419921875, 
0.526497802734375, 0.5257297973632813, 0.5260103759765625, 0.5256406860351562, 0.5259428100585938, 0.5259376831054687, 0.5261486206054687, 0.5255321655273437, 0.5259735107421875, 0.5258168334960938, 0.52605029296875, 0.5256571044921875, 0.5257420654296875, 0.5257471923828125, 0.525717529296875, 0.525749267578125, 0.5258168334960938, 0.5255885009765625, 0.5259366455078125, 0.5256325073242187, 0.5258721313476562, 0.5255454711914063, 0.5259120483398437, 0.5255874633789063, 0.5258823852539063, 0.5260584716796874, 0.5260093383789063, 0.525638671875, 0.5257984008789063, 0.5254819946289062, 0.5257584838867188, 0.5255413818359375, 0.5256263427734374, 0.5254993896484375, 0.5260482788085937, 0.5255536499023438, 0.5256744995117187, 0.5257471923828125, 0.5260912475585937, 0.5257963256835938, 0.526045166015625, 0.5258997802734375, 0.5258854370117187, 0.5258956909179687, 0.526097412109375, 0.5258956909179687, 0.52596630859375, 0.525486083984375, 1.0877613525390626, 0.525769775390625, 0.5254829711914063, 0.5259612426757813, 0.525929443359375, 0.5262827758789063, 0.5257748413085938, 0.5258516235351562, 0.5254768676757813, 0.5256775512695312, 0.5254871215820313, 0.5258475341796875, 0.525770751953125, 0.5260123901367187, 0.5260103759765625, 0.52575537109375, 0.525591552734375, 0.5260431518554688, 0.5255874633789063, 0.5257769165039062, 0.52560693359375, 0.5258577880859375, 0.5256089477539062, 0.5257195434570312, 0.52571337890625, 0.5260534057617188, 0.5259540405273437, 0.526023681640625, 0.5256468505859375, 0.5257000732421875, 0.5256878051757813, 0.5258270874023437, 0.5255669555664062, 0.5259325561523438, 0.5256478881835938, 0.5259059448242187, 0.5256539916992188, 0.5259171752929688, 0.525533203125, 0.5258956909179687, 0.5256396484375, 0.5259151611328124, 0.5259089965820313, 0.5258045654296875, 0.525470703125, 0.526087158203125, 0.525570068359375, 0.526012451171875, 0.5258239135742188, 0.526266357421875, 0.5257000732421875, 0.5258916015625, 0.5262540893554688, 0.525970458984375, 0.5258168334960938, 0.5259356079101563, 0.5257216186523438, 0.5257953491210937, 0.5261250610351562, 0.5259458618164062, 0.5255300903320312, 0.5259765625, 0.5258004760742188, 1.0886134033203125, 0.52569189453125, 0.5255536499023438, 0.5258383178710937, 0.5257799682617188, 0.5257584838867188, 0.5256837158203125, 0.5262305297851563, 0.5256099853515624, 0.525781005859375, 0.5258270874023437, 0.5262387084960938, 0.5258424072265625, 0.5259765625, 0.5260114135742188, 0.5261588745117187, 0.5259622192382812, 0.5260318603515625, 0.5257083129882812, 0.526166015625, 0.5256325073242187, 0.52587109375, 0.5258260498046875, 0.5261178588867188, 0.5256458129882813, 0.525897705078125, 0.5261752319335937, 0.5257431030273437, 0.5257799682617188, 0.526298095703125, 0.5260574951171875, 0.52619775390625, 0.52611279296875, 0.526271484375, 0.5262151489257813, 0.5269708862304687, 0.5263012084960937, 0.52657666015625, 0.5264937133789063, 0.5267752685546875, 0.5261527099609375, 0.5262315673828125, 0.5261690673828125, 0.5258997802734375, 0.5257728271484375, 0.5265807495117187, 0.525681640625, 0.5258721313476562, 0.5259049072265625, 0.525970458984375, 0.525633544921875, 0.5261414184570312, 0.5260697631835938, 0.5258782958984375, 0.5258792724609375, 0.52611376953125, 0.5257523193359375, 0.5261752319335937, 0.5259192504882813, 0.52607080078125, 0.5259898681640625, 0.5263278198242187, 0.5262356567382812, 1.089292236328125, 0.5257205810546876, 0.5256959838867188, 0.5258516235351562, 0.525675537109375, 0.525707275390625, 0.525644775390625, 0.5258741455078125, 
0.5256427612304687, 0.52611376953125, 0.5256857299804687, 0.5256929321289062, 0.525865966796875, 0.5259274291992188, 0.5254942626953125, 0.52583935546875, 0.5259089965820313, 0.5260984497070312, 0.5255659790039062, 0.52596630859375, 0.5255341796875, 0.5257932739257812, 0.5256161499023437, 0.5257615356445312, 0.5255608520507813, 0.5256908569335937, 0.5256058959960938, 0.5258137817382813, 0.525822998046875, 0.5260390625, 0.5258168334960938, 0.526033935546875, 0.5262326049804688, 0.5259100341796875, 0.5258065795898438, 0.52625, 0.5258567504882813, 0.52642919921875, 0.5261015014648438, 0.525955078125, 0.5256345825195312, 0.5259990844726562, 0.5259017944335938, 0.525970458984375, 0.5258311767578125, 0.5259765625, 0.5258895263671876, 0.5263216552734375, 0.5260062866210937, 0.5263206176757812, 0.5261220092773438, 0.5258639526367187, 0.5263790283203125, 0.52634521484375, 0.52650390625, 0.5263104248046875, 0.5256294555664063, 0.5261782836914063, 0.5257164916992187, 0.526087158203125, 0.526256103515625, 0.52645068359375, 0.5261045532226563]",tokens/s,1.8728093232254581,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in 
check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,1608.957952,5448.925184,0.0,4802.47808,4489.12128,s,10,5.066485656738281,0.5066485656738281,0.0011948895817140703,0.5068220977783202,0.5079840667724609,0.5080279983520508,0.5080631436157227,"[0.5075723266601563, 0.5061219482421875, 0.504515380859375, 0.5053372497558594, 0.5061441345214843, 0.5055963745117188, 0.5075000610351562, 0.5076519470214844, 0.5080719299316406, 0.5079743041992187]",tokens/s,505.2812093912223,kWh,5.964234835571714e-06,3.2679267622643234e-06,2.7223952334698433e-05,3.6456113932534466e-05,tokens/kWh,7022141.758547072,MB,1610.637312,5448.925184,0.0,4802.47808,4557.793792,s,10,299.1010234375,29.910102343749998,0.01023537902845777,29.913578125,29.921135546875,29.9219037109375,29.9225182421875,"[29.899591796875, 29.901322265625, 29.913013671875, 29.890712890625, 29.920396484375, 29.922671875, 29.92096484375, 29.9152421875, 29.90296484375, 29.914142578125]",tokens/s,2.106311749654192,kWh,0.00035296389131082433,0.00019345485010443554,0.0015722178549955057,0.0021186365964107655,tokens/kWh,29736.104864198915,,s,629,303.18425177001956,0.4820099392210167,0.06023141623810366,0.4746004333496094,0.4757639221191406,0.4761423828125,0.98077423828125,"[0.474365966796875, 0.47431475830078124, 0.4748533630371094, 0.47396148681640626, 0.47435162353515625, 0.47414068603515624, 0.47463320922851565, 0.47518923950195313, 0.47532952880859375, 0.4745502624511719, 0.4740556945800781, 0.47391131591796876, 0.47444989013671873, 0.47430245971679685, 0.4739420166015625, 0.4737402954101563, 0.473987060546875, 0.47486770629882813, 0.4739993591308594, 0.475114501953125, 0.47402597045898437, 0.4752353210449219, 0.47403109741210936, 0.4739563598632813, 0.4750960693359375, 0.4755916748046875, 0.4742686767578125, 0.47484210205078126, 0.4754851989746094, 0.4751022033691406, 0.4755619812011719, 0.4745973815917969, 0.47523736572265624, 0.474829833984375, 0.474850341796875, 0.47421026611328126, 0.4759234619140625, 0.47534796142578123, 0.4750888977050781, 0.4741591186523437, 0.4745441284179687, 0.473744384765625, 0.4741099548339844, 0.473807861328125, 0.47429119873046877, 0.4740597839355469, 0.4742881164550781, 0.47400347900390627, 0.4744253234863281, 0.47619073486328123, 0.4757626953125, 0.4742860717773438, 0.47489535522460935, 0.47489950561523436, 0.47460345458984377, 0.4742686767578125, 0.4739921875, 0.4739420166015625, 0.47433624267578123, 0.4748810119628906, 0.4744110107421875, 0.47424615478515625, 0.980806640625, 0.4736993408203125, 0.47429733276367186, 0.4741478271484375, 0.4741099548339844, 0.47420416259765624, 0.4742000732421875, 
0.47553228759765626, 0.4755077209472656, 0.4747817077636719, 0.47485235595703124, 0.4747202453613281, 0.4752384033203125, 0.4742318115234375, 0.4740137023925781, 0.4751011962890625, 0.47453387451171875, 0.4759981994628906, 0.4747898864746094, 0.4742266845703125, 0.47406185913085935, 0.4743239440917969, 0.4744366149902344, 0.47420416259765624, 0.47499981689453125, 0.47433114624023437, 0.47446322631835935, 0.47578317260742187, 0.47489739990234375, 0.4754503784179688, 0.4746977233886719, 0.47437005615234373, 0.4743680114746094, 0.47590194702148436, 0.47595211791992187, 0.47569818115234375, 0.474777587890625, 0.474113037109375, 0.4756346740722656, 0.4741652526855469, 0.47452056884765625, 0.4742778930664063, 0.474176513671875, 0.47388363647460935, 0.473754638671875, 0.47433624267578123, 0.474029052734375, 0.47412841796875, 0.4740433654785156, 0.47558758544921875, 0.4756910400390625, 0.4747693786621094, 0.47467111206054685, 0.4742625427246094, 0.4742799377441406, 0.473807861328125, 0.474387451171875, 0.47398092651367185, 0.4740495300292969, 0.47483187866210935, 0.4742236022949219, 0.4746342468261719, 0.4739717102050781, 0.98069091796875, 0.4739993591308594, 0.4743341979980469, 0.4740321350097656, 0.4741048278808594, 0.47378021240234375, 0.4756019287109375, 0.4752998352050781, 0.47476327514648436, 0.47476531982421877, 0.47458303833007814, 0.474603515625, 0.4745164794921875, 0.47447552490234374, 0.4745482177734375, 0.47400344848632814, 0.4754913330078125, 0.47445709228515626, 0.4743076171875, 0.4742041320800781, 0.47417855834960937, 0.4741171264648438, 0.47425332641601564, 0.47636785888671873, 0.4741949462890625, 0.4747110290527344, 0.4743659973144531, 0.47549435424804687, 0.47543499755859375, 0.4749854736328125, 0.4751646728515625, 0.4754985046386719, 0.4756643981933594, 0.475114501953125, 0.4750315551757813, 0.47576882934570314, 0.47484927368164065, 0.47597467041015623, 0.4747806701660156, 0.47479296875, 0.47538177490234373, 0.4749906005859375, 0.47488409423828126, 0.4746322021484375, 0.4749168701171875, 0.47477862548828126, 0.4759582824707031, 0.47688909912109373, 0.47617330932617186, 0.47571865844726563, 0.4744898681640625, 0.4741591186523437, 0.47446939086914064, 0.4745513000488281, 0.4750899047851562, 0.4740403137207031, 0.474461181640625, 0.4741929016113281, 0.47546676635742186, 0.47404339599609374, 0.474176513671875, 0.47427685546875, 0.47412841796875, 0.979821533203125, 0.4742574157714844, 0.4741734313964844, 0.47405874633789064, 0.47411508178710937, 0.47479910278320314, 0.476015625, 0.47458816528320313, 0.4742225952148437, 0.47418679809570313, 0.4743782043457031, 0.47417138671875, 0.4748810119628906, 0.47444174194335936, 0.4738058166503906, 0.4744478759765625, 0.47413861083984377, 0.47446221923828125, 0.47416116333007813, 0.47388262939453124, 0.47408843994140626, 0.47393484497070315, 0.4740771789550781, 0.47400857543945313, 0.47406695556640627, 0.4738518981933594, 0.4751022033691406, 0.4763402099609375, 0.47449191284179687, 0.4742512512207031, 0.4740157470703125, 0.4743352355957031, 0.47394509887695313, 0.473849853515625, 0.47430963134765625, 0.4738713684082031, 0.47423590087890627, 0.4743987121582031, 0.47423590087890627, 0.4738887634277344, 0.4744407043457031, 0.4742297668457031, 0.4740741271972656, 0.47423590087890627, 0.47649996948242185, 0.4742901611328125, 0.4741949462890625, 0.47464346313476563, 0.476073974609375, 0.47489022827148436, 0.4758292541503906, 0.47497113037109373, 0.47431890869140625, 0.4741651916503906, 0.4740843505859375, 0.47435162353515625, 0.47437310791015624, 
0.4743291015625, 0.47532852172851564, 0.474313720703125, 0.474640380859375, 0.47469473266601564, 0.4747908630371094, 0.9814691772460937, 0.4747683715820312, 0.4747376708984375, 0.4743690185546875, 0.4749127807617187, 0.4760391540527344, 0.4749434814453125, 0.4753387451171875, 0.4744488830566406, 0.4743935852050781, 0.4743935852050781, 0.47374334716796873, 0.47430349731445315, 0.47411404418945313, 0.47499264526367185, 0.47437823486328123, 0.4740597839355469, 0.4755292053222656, 0.474429443359375, 0.4746004333496094, 0.47424716186523436, 0.4757176208496094, 0.474365966796875, 0.4744038391113281, 0.47409765625, 0.47461581420898435, 0.47587124633789063, 0.47467724609375, 0.47449700927734373, 0.4752076721191406, 0.47526400756835935, 0.4758815002441406, 0.4752086791992188, 0.4745318298339844, 0.4740362548828125, 0.47401980590820314, 0.474777587890625, 0.47447760009765627, 0.47450723266601563, 0.4745646057128906, 0.4748257141113281, 0.4746987609863281, 0.4746670227050781, 0.4746844177246094, 0.4750899047851562, 0.474967041015625, 0.4748011474609375, 0.47588555908203123, 0.47625112915039064, 0.4750325622558594, 0.4751790161132812, 0.47611801147460936, 0.4751523742675781, 0.4750223388671875, 0.4746065979003906, 0.4746875, 0.47461068725585936, 0.47841998291015625, 0.4753489990234375, 0.47590911865234375, 0.4750417785644531, 0.474998779296875, 0.47562240600585937, 0.9817487182617187, 0.47520870971679685, 0.4753449096679688, 0.47469158935546873, 0.4756121520996094, 0.47580160522460935, 0.47535000610351563, 0.47563983154296874, 0.47536431884765623, 0.4756459655761719, 0.4758149108886719, 0.47476327514648436, 0.47486669921875, 0.4744960021972656, 0.4750315551757813, 0.4745902099609375, 0.4752404479980469, 0.4754586181640625, 0.47481234741210937, 0.47426150512695314, 0.4745635986328125, 0.4747796630859375, 0.4742758483886719, 0.4749291381835937, 0.4753070068359375, 0.4757319641113281, 0.47559988403320314, 0.4744366149902344, 0.4746649475097656, 0.47449087524414063, 0.47499365234375, 0.47450009155273437, 0.47397579956054686, 0.47451034545898435, 0.47416116333007813, 0.4750878601074219, 0.47417752075195313, 0.47466085815429687, 0.4771686401367187, 0.4746567687988281, 0.47440997314453126, 0.4744816589355469, 0.4743618469238281, 0.47448269653320313, 0.47479705810546874, 0.47491070556640624, 0.47615179443359373, 0.4757442626953125, 0.4753827819824219, 0.4749906005859375, 0.4746219482421875, 0.4746925964355469, 0.4743485412597656, 0.4746967163085937, 0.47494552612304686, 0.4743270263671875, 0.47532647705078124, 0.47462603759765626, 0.4751585388183594, 0.4746270751953125, 0.47493734741210936, 0.47487387084960936, 0.4749823913574219, 0.9828372192382813, 0.4750807189941406, 0.4744366149902344, 0.4755558471679687, 0.47547698974609376, 0.47533465576171874, 0.4759132080078125, 0.4759879760742188, 0.47586407470703124, 0.4761640930175781, 0.4759255065917969, 0.4748451843261719, 0.474919921875, 0.476189697265625, 0.4745257263183594, 0.4750560913085938, 0.475325439453125, 0.474986572265625, 0.4742686157226563, 0.4745994262695313, 0.4747817077636719, 0.4749609069824219, 0.4750120849609375, 0.47644467163085935, 0.47719833374023435, 0.47524453735351563, 0.4750530700683594, 0.4744356384277344, 0.47469769287109376, 0.4745891418457031, 0.47447756958007814, 0.4750274658203125, 0.47433831787109376, 0.47436083984375, 0.4751431579589844, 0.4747120666503906, 0.474977294921875, 0.47543295288085935, 0.4742215576171875, 0.47395327758789063, 0.474292236328125, 0.473849853515625, 0.4742840270996094, 0.4745062255859375, 
0.47460455322265627, 0.475863037109375, 0.4749680786132813, 0.4751790161132812, 0.4745482177734375, 0.47489227294921876, 0.4745994873046875, 0.47445599365234375, 0.47442022705078124, 0.47415603637695314, 0.47442739868164063, 0.474777587890625, 0.47418060302734377, 0.4745369567871094, 0.47485952758789063, 0.4748001708984375, 0.474187744140625, 0.47446221923828125, 0.47530703735351565, 0.9833133544921875, 0.4750899047851562, 0.4764405822753906, 0.47526705932617186, 0.4746055603027344, 0.47418368530273436, 0.4742369384765625, 0.4743475341796875, 0.474524658203125, 0.474323974609375, 0.4742778930664063, 0.47433010864257813, 0.4754462585449219, 0.47468850708007815, 0.474829833984375, 0.4747304992675781, 0.47638424682617186, 0.4761231994628906, 0.47679379272460937, 0.4750878601074219, 0.4745164794921875, 0.47426150512695314, 0.4747745361328125, 0.47573504638671876, 0.4750520324707031, 0.474893310546875, 0.4743792724609375, 0.4743096923828125, 0.474462158203125, 0.47433831787109376, 0.4749609069824219, 0.4743250427246094, 0.4742419738769531, 0.47488204956054686, 0.47455438232421876, 0.4743075866699219, 0.4756408386230469, 0.47438339233398436, 0.4743741760253906, 0.47458706665039063, 0.47492404174804687, 0.47545547485351564, 0.47435980224609375, 0.47457485961914064, 0.4752353210449219, 0.47622964477539065, 0.47467416381835936, 0.47408126831054687, 0.47500799560546875, 0.47463320922851565, 0.4743915405273437, 0.4742758483886719, 0.47516363525390626, 0.47539404296875, 0.4750796813964844, 0.4738518981933594, 0.4742266845703125, 0.477939697265625, 0.474281982421875, 0.47408126831054687, 0.4742758483886719, 0.4744366149902344, 0.4743372802734375, 0.9826785278320312, 0.4751769714355469, 0.47564697265625, 0.47452569580078124, 0.4744765319824219, 0.47413861083984377, 0.47415090942382815, 0.4741754760742187, 0.4749885559082031, 0.47412017822265623, 0.4737423400878906, 0.4744488830566406, 0.4739051513671875, 0.4738478088378906, 0.4745400390625, 0.47486770629882813, 0.47466085815429687, 0.47589376831054686, 0.47535205078125, 0.4755548095703125, 0.4743424072265625, 0.47426663208007813, 0.47488409423828126, 0.47611911010742186, 0.47500384521484373, 0.4744591369628906, 0.47539813232421874, 0.47478475952148436, 0.47490567016601565, 0.47446112060546874, 0.475104248046875, 0.4745164794921875, 0.47489535522460935, 0.47453900146484373, 0.4745594787597656, 0.47434445190429686, 0.47417752075195313, 0.47448269653320313, 0.4744724731445312, 0.4748072509765625, 0.47473458862304685, 0.4740843505859375, 0.4747591552734375, 0.4748912658691406, 0.4760514526367188, 0.47554150390625, 0.4754565124511719, 0.47558041381835936, 0.4751912841796875, 0.47503564453125, 0.47414990234375, 0.4742850646972656, 0.47414886474609375, 0.47481036376953123, 0.47404852294921873, 0.4742584228515625, 0.4739358825683594, 0.4744171447753906, 0.4741263427734375, 0.47414169311523435, 0.4739491882324219, 0.47383755493164065, 0.4742522888183594, 0.983693359375, 0.47566241455078123, 0.47446722412109377, 0.4746977233886719, 0.4751247253417969, 0.47474688720703123, 0.47455032348632814, 0.4745143737792969, 0.474745849609375, 0.4746485900878906, 0.4754575500488281, 0.47418881225585935, 0.474029052734375, 0.47392666625976565, 0.4740771789550781, 0.4740321350097656, 0.4739317626953125, 0.4744591369628906, 0.47454925537109377, 0.47516058349609375, 0.47410687255859374, 0.4743935852050781, 0.4761661376953125, 0.4749885559082031, 0.47419699096679685, 0.47460250854492186, 0.47660543823242185, 0.4745430908203125, 0.4743280639648437, 0.47440179443359376, 
0.4743813171386719, 0.47454721069335937, 0.4742799377441406, 0.4744407043457031, 0.4743854064941406, 0.47423898315429686, 0.47517593383789064, 0.4753879089355469, 0.47501516723632814, 0.47492202758789065, 0.4744867858886719, 0.47409762573242187, 0.47553741455078125, 0.4761282653808594, 0.47627365112304687, 0.47592141723632814, 0.474745849609375, 0.47454931640625, 0.47427374267578126, 0.47429937744140627, 0.47435775756835935, 0.47429937744140627, 0.475188232421875, 0.4755568542480469, 0.4755281982421875, 0.4754483337402344, 0.4758005676269531, 0.47536639404296877, 0.4753592224121094, 0.4763494567871094, 0.47446011352539064, 0.4744376220703125, 0.4739686279296875]",tokens/s,2.074646015839662,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File 
""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,,cuda,0,42,,,,,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.0,217063f5c507ed7cc255df7e1f64c4333a0b4dfe,4.40.2,,0.30.1,,,,1.19.2,,,,0.10.0,,,,MB,2234.441728,2932.342784,0.0,2285.89568,2082.575872,s,10,2.433214202880859,0.24332142028808593,0.0008617435568583196,0.24325077056884764,0.24459763488769531,0.24462198333740234,0.24464146209716797,"[0.24380015563964844, 0.24204917907714843, 0.24258213806152343, 0.24336614990234376, 0.24313539123535155, 0.24226588439941407, 0.24288755798339845, 0.24388919067382814, 0.24464633178710937, 0.24459222412109374]",tokens/s,1052.1063032465574,kWh,2.856464352872637e-06,1.5652168180297718e-06,1.2995936322662033e-05,1.7417617493564442e-05,tokens/kWh,14697762.199370166,MB,2234.441728,2959.60576,0.0,2313.158656,2180.684288,s,10,139.734884765625,13.9734884765625,0.01146198033243705,13.9710908203125,13.98807626953125,13.992370166015625,13.995805283203126,"[13.9871220703125, 13.9770771484375, 13.9966640625, 13.980609375, 13.95928515625, 13.96146875, 13.9670087890625, 13.9751728515625, 13.965376953125, 13.965099609375]",tokens/s,4.508537728833344,kWh,0.00016483188493384256,9.03412226583766e-05,0.0007461166431891384,0.0010012897507813577,tokens/kWh,62918.85036358144,,s,629,141.7022607421874,0.22528181358058424,0.02898003334479797,0.22168063354492187,0.2224162872314453,0.22265180053710937,0.4647246643066406,"[0.22235647583007812, 0.22139903259277344, 0.22141644287109374, 0.22147891235351563, 0.2213693389892578, 0.22170726013183595, 0.22163967895507813, 0.2214256591796875, 0.22199398803710937, 0.22159564208984375, 0.22243431091308594, 0.22190284729003906, 0.22202674865722657, 0.2210723876953125, 0.22141133117675782, 0.22209945678710938, 0.22192127990722657, 0.2224189453125, 0.22194073486328125, 0.22234317016601562, 0.22204518127441406, 0.22228582763671875, 0.2221793212890625, 0.22186904907226562, 0.222603271484375, 0.22207487487792968, 0.22233599853515626, 0.22205238342285155, 0.22235133361816406, 0.2235494384765625, 0.22263296508789063, 0.22236671447753906, 0.22127410888671875, 0.2215219268798828, 0.2213519287109375, 0.22189056396484375, 0.22161100769042968, 0.22196940612792967, 0.22203187561035156, 0.22196531677246092, 0.22140007019042968, 0.2213396453857422, 0.2219325408935547, 0.2218577880859375, 0.22200933837890624, 0.22186495971679687, 0.22244557189941405, 0.22216499328613282, 0.22224179077148437, 0.221955078125, 0.2217943115234375, 0.2224179229736328, 0.22189776611328124, 
0.2225540771484375, 0.22166732788085938, 0.22277632141113282, 0.22537420654296875, 0.22221209716796875, 0.22146047973632813, 0.22176870727539064, 0.22216294860839844, 0.22160383605957032, 0.46420379638671877, 0.2221997833251953, 0.22188236999511718, 0.22198272705078126, 0.2217697296142578, 0.22201139831542968, 0.2215034942626953, 0.22119526672363282, 0.22219879150390626, 0.22249369812011718, 0.22189773559570314, 0.22246092224121095, 0.22249574279785156, 0.22148197937011718, 0.22124134826660155, 0.221802490234375, 0.22128536987304687, 0.2213744659423828, 0.22234214782714845, 0.22216806030273437, 0.22141746520996095, 0.22162637329101562, 0.22157516479492187, 0.22105191040039063, 0.2215782470703125, 0.2218741760253906, 0.2216898498535156, 0.22149427795410156, 0.2215536651611328, 0.2222725067138672, 0.22197760009765624, 0.22207795715332032, 0.2216847381591797, 0.22156083679199218, 0.2215741424560547, 0.2220595245361328, 0.22160179138183594, 0.22255001831054688, 0.22156492614746093, 0.22203904724121093, 0.22160179138183594, 0.22140109252929688, 0.22128640747070313, 0.22133660888671874, 0.2213908233642578, 0.22154035949707032, 0.22508236694335937, 0.22183013916015626, 0.22202879333496095, 0.2222407684326172, 0.2216785888671875, 0.22169293212890626, 0.22160691833496093, 0.22218649291992187, 0.2214686737060547, 0.22141439819335937, 0.222487548828125, 0.2225991668701172, 0.22195301818847657, 0.22147276306152344, 0.22199909973144533, 0.22176666259765626, 0.22219879150390626, 0.4654428100585937, 0.2218987579345703, 0.22210765075683594, 0.2219008026123047, 0.22165504455566407, 0.22182707214355468, 0.22211891174316406, 0.22192640686035156, 0.221770751953125, 0.22130482482910158, 0.22165913391113282, 0.22189263916015625, 0.2217932434082031, 0.22167552185058595, 0.22172467041015625, 0.22150143432617186, 0.22293606567382812, 0.22162535095214844, 0.2228019256591797, 0.22176255798339845, 0.22248243713378907, 0.22172979736328124, 0.22229402160644532, 0.22310092163085937, 0.22198988342285156, 0.22210150146484375, 0.22238616943359374, 0.22228480529785155, 0.22175334167480468, 0.22205337524414062, 0.22236058044433593, 0.2222407684326172, 0.2220369873046875, 0.2219929656982422, 0.2221854705810547, 0.2224127960205078, 0.22233395385742188, 0.22231858825683593, 0.22264627075195312, 0.22218751525878908, 0.22241587829589843, 0.22255718994140625, 0.2218741760253906, 0.22225920104980468, 0.22183935546875, 0.22192332458496095, 0.22207795715332032, 0.223752197265625, 0.22219673156738282, 0.22203289794921874, 0.22212197875976564, 0.2223206329345703, 0.22238616943359374, 0.22253575134277342, 0.22210047912597655, 0.22248442077636718, 0.2219356231689453, 0.2220912628173828, 0.22202572631835937, 0.22224896240234376, 0.22215168762207033, 0.22308761596679688, 0.22262168884277345, 0.46642996215820315, 0.22230323791503906, 0.22220390319824218, 0.22247935485839843, 0.22223155212402343, 0.22242303466796876, 0.22273023986816406, 0.2226411590576172, 0.22237493896484375, 0.22229808044433594, 0.2220185546875, 0.22259507751464844, 0.22249984741210938, 0.22244659423828125, 0.22167654418945312, 0.22273228454589844, 0.2222581787109375, 0.22126182556152343, 0.2230200653076172, 0.22316950988769532, 0.22224179077148437, 0.22140518188476563, 0.22184857177734374, 0.22160281372070312, 0.22178099060058593, 0.22235443115234374, 0.2213928985595703, 0.22145535278320314, 0.22126591491699218, 0.22138368225097657, 0.22140518188476563, 0.22138983154296876, 0.22134886169433593, 0.22174208068847656, 0.22144720458984374, 0.22132118225097655, 
0.22146354675292967, 0.2214001007080078, 0.22144508361816406, 0.2214297637939453, 0.22163763427734376, 0.221517822265625, 0.22244044494628906, 0.22324838256835938, 0.22501274108886718, 0.22167861938476563, 0.22164067077636718, 0.2215004119873047, 0.22143283081054688, 0.22134579467773438, 0.22140518188476563, 0.22115122985839844, 0.22140415954589843, 0.2217195587158203, 0.22167449951171875, 0.2215034942626953, 0.22143795776367187, 0.2215854034423828, 0.22149530029296874, 0.22169497680664063, 0.22140931701660158, 0.22172157287597657, 0.22152806091308594, 0.4667914123535156, 0.22145228576660156, 0.2217512969970703, 0.2216468505859375, 0.22147789001464843, 0.221444091796875, 0.22145126342773438, 0.22144717407226563, 0.22157005310058595, 0.22128947448730468, 0.22147071838378907, 0.22156903076171874, 0.22139801025390626, 0.2215116729736328, 0.2211778564453125, 0.22171034240722656, 0.22124748229980468, 0.22265548706054689, 0.22149836730957032, 0.22151065063476563, 0.22136422729492186, 0.22120550537109376, 0.22162431335449218, 0.22184754943847657, 0.2219530487060547, 0.22183625793457032, 0.22151065063476563, 0.22187826538085936, 0.2214799346923828, 0.22132736206054687, 0.22136524963378906, 0.22137344360351563, 0.22129356384277343, 0.22121778869628905, 0.22129254150390626, 0.2211164093017578, 0.22158746337890625, 0.22140415954589843, 0.22128128051757812, 0.22147071838378907, 0.22140313720703125, 0.22121273803710936, 0.22131808471679687, 0.2216048583984375, 0.22186810302734375, 0.22218540954589844, 0.221623291015625, 0.22134477233886718, 0.22152088928222657, 0.22141746520996095, 0.22138368225097657, 0.22134169006347656, 0.22129458618164063, 0.22138983154296876, 0.22134169006347656, 0.2212956085205078, 0.221159423828125, 0.2240184326171875, 0.22193971252441405, 0.2216089630126953, 0.22141952514648439, 0.22154853820800782, 0.2216837158203125, 0.46434201049804685, 0.22140415954589843, 0.22150758361816406, 0.22161817932128905, 0.2220298309326172, 0.22168780517578124, 0.2222530517578125, 0.22144613647460937, 0.2217840576171875, 0.22155264282226564, 0.22125669860839844, 0.22169197082519532, 0.22176358032226562, 0.22181779479980468, 0.22157926940917969, 0.2215782470703125, 0.2214246368408203, 0.22152294921875, 0.22142771911621092, 0.22154547119140625, 0.22132429504394532, 0.22135606384277343, 0.22152394104003906, 0.22135398864746095, 0.22151577758789062, 0.2220185546875, 0.22214349365234376, 0.22169088745117188, 0.2212464599609375, 0.22148403930664062, 0.221370361328125, 0.22125363159179687, 0.22161305236816406, 0.22148197937011718, 0.22166015625, 0.2215977020263672, 0.22134783935546876, 0.2212351989746094, 0.2241535949707031, 0.22163558959960938, 0.2216785888671875, 0.22160691833496093, 0.221549560546875, 0.22167141723632813, 0.22142054748535156, 0.2214297637939453, 0.22139903259277344, 0.2217943115234375, 0.22149119567871095, 0.221876220703125, 0.22163250732421874, 0.221765625, 0.2215004119873047, 0.2217400360107422, 0.22149427795410156, 0.2213939208984375, 0.2211758117675781, 0.22143487548828125, 0.22141644287109374, 0.22124339294433593, 0.22147071838378907, 0.22166630554199218, 0.22171136474609374, 0.46487347412109375, 0.22141133117675782, 0.22127206420898438, 0.221338623046875, 0.2219622344970703, 0.22154342651367187, 0.22202265930175782, 0.22145330810546876, 0.22258995056152345, 0.22142874145507813, 0.22155264282226564, 0.22175334167480468, 0.2215741424560547, 0.22162739562988282, 0.22164378356933595, 0.2218803253173828, 0.22163250732421874, 0.22160076904296874, 0.22158848571777343, 
0.2215116729736328, 0.2215679931640625, 0.22120550537109376, 0.22134477233886718, 0.22316543579101564, 0.22167039489746093, 0.22168063354492187, 0.22168678283691406, 0.2215116729736328, 0.2215188751220703, 0.22155363464355468, 0.22205235290527345, 0.22159359741210938, 0.22261351013183595, 0.2219059143066406, 0.22219161987304686, 0.22189773559570314, 0.22165504455566407, 0.22153114318847655, 0.2218076171875, 0.22219570922851561, 0.22192445373535155, 0.22225091552734375, 0.22204415893554688, 0.22189260864257812, 0.22191104125976563, 0.2215352325439453, 0.22143385314941405, 0.22180557250976562, 0.22147584533691406, 0.22139698791503906, 0.22178713989257812, 0.22213325500488282, 0.22149221801757812, 0.22222848510742188, 0.22150860595703126, 0.2214686737060547, 0.22136627197265624, 0.22127001953125, 0.22115020751953124, 0.22117478942871094, 0.22150143432617186, 0.22140211486816405, 0.22120550537109376, 0.46591384887695314, 0.22151065063476563, 0.22132838439941407, 0.22122496032714845, 0.22145433044433593, 0.22153318786621093, 0.2213519287109375, 0.22116044616699218, 0.22144717407226563, 0.22135296630859375, 0.2212833251953125, 0.22198477172851563, 0.22165811157226561, 0.22226739501953124, 0.22169804382324218, 0.22206361389160156, 0.22276095581054686, 0.22171136474609374, 0.22175949096679687, 0.22171034240722656, 0.22183833312988283, 0.2217021484375, 0.22223257446289063, 0.221876220703125, 0.22166323852539063, 0.22185369873046876, 0.22207693481445312, 0.22190386962890624, 0.2217379913330078, 0.22185165405273438, 0.22234214782714845, 0.22208717346191406, 0.22299136352539062, 0.22185061645507811, 0.22382899475097656, 0.22139187622070314, 0.22159461975097655, 0.22185369873046876, 0.2214072265625, 0.22193971252441405, 0.22215577697753905, 0.22185267639160156, 0.22164480590820312, 0.22156594848632813, 0.2215188751220703, 0.22147990417480468, 0.22186189270019532, 0.22172262573242188, 0.22174310302734376, 0.22215577697753905, 0.22219366455078124, 0.2219448699951172, 0.2216007385253906, 0.22184959411621094, 0.22170930480957032, 0.22189164733886718, 0.2221639404296875, 0.22170518493652344, 0.22201344299316406, 0.2217830352783203, 0.22169395446777343, 0.22200831604003907, 0.22163148498535157, 0.46692044067382815, 0.2217400360107422, 0.22165196228027345, 0.22197555541992187, 0.22193458557128906, 0.22203904724121093, 0.22154655456542968, 0.22138873291015626, 0.22145330810546876, 0.22146354675292967, 0.22145024108886718, 0.22175640869140625, 0.22180351257324218, 0.22217727661132813, 0.22153727722167968, 0.221896728515625, 0.2214164123535156, 0.2225797119140625, 0.22176051330566407, 0.22177381896972656, 0.22154649353027345, 0.221233154296875, 0.2214615020751953, 0.22134681701660155, 0.22129356384277343, 0.22127615356445313, 0.22128640747070313, 0.22148095703125, 0.22134169006347656, 0.22124339294433593, 0.22114816284179686, 0.2213191680908203, 0.2215188446044922, 0.22131814575195313, 0.22146969604492187, 0.22117990112304686, 0.22134375, 0.22164889526367187, 0.22181581115722657, 0.222171142578125, 0.22175640869140625, 0.22172979736328124, 0.22134066772460936, 0.22151986694335937, 0.22140313720703125, 0.22141030883789062, 0.22220594787597656, 0.22215887451171876, 0.22227349853515624, 0.22183116149902343, 0.22192640686035156, 0.22173184204101562, 0.2216898498535156, 0.22144717407226563, 0.22147584533691406, 0.22146354675292967, 0.22156903076171874, 0.22176051330566407, 0.22247833251953125, 0.221802490234375, 0.22252543640136718, 0.22219468688964844, 0.22166015625, 0.46707403564453126, 0.22175640869140625, 
0.22192536926269532, 0.22187315368652344, 0.22208204650878907, 0.22162535095214844, 0.22142361450195314, 0.22128025817871094, 0.22170008850097656, 0.2215782470703125, 0.22144613647460937, 0.22178201293945313, 0.22167654418945312, 0.22178816223144532, 0.2213816375732422, 0.2216816711425781, 0.22158131408691406, 0.22198066711425782, 0.22218853759765625, 0.221770751953125, 0.22207693481445312, 0.2219069366455078, 0.22215782165527342, 0.2219069366455078, 0.2217830352783203, 0.2216417236328125, 0.2232033233642578, 0.22143283081054688, 0.22127719116210937, 0.22148300170898438, 0.22127104187011717, 0.2214615020751953, 0.22147584533691406, 0.22175538635253905, 0.22177690124511718, 0.2215290832519531, 0.22144717407226563, 0.22135296630859375, 0.22129356384277343, 0.22165196228027345, 0.22160076904296874, 0.22165402221679686, 0.22142771911621092, 0.22134783935546876, 0.22133555603027344, 0.22152088928222657, 0.22161509704589843, 0.2215034942626953, 0.22159257507324218, 0.22131712341308593, 0.22130586242675782, 0.22127410888671875, 0.22141850280761718, 0.22161305236816406, 0.22188954162597657, 0.22163456726074218, 0.2218014678955078, 0.2217902069091797, 0.22162124633789063, 0.22154444885253907, 0.22161305236816406, 0.2218956756591797, 0.22184857177734374]",tokens/s,4.438884720014453,,,main,False,False,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,4008.419328,15760.621568,0.0,15114.174464,14046.123008,s,10,15.848047607421876,1.5848047607421876,0.001058458314234293,1.584490295410156,1.5859812255859376,1.5864652832031252,1.5868525292968751,"[1.5851588134765624, 1.583447265625, 1.5845269775390625, 1.583828125, 1.58445361328125, 1.583693603515625, 1.5843594970703125, 1.5857567138671875, 1.5869493408203126, 1.5858736572265626]",tokens/s,161.53409324698862,kWh,1.869280167751842e-05,1.0243654597979911e-05,8.793465368099884e-05,0.00011687110995649717,tokens/kWh,2190447.23794692,MB,4008.419328,15760.621568,0.0,15114.174464,14170.904576,s,10,927.2718203125,92.72718203125001,0.005797509915113206,92.7283046875,92.73256640625,92.73465820312501,92.736331640625,"[92.7307421875, 92.72909375, 92.73675, 92.7321015625, 92.7264921875, 92.7241015625, 92.7158671875, 92.7194609375, 92.7296953125, 92.727515625]",tokens/s,0.6794124292353493,kWh,0.0010946948910421795,0.0005999896856933628,0.005209015139431006,0.006903699716166547,tokens/kWh,9125.541751543959,,s,629,940.0627094726563,1.494535309177514,0.18825786848907494,1.47182177734375,1.4724240966796875,1.4726346923828124,3.055808037109375,"[1.4722620849609376, 1.4717921142578125, 1.47181982421875, 1.4719560546875, 1.4718125, 1.4717183837890624, 1.472183349609375, 1.47190576171875, 1.4715218505859375, 1.47171630859375, 1.47156787109375, 1.4714951171875, 1.471922119140625, 1.4721187744140625, 1.4715177001953126, 1.47153515625, 1.4718924560546875, 1.4718126220703125, 1.4714593505859375, 1.4722181396484375, 1.4722314453125, 1.4714234619140625, 1.471951904296875, 1.4719990234375, 1.471762451171875, 1.4720880126953124, 1.472605224609375, 1.471382568359375, 1.4716119384765625, 1.471868896484375, 
1.4716876220703126, 1.4716446533203125, 1.472257080078125, 1.4723829345703126, 1.472047119140625, 1.4720819091796875, 1.47234716796875, 1.4716507568359376, 1.47162109375, 1.471951904296875, 1.472247802734375, 1.472111572265625, 1.471752197265625, 1.4720460205078125, 1.4718433837890625, 1.471847412109375, 1.473238037109375, 1.4716826171875, 1.4716129150390624, 1.471711181640625, 1.4720286865234375, 1.4715771484375, 1.47129443359375, 1.4723441162109374, 1.47237890625, 1.4720123291015625, 1.4720389404296874, 1.4725570068359375, 1.4715821533203124, 1.47163232421875, 1.472152587890625, 1.4719139404296875, 3.055659912109375, 1.4719190673828124, 1.4714122314453124, 1.4715750732421875, 1.4726441650390625, 1.4716497802734374, 1.4719395751953126, 1.4714327392578126, 1.4716334228515624, 1.4717982177734374, 1.4725570068359375, 1.472247802734375, 1.4715555419921875, 1.4713795166015624, 1.47167431640625, 1.4718515625, 1.471489990234375, 1.4721033935546874, 1.4721280517578126, 1.471662109375, 1.47139990234375, 1.4714696044921876, 1.471595458984375, 1.4714849853515626, 1.472489501953125, 1.4717716064453126, 1.471542236328125, 1.4713282470703124, 1.4715074462890625, 1.4714327392578126, 1.4716241455078125, 1.4727454833984375, 1.4718914794921876, 1.472322509765625, 1.4719334716796875, 1.47182177734375, 1.4713436279296874, 1.4722703857421875, 1.47228466796875, 1.471656982421875, 1.471455322265625, 1.4724454345703124, 1.4727833251953124, 1.472058349609375, 1.472762939453125, 1.4725816650390624, 1.47173583984375, 1.4722109375, 1.4722037353515625, 1.4719927978515626, 1.4716068115234375, 1.4723389892578125, 1.4720194091796874, 1.4716221923828126, 1.4717747802734376, 1.47186279296875, 1.4714490966796876, 1.471636474609375, 1.4723154296875, 1.471784912109375, 1.47171533203125, 1.4717327880859374, 1.472248779296875, 3.057259521484375, 1.47224267578125, 1.4718597412109375, 1.472489501953125, 1.4721064453125, 1.4717808837890625, 1.471899658203125, 1.4724228515625, 1.472541748046875, 1.47203173828125, 1.472416748046875, 1.47173583984375, 1.471767578125, 1.4718350830078124, 1.472779296875, 1.4719927978515626, 1.47137841796875, 1.4718525390625, 1.4718648681640625, 1.4717603759765625, 1.4714869384765625, 1.47251611328125, 1.4720552978515624, 1.47161083984375, 1.4713856201171875, 1.4714736328125, 1.4710916748046876, 1.471134765625, 1.4725887451171875, 1.4723441162109374, 1.4716630859375, 1.471782958984375, 1.4716395263671875, 1.4718515625, 1.4722734375, 1.472278564453125, 1.4714920654296875, 1.47167431640625, 1.47209423828125, 1.4719703369140624, 1.47245263671875, 1.4724505615234376, 1.47195703125, 1.47188525390625, 1.472077880859375, 1.4723031005859375, 1.4720552978515624, 1.4719139404296875, 1.4722252197265624, 1.4726502685546874, 1.4722242431640624, 1.471540283203125, 1.472048095703125, 1.4718955078125, 1.4713743896484375, 1.47283154296875, 1.4720828857421875, 1.472143310546875, 1.472288818359375, 1.4720460205078125, 1.4718065185546876, 1.471910888671875, 1.472711669921875, 3.055857666015625, 1.471562744140625, 1.4717071533203125, 1.471541259765625, 1.4722652587890626, 1.47290625, 1.4720972900390625, 1.4717244873046875, 1.4717379150390626, 1.47239111328125, 1.4715084228515625, 1.4723399658203125, 1.47213720703125, 1.47205224609375, 1.472205810546875, 1.4718515625, 1.471974365234375, 1.4717972412109375, 1.472838623046875, 1.472647216796875, 1.47232666015625, 1.47219970703125, 1.4716876220703126, 1.4716630859375, 1.4720552978515624, 1.4729554443359374, 1.471932373046875, 1.471942626953125, 1.471614990234375, 
1.4719990234375, 1.4717265625, 1.4722620849609376, 1.47253857421875, 1.47146337890625, 1.4715965576171874, 1.4714061279296875, 1.471909912109375, 1.4716488037109374, 1.4721239013671874, 1.472101318359375, 1.4714869384765625, 1.471604736328125, 1.4719661865234375, 1.4716927490234375, 1.4718228759765626, 1.471889404296875, 1.4719180908203124, 1.4718177490234374, 1.4717174072265624, 1.471542236328125, 1.471983642578125, 1.4715238037109375, 1.4717327880859374, 1.472067626953125, 1.471873046875, 1.4716876220703126, 1.4719764404296876, 1.4723194580078125, 1.4719017333984374, 1.4723870849609375, 1.4717716064453126, 1.4715146484375, 1.4714757080078125, 3.056364501953125, 1.4723338623046875, 1.472585693359375, 1.4714593505859375, 1.471646728515625, 1.4715494384765626, 1.4717880859375, 1.472016357421875, 1.4726204833984375, 1.4721822509765625, 1.4717747802734376, 1.4717244873046875, 1.4717716064453126, 1.47205322265625, 1.4717276611328125, 1.472067626953125, 1.471805419921875, 1.4715555419921875, 1.471551513671875, 1.4714962158203124, 1.47133544921875, 1.4717747802734376, 1.471994873046875, 1.47158837890625, 1.471520751953125, 1.4713466796875, 1.4714747314453125, 1.4717276611328125, 1.4718135986328125, 1.4724290771484374, 1.4716488037109374, 1.4714132080078124, 1.4716231689453125, 1.4723389892578125, 1.4713077392578124, 1.47160888671875, 1.471899658203125, 1.4723778076171874, 1.471910888671875, 1.4719447021484375, 1.47195703125, 1.47152685546875, 1.472251953125, 1.471962158203125, 1.4716077880859375, 1.471457275390625, 1.4723092041015624, 1.4719969482421875, 1.4716549072265626, 1.4718648681640625, 1.47160986328125, 1.4718760986328125, 1.471826904296875, 1.4713251953125, 1.47182177734375, 1.4716077880859375, 1.472786376953125, 1.472522216796875, 1.4720809326171875, 1.4717982177734374, 1.4721248779296876, 1.471731689453125, 1.4721033935546874, 3.055680419921875, 1.4718280029296875, 1.4716129150390624, 1.4713046875, 1.4718914794921876, 1.4720511474609375, 1.4723931884765624, 1.471605712890625, 1.4720911865234374, 1.47203173828125, 1.4718822021484375, 1.4717869873046876, 1.4723829345703126, 1.4720809326171875, 1.471552490234375, 1.4721136474609375, 1.471494140625, 1.4717767333984375, 1.471177734375, 1.472689208984375, 1.4716497802734374, 1.4718802490234375, 1.4714306640625, 1.472184326171875, 1.4717860107421874, 1.47194677734375, 1.47276904296875, 1.4713814697265626, 1.4714398193359375, 1.471753173828125, 1.472689208984375, 1.472320556640625, 1.472069580078125, 1.47273828125, 1.4716497802734374, 1.4718289794921875, 1.4717060546875, 1.471951904296875, 1.4716866455078126, 1.47211669921875, 1.471942626953125, 1.4714388427734375, 1.471298583984375, 1.4713538818359375, 1.4712965087890626, 1.4712689208984375, 1.4717244873046875, 1.472357421875, 1.471731689453125, 1.4715218505859375, 1.471215576171875, 1.4711285400390626, 1.4713046875, 1.47230615234375, 1.4722723388671874, 1.4720819091796875, 1.471382568359375, 1.4713026123046875, 1.472006103515625, 1.47163232421875, 1.472415771484375, 1.471131591796875, 1.4713272705078124, 3.056773193359375, 1.4716077880859375, 1.4716282958984375, 1.471858642578125, 1.4712208251953125, 1.4718197021484376, 1.472443359375, 1.4715648193359374, 1.471594482421875, 1.472443359375, 1.4719036865234374, 1.47152587890625, 1.4711285400390626, 1.4714869384765625, 1.4713077392578124, 1.4713907470703125, 1.47195703125, 1.47201123046875, 1.4714542236328125, 1.4714757080078125, 1.471456298828125, 1.471595458984375, 1.47175732421875, 1.4725191650390625, 1.4716539306640626, 1.4716416015625, 
1.47211474609375, 1.4713333740234376, 1.47162109375, 1.4716436767578125, 1.4722816162109376, 1.47161083984375, 1.471515625, 1.4715064697265625, 1.4715709228515625, 1.47129345703125, 1.4712586669921874, 1.4722181396484375, 1.471456298828125, 1.4713170166015626, 1.4715576171875, 1.47175732421875, 1.47146337890625, 1.472247802734375, 1.471603759765625, 1.4716273193359375, 1.4715709228515625, 1.472047119140625, 1.4721197509765624, 1.472300048828125, 1.4724617919921874, 1.472036865234375, 1.4715872802734375, 1.47150537109375, 1.47152685546875, 1.4712227783203125, 1.4709483642578125, 1.4714481201171874, 1.4717808837890625, 1.4714285888671874, 1.4712176513671875, 1.4715013427734376, 1.471331298828125, 3.058272216796875, 1.4714920654296875, 1.4712022705078125, 1.47119921875, 1.4717470703125, 1.471373291015625, 1.4719886474609376, 1.4716968994140625, 1.4716273193359375, 1.4718914794921876, 1.471751220703125, 1.4717174072265624, 1.471711181640625, 1.472184326171875, 1.472363525390625, 1.471656982421875, 1.4721873779296875, 1.4718863525390624, 1.4719764404296876, 1.4720511474609375, 1.4719754638671876, 1.4718924560546875, 1.4713037109375, 1.4712841796875, 1.47162109375, 1.4717725830078126, 1.472, 1.4716497802734374, 1.4716558837890625, 1.4713426513671874, 1.471215576171875, 1.4712484130859376, 1.47142041015625, 1.471537109375, 1.4719190673828124, 1.4712698974609375, 1.471478759765625, 1.47183203125, 1.4715013427734376, 1.4721925048828124, 1.47177978515625, 1.472447509765625, 1.4714500732421876, 1.4717501220703124, 1.47160888671875, 1.4718299560546875, 1.471974365234375, 1.4720081787109376, 1.472126953125, 1.4715023193359376, 1.4715084228515625, 1.47169482421875, 1.471330322265625, 1.471171630859375, 1.4719794921875, 1.4723297119140626, 1.471705078125, 1.4717818603515624, 1.4718740234375, 1.4721126708984376, 1.471266845703125, 1.472437255859375, 1.4715064697265625, 3.05955126953125, 1.4719764404296876, 1.47195703125, 1.47245361328125, 1.472227294921875, 1.4713221435546875, 1.4720296630859375, 1.4715545654296875, 1.4716138916015624, 1.4715606689453125, 1.47230517578125, 1.4725037841796875, 1.471837158203125, 1.47186376953125, 1.47192724609375, 1.4720911865234374, 1.47192626953125, 1.4723450927734374, 1.4718555908203126, 1.47135791015625, 1.4716026611328126, 1.471425537109375, 1.4714224853515625, 1.4716395263671875, 1.4727669677734374, 1.4724013671875, 1.4724403076171875, 1.47211669921875, 1.4722672119140625, 1.472236572265625, 1.47209423828125, 1.4720142822265625, 1.471340576171875, 1.4719610595703125, 1.471952880859375, 1.4716343994140626, 1.4717020263671876, 1.472510009765625, 1.4722958984375, 1.47205322265625, 1.471494140625, 1.471520751953125, 1.4711224365234374, 1.4714521484375, 1.471753173828125, 1.4722447509765626, 1.4713282470703124, 1.47150341796875, 1.472290771484375, 1.471889404296875, 1.4715074462890625, 1.4724771728515624, 1.4729246826171876, 1.4722078857421874, 1.4716180419921876, 1.471309814453125, 1.47177978515625, 1.47196923828125, 1.4722120361328126, 1.471774658203125, 1.4713466796875, 1.471520751953125, 1.4712811279296876, 3.058113525390625, 1.4730526123046874, 1.4717235107421875, 1.4721668701171875, 1.471916015625, 1.472654296875, 1.4716385498046876, 1.4719088134765625, 1.4720716552734374, 1.4713763427734374, 1.471236083984375, 1.471573974609375, 1.4717041015625, 1.471826904296875, 1.4721710205078125, 1.472248779296875, 1.471709228515625, 1.47167333984375, 1.4722354736328125, 1.47183203125, 1.4724495849609376, 1.47257861328125, 1.4722344970703125, 1.471520751953125, 1.4716630859375, 
1.47169384765625, 1.472069580078125, 1.4725938720703124, 1.472268310546875, 1.471952880859375, 1.4720880126953124, 1.4720225830078124, 1.471952880859375, 1.4723450927734374, 1.472373779296875, 1.472194580078125, 1.4716927490234375, 1.471921142578125, 1.471922119140625, 1.472109619140625, 1.471762451171875, 1.4723779296875, 1.4719671630859374, 1.4716590576171875, 1.47093603515625, 1.4710968017578125, 1.4713907470703125, 1.47113671875, 1.471277099609375, 1.4721495361328125, 1.4713795166015624, 1.4710025634765624, 1.471087646484375, 1.47147265625, 1.4714593505859375, 1.4727730712890625, 1.472385986328125, 1.4715115966796875, 1.47182177734375, 1.471372314453125, 1.471321044921875, 1.4717603759765625, 1.47186181640625]",tokens/s,0.6691042987470994,,,,,,,, 4bit-awq-gemm-flash_attention_2,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,flash_attention_2,,False,,False,forward,awq,4,gemm,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.40.2,,0.30.1,,,,1.19.2,,,,0.11.1,,,,MB,1308.962816,6256.328704,0.0,5609.8816,5292.617216,s,10,5.571213256835938,0.5571213256835937,0.0007666840176307737,0.5568714599609375,0.557456005859375,0.5584093994140625,0.5591721142578125,"[0.55936279296875, 0.556702392578125, 0.5567649536132813, 0.5570609741210938, 0.5566168212890625, 0.5568453979492187, 0.557244140625, 0.5567742919921875, 0.5569439697265625, 0.5568975219726563]",tokens/s,459.5049376110048,kWh,6.58186607890659e-06,3.606554268359711e-06,3.10157809854445e-05,4.1204201332710805e-05,tokens/kWh,6212958.672172324,MB,1308.962816,6256.328704,0.0,5609.8816,5503.948288,s,10,327.20778125,32.720778124999995,0.005308491364446453,32.7202353515625,32.7266998046875,32.72907548828125,32.73097603515625,"[32.72102734375, 32.713080078125, 32.72320703125, 32.723677734375, 32.719443359375, 32.726171875, 32.7142578125, 32.71880859375, 32.71665625, 32.731451171875]",tokens/s,1.9253820847208232,kWh,0.00038621306252148413,0.00021167820178359155,0.0018056511605691545,0.00240354242487423,tokens/kWh,26211.31183207494,,s,629,331.6996517333982,0.527344438367883,0.06619643329461054,0.519362548828125,0.5196838745117187,0.5197864990234375,1.0765131103515626,"[0.5193318481445313, 0.5191546630859375, 0.5195264282226563, 0.5193011474609375, 0.519161865234375, 0.5189949340820312, 0.519024658203125, 0.5189488525390625, 0.51919873046875, 0.518950927734375, 0.5190645751953125, 0.5191629028320313, 0.5196636352539062, 0.5194833984375, 0.5193001098632812, 0.5194168090820312, 0.5192283935546875, 0.5194127807617187, 0.5191557006835937, 0.5193123779296875, 0.5200199584960937, 0.5194977416992187, 0.5196605224609375, 0.519141357421875, 0.5195980834960937, 0.5191690063476563, 0.5194024658203125, 0.51964208984375, 0.5193021240234375, 0.5191884765625, 0.5192212524414063, 0.5194844360351563, 0.5194926147460938, 0.5195140991210937, 0.519436279296875, 0.5195079956054688, 0.5191536865234375, 0.5194281005859375, 0.5195120849609375, 0.5192755126953125, 0.5196728515625, 0.5195980834960937, 0.5193359375, 0.5193533325195312, 0.5194619140625, 0.5194895629882812, 0.5192806396484375, 0.5191629028320313, 
0.51945166015625, 0.519245849609375, 0.5195632934570312, 0.519736328125, 0.5192724609375, 0.5192140502929687, 0.5193277587890625, 0.5196113891601563, 0.5192888793945313, 0.519089111328125, 0.5195858154296875, 0.519647216796875, 0.5195612182617187, 0.5193564453125, 1.0763663330078126, 0.518898681640625, 0.5189488525390625, 0.5190789184570312, 0.5190236206054688, 0.5189979858398438, 0.5195140991210937, 0.5191475219726562, 0.5191536865234375, 0.5191444702148438, 0.5191588134765625, 0.519103515625, 0.5190625, 0.5190420532226563, 0.5190901489257812, 0.5192744750976562, 0.5193707275390625, 0.5191854248046875, 0.5194649658203125, 0.5193666381835937, 0.5193236694335938, 0.5194281005859375, 0.5191649169921875, 0.5191342163085938, 0.519046142578125, 0.5191383056640625, 0.5194874877929687, 0.5194271240234375, 0.5192376098632813, 0.5192274169921876, 0.5190830078125, 0.519161865234375, 0.51907275390625, 0.51944140625, 0.5194660034179688, 0.5193697509765625, 0.5191290893554688, 0.5192161254882812, 0.51917822265625, 0.51928271484375, 0.5191895141601562, 0.5192591552734375, 0.5195140991210937, 0.51934619140625, 0.5191177978515625, 0.51966259765625, 0.5194188842773437, 0.5195499267578125, 0.5193359375, 0.5193554077148438, 0.5193215942382813, 0.5193707275390625, 0.5193646240234375, 0.5194526977539062, 0.5195642700195312, 0.5192232666015625, 0.5191710815429688, 0.519193603515625, 0.5191710815429688, 0.519245849609375, 0.5192489013671875, 0.519278564453125, 0.5190645751953125, 1.076937744140625, 0.5192662963867187, 0.5193164672851562, 0.5193584594726562, 0.5193011474609375, 0.5191680297851563, 0.5191188354492188, 0.5191270141601563, 0.5195693969726562, 0.5193901977539063, 0.5192611694335938, 0.519352294921875, 0.519352294921875, 0.5192232666015625, 0.5193594970703125, 0.51930419921875, 0.519245849609375, 0.5191546630859375, 0.5192427368164062, 0.5194291381835937, 0.5196943359375, 0.519520263671875, 0.5194066162109375, 0.5193318481445313, 0.5193011474609375, 0.5194178466796875, 0.5193533325195312, 0.5193809814453125, 0.5195612182617187, 0.5193554077148438, 0.5194926147460938, 0.5192898559570313, 0.5194373168945312, 0.5194137573242188, 0.519341064453125, 0.5194496459960938, 0.5192283935546875, 0.51919873046875, 0.5195048828125, 0.5196339111328125, 0.5195560913085937, 0.519572509765625, 0.51938916015625, 0.5192929077148437, 0.5191874389648438, 0.5194547119140625, 0.5193114013671875, 0.5195682983398437, 0.5193922729492187, 0.5194547119140625, 0.5196246948242188, 0.5193451538085937, 0.5195867919921875, 0.5193400268554688, 0.5194721069335938, 0.5192376098632813, 0.51959912109375, 0.5194547119140625, 0.5198776245117187, 0.5195980834960937, 0.519794677734375, 0.5196093139648438, 0.5194988403320312, 1.07694482421875, 0.51944140625, 0.5193348999023437, 0.519572509765625, 0.5193656616210938, 0.5192765502929687, 0.5190225830078125, 0.5192960205078125, 0.5193001098632812, 0.519414794921875, 0.5196113891601563, 0.5192478637695312, 0.5194208984375, 0.5195591430664063, 0.5194547119140625, 0.5194055786132813, 0.5193932495117187, 0.5193236694335938, 0.519299072265625, 0.519203857421875, 0.519172119140625, 0.519413818359375, 0.5194751586914063, 0.5194956665039062, 0.5193031616210938, 0.5192714233398438, 0.5192642822265625, 0.5195704345703125, 0.5194055786132813, 0.5192109985351563, 0.5192069702148437, 0.5193225708007813, 0.5193994140625, 0.5194393310546875, 0.5193717651367188, 0.51938916015625, 0.5194823608398438, 0.5195161743164063, 0.5194229736328125, 0.5193123779296875, 0.5194803466796875, 0.5196267700195313, 
0.5195346069335938, 0.5195284423828125, 0.5194066162109375, 0.5194495849609375, 0.5196309204101562, 0.5191699829101563, 0.5192222900390625, 0.5191680297851563, 0.5192140502929687, 0.5192509155273437, 0.5197455444335938, 0.51945166015625, 0.5199011840820312, 0.5196165161132813, 0.5196021728515625, 0.51944140625, 0.519468017578125, 0.5196871948242188, 0.5198428344726562, 0.5196503295898437, 0.5193164672851562, 1.076336669921875, 0.5190471801757812, 0.51890380859375, 0.5190809326171875, 0.5190072021484375, 0.5190051879882812, 0.519067626953125, 0.5190942993164063, 0.5192550659179688, 0.5191608276367188, 0.51911474609375, 0.5190000610351563, 0.5190625, 0.5189099731445312, 0.5189284057617187, 0.5190532836914062, 0.5191976928710937, 0.5195181884765625, 0.5195192260742187, 0.5192243041992187, 0.5194373779296875, 0.5194208374023438, 0.5193380126953125, 0.51938818359375, 0.5193707275390625, 0.5193901977539063, 0.5194732055664063, 0.5196451416015625, 0.5194977416992187, 0.5196513061523438, 0.5195796508789062, 0.5192212524414063, 0.5195172119140625, 0.5192601318359376, 0.5197967529296875, 0.5195601806640625, 0.5197598876953125, 0.5193154296875, 0.5192755126953125, 0.5190604858398438, 0.519140380859375, 0.519161865234375, 0.5192714233398438, 0.5192130737304688, 0.519172119140625, 0.5193922729492187, 0.5197404174804687, 0.5195489501953126, 0.5196461791992187, 0.5194475708007813, 0.5194373168945312, 0.5194741821289063, 0.5194240112304688, 0.5196431274414063, 0.51991552734375, 0.5195888671875, 0.5194813232421875, 0.5196830444335937, 0.5193574829101563, 0.5195447387695312, 0.519552001953125, 0.5194495849609375, 0.5193912353515625, 1.0770308837890625, 0.519299072265625, 0.519161865234375, 0.519352294921875, 0.519202880859375, 0.519232421875, 0.5192161254882812, 0.5191495971679687, 0.5193451538085937, 0.5194649658203125, 0.5192969970703125, 0.51949365234375, 0.5189734497070313, 0.5189652709960938, 0.5190738525390625, 0.5190205078125, 0.5188731079101563, 0.5189837036132813, 0.5191946411132813, 0.5191895141601562, 0.5194915771484375, 0.5194823608398438, 0.5194649658203125, 0.5193861083984375, 0.519541748046875, 0.5195222778320312, 0.5193441162109375, 0.5196687622070313, 0.5194107055664062, 0.5194291381835937, 0.5196728515625, 0.519736328125, 0.5194956665039062, 0.5196932983398438, 0.51970458984375, 0.5194557495117188, 0.5194752197265625, 0.5195489501953126, 0.5196452026367188, 0.51976904296875, 0.5196585693359375, 0.5195037841796875, 0.5197609252929688, 0.5197005004882812, 0.5194229736328125, 0.51953564453125, 0.5195899047851562, 0.5200025634765625, 0.5196328735351563, 0.5195530395507812, 0.5195929565429688, 0.5197322387695312, 0.519456787109375, 0.5192703857421875, 0.5194905395507813, 0.5194588012695313, 0.5194240112304688, 0.5194588012695313, 0.519572509765625, 0.5200353393554688, 0.5195612182617187, 0.5195438232421875, 0.5196062622070312, 1.0774559326171875, 0.5191044921875, 0.5191157836914062, 0.5191874389648438, 0.5194168090820312, 0.519161865234375, 0.5189949340820312, 0.5192335205078125, 0.5191475219726562, 0.5193502807617187, 0.5192642822265625, 0.5191946411132813, 0.5190123291015625, 0.5190532836914062, 0.5194086303710937, 0.5190615234375, 0.5189734497070313, 0.5190645751953125, 0.5192376098632813, 0.5190942993164063, 0.5191700439453125, 0.5191280517578125, 0.5192765502929687, 0.5192151489257812, 0.5193707275390625, 0.5190543212890625, 0.5191290893554688, 0.5193359375, 0.5194874877929687, 0.5193267211914062, 0.5189846801757813, 0.51919873046875, 0.5191116943359375, 0.5194178466796875, 
0.51928369140625, 0.5192315063476562, 0.51917724609375, 0.5193430786132812, 0.5191895141601562, 0.5191710815429688, 0.5193011474609375, 0.5194956665039062, 0.5192806396484375, 0.519357421875, 0.5193850708007812, 0.5195530395507812, 0.5191905517578125, 0.519109619140625, 0.5193072509765625, 0.5192550659179688, 0.5195325317382813, 0.5192079467773437, 0.5195632934570312, 0.5195438232421875, 0.5192283935546875, 0.5193164672851562, 0.5193871459960937, 0.519130126953125, 0.5190543212890625, 0.519066650390625, 0.519762939453125, 0.5196871948242188, 0.51964208984375, 1.0765701904296876, 0.5193727416992188, 0.5190225830078125, 0.51900927734375, 0.51913525390625, 0.5190553588867187, 0.5191137084960937, 0.519404541015625, 0.5193369750976562, 0.5193421020507812, 0.5189796142578125, 0.5190471801757812, 0.5191260375976563, 0.5190000610351563, 0.5188761596679687, 0.5193082885742187, 0.5192929077148437, 0.5189151000976563, 0.5193871459960937, 0.5191874389648438, 0.5193277587890625, 0.5193389892578125, 0.5194967041015625, 0.5193380126953125, 0.518867919921875, 0.5191762084960938, 0.5194178466796875, 0.5198345947265625, 0.5194864501953125, 0.519278564453125, 0.5193430786132812, 0.5191434326171875, 0.51901953125, 0.519066650390625, 0.5193963623046876, 0.5190963134765625, 0.51917822265625, 0.5191843872070312, 0.5193840942382812, 0.5189468383789062, 0.5197189331054688, 0.5195069580078125, 0.5195929565429688, 0.5196318969726562, 0.5192345581054687, 0.5199708251953125, 0.5198817138671875, 0.5194895629882812, 0.5193318481445313, 0.51960009765625, 0.519520263671875, 0.5197168579101562, 0.5195089721679688, 0.5197127685546875, 0.5197619018554688, 0.5193789672851562, 0.5196011352539063, 0.5199605712890625, 0.5192171630859375, 0.519773193359375, 0.5193154296875, 0.5193421020507812, 0.5191127319335938, 1.076674560546875, 0.5193175048828125, 0.5198510131835937, 0.5194219360351563, 0.5192171630859375, 0.5192335205078125, 0.5190491943359375, 0.5191076049804687, 0.519288818359375, 0.519351318359375, 0.51945166015625, 0.5192109985351563, 0.5194281005859375, 0.5190523071289063, 0.518887451171875, 0.5189160766601563, 0.5193380126953125, 0.5191219482421875, 0.5194137573242188, 0.5189949340820312, 0.5194024658203125, 0.5194833984375, 0.5191393432617187, 0.5193871459960937, 0.5191823120117187, 0.519035888671875, 0.5191137084960937, 0.5192263793945312, 0.5193850708007812, 0.5195980834960937, 0.519099365234375, 0.5191802978515625, 0.519103515625, 0.5190819702148437, 0.5191076049804687, 0.5194823608398438, 0.519362548828125, 0.5193380126953125, 0.5192847290039062, 0.5194066162109375, 0.5196646118164062, 0.519319580078125, 0.5197086791992187, 0.5193389892578125, 0.5191321411132812, 0.5191423950195313, 0.5191177978515625, 0.51974658203125, 0.5195028686523437, 0.5193082885742187, 0.5194967041015625, 0.5192734985351563, 0.5193185424804687, 0.5191393432617187, 0.5193789672851562, 0.5193001098632812, 0.519245849609375, 0.5193215942382813, 0.5193871459960937, 0.5200670776367188, 0.5194752197265625, 0.5193564453125, 0.519161865234375, 1.0765865478515626, 0.5188423461914062, 0.5193768920898437, 0.5195335693359375, 0.5195069580078125, 0.5196103515625, 0.5192714233398438, 0.5193687133789062, 0.5194761962890625, 0.5191976928710937, 0.5193594970703125, 0.5194977416992187, 0.5192212524414063, 0.5192960205078125, 0.5195693969726562, 0.51974755859375, 0.519372802734375, 0.5196605224609375, 0.5194926147460938, 0.5194332275390625, 0.5192581176757812, 0.5195612182617187, 0.519888916015625, 0.52008251953125, 0.519522216796875, 0.519762939453125, 
0.5197742309570312, 0.5199441528320312, 0.5196011352539063, 0.5197128295898438, 0.519615478515625, 0.5195438232421875, 0.5197282104492188, 0.52025439453125, 0.52010595703125, 0.5197557983398438, 0.5197957153320313, 0.5193380126953125, 0.5193861083984375, 0.5192591552734375, 0.5193103637695312, 0.5194598388671875, 0.519741455078125, 0.5195346069335938, 0.519635986328125, 0.5194772338867187, 0.5194158325195313, 0.5195806884765625, 0.5193267211914062, 0.5193666381835937, 0.5194485473632813, 0.5195233154296875, 0.5197117309570313, 0.519857177734375, 0.519552001953125, 0.519488525390625, 0.5193113403320313, 0.519372802734375, 0.5194302368164062, 0.5193245849609375, 0.5197352905273438, 0.51986328125, 0.5195817260742187]",tokens/s,1.8962938209701676,,,,,,,, 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. 
If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa6e-2addff2d3636d5c66f0d7b0e;540f09a1-f8d5-44f7-9a31-4c35f6e45cb8) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa1c-007c37da082a575d102a98bd;d62f780e-9e6a-4dd1-9710-246195337376) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa537-319e27b2631f0106330d203e;1f015b9a-cc9a-4442-a1ca-3f308f8f238f) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9c2-758b38da2fab44a872a7eefb;dfcf6b04-c4b9-4836-aeac-32beee847fc8) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, 
in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaac0-2256a0e5174bdade23205d9c;d17d72c7-67d3-42ab-9d95-4f698844f852) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback 
(most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights 
self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = 
post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward 
hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) TypeError: DeciCoderAttention.forward() got an unexpected keyword argument 'cache_position' ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa5f-63892e1e661fe8ce4e9deab1;691eba98-1cd4-4777-a25a-e64bc7bfde6c) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. 
If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 906, in __init__ self.model = InternLMModel(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in __init__ self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 729, in self.layers = nn.ModuleList([InternLMDecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm-20b/80729bcf52fbc4553d965926b27304ac5e156d98/modeling_internlm.py"", line 545, in __init__ self.self_attn = INTERNLM_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa0d-7090eb320a3a9677364f74d7;8e660734-5141-4aff-845a-e1a4de473d20) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward 
hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file 
modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", 
line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: DeciLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTJForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: CodeGenForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3640, in from_pretrained hf_quantizer.preprocess_model( File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 182, in preprocess_model return self._process_model_before_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 85, in _process_model_before_weight_loading model, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 178, in replace_with_awq_linear _, has_been_replaced = replace_with_awq_linear( [Previous line repeated 1 more time] File 
""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 165, in replace_with_awq_linear model._modules[name] = target_cls( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 46, in __init__ assert out_features % (32 // self.w_bit) == 0 AssertionError ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, 
in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa529-6094d41f55ed664a136e5a2f;0a009b52-eafb-476c-bdc3-9a83a02e5974) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9b1-67e0fd51178f04d9724b0a74;3cf4d4ee-2a88-4567-90a1-31421aa01148) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. (Request ID: Root=1-664aaab2-0518e2d112534a360e150f71;aec3db9e-6a2b-4e9e-a0cd-1529f6d19130) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args,
**kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1149, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 1034, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 748, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/qwen2/modeling_qwen2.py"", line 644, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. 
Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 64, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1139, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 1024, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return 
forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 738, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/mistral/modeling_mistral.py"", line 639, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = 
cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: XGLMForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. 
Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 550, in from_pretrained model_class = get_class_from_dynamic_module( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 498, in get_class_from_dynamic_module final_module = get_cached_module_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 323, in get_cached_module_file modules_needed = check_imports(resolved_module_file) File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 181, in check_imports raise ImportError( ImportError: This modeling file requires the following packages that were not found in your environment: transformers_stream_generator. 
Run `pip install transformers_stream_generator` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 68, in run report = scenario.run(backend) File ""/workspace/optimum_benchmark/scenarios/inference/scenario.py"", line 117, in run _ = backend.generate(self.inputs, self.config.generate_kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 369, in generate return self.pretrained_model.generate(**inputs, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 1736, in generate result = self._sample( File ""/usr/local/lib/python3.10/dist-packages/transformers/generation/utils.py"", line 2375, in _sample outputs = self( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 1164, in forward outputs = self.model( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 968, in forward layer_outputs = decoder_layer( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 713, in forward hidden_states, self_attn_weights, present_key_value = self.self_attn( File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/llama/modeling_llama.py"", line 615, in forward query_states = self.q_proj(hidden_states) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1532, in _wrapped_call_impl return self._call_impl(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1541, in _call_impl return forward_call(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/utils/_contextlib.py"", line 115, in decorate_context return func(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/gemv.py"", line 162, in forward assert AWQ_INSTALLED, ( AssertionError: AWQ kernels could not be loaded. Please install them from https://github.com/casper-hansen/AutoAWQ_kernels ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File 
""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", 
line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: OPTForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 558, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3626, in from_pretrained model = cls(config, *model_args, **model_kwargs) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 974, in __init__ self.model = InternLM2Model(config) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in __init__ self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 796, in self.layers = nn.ModuleList([InternLM2DecoderLayer(config) for _ in range(config.num_hidden_layers)]) File ""/root/.cache/huggingface/modules/transformers_modules/internlm/internlm2-20b/f363ea8a116b3ea829c7a068ca24bc9d3e668083/modeling_internlm2.py"", line 598, in __init__ self.attention = INTERNLM2_ATTENTION_CLASSES[config.attn_implementation](config=config) KeyError: 'sdpa' ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 
4bit-awq-gemv-sdpa,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,sdpa,,False,,False,forward,awq,4,gemv,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3620, in from_pretrained config = cls._autoset_attn_implementation( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1478, in _autoset_attn_implementation config = cls._check_and_enable_sdpa( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 1644, in _check_and_enable_sdpa raise ValueError( ValueError: GPTNeoXForCausalLM does not support an attention implementation through torch.nn.functional.scaled_dot_product_attention yet. Please request the support for this architecture: https://github.com/huggingface/transformers/issues/28005. If you believe this error is a bug, please open an issue in Transformers GitHub repository and load your model with the argument `attn_implementation=""eager""` meanwhile. 
Example: `model = AutoModel.from_pretrained(""openai/whisper-tiny"", attn_implementation=""eager"")` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.4b,EleutherAI/pythia-1.4b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-6B-nl,Salesforce/codegen-6B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-70m,EleutherAI/pythia-70m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-110B,Qwen/Qwen1.5-110B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 32.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciCoder-1b,Deci/DeciCoder-1b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciCoder-1b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciCoder-1b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-2b,google/recurrentgemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aaa52-0f35cf87603651cb4a6737df;6ff300d9-260a-42bd-97a7-879160c6673f) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/recurrentgemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/recurrentgemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm-20b,internlm/internlm-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-2.7b,facebook/opt-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-125m,facebook/opt-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-7b,google/gemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9ff-22ae56ac6d35a58840a8c5fe;e9439071-a0e6-4776-8c01-3f5de3095ae3) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-7b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-7b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-13b,huggyllama/llama-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-7B,Qwen/Qwen-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-72B,Qwen/Qwen1.5-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Deci/DeciLM-7B,Deci/DeciLM-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Deci/DeciLM-7B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Deci/DeciLM-7B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-j-6b,EleutherAI/gpt-j-6b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-0.5B,Qwen/Qwen1.5-0.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-160m,EleutherAI/pythia-160m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Salesforce/codegen-16B-nl,Salesforce/codegen-16B-nl,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-MoE-A2.7B,Qwen/Qwen1.5-MoE-A2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-1.3b,EleutherAI/pythia-1.3b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-2.7b,EleutherAI/pythia-2.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-14B,Qwen/Qwen1.5-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-14B,Qwen/Qwen2-beta-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-30b,facebook/opt-30b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-1.3B,EleutherAI/gpt-neo-1.3B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,databricks/dbrx-base,databricks/dbrx-base,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa51c-07afd52a3e2e6c0365f37f40;45918444-23e9-467a-8d0e-39e5665732fd) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/databricks/dbrx-base/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like databricks/dbrx-base is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,huggyllama/llama-7b,huggyllama/llama-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neox-20b,EleutherAI/gpt-neox-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-4B,Qwen/Qwen1.5-4B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-1.8B,Qwen/Qwen1.5-1.8B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-14B,Qwen/Qwen-14B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-14B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-14B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-7.5B,facebook/xglm-7.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-12b,EleutherAI/pythia-12b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-7B,Qwen/Qwen1.5-7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-34B,01-ai/Yi-34B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/gemma-2b,google/gemma-2b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 403 Client Error: Forbidden for url: https://huggingface.co/google/gemma-2b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 367, in hf_raise_for_status raise HfHubHTTPError(message, response=response) from e huggingface_hub.utils._errors.HfHubHTTPError: (Request ID: Root=1-664aa9a3-05d3169600d9db1b46cdaf73;67a4853e-9348-4efc-8225-c1d28cd81aa2) 403 Forbidden: Authorization error.. Cannot access content at: https://huggingface.co/google/gemma-2b/resolve/main/config.json. If you are trying to create or update content,make sure you have a token with the `write` role. 
The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1826, in _raise_on_head_call_error raise LocalEntryNotFoundError( huggingface_hub.utils._errors.LocalEntryNotFoundError: An error happened while trying to locate the file on the Hub and we cannot find the requested files in the local cache. Please check your connection and try again or make sure your Internet connection is on. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 442, in cached_file raise EnvironmentError( OSError: We couldn't connect to 'https://huggingface.co' to load this file, couldn't find it in the cached files and it looks like google/gemma-2b is not the path to a directory containing a file named config.json. Checkout your internet connection or see how to run the library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/polyglot-ko-12.8b,EleutherAI/polyglot-ko-12.8b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-125m,EleutherAI/gpt-neo-125m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-13b,facebook/opt-13b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,google/recurrentgemma-7b,google/recurrentgemma-7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 304, in hf_raise_for_status response.raise_for_status() File ""/usr/local/lib/python3.10/dist-packages/requests/models.py"", line 1021, in raise_for_status raise HTTPError(http_error_msg, response=self) requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 399, in cached_file resolved_file = hf_hub_download( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1221, in hf_hub_download return _hf_hub_download_to_cache_dir( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1325, in _hf_hub_download_to_cache_dir _raise_on_head_call_error(head_call_error, force_download, local_files_only) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1823, in _raise_on_head_call_error raise head_call_error File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1722, in _get_metadata_or_catch_error metadata = get_hf_file_metadata(url=url, proxies=proxies, timeout=etag_timeout, headers=headers) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_validators.py"", line 114, in _inner_fn return fn(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 1645, in get_hf_file_metadata r = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 372, in _request_wrapper response = _request_wrapper( File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/file_download.py"", line 396, in _request_wrapper hf_raise_for_status(response) File ""/usr/local/lib/python3.10/dist-packages/huggingface_hub/utils/_errors.py"", line 352, in hf_raise_for_status raise RepositoryNotFoundError(message, response) from e huggingface_hub.utils._errors.RepositoryNotFoundError: 404 Client Error. 
(Request ID: Root=1-664aaaa4-4dac6ffc22548609380f1682;3e0c4a0f-09dd-4297-8ab0-3edf4ea2f509) Repository Not Found for url: https://huggingface.co/google/recurrentgemma-7b/resolve/main/config.json. Please make sure you specified the correct `repo_id` and `repo_type`. If you are trying to access a private or gated repo, make sure you are authenticated. The above exception was the direct cause of the following exception: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 37, in __init__ super().__init__(config) File ""/workspace/optimum_benchmark/backends/base.py"", line 62, in __init__ self.pretrained_config = get_transformers_pretrained_config(self.config.model, **self.config.model_kwargs) File ""/workspace/optimum_benchmark/backends/transformers_utils.py"", line 22, in get_transformers_pretrained_config return AutoConfig.from_pretrained(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py"", line 934, in from_pretrained config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 632, in get_config_dict config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py"", line 689, in _get_config_dict resolved_config_file = cached_file( File ""/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py"", line 422, in cached_file raise EnvironmentError( OSError: google/recurrentgemma-7b is not a local folder and is not a valid model identifier listed on 'https://huggingface.co/models' If this is a private repository, make sure to pass a token having permission to this repo either by logging in with `huggingface-cli login` or by passing `token=` ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-410m,EleutherAI/pythia-410m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File 
""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen1.5-32B,Qwen/Qwen1.5-32B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File 
""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-4.5B,facebook/xglm-4.5B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File 
""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,TencentARC/Mistral_Pro_8B_v0.1,TencentARC/Mistral_Pro_8B_v0.1,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File 
""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/xglm-564M,facebook/xglm-564M,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File 
""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-350m,facebook/opt-350m,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-6.7b,facebook/opt-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen-72B,Qwen/Qwen-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for Qwen/Qwen-72B contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/Qwen/Qwen-72B. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,01-ai/Yi-6B,01-ai/Yi-6B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,Qwen/Qwen2-beta-72B,Qwen/Qwen2-beta-72B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 96.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/gpt-neo-2.7B,EleutherAI/gpt-neo-2.7B,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 47, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 66, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 102, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 60, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",False,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 
4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,facebook/opt-66b,facebook/opt-66b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3820, in from_pretrained dispatch_model(model, **device_map_kwargs) File ""/usr/local/lib/python3.10/dist-packages/accelerate/big_modeling.py"", line 488, in dispatch_model model.to(device) File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 2724, in to return super().to(*args, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1173, in to return self._apply(convert) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 779, in _apply module._apply(fn) [Previous line repeated 2 more times] File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 853, in _apply self._buffers[key] = fn(buf) File ""/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py"", line 1159, in convert return t.to( torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 162.00 MiB. 
GPU ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,internlm/internlm2-20b,internlm/internlm2-20b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 609, in resolve_trust_remote_code answer = input( EOFError: EOF when reading a line During handling of the above exception, another exception occurred: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 228, in load_model_with_no_weights self.create_no_weights_model() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 210, in create_no_weights_model meta_model = self.automodel_class.from_config(self.pretrained_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 418, in from_config trust_remote_code = resolve_trust_remote_code( File ""/usr/local/lib/python3.10/dist-packages/transformers/dynamic_module_utils.py"", line 622, in resolve_trust_remote_code raise ValueError( ValueError: The repository for internlm/internlm2-20b contains custom code which must be executed to correctly load the model. You can inspect the repository content at https://hf.co/internlm/internlm2-20b. Please pass the argument `trust_remote_code=True` to allow custom code to be run. 
",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1 4bit-awq-exllama-v2-eager,pytorch,2.3.0+cu121,optimum_benchmark.backends.pytorch.backend.PyTorchBackend,text-generation,transformers,EleutherAI/pythia-6.7b,EleutherAI/pythia-6.7b,cuda,0,42,,,True,True,True,True,,float16,True,False,,eager,,False,,False,forward,awq,4,exllama,False,,inference,optimum_benchmark.scenarios.inference.scenario.InferenceScenario,10,10,10,1,2,256,,True,True,True,64,64,process,optimum_benchmark.launchers.process.launcher.ProcessLauncher,True,kill,spawn, AMD EPYC 7R32,16,66697.29792,Linux,x86_64,Linux-5.10.215-203.850.amzn2.x86_64-x86_64-with-glibc2.35,x86_64,3.10.12,['NVIDIA A10G'],1,24146608128,0.2.1,,4.41.0,,0.30.1,,,,1.19.2,,,,0.11.1,,"Traceback (most recent call last): File ""/workspace/llm_perf/update_llm_perf_cuda_pytorch.py"", line 148, in benchmark_cuda_pytorch benchmark_report = Benchmark.launch(benchmark_config) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 43, in launch report = launcher.launch(worker=cls.run, worker_args=[config]) File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 54, in launch raise ChildProcessError(response[""traceback""]) ChildProcessError: Traceback (most recent call last): File ""/workspace/optimum_benchmark/launchers/process/launcher.py"", line 87, in target report = worker(*worker_args) File ""/workspace/optimum_benchmark/benchmark/base.py"", line 56, in run backend: Backend = backend_factory(backend_config) File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 79, in __init__ self.load_model_with_no_weights() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 255, in load_model_with_no_weights self.load_model_from_pretrained() File ""/workspace/optimum_benchmark/backends/pytorch/backend.py"", line 169, in load_model_from_pretrained self.pretrained_model = self.automodel_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/models/auto/auto_factory.py"", line 563, in from_pretrained return model_class.from_pretrained( File ""/usr/local/lib/python3.10/dist-packages/transformers/modeling_utils.py"", line 3823, in from_pretrained hf_quantizer.postprocess_model(model) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/base.py"", line 195, in postprocess_model return self._process_model_after_weight_loading(model, **kwargs) File ""/usr/local/lib/python3.10/dist-packages/transformers/quantizers/quantizer_awq.py"", line 107, in _process_model_after_weight_loading model = post_init_awq_exllama_modules(model, self.quantization_config.exllama_config) File ""/usr/local/lib/python3.10/dist-packages/transformers/integrations/awq.py"", line 465, in post_init_awq_exllama_modules model = exllamav2_post_init( File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 198, in exllamav2_post_init submodule.post_init(scratch_space=model.scratch_spaces[device]) File ""/usr/local/lib/python3.10/dist-packages/awq/modules/linear/exllamav2.py"", line 81, in post_init self.q_handle = exlv2_ext.make_q_matrix( NameError: name 'exlv2_ext' is not defined ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,2,64,1