diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 5861632d778f9181ab0753c1572b626793450023..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index d4342083fc2ce2300d6774ea1678d406bf35e756..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,80330.22771199999,0.0318,31.4,6.03,33.2 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/main.log deleted file mode 100644 index 3edecf27af09314786b5a3e705189c659a7129ab..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/0/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-10 21:25:46,773][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:25:46,774][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:25:47,065][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:25:47,065][backend][INFO] - Configuring pytorch 
backend -[2023-08-10 21:25:47,065][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:25:47,490][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:25:47,512][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:25:47,514][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-10 21:27:15,232][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:27:15,234][inference][INFO] - Running inference benchmark -[2023-08-10 21:27:23,248][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:27:24,569][memory_tracker][INFO] - Peak memory usage: 80330.22771199999 MB -[2023-08-10 21:27:24,570][inference][INFO] - + Forward pass peak memory: 80330.22771199999 (MB) -[2023-08-10 21:27:24,570][inference][INFO] - + Warming up the forward pass -[2023-08-10 21:27:24,888][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-10 21:27:45,192][inference][INFO] - + Forward pass latency: 3.18e-02 (s) -[2023-08-10 21:27:45,193][inference][INFO] - + Forward pass throughput: 31.40 (samples/s) -[2023-08-10 21:27:45,193][inference][INFO] - + Warming up the generation pass -[2023-08-10 21:27:51,915][inference][INFO] - + Tracking generation latency and throughput -[2023-08-10 21:28:16,026][inference][INFO] - + Generation pass latency: 6.03e+00 (s) -[2023-08-10 21:28:16,029][inference][INFO] - + Generation pass throughput: 33.20 (tokens/s) -[2023-08-10 21:28:16,029][inference][INFO] - Saving inference results -[2023-08-10 21:28:16,037][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 2ea04d844237069ca3077b602664d4264271e8b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 
8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/main.log deleted file mode 100644 index 81fba88812ff5e8369d679cfa0a776bf8e29232d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/1/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-10 21:28:16,493][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:28:16,494][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:28:16,695][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:28:16,695][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:28:16,696][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:28:17,024][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:28:17,063][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:28:17,064][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-10 21:28:17,294][main][ERROR] - Error during benchmarking: CUDA out of memory. Tried to allocate 172.00 MiB (GPU 0; 79.35 GiB total capacity; 18.39 GiB already allocated; 33.12 MiB free; 18.40 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-10 21:28:17,295][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 04dec6b8db7548c6b4fa961455f48821749f95d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - 
accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index b12729fb5cb6176bc26efd2cbeaf031ba92cb8ab..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,82039.406592,0.0331,60.4,6.27,63.8 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/main.log deleted file mode 100644 index a5d1ccf2dcdebbc72fde4495402df756e42a22a5..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/2/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-10 21:28:17,673][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:28:17,674][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:28:17,884][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:28:17,884][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:28:17,885][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:28:18,203][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:28:18,238][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:28:18,239][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-10 21:28:28,915][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:28:28,917][inference][INFO] - Running inference benchmark -[2023-08-10 21:28:36,839][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:28:36,885][memory_tracker][INFO] - Peak memory usage: 82039.406592 MB -[2023-08-10 21:28:36,885][inference][INFO] - + Forward pass peak memory: 82039.406592 (MB) -[2023-08-10 21:28:36,886][inference][INFO] - + Warming up the forward pass -[2023-08-10 21:28:37,754][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-10 21:29:11,298][inference][INFO] - + Forward pass latency: 3.31e-02 (s) -[2023-08-10 21:29:11,299][inference][INFO] - + Forward pass throughput: 60.40 (samples/s) -[2023-08-10 21:29:11,300][inference][INFO] - + Warming up the generation pass -[2023-08-10 21:29:18,363][inference][INFO] - + Tracking generation latency and throughput -[2023-08-10 21:29:43,432][inference][INFO] - + Generation pass latency: 6.27e+00 (s) -[2023-08-10 21:29:43,434][inference][INFO] - + Generation pass throughput: 63.80 (tokens/s) -[2023-08-10 21:29:43,434][inference][INFO] - Saving inference results -[2023-08-10 21:29:43,441][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 
d8c0e85745bca0011a607da301b3cffa5d675b23..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/main.log deleted file mode 100644 index 34949a8df4b4d498f67e560b0aebef24e7b1d60e..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/3/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-10 21:29:43,918][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:29:43,920][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:29:44,130][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:29:44,130][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:29:44,130][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:29:44,449][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:29:44,484][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:29:44,485][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-10 21:29:44,710][main][ERROR] - Error during benchmarking: CUDA out of memory. Tried to allocate 64.00 MiB (GPU 0; 79.35 GiB total capacity; 18.09 GiB already allocated; 17.12 MiB free; 18.10 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-10 21:29:44,710][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/hydra_config.yaml deleted file mode 100644 index 516dc86898e3374932c3a2e91fa07c84830dc8c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/inference_results.csv b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/inference_results.csv deleted file mode 100644 index 90866d82a0a279124da80bc41a3bbf62d5dd5508..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,83182.354432,0.0396,101.0,6.84,117.0 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/main.log deleted file mode 100644 index 2eb2a08dd523787a3dd385413d9724aa8e31f3b3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/4/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-10 21:29:45,091][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:29:45,092][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:29:45,287][pytorch][INFO] - + Infered AutoModel class 
AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:29:45,287][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:29:45,288][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:29:45,601][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:29:45,636][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:29:45,637][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-10 21:29:56,097][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:29:56,099][inference][INFO] - Running inference benchmark -[2023-08-10 21:30:03,868][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:30:03,918][memory_tracker][INFO] - Peak memory usage: 83182.354432 MB -[2023-08-10 21:30:03,918][inference][INFO] - + Forward pass peak memory: 83182.354432 (MB) -[2023-08-10 21:30:03,919][inference][INFO] - + Warming up the forward pass -[2023-08-10 21:30:04,683][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-10 21:30:55,393][inference][INFO] - + Forward pass latency: 3.96e-02 (s) -[2023-08-10 21:30:55,395][inference][INFO] - + Forward pass throughput: 101.00 (samples/s) -[2023-08-10 21:30:55,395][inference][INFO] - + Warming up the generation pass -[2023-08-10 21:31:04,341][inference][INFO] - + Tracking generation latency and throughput -[2023-08-10 21:31:24,854][inference][INFO] - + Generation pass latency: 6.84e+00 (s) -[2023-08-10 21:31:24,856][inference][INFO] - + Generation pass throughput: 117.00 (tokens/s) -[2023-08-10 21:31:24,856][inference][INFO] - Saving inference results -[2023-08-10 21:31:24,862][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/hydra_config.yaml deleted file mode 100644 index e912b18877e10712ce45051f516df118dc44f275..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - 
accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/main.log deleted file mode 100644 index 035c3561c9d33877750913cda46dde89501d35f2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/5/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-10 21:31:25,367][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:31:25,367][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:31:25,553][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:31:25,554][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:31:25,554][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:31:25,870][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:31:25,906][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:31:25,906][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-10 21:31:26,126][main][ERROR] - Error during benchmarking: CUDA out of memory. Tried to allocate 64.00 MiB (GPU 0; 79.35 GiB total capacity; 17.98 GiB already allocated; 7.12 MiB free; 17.99 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-10 21:31:26,126][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/hydra_config.yaml deleted file mode 100644 index 3232cd9a112e4df94192badff7c8696fc3180919..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - 
force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/main.log deleted file mode 100644 index 437aa971138513a99f223b40f7a1bccaf16fc883..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/6/main.log +++ /dev/null @@ -1,13 +0,0 @@ -[2023-08-10 21:31:26,507][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:31:26,509][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:31:26,704][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:31:26,705][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:31:26,705][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:31:27,027][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:31:27,062][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:31:27,063][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-10 21:31:37,679][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:31:37,681][inference][INFO] - Running inference benchmark -[2023-08-10 21:31:45,516][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:31:45,833][main][ERROR] - Error during benchmarking: CUDA out of memory. Tried to allocate 392.00 MiB (GPU 0; 79.35 GiB total capacity; 17.11 GiB already allocated; 101.12 MiB free; 17.90 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. 
See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-10 21:31:45,833][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/hydra_config.yaml deleted file mode 100644 index 1ce61eae1f2afe20c3c608af99709c321053fae2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/main.log deleted file mode 100644 index b60c41964cdc56d27768fa9708ca3b0646ff37b8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/llama_1gpu_inference/7/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-10 21:31:46,234][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:31:46,235][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:31:46,542][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-10 21:31:46,543][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:31:46,543][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:31:46,863][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:31:46,898][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:31:46,899][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-10 21:31:47,007][main][ERROR] - Error during benchmarking: CUDA out of memory. 
Tried to allocate 500.00 MiB (GPU 0; 79.35 GiB total capacity; 17.11 GiB already allocated; 101.12 MiB free; 17.90 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-10 21:31:47,007][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/hydra_config.yaml deleted file mode 100644 index db29d1f4358ce930b18becb29a2f8a23aec68a3d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 10 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_bert_inference -model: hf-internal-testing/tiny-random-bert -device: cpu -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/inference_results.csv deleted file mode 100644 index 84f99d65cd88c4646fd33cbb6486530673d124cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s) -0,459.374592,0.00379,264.0 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/main.log deleted file mode 100644 index 5302553fb154bcb97c554724615cd05213a414cc..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_bert_inference/0/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-10 
21:31:51,183][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:31:51,184][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:31:51,374][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-10 21:31:51,374][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:31:51,374][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:31:51,374][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:31:51,376][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:31:51,377][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-10 21:31:51,961][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:31:51,961][inference][INFO] - Running inference benchmark -[2023-08-10 21:31:52,083][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-10 21:31:52,084][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:31:52,135][inference][INFO] - + Forward pass peak memory: 459.374592 (MB) -[2023-08-10 21:31:52,136][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids'] -[2023-08-10 21:31:52,138][inference][INFO] - + Warming up the forward pass -[2023-08-10 21:31:52,169][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-10 21:32:02,266][inference][INFO] - + Forward pass latency: 3.79e-03 (s) -[2023-08-10 21:32:02,269][inference][INFO] - + Forward pass throughput: 264.00 (samples/s) -[2023-08-10 21:32:02,269][inference][INFO] - Saving inference results -[2023-08-10 21:32:02,285][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/hydra_config.yaml deleted file mode 100644 index 9328a9fb768e48ccae428eb55def593023431c55..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: null - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 10 - input_shapes: - batch_size: 1 - sequence_length: 16 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 100 -experiment_name: pytorch_gpt2_inference -model: hf-internal-testing/tiny-random-gpt2 -device: cpu -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - 
transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/inference_results.csv deleted file mode 100644 index 4150b0436f75eec8f38a160d07d95987604d3d07..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,463.53203199999996,0.0036,278.0,0.491,204.0 diff --git a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/main.log deleted file mode 100644 index 4121cba0bcabdcf7a64065bd661d2c2e0c350eef..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-10_20:06:29_55db70c63de2c07b6ffe36f24c0e7df8f967e935/pytorch_gpt2_inference/0/main.log +++ /dev/null @@ -1,22 +0,0 @@ -[2023-08-10 21:32:06,170][benchmark][INFO] - Configuring inference benchmark -[2023-08-10 21:32:06,172][benchmark][INFO] - + Setting seed(42) -[2023-08-10 21:32:06,352][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2 -[2023-08-10 21:32:06,352][backend][INFO] - Configuring pytorch backend -[2023-08-10 21:32:06,352][backend][INFO] - + Checking initial device isolation -[2023-08-10 21:32:06,352][backend][INFO] - + Checking contineous device isolation -[2023-08-10 21:32:06,354][pytorch][INFO] - + Disabling gradients -[2023-08-10 21:32:06,354][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu -[2023-08-10 21:32:06,993][pytorch][INFO] - + Turning on eval mode -[2023-08-10 21:32:06,994][inference][INFO] - Running inference benchmark -[2023-08-10 21:32:07,195][inference][INFO] - + Tracking forward pass peak memory -[2023-08-10 21:32:07,245][inference][INFO] - + Forward pass peak memory: 463.53203199999996 (MB) -[2023-08-10 21:32:07,246][inference][INFO] - + Warming up the forward pass -[2023-08-10 21:32:07,280][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-10 21:32:17,381][inference][INFO] - + Forward pass latency: 3.60e-03 (s) -[2023-08-10 21:32:17,384][inference][INFO] - + Forward pass throughput: 278.00 (samples/s) -[2023-08-10 21:32:17,385][inference][INFO] - + Warming up the generation pass -[2023-08-10 21:32:17,892][inference][INFO] - + Tracking generation latency and throughput -[2023-08-10 21:32:28,205][inference][INFO] - + Generation pass latency: 4.91e-01 (s) -[2023-08-10 21:32:28,206][inference][INFO] - + Generation pass throughput: 204.00 (tokens/s) -[2023-08-10 21:32:28,206][inference][INFO] - Saving inference results -[2023-08-10 21:32:28,221][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 
5861632d778f9181ab0753c1572b626793450023..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 3fc9acadac9bf16b43311e19438b7fcf383baded..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,39455.686656,0.0328,30.5,6.53,30.6 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/main.log deleted file mode 100644 index e5f69b2665184eb4fbe208a242c041d03e21cfe1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/0/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:27:19,729][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:27:19,730][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:27:20,016][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:27:20,017][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:27:20,017][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:27:20,152][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:27:20,166][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:27:20,167][pytorch][INFO] - + 
Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-11 15:28:26,614][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:28:26,615][inference][INFO] - Running inference benchmark -[2023-08-11 15:28:34,701][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:28:36,016][memory_tracker][INFO] - Peak memory usage: 39455.686656 MB -[2023-08-11 15:28:36,017][inference][INFO] - + Forward pass peak memory: 39455.686656 (MB) -[2023-08-11 15:28:36,017][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:28:36,336][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:28:56,621][inference][INFO] - + Forward pass latency: 3.28e-02 (s) -[2023-08-11 15:28:56,622][inference][INFO] - + Forward pass throughput: 30.50 (samples/s) -[2023-08-11 15:28:56,623][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:29:03,962][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:29:30,082][inference][INFO] - + Generation pass latency: 6.53e+00 (s) -[2023-08-11 15:29:30,086][inference][INFO] - + Generation pass throughput: 30.60 (tokens/s) -[2023-08-11 15:29:30,086][inference][INFO] - Saving inference results -[2023-08-11 15:29:30,095][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 2ea04d844237069ca3077b602664d4264271e8b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/inference_results.csv 
b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index 9e0ece733f968af954abc1b466b30aeeabb8d476..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,54148.333567999995,0.0643,15.6,5.71,35.0 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/main.log deleted file mode 100644 index 80585a57732b004ca884f2a5f1e33a8c533799c6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/1/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:29:30,829][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:29:30,830][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:29:31,018][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:29:31,018][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:29:31,019][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:29:31,239][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:29:31,265][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:29:31,266][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-11 15:29:48,052][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:29:48,053][inference][INFO] - Running inference benchmark -[2023-08-11 15:29:55,792][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:29:55,888][memory_tracker][INFO] - Peak memory usage: 54148.333567999995 MB -[2023-08-11 15:29:55,888][inference][INFO] - + Forward pass peak memory: 54148.333567999995 (MB) -[2023-08-11 15:29:55,888][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:29:58,007][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:31:04,195][inference][INFO] - + Forward pass latency: 6.43e-02 (s) -[2023-08-11 15:31:04,196][inference][INFO] - + Forward pass throughput: 15.60 (samples/s) -[2023-08-11 15:31:04,196][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:31:09,917][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:31:32,752][inference][INFO] - + Generation pass latency: 5.71e+00 (s) -[2023-08-11 15:31:32,755][inference][INFO] - + Generation pass throughput: 35.00 (tokens/s) -[2023-08-11 15:31:32,756][inference][INFO] - Saving inference results -[2023-08-11 15:31:32,762][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 04dec6b8db7548c6b4fa961455f48821749f95d3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: 
optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index 91d0fff8ee21538515bb3e8cfc13a8cc01968181..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,40212.758528,0.0324,61.7,6.11,65.5 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/main.log deleted file mode 100644 index f20b3e8c4b37fb775ae95e3e5a83f68d546d4bd1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/2/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:31:33,257][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:31:33,258][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:31:33,446][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:31:33,446][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:31:33,446][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:31:33,666][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:31:33,689][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:31:33,690][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-11 15:31:44,143][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:31:44,144][inference][INFO] - Running inference benchmark -[2023-08-11 15:31:51,874][inference][INFO] - + Tracking forward pass peak memory 
-[2023-08-11 15:31:51,914][memory_tracker][INFO] - Peak memory usage: 40212.758528 MB -[2023-08-11 15:31:51,915][inference][INFO] - + Forward pass peak memory: 40212.758528 (MB) -[2023-08-11 15:31:51,915][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:31:52,395][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:32:22,140][inference][INFO] - + Forward pass latency: 3.24e-02 (s) -[2023-08-11 15:32:22,141][inference][INFO] - + Forward pass throughput: 61.70 (samples/s) -[2023-08-11 15:32:22,141][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:32:29,336][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:32:53,786][inference][INFO] - + Generation pass latency: 6.11e+00 (s) -[2023-08-11 15:32:53,790][inference][INFO] - + Generation pass throughput: 65.50 (tokens/s) -[2023-08-11 15:32:53,790][inference][INFO] - Saving inference results -[2023-08-11 15:32:53,796][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index d8c0e85745bca0011a607da301b3cffa5d675b23..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index 246d49736a2de7e066700c95b818bb4c7d445141..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 
@@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,30780.817408,0.115,17.4,8.88,45.0 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/main.log deleted file mode 100644 index 68e2b44accb848c4fd1e29aa055494da645e69fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/3/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:32:54,284][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:32:54,285][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:32:54,493][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:32:54,493][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:32:54,493][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:32:54,594][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:32:54,617][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:32:54,618][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-11 15:33:11,911][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:33:11,913][inference][INFO] - Running inference benchmark -[2023-08-11 15:33:19,823][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:33:19,949][memory_tracker][INFO] - Peak memory usage: 30780.817408 MB -[2023-08-11 15:33:19,949][inference][INFO] - + Forward pass peak memory: 30780.817408 (MB) -[2023-08-11 15:33:19,950][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:33:23,777][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:34:34,698][inference][INFO] - + Forward pass latency: 1.15e-01 (s) -[2023-08-11 15:34:34,698][inference][INFO] - + Forward pass throughput: 17.40 (samples/s) -[2023-08-11 15:34:34,699][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:34:41,894][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:35:08,550][inference][INFO] - + Generation pass latency: 8.88e+00 (s) -[2023-08-11 15:35:08,554][inference][INFO] - + Generation pass throughput: 45.00 (tokens/s) -[2023-08-11 15:35:08,554][inference][INFO] - Saving inference results -[2023-08-11 15:35:08,560][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/hydra_config.yaml deleted file mode 100644 index 516dc86898e3374932c3a2e91fa07c84830dc8c3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - 
backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/inference_results.csv deleted file mode 100644 index 671fbcf61864d490c8f158c5585934a4a6b17815..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,40833.51552,0.0311,129.0,6.12,131.0 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/main.log deleted file mode 100644 index 4c7099b33c8a312c9e0c426e253f815b024904bb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/4/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:35:09,107][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:35:09,107][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:35:09,292][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:35:09,292][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:35:09,292][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:35:09,535][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:35:09,559][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:35:09,560][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-11 15:35:19,934][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:35:19,936][inference][INFO] - Running inference benchmark -[2023-08-11 15:35:27,605][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:35:27,650][memory_tracker][INFO] - Peak memory usage: 40833.51552 MB -[2023-08-11 15:35:27,650][inference][INFO] - + Forward pass peak memory: 40833.51552 (MB) -[2023-08-11 15:35:27,651][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:35:28,415][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:36:16,906][inference][INFO] - + Forward pass latency: 
3.11e-02 (s) -[2023-08-11 15:36:16,907][inference][INFO] - + Forward pass throughput: 129.00 (samples/s) -[2023-08-11 15:36:16,907][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:36:23,989][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:36:48,487][inference][INFO] - + Generation pass latency: 6.12e+00 (s) -[2023-08-11 15:36:48,490][inference][INFO] - + Generation pass throughput: 131.00 (tokens/s) -[2023-08-11 15:36:48,490][inference][INFO] - Saving inference results -[2023-08-11 15:36:48,497][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/hydra_config.yaml deleted file mode 100644 index e912b18877e10712ce45051f516df118dc44f275..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/inference_results.csv deleted file mode 100644 index ee6bff6b3b913fd660cad82f76a31c30d6b50141..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,31481.266175999997,0.215,18.6,7.71,104.0 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/main.log deleted file mode 
100644 index d8eb487195ce8fd14cfd81d582586e4cf6be8d34..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/5/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:36:48,997][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:36:48,998][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:36:49,182][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:36:49,182][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:36:49,182][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:36:49,286][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:36:49,312][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:36:49,313][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-11 15:37:06,400][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:37:06,401][inference][INFO] - Running inference benchmark -[2023-08-11 15:37:14,292][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:37:14,505][memory_tracker][INFO] - Peak memory usage: 31481.266175999997 MB -[2023-08-11 15:37:14,505][inference][INFO] - + Forward pass peak memory: 31481.266175999997 (MB) -[2023-08-11 15:37:14,510][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:37:21,438][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:38:35,969][inference][INFO] - + Forward pass latency: 2.15e-01 (s) -[2023-08-11 15:38:35,971][inference][INFO] - + Forward pass throughput: 18.60 (samples/s) -[2023-08-11 15:38:35,972][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:38:43,813][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:39:06,951][inference][INFO] - + Generation pass latency: 7.71e+00 (s) -[2023-08-11 15:39:06,955][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s) -[2023-08-11 15:39:06,955][inference][INFO] - Saving inference results -[2023-08-11 15:39:06,961][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/hydra_config.yaml deleted file mode 100644 index 3232cd9a112e4df94192badff7c8696fc3180919..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 4 - width: 64 - 
height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/inference_results.csv b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/inference_results.csv deleted file mode 100644 index 2c62fe0de2ab7f438979a14e7fa2b11a728f64b6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,43849.220096,0.0974,164.0,6.59,486.0 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/main.log deleted file mode 100644 index cec6775885b74e75c3200d599a4e25ea289602b2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/6/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-11 15:39:07,533][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:39:07,534][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:39:07,717][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:39:07,718][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:39:07,718][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:39:07,961][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:39:07,985][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:39:07,986][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-11 15:39:18,666][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:39:18,668][inference][INFO] - Running inference benchmark -[2023-08-11 15:39:26,443][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:39:26,554][memory_tracker][INFO] - Peak memory usage: 43849.220096 MB -[2023-08-11 15:39:26,554][inference][INFO] - + Forward pass peak memory: 43849.220096 (MB) -[2023-08-11 15:39:26,554][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:39:29,188][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:40:23,336][inference][INFO] - + Forward pass latency: 9.74e-02 (s) -[2023-08-11 15:40:23,336][inference][INFO] - + Forward pass throughput: 164.00 (samples/s) -[2023-08-11 15:40:23,337][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:40:30,015][inference][INFO] - + Tracking generation latency and throughput -[2023-08-11 15:40:56,380][inference][INFO] - + Generation pass latency: 6.59e+00 (s) -[2023-08-11 
15:40:56,381][inference][INFO] - + Generation pass throughput: 486.00 (tokens/s) -[2023-08-11 15:40:56,381][inference][INFO] - Saving inference results -[2023-08-11 15:40:56,386][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/hydra_config.yaml b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/hydra_config.yaml deleted file mode 100644 index 1ce61eae1f2afe20c3c608af99709c321053fae2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 4 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: togethercomputer/LLaMA-2-7B-32K -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/main.log b/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/main.log deleted file mode 100644 index e5e176e752845d5df8d4905f0d6f25a071a323ac..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-11_14:26:45_5e5fa0d88c293e6d5be2517b4f45680ba3bb5df2/llama_1gpu_inference/7/main.log +++ /dev/null @@ -1,20 +0,0 @@ -[2023-08-11 15:40:57,035][benchmark][INFO] - Configuring inference benchmark -[2023-08-11 15:40:57,035][benchmark][INFO] - + Setting seed(42) -[2023-08-11 15:40:57,232][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-11 15:40:57,233][backend][INFO] - Configuring pytorch backend -[2023-08-11 15:40:57,233][backend][INFO] - + Checking initial device isolation -[2023-08-11 15:40:57,482][backend][INFO] - + Checking contineous device isolation -[2023-08-11 15:40:57,505][pytorch][INFO] - + Disabling gradients -[2023-08-11 15:40:57,506][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-11 15:41:15,119][pytorch][INFO] - + Turning on eval mode -[2023-08-11 15:41:15,121][inference][INFO] - Running 
inference benchmark -[2023-08-11 15:41:23,009][inference][INFO] - + Tracking forward pass peak memory -[2023-08-11 15:41:23,733][memory_tracker][INFO] - Peak memory usage: 35824.467968 MB -[2023-08-11 15:41:23,734][inference][INFO] - + Forward pass peak memory: 35824.467968 (MB) -[2023-08-11 15:41:23,748][inference][INFO] - + Warming up the forward pass -[2023-08-11 15:41:54,685][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-11 15:43:11,935][inference][INFO] - + Forward pass latency: 1.19e+00 (s) -[2023-08-11 15:43:11,936][inference][INFO] - + Forward pass throughput: 13.40 (samples/s) -[2023-08-11 15:43:11,936][inference][INFO] - + Warming up the generation pass -[2023-08-11 15:43:34,896][main][ERROR] - Error during benchmarking: CUDA out of memory. Tried to allocate 96.00 MiB (GPU 0; 79.35 GiB total capacity; 37.73 GiB already allocated; 8.12 MiB free; 39.24 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF -[2023-08-11 15:43:34,897][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index 8b3622f1330fb23912e2d01e499b452c641e6df6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 16 - use_ddp: false - ddp_config: null -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/main.log deleted file mode 100644 index 029656605e8d3a25d75b87832830db10e154d726..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/main.log +++ /dev/null @@ -1,13 +0,0 @@ -[2023-08-14 11:28:09,272][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:28:09,273][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:28:10,495][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:28:10,495][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:28:10,496][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:28:10,579][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:28:10,591][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:28:14,795][training][INFO] - Running training benchmark -[2023-08-14 11:28:14,834][pytorch][INFO] - Running training benchmark -[2023-08-14 11:28:14,835][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:28:15,034][pytorch][INFO] - Training model -[2023-08-14 11:28:37,416][training][INFO] - Saving training results -[2023-08-14 11:28:37,422][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index 045d312827d4485aa16b5c1ef044b24bac1be6be..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,271.371,22.1099 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index 92f250dca01c68a3ed1846d80567eccec42762a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 16 - use_ddp: false - ddp_config: null -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - 
transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/main.log deleted file mode 100644 index fcc91c49cd4e88f5050f4740b6369e698d71a808..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/main.log +++ /dev/null @@ -1,13 +0,0 @@ -[2023-08-14 11:28:38,073][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:28:38,074][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:28:38,522][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:28:38,522][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:28:38,522][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:28:38,637][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:28:38,673][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:28:39,016][training][INFO] - Running training benchmark -[2023-08-14 11:28:39,049][pytorch][INFO] - Running training benchmark -[2023-08-14 11:28:39,049][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:28:39,250][pytorch][INFO] - Training model -[2023-08-14 11:29:55,686][training][INFO] - Saving training results -[2023-08-14 11:29:55,690][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 4355200c525ae0667ecb42ebcf2ba511f3e3dcd7..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,78.79,76.152 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/hydra_config.yaml deleted file mode 100644 index 5c596c64b60bc52f14b72fdff3bc729f988afe9d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/hydra_config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark 
- seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 32 - use_ddp: false - ddp_config: null -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/main.log deleted file mode 100644 index ca0a366967377e4b8dd8eb29fab6ea4fc7a4c7cf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/main.log +++ /dev/null @@ -1,13 +0,0 @@ -[2023-08-14 11:29:56,326][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:29:56,327][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:29:56,774][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:29:56,774][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:29:56,774][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:29:56,890][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:29:56,925][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:29:57,257][training][INFO] - Running training benchmark -[2023-08-14 11:29:57,291][pytorch][INFO] - Running training benchmark -[2023-08-14 11:29:57,292][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:29:57,521][pytorch][INFO] - Training model -[2023-08-14 11:30:14,182][training][INFO] - Saving training results -[2023-08-14 11:30:14,187][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/training_results.csv deleted file mode 100644 index d1993532b24d71ff14c728b83b02e0480797d28b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,368.784,16.2697 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/hydra_config.yaml deleted file mode 100644 index a4aa8155f537e6cfe258ebf2d64cda2e02ff40e6..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/hydra_config.yaml +++ /dev/null @@ -1,64 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - 
intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 32 - use_ddp: false - ddp_config: null -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/main.log deleted file mode 100644 index 7e152a858057c50d0ea98c63de64832424ab0566..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/main.log +++ /dev/null @@ -1,13 +0,0 @@ -[2023-08-14 11:30:14,852][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:30:14,853][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:30:15,300][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:30:15,300][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:30:15,300][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:30:15,410][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:30:15,446][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:30:15,782][training][INFO] - Running training benchmark -[2023-08-14 11:30:15,833][pytorch][INFO] - Running training benchmark -[2023-08-14 11:30:15,833][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:30:16,057][pytorch][INFO] - Training model -[2023-08-14 11:31:26,923][training][INFO] - Saving training results -[2023-08-14 11:31:26,927][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/training_results.csv deleted file mode 100644 index 187a0d78a43bfea46f355420050f8f47ddd01dc1..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_1gpu_training/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,85.043,70.5522 diff --git 
a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/hydra_config.yaml deleted file mode 100644 index fc7efcac011e9103c291c63dc7e03627746aef77..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/hydra_config.yaml +++ /dev/null @@ -1,76 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 16 - use_ddp: true - ddp_config: - min_nodes: 1 - max_nodes: 1 - nproc_per_node: 2 - run_id: none - role: default - rdzv_endpoint: 127.0.0.1:29500 - rdzv_backend: static - rdzv_configs: - timeout: 900 - rank: 0 - max_restarts: 0 - monitor_interval: 5 -experiment_name: bert_2gpu_training_ddp -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/main.log deleted file mode 100644 index c047f0321d5a700d95ffd980cf2e1db6cc82dcf8..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:31:34,366][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:31:34,367][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:31:34,816][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:31:34,816][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:31:34,816][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:31:34,901][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:31:34,913][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:31:36,485][training][INFO] - Running training benchmark -[2023-08-14 11:31:36,522][pytorch][INFO] - Running training benchmark -[2023-08-14 11:31:36,524][pytorch][INFO] - PyTorch 
DDP launch config: LaunchConfig(min_nodes=1, max_nodes=1, nproc_per_node=2, run_id='none', role='default', rdzv_endpoint='127.0.0.1:29500', rdzv_backend='static', rdzv_configs={'timeout': 900, 'rank': 0}, rdzv_timeout=-1, max_restarts=0, monitor_interval=5, start_method='spawn', log_dir=None, redirects=, tee=, metrics_cfg={}, local_addr=None) -[2023-08-14 11:31:36,526][torch.distributed.elastic.rendezvous.static_tcp_rendezvous][INFO] - Creating TCPStore as the c10d::Store implementation -[2023-08-14 11:31:39,973][training-ddp-worker][INFO] - RANK: 0 -[2023-08-14 11:31:39,973][training-ddp-worker][INFO] - WORLD_SIZE: 2 -[2023-08-14 11:31:39,973][training-ddp-worker][INFO] - MASTER_ADDR: 127.0.0.1 -[2023-08-14 11:31:39,973][training-ddp-worker][INFO] - MASTER_PORT: 29500 -[2023-08-14 11:31:39,973][training-ddp-worker][INFO] - TORCHELASTIC_MAX_RESTARTS: 0 -[2023-08-14 11:31:39,974][training-ddp-worker][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:31:39,975][torch.distributed.distributed_c10d][INFO] - Added key: store_based_barrier_key:1 to store for rank: 0 -[2023-08-14 11:31:43,421][torch.distributed.distributed_c10d][INFO] - Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 2 nodes. -[2023-08-14 11:31:43,594][training-ddp-worker][INFO] - Training model -[2023-08-14 11:31:59,125][torch.distributed.elastic.multiprocessing.api][WARNING] - Closing process 1464 via signal SIGTERM -[2023-08-14 11:31:59,198][training][INFO] - Saving training results -[2023-08-14 11:31:59,207][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/training_results.csv deleted file mode 100644 index 5547b9062ff17252e141bfc1b1af6b5611206935..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,478.59,12.5368 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/hydra_config.yaml deleted file mode 100644 index 669d567a280da32aaae92aa97841beeb55ad45bf..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/hydra_config.yaml +++ /dev/null @@ -1,76 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: 
false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 16 - use_ddp: true - ddp_config: - min_nodes: 1 - max_nodes: 1 - nproc_per_node: 2 - run_id: none - role: default - rdzv_endpoint: 127.0.0.1:29500 - rdzv_backend: static - rdzv_configs: - timeout: 900 - rank: 0 - max_restarts: 0 - monitor_interval: 5 -experiment_name: bert_2gpu_training_ddp -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/main.log deleted file mode 100644 index 8d9eb4e3c6b10c1df13c18a4c3f676b974eedfc2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:32:00,048][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:32:00,049][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:32:00,498][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:32:00,498][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:32:00,498][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:32:00,631][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:32:00,658][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:32:00,962][training][INFO] - Running training benchmark -[2023-08-14 11:32:01,004][pytorch][INFO] - Running training benchmark -[2023-08-14 11:32:01,006][pytorch][INFO] - PyTorch DDP launch config: LaunchConfig(min_nodes=1, max_nodes=1, nproc_per_node=2, run_id='none', role='default', rdzv_endpoint='127.0.0.1:29500', rdzv_backend='static', rdzv_configs={'timeout': 900, 'rank': 0}, rdzv_timeout=-1, max_restarts=0, monitor_interval=5, start_method='spawn', log_dir=None, redirects=, tee=, metrics_cfg={}, local_addr=None) -[2023-08-14 11:32:01,006][torch.distributed.elastic.rendezvous.static_tcp_rendezvous][INFO] - Creating TCPStore as the c10d::Store implementation -[2023-08-14 11:32:04,535][training-ddp-worker][INFO] - RANK: 0 -[2023-08-14 11:32:04,535][training-ddp-worker][INFO] - WORLD_SIZE: 2 -[2023-08-14 11:32:04,535][training-ddp-worker][INFO] - MASTER_ADDR: 127.0.0.1 -[2023-08-14 11:32:04,535][training-ddp-worker][INFO] - MASTER_PORT: 29500 -[2023-08-14 11:32:04,536][training-ddp-worker][INFO] - TORCHELASTIC_MAX_RESTARTS: 0 -[2023-08-14 11:32:04,536][training-ddp-worker][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:32:04,537][torch.distributed.distributed_c10d][INFO] - Added key: store_based_barrier_key:1 to store for rank: 0 -[2023-08-14 11:32:07,991][torch.distributed.distributed_c10d][INFO] - Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 2 nodes. 
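The LaunchConfig printed in these DDP logs is the config of torch's elastic launcher (torch.distributed.launcher.api); the benchmark spawns its two training workers through it rather than via torchrun. A rough sketch of an equivalent launch, mirroring the logged values; train_worker here is a hypothetical stand-in for the real training entrypoint:

```python
# Sketch of the elastic launch described by the logged LaunchConfig:
# 1 node, 2 workers, static rendezvous on 127.0.0.1:29500, spawn start method.
import os
from torch.distributed.launcher.api import LaunchConfig, elastic_launch

def train_worker():
    # Each worker sees RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT,
    # matching the training-ddp-worker log lines above.
    print(os.environ["RANK"], "of", os.environ["WORLD_SIZE"])

if __name__ == "__main__":
    config = LaunchConfig(
        min_nodes=1, max_nodes=1, nproc_per_node=2,
        run_id="none", rdzv_endpoint="127.0.0.1:29500",
        rdzv_backend="static", rdzv_configs={"rank": 0},
        max_restarts=0, monitor_interval=5, start_method="spawn",
    )
    elastic_launch(config, train_worker)()
```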
-[2023-08-14 11:32:08,161][training-ddp-worker][INFO] - Training model -[2023-08-14 11:32:53,724][torch.distributed.elastic.multiprocessing.api][WARNING] - Closing process 1819 via signal SIGTERM -[2023-08-14 11:32:53,810][training][INFO] - Saving training results -[2023-08-14 11:32:53,815][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/training_results.csv deleted file mode 100644 index c4a7e19b5fdafa8ce5ea51fc397d19ba1319e9ca..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,147.854,40.5806 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/hydra_config.yaml deleted file mode 100644 index b8d8816efa11c8c5f446b6a3d2b6033af595fcb4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/hydra_config.yaml +++ /dev/null @@ -1,76 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 32 - use_ddp: true - ddp_config: - min_nodes: 1 - max_nodes: 1 - nproc_per_node: 2 - run_id: none - role: default - rdzv_endpoint: 127.0.0.1:29500 - rdzv_backend: static - rdzv_configs: - timeout: 900 - rank: 0 - max_restarts: 0 - monitor_interval: 5 -experiment_name: bert_2gpu_training_ddp -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/main.log deleted file mode 100644 index 5b8d74c084e28422f9f7a9fdd93dd6c921774f9d..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:32:54,522][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:32:54,524][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:32:55,012][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:32:55,012][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:32:55,012][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:32:55,133][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:32:55,160][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:32:55,494][training][INFO] - Running training benchmark -[2023-08-14 11:32:55,527][pytorch][INFO] - Running training benchmark -[2023-08-14 11:32:55,529][pytorch][INFO] - PyTorch DDP launch config: LaunchConfig(min_nodes=1, max_nodes=1, nproc_per_node=2, run_id='none', role='default', rdzv_endpoint='127.0.0.1:29500', rdzv_backend='static', rdzv_configs={'timeout': 900, 'rank': 0}, rdzv_timeout=-1, max_restarts=0, monitor_interval=5, start_method='spawn', log_dir=None, redirects=, tee=, metrics_cfg={}, local_addr=None) -[2023-08-14 11:32:55,530][torch.distributed.elastic.rendezvous.static_tcp_rendezvous][INFO] - Creating TCPStore as the c10d::Store implementation -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - RANK: 0 -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - WORLD_SIZE: 2 -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - MASTER_ADDR: 127.0.0.1 -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - MASTER_PORT: 29500 -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - TORCHELASTIC_MAX_RESTARTS: 0 -[2023-08-14 11:32:59,162][training-ddp-worker][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:32:59,163][torch.distributed.distributed_c10d][INFO] - Added key: store_based_barrier_key:1 to store for rank: 0 -[2023-08-14 11:33:02,626][torch.distributed.distributed_c10d][INFO] - Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 2 nodes. 
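Every training_results.csv in this commit satisfies training_throughput * training_runtime ≈ 6000 samples (e.g. 478.59 * 12.5368 and 147.854 * 40.5806 above), i.e. the Trainer's default 3 epochs over the 2000-example synthetic dataset from dataset_shapes. A quick consistency check, with an illustrative path:

```python
# Verify throughput * runtime == num_epochs * dataset_size for a training run.
# Illustrative path; assumes the HF Trainer default of num_train_epochs=3
# and dataset_size=2000 from the run's dataset_shapes.
import pandas as pd

df = pd.read_csv("bert_2gpu_training_ddp/1/training_results.csv", index_col=0)
epochs, dataset_size = 3, 2000

samples = df["training_throughput"].iloc[0] * df["training_runtime"].iloc[0]
assert round(samples) == epochs * dataset_size  # 147.854 * 40.5806 ~ 6000
```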
-[2023-08-14 11:33:02,860][training-ddp-worker][INFO] - Training model -[2023-08-14 11:33:18,329][torch.distributed.elastic.multiprocessing.api][WARNING] - Closing process 2374 via signal SIGTERM -[2023-08-14 11:33:18,369][training][INFO] - Saving training results -[2023-08-14 11:33:18,375][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/training_results.csv deleted file mode 100644 index 7aec3300906f0630420457b84e39ba00182b201d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/2/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,624.715,9.6044 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/hydra_config.yaml deleted file mode 100644 index 4e3d7e78def2c5b59cf61bfca5949d7e50c97604..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/hydra_config.yaml +++ /dev/null @@ -1,76 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: false - eval_mode: false -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.TrainingBenchmark - seed: 42 - dataset_shapes: - dataset_size: 2000 - sequence_length: 273 - num_choices: 4 - training_arguments: - output_dir: ./trainer_output - skip_memory_metrics: false - use_cpu: false - do_train: true - do_eval: false - do_predict: false - per_device_train_batch_size: 32 - use_ddp: true - ddp_config: - min_nodes: 1 - max_nodes: 1 - nproc_per_node: 2 - run_id: none - role: default - rdzv_endpoint: 127.0.0.1:29500 - rdzv_backend: static - rdzv_configs: - timeout: 900 - rank: 0 - max_restarts: 0 - monitor_interval: 5 -experiment_name: bert_2gpu_training_ddp -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/main.log deleted file mode 100644 index 6a8d132f3d1fa424658ef3fcfce1f4db4f75b2c0..0000000000000000000000000000000000000000 --- 
a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:33:19,178][benchmark][INFO] - Configuring training benchmark -[2023-08-14 11:33:19,180][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:33:19,644][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-08-14 11:33:19,644][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:33:19,644][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:33:19,787][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:33:19,813][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:33:20,108][training][INFO] - Running training benchmark -[2023-08-14 11:33:20,159][pytorch][INFO] - Running training benchmark -[2023-08-14 11:33:20,161][pytorch][INFO] - PyTorch DDP launch config: LaunchConfig(min_nodes=1, max_nodes=1, nproc_per_node=2, run_id='none', role='default', rdzv_endpoint='127.0.0.1:29500', rdzv_backend='static', rdzv_configs={'timeout': 900, 'rank': 0}, rdzv_timeout=-1, max_restarts=0, monitor_interval=5, start_method='spawn', log_dir=None, redirects=, tee=, metrics_cfg={}, local_addr=None) -[2023-08-14 11:33:20,161][torch.distributed.elastic.rendezvous.static_tcp_rendezvous][INFO] - Creating TCPStore as the c10d::Store implementation -[2023-08-14 11:33:23,689][training-ddp-worker][INFO] - RANK: 0 -[2023-08-14 11:33:23,689][training-ddp-worker][INFO] - WORLD_SIZE: 2 -[2023-08-14 11:33:23,689][training-ddp-worker][INFO] - MASTER_ADDR: 127.0.0.1 -[2023-08-14 11:33:23,690][training-ddp-worker][INFO] - MASTER_PORT: 29500 -[2023-08-14 11:33:23,690][training-ddp-worker][INFO] - TORCHELASTIC_MAX_RESTARTS: 0 -[2023-08-14 11:33:23,690][training-ddp-worker][INFO] - + Wrapping model with transformers.Trainer -[2023-08-14 11:33:23,691][torch.distributed.distributed_c10d][INFO] - Added key: store_based_barrier_key:1 to store for rank: 0 -[2023-08-14 11:33:27,398][torch.distributed.distributed_c10d][INFO] - Rank 0: Completed store-based barrier for key:store_based_barrier_key:1 with 2 nodes. 
-[2023-08-14 11:33:27,567][training-ddp-worker][INFO] - Training model -[2023-08-14 11:34:08,148][torch.distributed.elastic.multiprocessing.api][WARNING] - Closing process 3032 via signal SIGTERM -[2023-08-14 11:34:08,223][training][INFO] - Saving training results -[2023-08-14 11:34:08,228][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/training_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/training_results.csv deleted file mode 100644 index e1e8684f29e7682101c6c2882df84e05255b552d..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/bert_2gpu_training_ddp/3/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,training_throughput,training_runtime -0,160.376,37.4121 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 865c27f5876d8a97fb493e5e8c5a39edd726efa4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 289df382dea101fa29386ead47b4f4b14c44c221..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ 
-,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,15651.96288,0.0313,31.9,5.93,33.7 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/main.log deleted file mode 100644 index ac839ea9612d0b2a0efe7aa42120ffd0c3fbe2bd..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/0/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:34:58,672][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:34:58,673][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:35:00,320][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:35:00,321][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:35:00,321][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:35:00,398][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:35:00,411][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:35:00,413][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:36:13,158][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:36:13,159][inference][INFO] - Running inference benchmark -[2023-08-14 11:36:21,147][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:36:22,287][memory_tracker][INFO] - Peak memory usage: 15651.96288 MB -[2023-08-14 11:36:22,287][inference][INFO] - + Forward pass peak memory: 15651.96288 (MB) -[2023-08-14 11:36:22,288][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:36:22,600][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:36:42,944][inference][INFO] - + Forward pass latency: 3.13e-02 (s) -[2023-08-14 11:36:42,945][inference][INFO] - + Forward pass throughput: 31.90 (samples/s) -[2023-08-14 11:36:42,946][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:36:49,578][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:37:13,296][inference][INFO] - + Generation pass latency: 5.93e+00 (s) -[2023-08-14 11:37:13,299][inference][INFO] - + Generation pass throughput: 33.70 (tokens/s) -[2023-08-14 11:37:13,299][inference][INFO] - Saving inference results -[2023-08-14 11:37:13,316][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 41fd704de723e7f0556e3ae3b1b43aa1073a6d9b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: 
inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index ca24e20f80eca2df665c87dd0de64b64dbed2541..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,29317.005311999998,0.0642,15.6,5.63,35.5 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/main.log deleted file mode 100644 index 02f571e2560b86255c4546bd4f7f27e9a3fbab12..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/1/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:37:14,070][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:37:14,071][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:37:14,552][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:37:14,552][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:37:14,552][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:37:14,671][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:37:14,702][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:37:14,703][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:37:31,876][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:37:31,877][inference][INFO] - Running inference benchmark -[2023-08-14 11:37:40,001][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:37:40,075][memory_tracker][INFO] - Peak memory usage: 29317.005311999998 MB -[2023-08-14 11:37:40,076][inference][INFO] - + Forward pass peak memory: 29317.005311999998 (MB) -[2023-08-14 11:37:40,076][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:37:42,193][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:38:48,464][inference][INFO] - + Forward pass latency: 
6.42e-02 (s) -[2023-08-14 11:38:48,465][inference][INFO] - + Forward pass throughput: 15.60 (samples/s) -[2023-08-14 11:38:48,466][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:38:54,112][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:39:16,625][inference][INFO] - + Generation pass latency: 5.63e+00 (s) -[2023-08-14 11:39:16,625][inference][INFO] - + Generation pass throughput: 35.50 (tokens/s) -[2023-08-14 11:39:16,625][inference][INFO] - Saving inference results -[2023-08-14 11:39:16,633][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 34dbc3501579e34830656f2576328a6190d4ecce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index e27e8c8cd273215bb469ccc6dd7af1f3191e2caa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,15859.580928,0.0313,63.9,6.11,65.5 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/main.log deleted file mode 100644 index 
8721fbcd65bfaceaab2d4429d3a60cbb5632dc16..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/2/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:39:17,287][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:39:17,288][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:39:17,767][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:39:17,767][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:39:17,767][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:39:17,887][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:39:17,918][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:39:17,919][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:39:28,682][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:39:28,684][inference][INFO] - Running inference benchmark -[2023-08-14 11:39:36,802][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:39:36,848][memory_tracker][INFO] - Peak memory usage: 15859.580928 MB -[2023-08-14 11:39:36,848][inference][INFO] - + Forward pass peak memory: 15859.580928 (MB) -[2023-08-14 11:39:36,849][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:39:37,327][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:40:08,074][inference][INFO] - + Forward pass latency: 3.13e-02 (s) -[2023-08-14 11:40:08,075][inference][INFO] - + Forward pass throughput: 63.90 (samples/s) -[2023-08-14 11:40:08,075][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:40:14,977][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:40:39,420][inference][INFO] - + Generation pass latency: 6.11e+00 (s) -[2023-08-14 11:40:39,422][inference][INFO] - + Generation pass throughput: 65.50 (tokens/s) -[2023-08-14 11:40:39,423][inference][INFO] - Saving inference results -[2023-08-14 11:40:39,430][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index ccf921238d8342e49c205b391d2376d6d6ee8a80..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 
- point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index 264f4af20341ade4ac1607048ad23f54355c9084..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,29776.2816,0.109,18.3,7.0,57.1 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/main.log deleted file mode 100644 index 33ba23f6b75412eac485cfac5360a8d1611e1e0c..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/3/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:40:40,001][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:40:40,001][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:40:40,478][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:40:40,478][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:40:40,479][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:40:40,582][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:40:40,614][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:40:40,615][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:40:57,715][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:40:57,716][inference][INFO] - Running inference benchmark -[2023-08-14 11:41:05,860][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:41:05,985][memory_tracker][INFO] - Peak memory usage: 29776.2816 MB -[2023-08-14 11:41:05,985][inference][INFO] - + Forward pass peak memory: 29776.2816 (MB) -[2023-08-14 11:41:05,985][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:41:09,817][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:42:20,388][inference][INFO] - + Forward pass latency: 1.09e-01 (s) -[2023-08-14 11:42:20,388][inference][INFO] - + Forward pass throughput: 18.30 (samples/s) -[2023-08-14 11:42:20,389][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:42:27,396][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:42:48,398][inference][INFO] - + Generation pass latency: 7.00e+00 (s) -[2023-08-14 11:42:48,400][inference][INFO] - + Generation pass throughput: 57.10 
(tokens/s) -[2023-08-14 11:42:48,400][inference][INFO] - Saving inference results -[2023-08-14 11:42:48,407][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/hydra_config.yaml deleted file mode 100644 index d505cb4f972224f7486590c3ee048a02445cc311..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/inference_results.csv deleted file mode 100644 index 9e1d71f18a675e80388d34f219ccd4ef82cef1b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,16360.800255999999,0.0317,126.0,6.16,130.0 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/main.log deleted file mode 100644 index f934d69da963bb3b18d0a836f81a59baf6d7a7b4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/4/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:42:49,007][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:42:49,008][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:42:49,568][pytorch][INFO] - + Infered AutoModel 
class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:42:49,568][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:42:49,568][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:42:49,697][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:42:49,727][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:42:49,729][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:43:00,486][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:43:00,488][inference][INFO] - Running inference benchmark -[2023-08-14 11:43:08,635][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:43:08,695][memory_tracker][INFO] - Peak memory usage: 16360.800255999999 MB -[2023-08-14 11:43:08,695][inference][INFO] - + Forward pass peak memory: 16360.800255999999 (MB) -[2023-08-14 11:43:08,695][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:43:09,424][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:43:56,954][inference][INFO] - + Forward pass latency: 3.17e-02 (s) -[2023-08-14 11:43:56,956][inference][INFO] - + Forward pass throughput: 126.00 (samples/s) -[2023-08-14 11:43:56,956][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:44:04,105][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:44:28,757][inference][INFO] - + Generation pass latency: 6.16e+00 (s) -[2023-08-14 11:44:28,760][inference][INFO] - + Generation pass throughput: 130.00 (tokens/s) -[2023-08-14 11:44:28,761][inference][INFO] - Saving inference results -[2023-08-14 11:44:28,768][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/hydra_config.yaml deleted file mode 100644 index 56d08c1e2b4d64f28486d3247a9f8dfbc5a6e5aa..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 4 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 
4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/inference_results.csv deleted file mode 100644 index ec7cdd1ef8daa8686c23809cd551ef1f21723627..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,30472.536064,0.187,21.4,7.69,104.0 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/main.log deleted file mode 100644 index a09435f37e325074ad8d742c280a6efc83a4bd7b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/5/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:44:29,460][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:44:29,461][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:44:29,927][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:44:29,928][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:44:29,928][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:44:30,059][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:44:30,089][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:44:30,090][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:44:47,130][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:44:47,132][inference][INFO] - Running inference benchmark -[2023-08-14 11:44:55,380][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:44:55,593][memory_tracker][INFO] - Peak memory usage: 30472.536064 MB -[2023-08-14 11:44:55,594][inference][INFO] - + Forward pass peak memory: 30472.536064 (MB) -[2023-08-14 11:44:55,598][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:45:02,518][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:46:17,188][inference][INFO] - + Forward pass latency: 1.87e-01 (s) -[2023-08-14 11:46:17,191][inference][INFO] - + Forward pass throughput: 21.40 (samples/s) -[2023-08-14 11:46:17,192][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:46:24,933][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:46:48,011][inference][INFO] - + Generation pass latency: 7.69e+00 (s) -[2023-08-14 11:46:48,011][inference][INFO] - + Generation pass throughput: 104.00 (tokens/s) -[2023-08-14 11:46:48,012][inference][INFO] - Saving inference results -[2023-08-14 11:46:48,018][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/hydra_config.yaml deleted file mode 
100644 index 51fb4b81a4e0e88989baa6cf79dce45529f9a0fb..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/inference_results.csv deleted file mode 100644 index ff0165f5348a1b3dcff75a42fcf94c2949249dce..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,18843.828224,0.0978,164.0,6.37,502.0 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/main.log deleted file mode 100644 index 209824254c8c9010af32ddc1332ecdbaa0bb3500..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/6/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:46:48,818][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:46:48,819][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:46:49,293][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:46:49,293][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:46:49,293][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:46:49,421][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:46:49,453][pytorch][INFO] - + Disabling gradients -[2023-08-14 
11:46:49,454][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:47:00,251][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:47:00,253][inference][INFO] - Running inference benchmark -[2023-08-14 11:47:08,396][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:47:08,510][memory_tracker][INFO] - Peak memory usage: 18843.828224 MB -[2023-08-14 11:47:08,510][inference][INFO] - + Forward pass peak memory: 18843.828224 (MB) -[2023-08-14 11:47:08,511][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:47:11,150][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:48:05,191][inference][INFO] - + Forward pass latency: 9.78e-02 (s) -[2023-08-14 11:48:05,192][inference][INFO] - + Forward pass throughput: 164.00 (samples/s) -[2023-08-14 11:48:05,193][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:48:11,732][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:48:37,204][inference][INFO] - + Generation pass latency: 6.37e+00 (s) -[2023-08-14 11:48:37,208][inference][INFO] - + Generation pass throughput: 502.00 (tokens/s) -[2023-08-14 11:48:37,208][inference][INFO] - Saving inference results -[2023-08-14 11:48:37,214][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/hydra_config.yaml deleted file mode 100644 index 5b80c59ee075669eb7347c4aebfcd96c673c61e2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: null - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_1gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/inference_results.csv 
b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/inference_results.csv deleted file mode 100644 index b9bf0f428fc62e284a4dfd714f7538e66d5d0a4b..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -,forward.peak_memory(MB),forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0,34803.154944,0.684,23.4,13.0,246.0 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/main.log deleted file mode 100644 index e1812eaed48b4bd3934a2e32f7299a8948768cc4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_1gpu_inference/7/main.log +++ /dev/null @@ -1,23 +0,0 @@ -[2023-08-14 11:48:37,978][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:48:37,979][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:48:38,459][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:48:38,459][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:48:38,459][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:48:38,573][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:48:38,603][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:48:38,604][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:48:55,661][pytorch][INFO] - + Turning on eval mode -[2023-08-14 11:48:55,662][inference][INFO] - Running inference benchmark -[2023-08-14 11:49:03,815][inference][INFO] - + Tracking forward pass peak memory -[2023-08-14 11:49:04,516][memory_tracker][INFO] - Peak memory usage: 34803.154944 MB -[2023-08-14 11:49:04,516][inference][INFO] - + Forward pass peak memory: 34803.154944 (MB) -[2023-08-14 11:49:04,532][inference][INFO] - + Warming up the forward pass -[2023-08-14 11:49:29,897][inference][INFO] - + Tracking forward pass latency and throughput -[2023-08-14 11:50:47,898][inference][INFO] - + Forward pass latency: 6.84e-01 (s) -[2023-08-14 11:50:47,899][inference][INFO] - + Forward pass throughput: 23.40 (samples/s) -[2023-08-14 11:50:47,900][inference][INFO] - + Warming up the generation pass -[2023-08-14 11:51:01,209][inference][INFO] - + Tracking generation latency and throughput -[2023-08-14 11:51:27,115][inference][INFO] - + Generation pass latency: 1.30e+01 (s) -[2023-08-14 11:51:27,116][inference][INFO] - + Generation pass throughput: 246.00 (tokens/s) -[2023-08-14 11:51:27,117][inference][INFO] - Saving inference results -[2023-08-14 11:51:27,122][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/hydra_config.yaml deleted file mode 100644 index e78b9a11a325062e8961dac59bf83839b3aed417..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: 
optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - device_map: auto - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_2gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/main.log deleted file mode 100644 index 9466c21322079ea7c814dc0e7db14fc7d8ff0ef2..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/0/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-14 11:51:33,252][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:51:33,253][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:51:33,787][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:51:33,787][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:51:33,788][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:51:33,876][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:51:33,886][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:51:33,887][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:51:34,002][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold' -[2023-08-14 11:51:34,002][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/hydra_config.yaml deleted file mode 100644 index b3920f0a7e2801204bd10786d5746fda7f7e8447..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - 
initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: auto - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_2gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/main.log deleted file mode 100644 index 4777dda12dc05428d3c199d6e20ba72d6e5e2b69..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/1/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-14 11:51:34,606][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:51:34,608][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:51:35,107][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:51:35,107][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:51:35,107][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:51:35,193][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:51:35,204][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:51:35,205][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda -[2023-08-14 11:51:35,303][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold' -[2023-08-14 11:51:35,303][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 20da9d2d01901b26120f571ae523e635d4ebf203..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float16 - 
device_map: auto - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - torch_compile_config: - fullgraph: false - dynamic: false - backend: inductor - mode: null - options: null - disable: false - amp_autocast: false - amp_dtype: null - disable_grad: true - eval_mode: true -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark - seed: 42 - memory: true - warmup_runs: 10 - benchmark_duration: 20 - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 1 - width: 64 - height: 64 - num_channels: 3 - point_batch_size: 3 - nb_points_per_image: 2 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 -experiment_name: llama_2gpu_inference -model: daryl149/llama-2-7b-chat-hf -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.11.0 - transformers_version: 4.32.0.dev0 - accelerate_version: 0.21.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz' - cpu_count: 96 - cpu_ram_mb: 1204539.797504 diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/main.log deleted file mode 100644 index 0934f1a25407743bca876f868f967a45ec557729..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/2/main.log +++ /dev/null @@ -1,10 +0,0 @@ -[2023-08-14 11:51:35,782][benchmark][INFO] - Configuring inference benchmark -[2023-08-14 11:51:35,783][benchmark][INFO] - + Setting seed(42) -[2023-08-14 11:51:36,280][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-08-14 11:51:36,280][backend][INFO] - Configuring pytorch backend -[2023-08-14 11:51:36,280][backend][INFO] - + Checking initial device isolation -[2023-08-14 11:51:36,373][backend][INFO] - + Checking contineous device isolation -[2023-08-14 11:51:36,383][pytorch][INFO] - + Disabling gradients -[2023-08-14 11:51:36,384][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda -[2023-08-14 11:51:36,481][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold' -[2023-08-14 11:51:36,481][backend][INFO] - Cleaning backend diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 700749c6c0fe4d161634d0f6be3aa1656645a1f4..0000000000000000000000000000000000000000 --- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,66 +0,0 @@ -backend: - name: pytorch - version: 2.0.1+cu117 - _target_: optimum_benchmark.backends.pytorch.PyTorchBackend - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - torch_dtype: float32 - device_map: auto - load_in_8bit: false - load_in_4bit: false - bettertransformer: false - torch_compile: false - 
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 2
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_2gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/main.log
deleted file mode 100644
index 951cec7ea9d1bacacacb542f226e90e70704e7d0..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/3/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:36,946][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:36,947][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:37,453][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:37,453][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:37,454][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:37,542][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:37,552][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:37,553][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:37,652][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:37,652][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/hydra_config.yaml
deleted file mode 100644
index 561983df456fcf0f81da77fcb0e1c6729c4c5424..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 4
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_2gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/main.log
deleted file mode 100644
index 29373c91824bbf04c1b584f71ee58ae7457ea0af..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/4/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:38,342][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:38,342][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:39,029][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:39,030][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:39,030][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:39,101][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:39,113][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:39,114][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:39,209][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:39,209][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/hydra_config.yaml
deleted file mode 100644
index b3a45488a56ce90c3d3d40f8d472923752301522..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 4
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_2gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/main.log
deleted file mode 100644
index bb50c6c5084aea31c357a2e24f2976d16b23572d..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/5/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:39,691][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:39,692][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:40,226][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:40,226][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:40,226][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:40,312][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:40,324][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:40,325][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:40,421][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:40,421][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/hydra_config.yaml
deleted file mode 100644
index 9ae2204baef6843dad324fc480830a98e29edca0..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_2gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/main.log
deleted file mode 100644
index 63b65080bead210213ab8f329a7bc976584974c3..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/6/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:41,026][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:41,028][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:41,515][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:41,515][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:41,516][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:41,604][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:41,614][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:41,615][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:41,714][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:41,714][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/hydra_config.yaml
deleted file mode 100644
index 874b4abf7bc16ae75246a8d35e90f6215afb7fa6..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_2gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/main.log
deleted file mode 100644
index 026dbae9560f7871b90540c1b62a82aa67968a46..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_2gpu_inference/7/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:42,174][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:42,175][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:42,715][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:42,716][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:42,716][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:42,809][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:42,819][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:42,820][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:42,913][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:42,913][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/hydra_config.yaml
deleted file mode 100644
index 56a122b69c5fbc5aff30ad86b0e55a1c940b6a7b..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 1
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/main.log
deleted file mode 100644
index 5fdb2a9115b8c9d71e21344958ab6c4b86c51b04..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/0/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:48,544][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:48,545][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:49,033][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:49,033][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:49,034][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:49,112][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:49,121][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:49,123][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:49,235][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:49,236][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/hydra_config.yaml
deleted file mode 100644
index baa40d48956b9aae21ce94f5498b42b027d50471..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 1
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/main.log
deleted file mode 100644
index 0d43cf8a1cb20b56b4c5a58349679ec9b6ad3740..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/1/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:49,696][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:49,698][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:50,189][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:50,189][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:50,189][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:50,286][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:50,295][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:50,296][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:50,393][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:50,393][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/hydra_config.yaml
deleted file mode 100644
index e3733683c9532d62b01598ef85501aa378ab18c8..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 2
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/main.log
deleted file mode 100644
index 42d37b9e2be8198572a3c65de6b9c1496e7b3632..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/2/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:50,868][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:50,869][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:51,363][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:51,364][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:51,364][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:51,449][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:51,458][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:51,459][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:51,556][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:51,556][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/hydra_config.yaml
deleted file mode 100644
index 771b3f099a8b28a7a070394a0a2fa7542f890dfa..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 2
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/main.log
deleted file mode 100644
index 285e9297d8c78ae800f0a0f4add45ab7ed11b88a..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/3/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:52,025][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:52,026][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:52,520][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:52,520][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:52,520][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:52,624][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:52,633][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:52,634][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:52,731][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:52,731][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/hydra_config.yaml
deleted file mode 100644
index 11e7ccc23b8a7b6418c41dde0c7c9c07e3de286e..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 4
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/main.log
deleted file mode 100644
index 80f8ccc13b328e3cf871c4b7c630f5eaad9abcd1..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/4/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:53,187][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:53,188][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:53,675][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:53,676][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:53,676][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:53,767][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:53,777][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:53,778][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:53,870][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:53,871][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/hydra_config.yaml
deleted file mode 100644
index ae177b93addb53ccc13a66872cb22c16ada54939..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 4
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/main.log
deleted file mode 100644
index 92f7234c350dc8329348d1b0925c4c14296c3a9d..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/5/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:54,335][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:54,337][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:54,834][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:54,834][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:54,835][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:54,927][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:54,939][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:54,940][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:55,032][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:55,032][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/hydra_config.yaml
deleted file mode 100644
index e013637a229fb2b84782f03c0e4f39e8fd992e97..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float16
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/main.log
deleted file mode 100644
index 8f524a3d3f67dd1e70fcd346d336ff1a6112e9aa..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/6/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:55,498][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:55,499][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:55,995][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:55,996][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:55,996][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:56,080][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:56,090][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:56,091][pytorch][INFO] - + Loading pretrained model weights in dtype: float16 on device: cuda
-[2023-08-14 11:51:56,183][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:56,183][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/hydra_config.yaml
deleted file mode 100644
index 3e52e0a53ed2baf61c0aecf24c3e448391178eed..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: float32
-  device_map: auto
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: true
-  warmup_runs: 10
-  benchmark_duration: 20
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 200
-experiment_name: llama_4gpu_inference
-model: daryl149/llama-2-7b-chat-hf
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/main.log
deleted file mode 100644
index 729c6117a2b6883fcae7c0b75bfd414b1ac9a8bb..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/llama_4gpu_inference/7/main.log
+++ /dev/null
@@ -1,10 +0,0 @@
-[2023-08-14 11:51:56,651][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:51:56,653][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:51:57,148][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-08-14 11:51:57,148][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:51:57,149][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:51:57,250][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:51:57,260][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:51:57,261][pytorch][INFO] - + Loading pretrained model weights in dtype: float32 on device: cuda
-[2023-08-14 11:51:57,359][main][ERROR] - Error during benchmarking: LlamaForCausalLM.__init__() got an unexpected keyword argument 'llm_int8_threshold'
-[2023-08-14 11:51:57,359][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/hydra_config.yaml
deleted file mode 100644
index 10e2eade8f9bf6e626ce05c33d6fd4c6b8aa40cf..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: null
-  device_map: null
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: false
-  warmup_runs: 10
-  benchmark_duration: 10
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 100
-experiment_name: pytorch_bert_inference
-model: hf-internal-testing/tiny-random-bert
-device: cpu
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/inference_results.csv
deleted file mode 100644
index 6dcf5407133dc2e525604c2b58b12ddfc80bf050..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-,forward.latency(s),forward.throughput(samples/s)
-0,0.00334,299.0
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/main.log
deleted file mode 100644
index 549d4f33e0b89b78e54dd45a92a2b99c902b82d6..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_bert_inference/0/main.log
+++ /dev/null
@@ -1,17 +0,0 @@
-[2023-08-14 11:34:13,343][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:34:13,344][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:34:14,534][pytorch][INFO] - + Infered AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-08-14 11:34:14,535][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:34:14,535][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:34:14,535][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:34:14,535][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:34:14,535][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-14 11:34:15,081][pytorch][INFO] - + Turning on eval mode
-[2023-08-14 11:34:15,081][inference][INFO] - Running inference benchmark
-[2023-08-14 11:34:15,201][dummy_input][INFO] - Generating dummy input for: ['input_ids', 'attention_mask', 'token_type_ids']
-[2023-08-14 11:34:15,202][inference][INFO] - + Warming up the forward pass
-[2023-08-14 11:34:15,255][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-14 11:34:25,363][inference][INFO] - + Forward pass latency: 3.34e-03 (s)
-[2023-08-14 11:34:25,365][inference][INFO] - + Forward pass throughput: 299.00 (samples/s)
-[2023-08-14 11:34:25,365][inference][INFO] - Saving inference results
-[2023-08-14 11:34:25,376][backend][INFO] - Cleaning backend
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/hydra_config.yaml b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/hydra_config.yaml
deleted file mode 100644
index 24963fc96366f31a1779516cc9afecac851e3933..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/hydra_config.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.0.1+cu117
-  _target_: optimum_benchmark.backends.pytorch.PyTorchBackend
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  torch_dtype: null
-  device_map: null
-  load_in_8bit: false
-  load_in_4bit: false
-  bettertransformer: false
-  torch_compile: false
-  torch_compile_config:
-    fullgraph: false
-    dynamic: false
-    backend: inductor
-    mode: null
-    options: null
-    disable: false
-  amp_autocast: false
-  amp_dtype: null
-  disable_grad: true
-  eval_mode: true
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.InferenceBenchmark
-  seed: 42
-  memory: false
-  warmup_runs: 10
-  benchmark_duration: 10
-  input_shapes:
-    batch_size: 1
-    sequence_length: 16
-    num_choices: 1
-    width: 64
-    height: 64
-    num_channels: 3
-    point_batch_size: 3
-    nb_points_per_image: 2
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-    new_tokens: 100
-experiment_name: pytorch_gpt2_inference
-model: hf-internal-testing/tiny-random-gpt2
-device: cpu
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.11.0
-  transformers_version: 4.32.0.dev0
-  accelerate_version: 0.21.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' Intel(R) Xeon(R) Platinum 8275CL CPU @ 3.00GHz'
-  cpu_count: 96
-  cpu_ram_mb: 1204539.797504
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/inference_results.csv b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/inference_results.csv
deleted file mode 100644
index 45596909cd0db6cb2af9b8f6113d2ff8f6dee6d5..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-,forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0,0.00368,272.0,0.52,192.0
diff --git a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/main.log b/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/main.log
deleted file mode 100644
index 697413679e4bbfc2c62caf318de8fed3e79b9d03..0000000000000000000000000000000000000000
--- a/raw_results/2023-08-13_17:47:04_2b22cde71e549e194a3b7046b7595030382382e9/pytorch_gpt2_inference/0/main.log
+++ /dev/null
@@ -1,20 +0,0 @@
-[2023-08-14 11:34:30,059][benchmark][INFO] - Configuring inference benchmark
-[2023-08-14 11:34:30,060][benchmark][INFO] - + Setting seed(42)
-[2023-08-14 11:34:31,526][pytorch][INFO] - + Infered AutoModel class AutoModelForCausalLM for task text-generation and model_type gpt2
-[2023-08-14 11:34:31,527][backend][INFO] - Configuring pytorch backend
-[2023-08-14 11:34:31,527][backend][INFO] - + Checking initial device isolation
-[2023-08-14 11:34:31,527][backend][INFO] - + Checking contineous device isolation
-[2023-08-14 11:34:31,527][pytorch][INFO] - + Disabling gradients
-[2023-08-14 11:34:31,527][pytorch][INFO] - + Loading pretrained model weights in dtype: None on device: cpu
-[2023-08-14 11:34:32,169][pytorch][INFO] - + Turning on eval mode
-[2023-08-14 11:34:32,170][inference][INFO] - Running inference benchmark
-[2023-08-14 11:34:32,361][inference][INFO] - + Warming up the forward pass
-[2023-08-14 11:34:32,396][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-08-14 11:34:42,492][inference][INFO] - + Forward pass latency: 3.68e-03 (s)
-[2023-08-14 11:34:42,494][inference][INFO] - + Forward pass throughput: 272.00 (samples/s)
-[2023-08-14 11:34:42,495][inference][INFO] - + Warming up the generation pass
-[2023-08-14 11:34:43,074][inference][INFO] - + Tracking generation latency and throughput
-[2023-08-14 11:34:53,477][inference][INFO] - + Generation pass latency: 5.20e-01 (s)
-[2023-08-14 11:34:53,478][inference][INFO] - + Generation pass throughput: 192.00 (tokens/s)
-[2023-08-14 11:34:53,478][inference][INFO] - Saving inference results
-[2023-08-14 11:34:53,489][backend][INFO] - Cleaning backend