author    Eric Dao <eric@erickhangdao.com>  2025-03-10 17:54:31 -0400
committer Eric Dao <eric@erickhangdao.com>  2025-03-10 17:54:31 -0400
commit    ab224e2e6ba65f5a369ec392f99cd8845ad06c98
tree      a1e757e9341863ed52b8ad4c5a1c45933aab9da4
parent    40da1752f2c8639186b72f6838aa415e854d0b1d
completed thesis (HEAD, master)
Diffstat (limited to 'python/openvino/runtime/python_demos/OpenVINO_benchmark_app/main.patch')
-rw-r--r--  python/openvino/runtime/python_demos/OpenVINO_benchmark_app/main.patch  106
1 file changed, 106 insertions(+), 0 deletions(-)
diff --git a/python/openvino/runtime/python_demos/OpenVINO_benchmark_app/main.patch b/python/openvino/runtime/python_demos/OpenVINO_benchmark_app/main.patch
new file mode 100644
index 0000000..99afb40
--- /dev/null
+++ b/python/openvino/runtime/python_demos/OpenVINO_benchmark_app/main.patch
@@ -0,0 +1,106 @@
+--- /nfs/site/disks/swip_dla_1/resources/inference_engine/2023.3.0_with_dev_tools/1/linux64/suse12/python/openvino/tools/benchmark/main.py 2024-03-01 14:01:50.466871000 -0500
++++ main.py 2024-10-29 11:10:06.569928000 -0400
+@@ -7,11 +7,11 @@
+
+ from openvino.runtime import Dimension,properties
+
+-from openvino.tools.benchmark.benchmark import Benchmark
++import benchmark as openvino_benchmark
+ from openvino.tools.benchmark.parameters import parse_args
+ from openvino.tools.benchmark.utils.constants import MULTI_DEVICE_NAME, \
+ CPU_DEVICE_NAME, GPU_DEVICE_NAME, \
+- BLOB_EXTENSION, AUTO_DEVICE_NAME
++ BIN_EXTENSION, AUTO_DEVICE_NAME
+ from openvino.tools.benchmark.utils.inputs_filling import get_input_data
+ from openvino.tools.benchmark.utils.logging import logger
+ from openvino.tools.benchmark.utils.utils import next_step, get_number_iterations, pre_post_processing, \
+@@ -41,13 +41,13 @@
+ if args.report_type == "average_counters" and MULTI_DEVICE_NAME in args.target_device:
+ raise Exception("only detailed_counters report type is supported for MULTI device")
+
+- _, ext = os.path.splitext(args.path_to_model)
+- is_network_compiled = True if ext == BLOB_EXTENSION else False
+- is_precisiton_set = not (args.input_precision == "" and args.output_precision == "" and args.input_output_precision == "")
++ if args.number_infer_requests != 1 and "FPGA" in args.target_device:
++ logger.warning(f"If the target FPGA design uses JTAG to access the CSRs on the FPGA AI Suite IP "\
++ "(e.g. the Agilex 5E Premium Development Kit JTAG Design Example), "\
++                       "then the number of inference requests must be 1.")
+
+- if is_network_compiled and is_precisiton_set:
+- raise Exception("Cannot set precision for a compiled model. " \
+- "Please re-compile your model with required precision.")
++ _, ext = os.path.splitext(args.path_to_model)
++ is_network_compiled = True if ext == BIN_EXTENSION else False
+
+ return args, is_network_compiled
+
+@@ -84,7 +84,7 @@
+ # ------------------------------ 2. Loading OpenVINO Runtime -------------------------------------------
+ next_step(step_id=2)
+
+- benchmark = Benchmark(args.target_device, args.number_infer_requests,
++ benchmark = openvino_benchmark.Benchmark(args.target_device, args.number_infer_requests,
+ args.number_iterations, args.time, args.api_type, args.inference_only)
+
+ if args.extensions:
+@@ -166,8 +166,11 @@
+ supported_properties = benchmark.core.get_property(device, properties.supported_properties())
+ if device not in config.keys():
+ config[device] = {}
+-
+ ## high-level performance modes
++    # The original OV 2022.3 Python API fails with the pc flag, so we skip
++    # setting the performance hint for the HETERO and FPGA devices in our patched version of the Python demos
++ if device in ['HETERO', 'FPGA']:
++ continue
+ set_performance_hint(device)
+
+ if is_flag_set_in_command_line('nireq'):
+@@ -429,16 +432,21 @@
+ next_step()
+
+ start_time = datetime.utcnow()
+- compiled_model = benchmark.core.import_model(args.path_to_model, benchmark.device, device_config)
+- duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
+- logger.info(f"Import model took {duration_ms} ms")
+- if statistics:
+- statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
+- [
+- ('import model time (ms)', duration_ms)
+- ])
+- app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.scale_values, args.mean_values, compiled_model.inputs)
+- batch_size = get_network_batch_size(app_inputs_info)
++ try:
++ with open(args.path_to_model, "rb") as model_stream:
++ model_bytes = model_stream.read()
++ compiled_model = benchmark.core.import_model(model_bytes, device_name)
++ duration_ms = f"{(datetime.utcnow() - start_time).total_seconds() * 1000:.2f}"
++ logger.info(f"Import model took {duration_ms} ms")
++ if statistics:
++ statistics.add_parameters(StatisticsReport.Category.EXECUTION_RESULTS,
++ [
++ ('import model time (ms)', duration_ms)
++ ])
++ app_inputs_info, _ = get_inputs_info(args.shape, args.data_shape, args.layout, args.batch_size, args.scale_values, args.mean_values, compiled_model.inputs)
++ batch_size = get_network_batch_size(app_inputs_info)
++ except Exception as e:
++ raise RuntimeError(f"Cannot open or import compiled model file: {args.path_to_model}. Error: {str(e)}")
+
+ # --------------------- 8. Querying optimal runtime parameters --------------------------------------------------
+ next_step()
+@@ -653,7 +661,7 @@
+ exeDevice = compiled_model.get_property("EXECUTION_DEVICES")
+ logger.info(f'Execution Devices:{exeDevice}')
+ except:
+- pass
++ exeDevice = None
+ logger.info(f'Count: {iteration} iterations')
+ logger.info(f'Duration: {get_duration_in_milliseconds(total_duration_sec):.2f} ms')
+ if MULTI_DEVICE_NAME not in device_name:
+@@ -692,4 +700,4 @@
+ [('error', str(e))]
+ )
+ statistics.dump()
+- sys.exit(1)
++ sys.exit(1)
+\ No newline at end of file
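
Note: the core of this patch replaces import-by-path with an explicit byte-stream
import. A minimal sketch of that flow, assuming an OpenVINO 2023.x Python
environment; the blob path "compiled_model.bin" and the "FPGA" device name are
illustrative placeholders, not values taken from the patch:

    from openvino.runtime import Core

    core = Core()

    # Read the pre-compiled model as raw bytes, as the patched main.py does,
    # rather than handing a file path straight to import_model().
    with open("compiled_model.bin", "rb") as model_stream:
        model_bytes = model_stream.read()

    # Core.import_model() accepts a bytes stream plus a device name and
    # returns a CompiledModel ready to create inference requests.
    compiled_model = core.import_model(model_bytes, "FPGA")
    print([inp.any_name for inp in compiled_model.inputs])

For the JTAG-based FPGA designs the patch warns about, the benchmark app should
also be invoked with a single inference request (-nireq 1).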