Diffstat (limited to 'python/openvino/runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.patch')
-rw-r--r--  python/openvino/runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.patch  |  116
1 file changed, 116 insertions, 0 deletions
diff --git a/python/openvino/runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.patch b/python/openvino/runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.patch
new file mode 100644
index 0000000..28ae75c
--- /dev/null
+++ b/python/openvino/runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.patch
@@ -0,0 +1,116 @@
+--- /nfs/site/disks/swip_dla_1/resources/inference_engine/2023.3.0_with_dev_tools/1/linux64/suse12/samples/python/classification_sample_async/classification_sample_async.py 2024-03-01 14:01:24.460131000 -0500
++++ ./runtime/python_demos/OpenVINO_classification_sample_async/classification_sample_async.py 2024-04-16 10:33:28.810439000 -0400
+@@ -1,15 +1,18 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+-# Copyright (C) 2018-2023 Intel Corporation
++# Copyright (C) 2018-2022 Intel Corporation
+ # SPDX-License-Identifier: Apache-2.0
+
+ import argparse
+ import logging as log
++import os
+ import sys
++import warnings
+
+ import cv2
+ import numpy as np
+-import openvino as ov
++from openvino.preprocess import PrePostProcessor
++from openvino.runtime import AsyncInferQueue, Core, InferRequest, Layout, Type
+
+
+ def parse_args() -> argparse.Namespace:
+@@ -24,14 +27,14 @@
+ args.add_argument('-i', '--input', type=str, required=True, nargs='+',
+ help='Required. Path to an image file(s).')
+ args.add_argument('-d', '--device', type=str, default='CPU',
+- help='Optional. Specify the target device to infer on; CPU, GPU, GNA or HETERO: '
++ help='Optional. Specify the target device to infer on; CPU, GPU, MYRIAD, HDDL or HETERO '
+ 'is acceptable. The sample will look for a suitable plugin for device specified. '
+ 'Default value is CPU.')
+ # fmt: on
+ return parser.parse_args()
+
+
+-def completion_callback(infer_request: ov.InferRequest, image_path: str) -> None:
++def completion_callback(infer_request: InferRequest, image_path: str) -> None:
+ predictions = next(iter(infer_request.results.values()))
+
+ # Change a shape of a numpy.ndarray with results to get another one with one dimension
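The callback above flattens each result blob to one dimension; in the full sample it then reports the highest-scoring class IDs. A minimal standalone sketch of that post-processing, using a random stand-in array in place of a real infer_request.results value:

    import numpy as np

    # Stand-in for one value of infer_request.results: (N, num_classes)
    predictions = np.random.rand(1, 1000).astype(np.float32)

    # Collapse to one dimension, then take the ten top class IDs
    probs = predictions.reshape(-1)
    top_10 = np.argsort(probs)[-10:][::-1]

    for class_id in top_10:
        print(f'classid {class_id}: probability {probs[class_id]:.7f}')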
+@@ -60,7 +63,17 @@
+
+ # --------------------------- Step 1. Initialize OpenVINO Runtime Core ------------------------------------------------
+ log.info('Creating OpenVINO Runtime Core')
+- core = ov.Core()
++ dla_plugins = os.environ.get('DLA_PLUGINS', default='')
++ if dla_plugins == '':
++ # Backward compatibility with the old DLA_PLUGINS_XML_FILE
++ warnings.warn("DLA_PLUGINS_XML_FILE option is deprecated as of 2024.1; please use DLA_PLUGINS")
++ dla_plugins = os.environ.get('DLA_PLUGINS_XML_FILE', default='')
++ core = Core(dla_plugins)
++ if "FPGA" in args.device:
++ dla_arch_file = os.environ.get('DLA_ARCH_FILE')
++ if dla_arch_file is None:
++ raise Exception("To use FPGA, you need to specify the path to an arch_file!")
++ core.set_property(device_name="FPGA", properties={"ARCH_PATH": dla_arch_file})
+
+ # --------------------------- Step 2. Read a model --------------------------------------------------------------------
+ log.info(f'Reading the model: {args.model}')
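The hunk above is the core of the DLA integration: the plugins XML named by DLA_PLUGINS is handed to the Core constructor, and FPGA targets additionally require DLA_ARCH_FILE. A minimal sketch of the same flow outside the sample, assuming the openvino.runtime API used by this patch and that the two environment variables are set as documented:

    import os
    import warnings

    from openvino.runtime import Core

    # Resolve the plugins XML, falling back to the deprecated variable
    dla_plugins = os.environ.get('DLA_PLUGINS', '')
    if dla_plugins == '':
        warnings.warn('DLA_PLUGINS_XML_FILE option is deprecated as of '
                      '2024.1; please use DLA_PLUGINS')
        dla_plugins = os.environ.get('DLA_PLUGINS_XML_FILE', '')

    # Core() accepts an optional path to a plugins XML file
    core = Core(dla_plugins)

    # FPGA targets also need an architecture file
    dla_arch_file = os.environ.get('DLA_ARCH_FILE')
    if dla_arch_file is not None:
        core.set_property(device_name='FPGA',
                          properties={'ARCH_PATH': dla_arch_file})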
+@@ -80,29 +93,38 @@
+ images = [cv2.imread(image_path) for image_path in args.input]
+
+ # Resize images to model input dims
+- _, _, h, w = model.input().shape
++ # Assuming square inputs (h == w), infer the layout
++ # from the dimensions: unpack as NHWC (TensorFlow-style)
++ # first, then correct below if it was really NCHW
++ _, h, w, c = model.input().shape
++
++ if h != w:  # dims disagree, so the shape was really NCHW
++ c = h      # the second dim was channels
++ h = w      # w keeps its old value, valid since h == w
++
+ resized_images = [cv2.resize(image, (w, h)) for image in images]
+
+ # Add N dimension
+ input_tensors = [np.expand_dims(image, 0) for image in resized_images]
+
++ # Transpose from NHWC to NCHW
++ input_tensors = [np.transpose(tensor, (0, 3, 1, 2)) for tensor in input_tensors]
++
+ # --------------------------- Step 4. Apply preprocessing -------------------------------------------------------------
+- ppp = ov.preprocess.PrePostProcessor(model)
++ ppp = PrePostProcessor(model)
+
+ # 1) Set input tensor information:
+ # - input() provides information about a single model input
+- # - precision of tensor is supposed to be 'u8'
+- # - layout of data is 'NHWC'
+- ppp.input().tensor() \
+- .set_element_type(ov.Type.u8) \
+- .set_layout(ov.Layout('NHWC')) # noqa: N400
++ # - layout of data is 'NCHW'
++ ppp.input().tensor().set_layout(Layout('NCHW')) # noqa: N400
+
+ # 2) Here we suppose model has 'NCHW' layout for input
+- ppp.input().model().set_layout(ov.Layout('NCHW'))
++ # DLA --> We let the demo select the layout based on the model
++ # ppp.input().model().set_layout(Layout('NCHW'))
+
+ # 3) Set output tensor information:
+ # - precision of tensor is supposed to be 'f32'
+- ppp.output().tensor().set_element_type(ov.Type.f32)
++ ppp.output().tensor().set_element_type(Type.f32)
+
+ # 4) Apply preprocessing modifying the original 'model'
+ model = ppp.build()
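The layout handling in this hunk can be checked in isolation. A minimal sketch of the same dimension-based layout inference and NHWC-to-NCHW transpose, under the patch's own assumption of square inputs; the 224x224x3 shapes are illustrative stand-ins:

    import numpy as np

    def resolve_hw(shape):
        """Recover (h, w) from a 4-D input shape, assuming h == w.

        Unpack as NHWC first; if the two middle dims disagree, the
        shape must really be NCHW and the second dim was channels.
        """
        _, h, w, c = shape
        if h != w:       # we actually unpacked (_, c, h, w)
            c, h = h, w  # w keeps its old value, valid since h == w
        return h, w

    assert resolve_hw((1, 224, 224, 3)) == (224, 224)  # NHWC model
    assert resolve_hw((1, 3, 224, 224)) == (224, 224)  # NCHW model

    # Build a batch the way cv2 delivers images (HWC), then go to NCHW
    h, w = resolve_hw((1, 3, 224, 224))
    image = np.zeros((h, w, 3), dtype=np.uint8)    # stand-in for imread+resize
    tensor = np.expand_dims(image, 0)              # add N -> (1, h, w, 3)
    tensor = np.transpose(tensor, (0, 3, 1, 2))    # NHWC -> NCHW -> (1, 3, h, w)

Declaring the tensor layout as NCHW through PrePostProcessor, as the hunk does, then matches what the transpose produced.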
+@@ -114,7 +136,7 @@
+ # --------------------------- Step 6. Create infer request queue ------------------------------------------------------
+ log.info('Starting inference in asynchronous mode')
+ # create async queue with optimal number of infer requests
+- infer_queue = ov.AsyncInferQueue(compiled_model)
++ infer_queue = AsyncInferQueue(compiled_model)
+ infer_queue.set_callback(completion_callback)
+
+ # --------------------------- Step 7. Do inference --------------------------------------------------------------------
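The async wiring at the end follows the stock OpenVINO pattern: build the queue from a compiled model (it sizes itself to the device's optimal request count), register a per-request callback, start one request per input, and wait for all of them. A minimal end-to-end sketch under the same openvino.runtime API; the 'model.xml' path and the zero-filled tensors are hypothetical placeholders:

    import numpy as np
    from openvino.runtime import AsyncInferQueue, Core, InferRequest

    core = Core()
    compiled_model = core.compile_model('model.xml', 'CPU')  # placeholder IR

    def completion_callback(infer_request: InferRequest, userdata: str) -> None:
        # Runs as each request completes, possibly on a worker thread
        predictions = next(iter(infer_request.results.values()))
        print(userdata, predictions.shape)

    infer_queue = AsyncInferQueue(compiled_model)
    infer_queue.set_callback(completion_callback)

    # One stand-in NCHW tensor per "image"
    input_tensors = [np.zeros((1, 3, 224, 224), dtype=np.float32)
                     for _ in range(2)]
    for i, tensor in enumerate(input_tensors):
        infer_queue.start_async({0: tensor}, userdata=f'image_{i}')

    infer_queue.wait_all()  # block until every callback has fired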