path: root/python/openvino/runtime/common/utils/include/samples
Diffstat (limited to 'python/openvino/runtime/common/utils/include/samples')
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/args_helper.hpp             112
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/classification_results.h    205
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/common.hpp                  1429
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/console_progress.hpp        107
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/csv_dumper.hpp              98
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/latency_metrics.hpp         42
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/ocv_common.hpp              92
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/os/windows/w_dirent.h       176
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/slog.hpp                    102
-rw-r--r--  python/openvino/runtime/common/utils/include/samples/vpu/vpu_tools_common.hpp    28
10 files changed, 2391 insertions, 0 deletions
diff --git a/python/openvino/runtime/common/utils/include/samples/args_helper.hpp b/python/openvino/runtime/common/utils/include/samples/args_helper.hpp
new file mode 100644
index 0000000..6626140
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/args_helper.hpp
@@ -0,0 +1,112 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief a header file with common samples functionality
+ * @file args_helper.hpp
+ */
+
+#pragma once
+
+// clang-format off
+#include <string>
+#include <vector>
+
+#include "openvino/openvino.hpp"
+
+#include "samples/slog.hpp"
+// clang-format on
+
+/**
+ * @brief This function checks an input argument and, if it points to an existing file or folder,
+ *        adds the verified file path(s) to the output vector
+ * @param files vector of verified input files, updated by this function
+ * @param arg path to a file or folder to be checked for existence
+ */
+void readInputFilesArguments(std::vector<std::string>& files, const std::string& arg);
+
+/**
+ * @brief This function finds the -i/--images key in the input args.
+ * It is necessary to process multiple values for a single key.
+ * @param files vector of verified input files, updated by this function
+ */
+void parseInputFilesArguments(std::vector<std::string>& files);
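+
+// Illustrative usage sketch (assumes the sample's command-line flags have already been parsed;
+// "inputFiles" is a placeholder variable name, not part of this header):
+//
+//     std::vector<std::string> inputFiles;
+//     parseInputFilesArguments(inputFiles);   // expands -i/--images values (files or folders)
+//     if (inputFiles.empty())
+//         throw std::logic_error("No input images were provided");
+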
+std::map<std::string, std::string> parseArgMap(std::string argMap);
+
+void printInputAndOutputsInfo(const ov::Model& network);
+
+void configurePrePostProcessing(std::shared_ptr<ov::Model>& function,
+ const std::string& ip,
+ const std::string& op,
+ const std::string& iop,
+ const std::string& il,
+ const std::string& ol,
+ const std::string& iol,
+ const std::string& iml,
+ const std::string& oml,
+ const std::string& ioml);
+
+void printInputAndOutputsInfo(const ov::Model& network);
+ov::element::Type getPrecision2(const std::string& value);
+
+template <class T>
+void printInputAndOutputsInfoShort(const T& network) {
+ slog::info << "Network inputs:" << slog::endl;
+ for (auto&& input : network.inputs()) {
+ std::string in_name;
+ std::string node_name;
+
+ // Workaround for "tensor has no name" issue
+ try {
+ for (const auto& name : input.get_names()) {
+ in_name += name + " , ";
+ }
+ in_name = in_name.substr(0, in_name.size() - 3);
+ } catch (const ov::Exception&) {
+ }
+
+ try {
+ node_name = input.get_node()->get_friendly_name();
+ } catch (const ov::Exception&) {
+ }
+
+ if (in_name == "") {
+ in_name = "***NO_NAME***";
+ }
+ if (node_name == "") {
+ node_name = "***NO_NAME***";
+ }
+
+ slog::info << " " << in_name << " (node: " << node_name << ") : " << input.get_element_type() << " / "
+ << ov::layout::get_layout(input).to_string() << " / " << input.get_partial_shape() << slog::endl;
+ }
+
+ slog::info << "Network outputs:" << slog::endl;
+ for (auto&& output : network.outputs()) {
+ std::string out_name;
+ std::string node_name;
+
+ // Workaround for "tensor has no name" issue
+ try {
+ for (const auto& name : output.get_names()) {
+ out_name += name + " , ";
+ }
+ out_name = out_name.substr(0, out_name.size() - 3);
+ } catch (const ov::Exception&) {
+ }
+ try {
+ node_name = output.get_node()->get_input_node_ptr(0)->get_friendly_name();
+ } catch (const ov::Exception&) {
+ }
+
+ if (out_name == "") {
+ out_name = "***NO_NAME***";
+ }
+ if (node_name == "") {
+ node_name = "***NO_NAME***";
+ }
+
+ slog::info << " " << out_name << " (node: " << node_name << ") : " << output.get_element_type() << " / "
+ << ov::layout::get_layout(output).to_string() << " / " << output.get_partial_shape() << slog::endl;
+ }
+}
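+
+// Illustrative usage sketch (assumes "core" is an ov::Core and "model.xml" is a placeholder path;
+// the template only relies on inputs()/outputs(), so an ov::CompiledModel can be passed as well):
+//
+//     ov::Core core;
+//     std::shared_ptr<ov::Model> model = core.read_model("model.xml");
+//     printInputAndOutputsInfoShort(*model);   // logs name / element type / layout / shape per port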
diff --git a/python/openvino/runtime/common/utils/include/samples/classification_results.h b/python/openvino/runtime/common/utils/include/samples/classification_results.h
new file mode 100644
index 0000000..e1bc20f
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/classification_results.h
@@ -0,0 +1,205 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief a header file with output classification results
+ * @file classification_results.h
+ */
+#pragma once
+
+#include <algorithm>
+#include <iomanip>
+#include <iostream>
+#include <numeric>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "openvino/openvino.hpp"
+
+/**
+ * @class ClassificationResult
+ * @brief A ClassificationResult creates an output table with results
+ */
+class ClassificationResult {
+private:
+ const std::string _classidStr = "classid";
+ const std::string _probabilityStr = "probability";
+ const std::string _labelStr = "label";
+ size_t _nTop;
+ ov::Tensor _outTensor;
+ const std::vector<std::string> _labels;
+ const std::vector<std::string> _imageNames;
+ const size_t _batchSize;
+ std::vector<unsigned> _results;
+
+ void printHeader() {
+ std::cout << _classidStr << " " << _probabilityStr;
+ if (!_labels.empty())
+ std::cout << " " << _labelStr;
+ std::string classidColumn(_classidStr.length(), '-');
+ std::string probabilityColumn(_probabilityStr.length(), '-');
+ std::string labelColumn(_labelStr.length(), '-');
+ std::cout << std::endl << classidColumn << " " << probabilityColumn;
+ if (!_labels.empty())
+ std::cout << " " << labelColumn;
+ std::cout << std::endl;
+ }
+
+ /**
+ * @brief Gets the top n results from a tensor
+ *
+ * @param n Top n count
+ * @param input 1D tensor that contains probabilities
+ * @param output Vector of indexes for the top n places
+ */
+ template <class T>
+ void topResults(unsigned int n, const ov::Tensor& input, std::vector<unsigned>& output) {
+ ov::Shape shape = input.get_shape();
+ size_t input_rank = shape.size();
+ OPENVINO_ASSERT(input_rank != 0 && shape[0] != 0, "Input tensor has incorrect dimensions!");
+ size_t batchSize = shape[0];
+ std::vector<unsigned> indexes(input.get_size() / batchSize);
+
+ n = static_cast<unsigned>(std::min<size_t>((size_t)n, input.get_size()));
+ output.resize(n * batchSize);
+
+ for (size_t i = 0; i < batchSize; i++) {
+ const size_t offset = i * (input.get_size() / batchSize);
+ const T* batchData = input.data<const T>();
+ batchData += offset;
+
+ std::iota(std::begin(indexes), std::end(indexes), 0);
+ std::partial_sort(std::begin(indexes),
+ std::begin(indexes) + n,
+ std::end(indexes),
+ [&batchData](unsigned l, unsigned r) {
+ return batchData[l] > batchData[r];
+ });
+ for (unsigned j = 0; j < n; j++) {
+ output.at(i * n + j) = indexes.at(j);
+ }
+ }
+ }
+
+    /**
+     * @brief Gets the top n results from a tensor, dispatching on its element type
+     *
+     * @param n Top n count
+     * @param input 1D tensor that contains probabilities
+     * @param output Vector of indexes for the top n places
+     */
+ void topResults(unsigned int n, const ov::Tensor& input, std::vector<unsigned>& output) {
+#define TENSOR_TOP_RESULT(elem_type) \
+ case ov::element::Type_t::elem_type: { \
+ using tensor_type = ov::fundamental_type_for<ov::element::Type_t::elem_type>; \
+ topResults<tensor_type>(n, input, output); \
+ break; \
+ }
+
+ switch (input.get_element_type()) {
+ TENSOR_TOP_RESULT(f32);
+ TENSOR_TOP_RESULT(f64);
+ TENSOR_TOP_RESULT(f16);
+ TENSOR_TOP_RESULT(i16);
+ TENSOR_TOP_RESULT(u8);
+ TENSOR_TOP_RESULT(i8);
+ TENSOR_TOP_RESULT(u16);
+ TENSOR_TOP_RESULT(i32);
+ TENSOR_TOP_RESULT(u32);
+ TENSOR_TOP_RESULT(i64);
+ TENSOR_TOP_RESULT(u64);
+ default:
+ OPENVINO_ASSERT(false, "cannot locate tensor with element type: ", input.get_element_type());
+ }
+
+#undef TENSOR_TOP_RESULT
+ }
+
+public:
+ explicit ClassificationResult(const ov::Tensor& output_tensor,
+ const std::vector<std::string>& image_names = {},
+ size_t batch_size = 1,
+ size_t num_of_top = 10,
+ const std::vector<std::string>& labels = {})
+ : _nTop(num_of_top),
+ _outTensor(output_tensor),
+ _labels(labels),
+ _imageNames(image_names),
+ _batchSize(batch_size),
+ _results() {
+ OPENVINO_ASSERT(_imageNames.size() == _batchSize, "Batch size should be equal to the number of images.");
+
+ topResults(_nTop, _outTensor, _results);
+ }
+
+ /**
+ * @brief prints formatted classification results
+ */
+ void show() {
+ /** Print the result iterating over each batch **/
+ std::ios::fmtflags fmt(std::cout.flags());
+ std::cout << std::endl << "Top " << _nTop << " results:" << std::endl << std::endl;
+ for (size_t image_id = 0; image_id < _batchSize; ++image_id) {
+ std::string out(_imageNames[image_id].begin(), _imageNames[image_id].end());
+ std::cout << "Image " << out;
+ std::cout.flush();
+ std::cout.clear();
+ std::cout << std::endl << std::endl;
+ printHeader();
+
+ for (size_t id = image_id * _nTop, cnt = 0; id < (image_id + 1) * _nTop; ++cnt, ++id) {
+ std::cout.precision(7);
+ // Getting probability for resulting class
+ const auto index = _results.at(id) + image_id * (_outTensor.get_size() / _batchSize);
+ const auto result = _outTensor.data<const float>()[index];
+
+ std::cout << std::setw(static_cast<int>(_classidStr.length())) << std::left << _results.at(id) << " ";
+ std::cout << std::left << std::setw(static_cast<int>(_probabilityStr.length())) << std::fixed << result;
+
+ if (!_labels.empty()) {
+ std::cout << " " + _labels[_results.at(id)];
+ }
+ std::cout << std::endl;
+ }
+ std::cout << std::endl;
+ }
+ std::cout.flags(fmt);
+ }
+
+ void print() {
+ /** Print the result iterating over each batch **/
+ std::ios::fmtflags fmt(std::cout.flags());
+ std::cout << std::endl << "Top " << _nTop << " results:" << std::endl << std::endl;
+ for (size_t image_id = 0; image_id < _batchSize; ++image_id) {
+ std::string out(_imageNames[image_id].begin(), _imageNames[image_id].end());
+ std::cout << "Image " << out;
+ std::cout.flush();
+ std::cout.clear();
+ std::cout << std::endl << std::endl;
+ printHeader();
+
+ for (size_t id = image_id * _nTop, cnt = 0; id < (image_id + 1) * _nTop; ++cnt, ++id) {
+ std::cout.precision(7);
+ // Getting probability for resulting class
+                const auto index = _results.at(id) + image_id * (_outTensor.get_size() / _batchSize);
+                const auto result = _outTensor.data<const float>()[index];
+ std::cout << std::setw(static_cast<int>(_classidStr.length())) << std::left << _results.at(id) << " ";
+ std::cout << std::left << std::setw(static_cast<int>(_probabilityStr.length())) << std::fixed << result;
+
+ if (!_labels.empty()) {
+ std::cout << " " + _labels[_results.at(id)];
+ }
+ std::cout << std::endl;
+ }
+ std::cout << std::endl;
+ }
+ std::cout.flags(fmt);
+ }
+
+ /**
+ * @brief returns the classification results in a vector
+ */
+ std::vector<unsigned> getResults() {
+ return _results;
+ }
+};
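+
+// Illustrative usage sketch (assumes "infer_request" is an ov::InferRequest that has completed
+// inference on a classification model and "image_paths" holds one file name per batch element):
+//
+//     ov::Tensor output = infer_request.get_output_tensor();
+//     ClassificationResult result(output, image_paths, image_paths.size(), 5);
+//     result.show();                                   // prints a classid/probability table per image
+//     std::vector<unsigned> top_ids = result.getResults();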
diff --git a/python/openvino/runtime/common/utils/include/samples/common.hpp b/python/openvino/runtime/common/utils/include/samples/common.hpp
new file mode 100644
index 0000000..448fd96
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/common.hpp
@@ -0,0 +1,1429 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief a header file with common samples functionality
+ * @file common.hpp
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <cctype>
+#include <chrono>
+#include <ctime>
+#include <fstream>
+#include <functional>
+#include <iomanip>
+#include <iostream>
+#include <limits>
+#include <list>
+#include <map>
+#include <random>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+using std::setprecision;
+
+// clang-format off
+#include <inference_engine.hpp>
+#include "openvino/openvino.hpp"
+#include "slog.hpp"
+// clang-format on
+
+// @brief performance counters sort
+static constexpr char pcSort[] = "sort";
+static constexpr char pcNoSort[] = "no_sort";
+static constexpr char pcSimpleSort[] = "simple_sort";
+
+#ifndef UNUSED
+# if defined(_MSC_VER) && !defined(__clang__)
+# define UNUSED
+# else
+# define UNUSED __attribute__((unused))
+# endif
+#endif
+
+/**
+ * @brief Unicode string wrappers
+ */
+#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
+# define tchar wchar_t
+# define tstring std::wstring
+# define tmain wmain
+# define TSTRING2STRING(tstr) wstring2string(tstr)
+#else
+# define tchar char
+# define tstring std::string
+# define tmain main
+# define TSTRING2STRING(tstr) tstr
+#endif
+
+#if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32)
+
+/**
+ * @brief Convert wstring to string
+ * @param wstr reference to a wide string
+ * @return narrow string
+ */
+inline std::string wstring2string(const std::wstring& wstr) {
+ std::string str;
+ for (auto&& wc : wstr)
+ str += static_cast<char>(wc);
+ return str;
+}
+#endif
+
+/**
+ * @brief trim from start (in place)
+ * @param s - string to trim
+ */
+inline void ltrim(std::string& s) {
+ s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int c) {
+ return !std::isspace(c);
+ }));
+}
+
+/**
+ * @brief trim from end (in place)
+ * @param s - string to trim
+ */
+inline void rtrim(std::string& s) {
+ s.erase(std::find_if(s.rbegin(),
+ s.rend(),
+ [](int c) {
+ return !std::isspace(c);
+ })
+ .base(),
+ s.end());
+}
+
+/**
+ * @brief trim from both ends (in place)
+ * @param s - string to trim
+ */
+inline std::string& trim(std::string& s) {
+ ltrim(s);
+ rtrim(s);
+ return s;
+}
+/**
+ * @brief Gets filename without extension
+ * @param filepath - full file name
+ * @return filename without extension
+ */
+inline std::string fileNameNoExt(const std::string& filepath) {
+ auto pos = filepath.rfind('.');
+ if (pos == std::string::npos)
+ return filepath;
+ return filepath.substr(0, pos);
+}
+
+/**
+ * @brief Get extension from filename
+ * @param filename - name of the file from which the extension should be extracted
+ * @return string with the extracted file extension
+ */
+inline std::string fileExt(const std::string& filename) {
+ auto pos = filename.rfind('.');
+ if (pos == std::string::npos)
+ return "";
+ return filename.substr(pos + 1);
+}
+
+inline slog::LogStream& operator<<(slog::LogStream& os, const ov::Version& version) {
+ os << "Build ................................. ";
+ os << version.buildNumber << slog::endl;
+
+ return os;
+}
+
+inline slog::LogStream& operator<<(slog::LogStream& os, const std::map<std::string, ov::Version>& versions) {
+ for (auto&& version : versions) {
+ os << version.first << slog::endl;
+ os << version.second << slog::endl;
+ }
+
+ return os;
+}
+
+/**
+ * @class Color
+ * @brief A Color class stores channels of a given color
+ */
+class Color {
+private:
+ unsigned char _r;
+ unsigned char _g;
+ unsigned char _b;
+
+public:
+    /**
+     * A constructor that initializes all three color channels.
+     * @param r - value for red channel
+     * @param g - value for green channel
+     * @param b - value for blue channel
+     */
+ Color(unsigned char r, unsigned char g, unsigned char b) : _r(r), _g(g), _b(b) {}
+
+ inline unsigned char red() {
+ return _r;
+ }
+
+ inline unsigned char blue() {
+ return _b;
+ }
+
+ inline unsigned char green() {
+ return _g;
+ }
+};
+
+// TODO : keep only one version of writeOutputBMP
+
+/**
+ * @brief Writes a class-index map as BMP image data to an output stream
+ * @param data - matrix of per-pixel class indexes
+ * @param classesNum - the number of classes
+ * @param outFile - output stream the BMP data is written to
+ */
+static UNUSED void writeOutputBmp(std::vector<std::vector<size_t>> data, size_t classesNum, std::ostream& outFile) {
+ unsigned int seed = (unsigned int)time(NULL);
+ // Known colors for training classes from Cityscape dataset
+ static std::vector<Color> colors = {
+ {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190}, {153, 153, 153},
+ {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152}, {180, 130, 70}, {60, 20, 220},
+ {0, 0, 255}, {142, 0, 0}, {70, 0, 0}, {100, 60, 0}, {90, 0, 0}, {230, 0, 0},
+ {32, 11, 119}, {0, 74, 111}, {81, 0, 81}};
+
+ while (classesNum > colors.size()) {
+ static std::mt19937 rng(seed);
+ std::uniform_int_distribution<int> dist(0, 255);
+ Color color(dist(rng), dist(rng), dist(rng));
+ colors.push_back(color);
+ }
+
+ unsigned char file[14] = {
+ 'B',
+ 'M', // magic
+ 0,
+ 0,
+ 0,
+ 0, // size in bytes
+ 0,
+ 0, // app data
+ 0,
+ 0, // app data
+ 40 + 14,
+ 0,
+ 0,
+ 0 // start of data offset
+ };
+ unsigned char info[40] = {
+ 40, 0, 0, 0, // info hd size
+ 0, 0, 0, 0, // width
+ 0, 0, 0, 0, // height
+ 1, 0, // number color planes
+ 24, 0, // bits per pixel
+ 0, 0, 0, 0, // compression is none
+ 0, 0, 0, 0, // image bits size
+ 0x13, 0x0B, 0, 0, // horz resolution in pixel / m
+ 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi)
+ 0, 0, 0, 0, // #colors in palette
+ 0, 0, 0, 0, // #important colors
+ };
+
+ auto height = data.size();
+ auto width = data.at(0).size();
+
+ OPENVINO_ASSERT(
+        height < (size_t)std::numeric_limits<int32_t>::max() && width < (size_t)std::numeric_limits<int32_t>::max(),
+ "File size is too big: ",
+ height,
+ " X ",
+ width);
+
+ int padSize = static_cast<int>(4 - (width * 3) % 4) % 4;
+ int sizeData = static_cast<int>(width * height * 3 + height * padSize);
+ int sizeAll = sizeData + sizeof(file) + sizeof(info);
+
+ file[2] = (unsigned char)(sizeAll);
+ file[3] = (unsigned char)(sizeAll >> 8);
+ file[4] = (unsigned char)(sizeAll >> 16);
+ file[5] = (unsigned char)(sizeAll >> 24);
+
+ info[4] = (unsigned char)(width);
+ info[5] = (unsigned char)(width >> 8);
+ info[6] = (unsigned char)(width >> 16);
+ info[7] = (unsigned char)(width >> 24);
+
+ int32_t negativeHeight = -(int32_t)height;
+ info[8] = (unsigned char)(negativeHeight);
+ info[9] = (unsigned char)(negativeHeight >> 8);
+ info[10] = (unsigned char)(negativeHeight >> 16);
+ info[11] = (unsigned char)(negativeHeight >> 24);
+
+ info[20] = (unsigned char)(sizeData);
+ info[21] = (unsigned char)(sizeData >> 8);
+ info[22] = (unsigned char)(sizeData >> 16);
+ info[23] = (unsigned char)(sizeData >> 24);
+
+ outFile.write(reinterpret_cast<char*>(file), sizeof(file));
+ outFile.write(reinterpret_cast<char*>(info), sizeof(info));
+
+ unsigned char pad[3] = {0, 0, 0};
+
+ for (size_t y = 0; y < height; y++) {
+ for (size_t x = 0; x < width; x++) {
+ unsigned char pixel[3];
+ size_t index = data.at(y).at(x);
+ pixel[0] = colors.at(index).red();
+ pixel[1] = colors.at(index).green();
+ pixel[2] = colors.at(index).blue();
+ outFile.write(reinterpret_cast<char*>(pixel), 3);
+ }
+ outFile.write(reinterpret_cast<char*>(pad), padSize);
+ }
+}
+
+/**
+ * @brief Writes output data to BMP image
+ * @param name - image name
+ * @param data - output data
+ * @param height - height of the target image
+ * @param width - width of the target image
+ * @return false if error else true
+ */
+static UNUSED bool writeOutputBmp(std::string name, unsigned char* data, size_t height, size_t width) {
+ std::ofstream outFile;
+ outFile.open(name, std::ofstream::binary);
+ if (!outFile.is_open()) {
+ return false;
+ }
+
+ unsigned char file[14] = {
+ 'B',
+ 'M', // magic
+ 0,
+ 0,
+ 0,
+ 0, // size in bytes
+ 0,
+ 0, // app data
+ 0,
+ 0, // app data
+ 40 + 14,
+ 0,
+ 0,
+ 0 // start of data offset
+ };
+ unsigned char info[40] = {
+ 40, 0, 0, 0, // info hd size
+ 0, 0, 0, 0, // width
+ 0, 0, 0, 0, // height
+ 1, 0, // number color planes
+ 24, 0, // bits per pixel
+ 0, 0, 0, 0, // compression is none
+ 0, 0, 0, 0, // image bits size
+ 0x13, 0x0B, 0, 0, // horz resolution in pixel / m
+ 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi)
+ 0, 0, 0, 0, // #colors in palette
+ 0, 0, 0, 0, // #important colors
+ };
+
+ OPENVINO_ASSERT(
+        height < (size_t)std::numeric_limits<int32_t>::max() && width < (size_t)std::numeric_limits<int32_t>::max(),
+ "File size is too big: ",
+ height,
+ " X ",
+ width);
+
+ int padSize = static_cast<int>(4 - (width * 3) % 4) % 4;
+ int sizeData = static_cast<int>(width * height * 3 + height * padSize);
+ int sizeAll = sizeData + sizeof(file) + sizeof(info);
+
+ file[2] = (unsigned char)(sizeAll);
+ file[3] = (unsigned char)(sizeAll >> 8);
+ file[4] = (unsigned char)(sizeAll >> 16);
+ file[5] = (unsigned char)(sizeAll >> 24);
+
+ info[4] = (unsigned char)(width);
+ info[5] = (unsigned char)(width >> 8);
+ info[6] = (unsigned char)(width >> 16);
+ info[7] = (unsigned char)(width >> 24);
+
+ int32_t negativeHeight = -(int32_t)height;
+ info[8] = (unsigned char)(negativeHeight);
+ info[9] = (unsigned char)(negativeHeight >> 8);
+ info[10] = (unsigned char)(negativeHeight >> 16);
+ info[11] = (unsigned char)(negativeHeight >> 24);
+
+ info[20] = (unsigned char)(sizeData);
+ info[21] = (unsigned char)(sizeData >> 8);
+ info[22] = (unsigned char)(sizeData >> 16);
+ info[23] = (unsigned char)(sizeData >> 24);
+
+ outFile.write(reinterpret_cast<char*>(file), sizeof(file));
+ outFile.write(reinterpret_cast<char*>(info), sizeof(info));
+
+ unsigned char pad[3] = {0, 0, 0};
+
+ for (size_t y = 0; y < height; y++) {
+ for (size_t x = 0; x < width; x++) {
+ unsigned char pixel[3];
+ pixel[0] = data[y * width * 3 + x * 3];
+ pixel[1] = data[y * width * 3 + x * 3 + 1];
+ pixel[2] = data[y * width * 3 + x * 3 + 2];
+
+ outFile.write(reinterpret_cast<char*>(pixel), 3);
+ }
+ outFile.write(reinterpret_cast<char*>(pad), padSize);
+ }
+ return true;
+}
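+
+// Illustrative usage sketch (assumes "rgb" holds height * width * 3 bytes of pixel data in the
+// channel order expected by BMP; "out.bmp" and the dimensions are placeholders):
+//
+//     const size_t height = 480, width = 640;
+//     std::vector<unsigned char> rgb(height * width * 3);
+//     // ... fill "rgb" with image data ...
+//     if (!writeOutputBmp("out.bmp", rgb.data(), height, width)) {
+//         std::cerr << "Failed to open out.bmp for writing" << std::endl;
+//     }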
+
+/**
+ * @brief Adds colored rectangles to the image
+ * @param data - image data the rectangles are drawn on
+ * @param height - height of the image
+ * @param width - width of the image
+ * @param rectangles - vector of rectangle coordinates (x, y, w, h); its size must be 4 times the number of classes
+ * @param classes - vector of classes, one per rectangle
+ * @param thickness - thickness of a line (in pixels) to be used for bounding boxes
+ */
+static UNUSED void addRectangles(unsigned char* data,
+ size_t height,
+ size_t width,
+ std::vector<int> rectangles,
+ std::vector<int> classes,
+ int thickness = 1) {
+ std::vector<Color> colors = {// colors to be used for bounding boxes
+ {128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190},
+ {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152},
+ {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0},
+ {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111},
+ {81, 0, 81}};
+
+ if (rectangles.size() % 4 != 0 || rectangles.size() / 4 != classes.size()) {
+ return;
+ }
+
+ for (size_t i = 0; i < classes.size(); i++) {
+ int x = rectangles.at(i * 4);
+ int y = rectangles.at(i * 4 + 1);
+ int w = rectangles.at(i * 4 + 2);
+ int h = rectangles.at(i * 4 + 3);
+
+ int cls = classes.at(i) % colors.size(); // color of a bounding box line
+
+ if (x < 0)
+ x = 0;
+ if (y < 0)
+ y = 0;
+ if (w < 0)
+ w = 0;
+ if (h < 0)
+ h = 0;
+
+ if (static_cast<std::size_t>(x) >= width) {
+ x = static_cast<int>(width - 1);
+ w = 0;
+ thickness = 1;
+ }
+ if (static_cast<std::size_t>(y) >= height) {
+ y = static_cast<int>(height - 1);
+ h = 0;
+ thickness = 1;
+ }
+
+ if (static_cast<std::size_t>(x + w) >= width) {
+ w = static_cast<int>(width - x - 1);
+ }
+ if (static_cast<std::size_t>(y + h) >= height) {
+ h = static_cast<int>(height - y - 1);
+ }
+
+ thickness = std::min(std::min(thickness, w / 2 + 1), h / 2 + 1);
+
+ size_t shift_first;
+ size_t shift_second;
+ for (int t = 0; t < thickness; t++) {
+ shift_first = (y + t) * width * 3;
+ shift_second = (y + h - t) * width * 3;
+ for (int ii = x; ii < x + w + 1; ii++) {
+ data[shift_first + ii * 3] = colors.at(cls).red();
+ data[shift_first + ii * 3 + 1] = colors.at(cls).green();
+ data[shift_first + ii * 3 + 2] = colors.at(cls).blue();
+ data[shift_second + ii * 3] = colors.at(cls).red();
+ data[shift_second + ii * 3 + 1] = colors.at(cls).green();
+ data[shift_second + ii * 3 + 2] = colors.at(cls).blue();
+ }
+ }
+
+ for (int t = 0; t < thickness; t++) {
+ shift_first = (x + t) * 3;
+ shift_second = (x + w - t) * 3;
+ for (int ii = y; ii < y + h + 1; ii++) {
+ data[shift_first + ii * width * 3] = colors.at(cls).red();
+ data[shift_first + ii * width * 3 + 1] = colors.at(cls).green();
+ data[shift_first + ii * width * 3 + 2] = colors.at(cls).blue();
+ data[shift_second + ii * width * 3] = colors.at(cls).red();
+ data[shift_second + ii * width * 3 + 1] = colors.at(cls).green();
+ data[shift_second + ii * width * 3 + 2] = colors.at(cls).blue();
+ }
+ }
+ }
+}
+
+// DLA PATCH BEGIN - Re-implement functions needed for dla_benchmark that were removed from OpenVINO 2022.3.0
+inline std::size_t getTensorWidth(const InferenceEngine::TensorDesc& desc) {
+ const auto& layout = desc.getLayout();
+ const auto& dims = desc.getDims();
+ const auto& size = dims.size();
+ if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW ||
+ layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW ||
+ layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || layout == InferenceEngine::Layout::CHW ||
+ layout == InferenceEngine::Layout::HW)) {
+ // Regardless of layout, dimensions are stored in fixed order
+ return dims.back();
+ } else {
+ IE_THROW() << "Tensor does not have width dimension";
+ }
+ return 0;
+}
+
+inline std::size_t getTensorHeight(const InferenceEngine::TensorDesc& desc) {
+ const auto& layout = desc.getLayout();
+ const auto& dims = desc.getDims();
+ const auto& size = dims.size();
+ if ((size >= 2) && (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW ||
+ layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::OIHW || layout == InferenceEngine::Layout::GOIHW ||
+ layout == InferenceEngine::Layout::OIDHW || layout == InferenceEngine::Layout::GOIDHW || layout == InferenceEngine::Layout::CHW ||
+ layout == InferenceEngine::Layout::HW)) {
+ // Regardless of layout, dimensions are stored in fixed order
+ return dims.at(size - 2);
+ } else {
+ IE_THROW() << "Tensor does not have height dimension";
+ }
+ return 0;
+}
+
+inline std::size_t getTensorChannels(const InferenceEngine::TensorDesc& desc) {
+ const auto& layout = desc.getLayout();
+ if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW ||
+ layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::C || layout == InferenceEngine::Layout::CHW ||
+ layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) {
+ // Regardless of layout, dimensions are stored in fixed order
+ const auto& dims = desc.getDims();
+ switch (desc.getLayoutByDims(dims)) {
+ case InferenceEngine::Layout::C:
+ return dims.at(0);
+ case InferenceEngine::Layout::NC:
+ return dims.at(1);
+ case InferenceEngine::Layout::CHW:
+ return dims.at(0);
+ case InferenceEngine::Layout::NCHW:
+ return dims.at(1);
+ case InferenceEngine::Layout::NCDHW:
+ return dims.at(1);
+ case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
+ case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
+ default:
+ IE_THROW() << "Tensor does not have channels dimension";
+ }
+ } else {
+ IE_THROW() << "Tensor does not have channels dimension";
+ }
+ return 0;
+}
+
+inline std::size_t getTensorBatch(const InferenceEngine::TensorDesc& desc) {
+ const auto& layout = desc.getLayout();
+ if (layout == InferenceEngine::Layout::NCHW || layout == InferenceEngine::Layout::NHWC || layout == InferenceEngine::Layout::NCDHW ||
+ layout == InferenceEngine::Layout::NDHWC || layout == InferenceEngine::Layout::NC || layout == InferenceEngine::Layout::CN) {
+ // Regardless of layout, dimensions are stored in fixed order
+ const auto& dims = desc.getDims();
+ switch (desc.getLayoutByDims(dims)) {
+ case InferenceEngine::Layout::NC:
+ return dims.at(0);
+ case InferenceEngine::Layout::NCHW:
+ return dims.at(0);
+ case InferenceEngine::Layout::NCDHW:
+ return dims.at(0);
+ case InferenceEngine::Layout::CHW: // [[fallthrough]]
+ case InferenceEngine::Layout::C: // [[fallthrough]]
+ case InferenceEngine::Layout::SCALAR: // [[fallthrough]]
+ case InferenceEngine::Layout::BLOCKED: // [[fallthrough]]
+ default:
+ IE_THROW() << "Tensor does not have channels dimension";
+ }
+ } else {
+ IE_THROW() << "Tensor does not have channels dimension";
+ }
+ return 0;
+}
+
+// DLA PATCH END
+
+/**
+ * @brief Writes output image data as BMP to an output stream
+ * @param data - output image data (3 bytes per pixel)
+ * @param height - height of the image
+ * @param width - width of the image
+ * @param outFile - output stream the BMP data is written to
+ * @return true when the image data has been written
+ */
+
+static UNUSED bool writeOutputBmp(unsigned char* data, size_t height, size_t width, std::ostream& outFile) {
+ unsigned char file[14] = {
+ 'B',
+ 'M', // magic
+ 0,
+ 0,
+ 0,
+ 0, // size in bytes
+ 0,
+ 0, // app data
+ 0,
+ 0, // app data
+ 40 + 14,
+ 0,
+ 0,
+ 0 // start of data offset
+ };
+ unsigned char info[40] = {
+ 40, 0, 0, 0, // info hd size
+ 0, 0, 0, 0, // width
+ 0, 0, 0, 0, // height
+ 1, 0, // number color planes
+ 24, 0, // bits per pixel
+ 0, 0, 0, 0, // compression is none
+ 0, 0, 0, 0, // image bits size
+ 0x13, 0x0B, 0, 0, // horz resolution in pixel / m
+ 0x13, 0x0B, 0, 0, // vert resolution (0x03C3 = 96 dpi, 0x0B13 = 72 dpi)
+ 0, 0, 0, 0, // #colors in palette
+ 0, 0, 0, 0, // #important colors
+ };
+
+ OPENVINO_ASSERT(
+        height < (size_t)std::numeric_limits<int32_t>::max() && width < (size_t)std::numeric_limits<int32_t>::max(),
+ "File size is too big: ",
+ height,
+ " X ",
+ width);
+
+ int padSize = static_cast<int>(4 - (width * 3) % 4) % 4;
+ int sizeData = static_cast<int>(width * height * 3 + height * padSize);
+ int sizeAll = sizeData + sizeof(file) + sizeof(info);
+
+ file[2] = (unsigned char)(sizeAll);
+ file[3] = (unsigned char)(sizeAll >> 8);
+ file[4] = (unsigned char)(sizeAll >> 16);
+ file[5] = (unsigned char)(sizeAll >> 24);
+
+ info[4] = (unsigned char)(width);
+ info[5] = (unsigned char)(width >> 8);
+ info[6] = (unsigned char)(width >> 16);
+ info[7] = (unsigned char)(width >> 24);
+
+ int32_t negativeHeight = -(int32_t)height;
+ info[8] = (unsigned char)(negativeHeight);
+ info[9] = (unsigned char)(negativeHeight >> 8);
+ info[10] = (unsigned char)(negativeHeight >> 16);
+ info[11] = (unsigned char)(negativeHeight >> 24);
+
+ info[20] = (unsigned char)(sizeData);
+ info[21] = (unsigned char)(sizeData >> 8);
+ info[22] = (unsigned char)(sizeData >> 16);
+ info[23] = (unsigned char)(sizeData >> 24);
+
+ outFile.write(reinterpret_cast<char*>(file), sizeof(file));
+ outFile.write(reinterpret_cast<char*>(info), sizeof(info));
+
+ unsigned char pad[3] = {0, 0, 0};
+
+ for (size_t y = 0; y < height; y++) {
+ for (size_t x = 0; x < width; x++) {
+ unsigned char pixel[3];
+ pixel[0] = data[y * width * 3 + x * 3];
+ pixel[1] = data[y * width * 3 + x * 3 + 1];
+ pixel[2] = data[y * width * 3 + x * 3 + 2];
+ outFile.write(reinterpret_cast<char*>(pixel), 3);
+ }
+ outFile.write(reinterpret_cast<char*>(pad), padSize);
+ }
+
+ return true;
+}
+
+static UNUSED void printPerformanceCounts(const std::map<std::string, ov::ProfilingInfo>& performanceMap,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ std::chrono::microseconds totalTime = std::chrono::microseconds::zero();
+ // Print performance counts
+ if (bshowHeader) {
+ stream << std::endl << "performance counts:" << std::endl << std::endl;
+ }
+ std::ios::fmtflags fmt(std::cout.flags());
+
+ for (const auto& it : performanceMap) {
+ std::string toPrint(it.first);
+ const int maxLayerName = 30;
+
+ if (it.first.length() >= maxLayerName) {
+ toPrint = it.first.substr(0, maxLayerName - 4);
+ toPrint += "...";
+ }
+
+ stream << std::setw(maxLayerName) << std::left << toPrint;
+ switch (it.second.status) {
+ case ov::ProfilingInfo::Status::EXECUTED:
+ stream << std::setw(15) << std::left << "EXECUTED";
+ break;
+ case ov::ProfilingInfo::Status::NOT_RUN:
+ stream << std::setw(15) << std::left << "NOT_RUN";
+ break;
+ case ov::ProfilingInfo::Status::OPTIMIZED_OUT:
+ stream << std::setw(15) << std::left << "OPTIMIZED_OUT";
+ break;
+ }
+ stream << std::setw(30) << std::left << "layerType: " + std::string(it.second.node_type) + " ";
+ stream << std::setw(20) << std::left << "realTime: " + std::to_string(it.second.real_time.count());
+ stream << std::setw(20) << std::left << "cpu: " + std::to_string(it.second.cpu_time.count());
+ stream << " execType: " << it.second.exec_type << std::endl;
+ if (it.second.real_time.count() > 0) {
+ totalTime += it.second.real_time;
+ }
+ }
+ stream << std::setw(20) << std::left << "Total time: " + std::to_string(totalTime.count()) << " microseconds"
+ << std::endl;
+ std::cout << std::endl;
+ std::cout << "Full device name: " << deviceName << std::endl;
+ std::cout << std::endl;
+ std::cout.flags(fmt);
+}
+
+/**
+ * @brief This class represents an object that is found by an object detection net
+ */
+class DetectedObject {
+public:
+ int objectType;
+ float xmin, xmax, ymin, ymax, prob;
+ bool difficult;
+
+ DetectedObject(int _objectType,
+ float _xmin,
+ float _ymin,
+ float _xmax,
+ float _ymax,
+ float _prob,
+ bool _difficult = false)
+ : objectType(_objectType),
+ xmin(_xmin),
+ xmax(_xmax),
+ ymin(_ymin),
+ ymax(_ymax),
+ prob(_prob),
+ difficult(_difficult) {}
+
+ DetectedObject(const DetectedObject& other) = default;
+
+ static float ioU(const DetectedObject& detectedObject1_, const DetectedObject& detectedObject2_) {
+ // Add small space to eliminate empty squares
+ float epsilon = 0; // 1e-5f;
+
+ DetectedObject detectedObject1(detectedObject1_.objectType,
+ (detectedObject1_.xmin - epsilon),
+ (detectedObject1_.ymin - epsilon),
+ (detectedObject1_.xmax - epsilon),
+ (detectedObject1_.ymax - epsilon),
+ detectedObject1_.prob);
+ DetectedObject detectedObject2(detectedObject2_.objectType,
+ (detectedObject2_.xmin + epsilon),
+ (detectedObject2_.ymin + epsilon),
+ (detectedObject2_.xmax),
+ (detectedObject2_.ymax),
+ detectedObject2_.prob);
+
+ if (detectedObject1.objectType != detectedObject2.objectType) {
+ // objects are different, so the result is 0
+ return 0.0f;
+ }
+
+ if (detectedObject1.xmax < detectedObject1.xmin)
+ return 0.0;
+ if (detectedObject1.ymax < detectedObject1.ymin)
+ return 0.0;
+ if (detectedObject2.xmax < detectedObject2.xmin)
+ return 0.0;
+ if (detectedObject2.ymax < detectedObject2.ymin)
+ return 0.0;
+
+ float xmin = (std::max)(detectedObject1.xmin, detectedObject2.xmin);
+ float ymin = (std::max)(detectedObject1.ymin, detectedObject2.ymin);
+ float xmax = (std::min)(detectedObject1.xmax, detectedObject2.xmax);
+ float ymax = (std::min)(detectedObject1.ymax, detectedObject2.ymax);
+
+ // Caffe adds 1 to every length if the box isn't normalized. So do we...
+ float addendum;
+ if (xmax > 1 || ymax > 1)
+ addendum = 1;
+ else
+ addendum = 0;
+
+ // intersection
+ float intr;
+ if ((xmax >= xmin) && (ymax >= ymin)) {
+ intr = (addendum + xmax - xmin) * (addendum + ymax - ymin);
+ } else {
+ intr = 0.0f;
+ }
+
+ // union
+ float square1 = (addendum + detectedObject1.xmax - detectedObject1.xmin) *
+ (addendum + detectedObject1.ymax - detectedObject1.ymin);
+ float square2 = (addendum + detectedObject2.xmax - detectedObject2.xmin) *
+ (addendum + detectedObject2.ymax - detectedObject2.ymin);
+
+ float unn = square1 + square2 - intr;
+
+ return static_cast<float>(intr) / unn;
+ }
+
+ DetectedObject scale(float scale_x, float scale_y) const {
+ return DetectedObject(objectType,
+ xmin * scale_x,
+ ymin * scale_y,
+ xmax * scale_x,
+ ymax * scale_y,
+ prob,
+ difficult);
+ }
+};
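+
+// Worked example for DetectedObject::ioU (normalized coordinates, so the Caffe-style addendum is 0):
+// boxes (0.0, 0.0, 0.5, 0.5) and (0.25, 0.25, 0.75, 0.75) of the same class intersect in a
+// 0.25 x 0.25 square, so IoU = 0.0625 / (0.25 + 0.25 - 0.0625) = 1/7 ~= 0.143.
+//
+//     DetectedObject a(0, 0.0f, 0.0f, 0.5f, 0.5f, 0.9f);
+//     DetectedObject b(0, 0.25f, 0.25f, 0.75f, 0.75f, 0.8f);
+//     float iou = DetectedObject::ioU(a, b);   // ~= 0.143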
+
+class ImageDescription {
+public:
+ const std::list<DetectedObject> alist;
+ const bool check_probs;
+
+ explicit ImageDescription(const std::list<DetectedObject>& _alist, bool _check_probs = false)
+ : alist(_alist),
+ check_probs(_check_probs) {}
+
+ static float ioUMultiple(const ImageDescription& detectedObjects, const ImageDescription& desiredObjects) {
+ const ImageDescription *detectedObjectsSmall, *detectedObjectsBig;
+ bool check_probs = desiredObjects.check_probs;
+
+ if (detectedObjects.alist.size() < desiredObjects.alist.size()) {
+ detectedObjectsSmall = &detectedObjects;
+ detectedObjectsBig = &desiredObjects;
+ } else {
+ detectedObjectsSmall = &desiredObjects;
+ detectedObjectsBig = &detectedObjects;
+ }
+
+ std::list<DetectedObject> doS = detectedObjectsSmall->alist;
+ std::list<DetectedObject> doB = detectedObjectsBig->alist;
+
+ float fullScore = 0.0f;
+ while (doS.size() > 0) {
+ float score = 0.0f;
+ std::list<DetectedObject>::iterator bestJ = doB.end();
+ for (auto j = doB.begin(); j != doB.end(); j++) {
+ float curscore = DetectedObject::ioU(*doS.begin(), *j);
+ if (score < curscore) {
+ score = curscore;
+ bestJ = j;
+ }
+ }
+
+ float coeff = 1.0;
+ if (check_probs) {
+ if (bestJ != doB.end()) {
+ float mn = std::min((*bestJ).prob, (*doS.begin()).prob);
+ float mx = std::max((*bestJ).prob, (*doS.begin()).prob);
+
+ coeff = mn / mx;
+ }
+ }
+
+ doS.pop_front();
+ if (bestJ != doB.end())
+ doB.erase(bestJ);
+ fullScore += coeff * score;
+ }
+ fullScore /= detectedObjectsBig->alist.size();
+
+ return fullScore;
+ }
+
+ ImageDescription scale(float scale_x, float scale_y) const {
+ std::list<DetectedObject> slist;
+ for (auto& dob : alist) {
+ slist.push_back(dob.scale(scale_x, scale_y));
+ }
+ return ImageDescription(slist, check_probs);
+ }
+};
+
+struct AveragePrecisionCalculator {
+private:
+ enum MatchKind { TruePositive, FalsePositive };
+
+ /**
+ * Here we count all TP and FP matches for all the classes in all the images.
+ */
+ std::map<int, std::vector<std::pair<double, MatchKind>>> matches;
+
+ std::map<int, int> N;
+
+ double threshold;
+
+ static bool SortBBoxDescend(const DetectedObject& bbox1, const DetectedObject& bbox2) {
+ return bbox1.prob > bbox2.prob;
+ }
+
+ static bool SortPairDescend(const std::pair<double, MatchKind>& p1, const std::pair<double, MatchKind>& p2) {
+ return p1.first > p2.first;
+ }
+
+public:
+ explicit AveragePrecisionCalculator(double _threshold) : threshold(_threshold) {}
+
+ // gt_bboxes -> des
+ // bboxes -> det
+
+ void consumeImage(const ImageDescription& detectedObjects, const ImageDescription& desiredObjects) {
+ // Collecting IoU values
+ std::vector<bool> visited(desiredObjects.alist.size(), false);
+ std::vector<DetectedObject> bboxes{std::begin(detectedObjects.alist), std::end(detectedObjects.alist)};
+ std::sort(bboxes.begin(), bboxes.end(), SortBBoxDescend);
+
+ for (auto&& detObj : bboxes) {
+ // Searching for the best match to this detection
+ // Searching for desired object
+ float overlap_max = -1;
+ int jmax = -1;
+ auto desmax = desiredObjects.alist.end();
+
+ int j = 0;
+ for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); desObj++, j++) {
+ double iou = DetectedObject::ioU(detObj, *desObj);
+ if (iou > overlap_max) {
+ overlap_max = static_cast<float>(iou);
+ jmax = j;
+ desmax = desObj;
+ }
+ }
+
+ MatchKind mk;
+ if (overlap_max >= threshold) {
+ if (!desmax->difficult) {
+ if (!visited[jmax]) {
+ mk = TruePositive;
+ visited[jmax] = true;
+ } else {
+ mk = FalsePositive;
+ }
+ matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk));
+ }
+ } else {
+ mk = FalsePositive;
+ matches[detObj.objectType].push_back(std::make_pair(detObj.prob, mk));
+ }
+ }
+
+ for (auto desObj = desiredObjects.alist.begin(); desObj != desiredObjects.alist.end(); desObj++) {
+ if (!desObj->difficult) {
+ N[desObj->objectType]++;
+ }
+ }
+ }
+
+ std::map<int, double> calculateAveragePrecisionPerClass() const {
+ /**
+ * Precision-to-TP curve per class (a variation of precision-to-recall curve without
+ * dividing into N)
+ */
+ std::map<int, std::map<int, double>> precisionToTP;
+
+ std::map<int, double> res;
+
+ for (auto m : matches) {
+ // Sorting
+ std::sort(m.second.begin(), m.second.end(), SortPairDescend);
+
+ int clazz = m.first;
+ int TP = 0, FP = 0;
+
+ std::vector<double> prec;
+ std::vector<double> rec;
+
+ for (auto mm : m.second) {
+ // Here we are descending in a probability value
+ MatchKind mk = mm.second;
+ if (mk == TruePositive)
+ TP++;
+ else if (mk == FalsePositive)
+ FP++;
+
+ double precision = static_cast<double>(TP) / (TP + FP);
+ double recall = 0;
+ if (N.find(clazz) != N.end()) {
+ recall = static_cast<double>(TP) / N.at(clazz);
+ }
+
+ prec.push_back(precision);
+ rec.push_back(recall);
+ }
+
+ int num = static_cast<int>(rec.size());
+
+ // 11point from Caffe
+ double ap = 0;
+ std::vector<float> max_precs(11, 0.);
+ int start_idx = num - 1;
+ for (int j = 10; j >= 0; --j) {
+ for (int i = start_idx; i >= 0; --i) {
+ if (rec[i] < j / 10.) {
+ start_idx = i;
+ if (j > 0) {
+ max_precs[j - 1] = max_precs[j];
+ }
+ break;
+ } else {
+ if (max_precs[j] < prec[i]) {
+ max_precs[j] = static_cast<float>(prec[i]);
+ }
+ }
+ }
+ }
+ for (int j = 10; j >= 0; --j) {
+ ap += max_precs[j] / 11;
+ }
+ res[clazz] = ap;
+ }
+
+ return res;
+ }
+};
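+
+// Illustrative usage sketch (assumes per-image detections and ground truth are already available as
+// ImageDescription objects; 0.5 is the usual VOC-style IoU threshold, all names are placeholders):
+//
+//     AveragePrecisionCalculator apCalc(0.5);
+//     for (size_t i = 0; i < numImages; ++i)
+//         apCalc.consumeImage(detectionsPerImage[i], groundTruthPerImage[i]);
+//     std::map<int, double> apPerClass = apCalc.calculateAveragePrecisionPerClass();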
+
+/**
+ * @brief Adds colored rectangles to the image
+ * @param data - image data the rectangles are drawn on
+ * @param height - height of the image
+ * @param width - width of the image
+ * @param detectedObjects - vector of detected objects with normalized coordinates
+ */
+static UNUSED void addRectangles(unsigned char* data,
+ size_t height,
+ size_t width,
+ std::vector<DetectedObject> detectedObjects) {
+ std::vector<Color> colors = {{128, 64, 128}, {232, 35, 244}, {70, 70, 70}, {156, 102, 102}, {153, 153, 190},
+ {153, 153, 153}, {30, 170, 250}, {0, 220, 220}, {35, 142, 107}, {152, 251, 152},
+ {180, 130, 70}, {60, 20, 220}, {0, 0, 255}, {142, 0, 0}, {70, 0, 0},
+ {100, 60, 0}, {90, 0, 0}, {230, 0, 0}, {32, 11, 119}, {0, 74, 111},
+ {81, 0, 81}};
+
+ for (size_t i = 0; i < detectedObjects.size(); i++) {
+ int cls = detectedObjects[i].objectType % colors.size();
+
+ int xmin = static_cast<int>(detectedObjects[i].xmin * width);
+ int xmax = static_cast<int>(detectedObjects[i].xmax * width);
+ int ymin = static_cast<int>(detectedObjects[i].ymin * height);
+ int ymax = static_cast<int>(detectedObjects[i].ymax * height);
+
+ size_t shift_first = ymin * width * 3;
+ size_t shift_second = ymax * width * 3;
+ for (int x = xmin; x < xmax; x++) {
+ data[shift_first + x * 3] = colors.at(cls).red();
+ data[shift_first + x * 3 + 1] = colors.at(cls).green();
+ data[shift_first + x * 3 + 2] = colors.at(cls).blue();
+ data[shift_second + x * 3] = colors.at(cls).red();
+ data[shift_second + x * 3 + 1] = colors.at(cls).green();
+ data[shift_second + x * 3 + 2] = colors.at(cls).blue();
+ }
+
+ shift_first = xmin * 3;
+ shift_second = xmax * 3;
+ for (int y = ymin; y < ymax; y++) {
+ data[shift_first + y * width * 3] = colors.at(cls).red();
+ data[shift_first + y * width * 3 + 1] = colors.at(cls).green();
+ data[shift_first + y * width * 3 + 2] = colors.at(cls).blue();
+ data[shift_second + y * width * 3] = colors.at(cls).red();
+ data[shift_second + y * width * 3 + 1] = colors.at(cls).green();
+ data[shift_second + y * width * 3 + 2] = colors.at(cls).blue();
+ }
+ }
+}
+
+inline void showAvailableDevices() {
+ ov::Core core;
+ std::vector<std::string> devices = core.get_available_devices();
+
+ std::cout << std::endl;
+ std::cout << "Available target devices:";
+ for (const auto& device : devices) {
+ std::cout << " " << device;
+ }
+ std::cout << std::endl;
+}
+
+/**
+ * @brief Parse a text config file. The file must have the following format (with a space as the delimiter):
+ * CONFIG_NAME1 CONFIG_VALUE1
+ * CONFIG_NAME2 CONFIG_VALUE2
+ *
+ * @param configName - filename for a file with config options
+ * @param comment - lines starting with the symbol `comment` are skipped
+ */
+std::map<std::string, std::string> parseConfig(const std::string& configName, char comment = '#');
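+
+// Example config file accepted by parseConfig (one "NAME VALUE" pair per line, '#' starts a comment;
+// "device.cfg" and the option names below are placeholders):
+//
+//     # device.cfg
+//     PERF_COUNT YES
+//     NUM_STREAMS 4
+//
+//     std::map<std::string, std::string> config = parseConfig("device.cfg");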
+
+inline std::string getFullDeviceName(ov::Core& core, std::string device) {
+ try {
+ return core.get_property(device, ov::device::full_name);
+ } catch (ov::Exception&) {
+ return {};
+ }
+}
+
+static UNUSED void printPerformanceCounts(std::vector<ov::ProfilingInfo> performanceData,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ std::chrono::microseconds totalTime = std::chrono::microseconds::zero();
+ // Print performance counts
+ if (bshowHeader) {
+ stream << std::endl << "performance counts:" << std::endl << std::endl;
+ }
+ std::ios::fmtflags fmt(std::cout.flags());
+ for (const auto& it : performanceData) {
+ std::string toPrint(it.node_name);
+ const int maxLayerName = 30;
+
+ if (it.node_name.length() >= maxLayerName) {
+ toPrint = it.node_name.substr(0, maxLayerName - 5);
+ toPrint += "...";
+ }
+
+ stream << std::setw(maxLayerName) << std::left << toPrint << " ";
+ switch (it.status) {
+ case ov::ProfilingInfo::Status::EXECUTED:
+ stream << std::setw(15) << std::left << "EXECUTED ";
+ break;
+ case ov::ProfilingInfo::Status::NOT_RUN:
+ stream << std::setw(15) << std::left << "NOT_RUN ";
+ break;
+ case ov::ProfilingInfo::Status::OPTIMIZED_OUT:
+ stream << std::setw(15) << std::left << "OPTIMIZED_OUT ";
+ break;
+ }
+ stream << std::setw(30) << std::left << "layerType: " + std::string(it.node_type) + " ";
+ stream << std::setw(30) << std::left << "execType: " + std::string(it.exec_type) + " ";
+ stream << std::setw(25) << std::left << "realTime (ms): " + std::to_string(it.real_time.count() / 1000.0) + " ";
+ stream << std::setw(25) << std::left << "cpuTime (ms): " + std::to_string(it.cpu_time.count() / 1000.0) + " ";
+ stream << std::endl;
+ if (it.real_time.count() > 0) {
+ totalTime += it.real_time;
+ }
+ }
+ stream << std::setw(25) << std::left << "Total time: " + std::to_string(totalTime.count() / 1000.0)
+ << " milliseconds" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Full device name: " << deviceName << std::endl;
+ std::cout << std::endl;
+ std::cout.flags(fmt);
+}
+
+static UNUSED void printPerformanceCounts(ov::InferRequest request,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ auto performanceMap = request.get_profiling_info();
+ printPerformanceCounts(performanceMap, stream, deviceName, bshowHeader);
+}
+
+static inline std::string double_to_string(const double number) {
+ std::stringstream ss;
+ ss << std::fixed << std::setprecision(2) << number;
+ return ss.str();
+}
+
+template <typename T>
+using uniformDistribution = typename std::conditional<
+ std::is_floating_point<T>::value,
+ std::uniform_real_distribution<T>,
+ typename std::conditional<std::is_integral<T>::value, std::uniform_int_distribution<T>, void>::type>::type;
+
+template <typename T, typename T2>
+static inline void fill_random(ov::Tensor& tensor,
+ T rand_min = std::numeric_limits<uint8_t>::min(),
+ T rand_max = std::numeric_limits<uint8_t>::max()) {
+ std::mt19937 gen(0);
+ size_t tensor_size = tensor.get_size();
+ if (0 == tensor_size) {
+ throw std::runtime_error(
+ "Models with dynamic shapes aren't supported. Input tensors must have specific shapes before inference");
+ }
+ T* data = tensor.data<T>();
+ uniformDistribution<T2> distribution(rand_min, rand_max);
+ for (size_t i = 0; i < tensor_size; i++) {
+ data[i] = static_cast<T>(distribution(gen));
+ }
+}
+
+static inline void fill_tensor_random(ov::Tensor tensor) {
+ switch (tensor.get_element_type()) {
+ case ov::element::f32:
+ fill_random<float, float>(tensor);
+ break;
+ case ov::element::f64:
+ fill_random<double, double>(tensor);
+ break;
+ case ov::element::f16:
+ fill_random<short, short>(tensor);
+ break;
+ case ov::element::i32:
+ fill_random<int32_t, int32_t>(tensor);
+ break;
+ case ov::element::i64:
+ fill_random<int64_t, int64_t>(tensor);
+ break;
+ case ov::element::u8:
+ // uniform_int_distribution<uint8_t> is not allowed in the C++17
+ // standard and vs2017/19
+ fill_random<uint8_t, uint32_t>(tensor);
+ break;
+ case ov::element::i8:
+ // uniform_int_distribution<int8_t> is not allowed in the C++17 standard
+ // and vs2017/19
+ fill_random<int8_t, int32_t>(tensor, std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max());
+ break;
+ case ov::element::u16:
+ fill_random<uint16_t, uint16_t>(tensor);
+ break;
+ case ov::element::i16:
+ fill_random<int16_t, int16_t>(tensor);
+ break;
+ case ov::element::boolean:
+ fill_random<uint8_t, uint32_t>(tensor, 0, 1);
+ break;
+ default:
+ throw ov::Exception("Input type is not supported for a tensor");
+ }
+}
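+
+// Illustrative usage sketch (assumes "compiled_model" and "infer_request" are valid; fills every
+// input tensor with deterministic pseudo-random data, as benchmarking samples typically do):
+//
+//     for (const auto& input : compiled_model.inputs()) {
+//         ov::Tensor tensor = infer_request.get_tensor(input);
+//         fill_tensor_random(tensor);
+//     }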
+
+static UNUSED void printPerformanceCountsNoSort(std::vector<ov::ProfilingInfo> performanceData,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ std::chrono::microseconds totalTime = std::chrono::microseconds::zero();
+ // Print performance counts
+ if (bshowHeader) {
+ stream << std::endl << "performance counts:" << std::endl << std::endl;
+ }
+ std::ios::fmtflags fmt(std::cout.flags());
+
+ for (const auto& it : performanceData) {
+ if (it.real_time.count() > 0) {
+ totalTime += it.real_time;
+ }
+ }
+ if (totalTime.count() != 0) {
+ for (const auto& it : performanceData) {
+ std::string toPrint(it.node_name);
+ const int maxLayerName = 30;
+
+ if (it.node_name.length() >= maxLayerName) {
+ toPrint = it.node_name.substr(0, maxLayerName - 5);
+ toPrint += "...";
+ }
+
+ stream << std::setw(maxLayerName) << std::left << toPrint << " ";
+ switch (it.status) {
+ case ov::ProfilingInfo::Status::EXECUTED:
+ stream << std::setw(15) << std::left << "EXECUTED ";
+ break;
+ case ov::ProfilingInfo::Status::NOT_RUN:
+ stream << std::setw(15) << std::left << "NOT_RUN ";
+ break;
+ case ov::ProfilingInfo::Status::OPTIMIZED_OUT:
+ stream << std::setw(15) << std::left << "OPTIMIZED_OUT ";
+ break;
+ }
+ stream << std::setw(30) << std::left << "layerType: " + std::string(it.node_type) + " ";
+ stream << std::setw(30) << std::left << "execType: " + std::string(it.exec_type) + " ";
+ stream << std::setw(25) << std::left
+ << "realTime (ms): " + std::to_string(it.real_time.count() / 1000.0) + " ";
+ stream << std::setw(25) << std::left
+ << "cpuTime (ms): " + std::to_string(it.cpu_time.count() / 1000.0) + " ";
+
+ double opt_proportion = it.real_time.count() * 100.0 / totalTime.count();
+ std::stringstream opt_proportion_ss;
+ opt_proportion_ss << std::fixed << std::setprecision(2) << opt_proportion;
+ std::string opt_proportion_str = opt_proportion_ss.str();
+ if (opt_proportion_str == "0.00") {
+ opt_proportion_str = "N/A";
+ }
+ stream << std::setw(20) << std::left << "proportion: " + opt_proportion_str + "%";
+
+ stream << std::endl;
+ }
+ }
+ stream << std::setw(25) << std::left << "Total time: " + std::to_string(totalTime.count() / 1000.0)
+ << " milliseconds" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Full device name: " << deviceName << std::endl;
+ std::cout << std::endl;
+ std::cout.flags(fmt);
+}
+
+static UNUSED bool sort_pc_descend(const ov::ProfilingInfo& profiling1, const ov::ProfilingInfo& profiling2) {
+ return profiling1.real_time > profiling2.real_time;
+}
+
+static UNUSED void printPerformanceCountsDescendSort(std::vector<ov::ProfilingInfo> performanceData,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ std::chrono::microseconds totalTime = std::chrono::microseconds::zero();
+ // Print performance counts
+ if (bshowHeader) {
+ stream << std::endl << "performance counts:" << std::endl << std::endl;
+ }
+ std::ios::fmtflags fmt(std::cout.flags());
+
+ for (const auto& it : performanceData) {
+ if (it.real_time.count() > 0) {
+ totalTime += it.real_time;
+ }
+ }
+ if (totalTime.count() != 0) {
+ // sort perfcounter
+ std::vector<ov::ProfilingInfo> sortPerfCounts{std::begin(performanceData), std::end(performanceData)};
+ std::sort(sortPerfCounts.begin(), sortPerfCounts.end(), sort_pc_descend);
+
+ for (const auto& it : sortPerfCounts) {
+ std::string toPrint(it.node_name);
+ const int maxLayerName = 30;
+
+ if (it.node_name.length() >= maxLayerName) {
+ toPrint = it.node_name.substr(0, maxLayerName - 5);
+ toPrint += "...";
+ }
+
+ stream << std::setw(maxLayerName) << std::left << toPrint << " ";
+ switch (it.status) {
+ case ov::ProfilingInfo::Status::EXECUTED:
+ stream << std::setw(15) << std::left << "EXECUTED ";
+ break;
+ case ov::ProfilingInfo::Status::NOT_RUN:
+ stream << std::setw(15) << std::left << "NOT_RUN ";
+ break;
+ case ov::ProfilingInfo::Status::OPTIMIZED_OUT:
+ stream << std::setw(15) << std::left << "OPTIMIZED_OUT ";
+ break;
+ }
+ stream << std::setw(30) << std::left << "layerType: " + std::string(it.node_type) + " ";
+ stream << std::setw(30) << std::left << "execType: " + std::string(it.exec_type) + " ";
+ stream << std::setw(25) << std::left
+ << "realTime (ms): " + std::to_string(it.real_time.count() / 1000.0) + " ";
+ stream << std::setw(25) << std::left
+ << "cpuTime (ms): " + std::to_string(it.cpu_time.count() / 1000.0) + " ";
+
+ double opt_proportion = it.real_time.count() * 100.0 / totalTime.count();
+ std::stringstream opt_proportion_ss;
+ opt_proportion_ss << std::fixed << std::setprecision(2) << opt_proportion;
+ std::string opt_proportion_str = opt_proportion_ss.str();
+ if (opt_proportion_str == "0.00") {
+ opt_proportion_str = "N/A";
+ }
+ stream << std::setw(20) << std::left << "proportion: " + opt_proportion_str + "%";
+
+ stream << std::endl;
+ }
+ }
+ stream << std::setw(25) << std::left << "Total time: " + std::to_string(totalTime.count() / 1000.0)
+ << " milliseconds" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Full device name: " << deviceName << std::endl;
+ std::cout << std::endl;
+ std::cout.flags(fmt);
+}
+
+static UNUSED void printPerformanceCountsSimpleSort(std::vector<ov::ProfilingInfo> performanceData,
+ std::ostream& stream,
+ std::string deviceName,
+ bool bshowHeader = true) {
+ std::chrono::microseconds totalTime = std::chrono::microseconds::zero();
+ // Print performance counts
+ if (bshowHeader) {
+ stream << std::endl << "performance counts:" << std::endl << std::endl;
+ }
+ std::ios::fmtflags fmt(std::cout.flags());
+
+ for (const auto& it : performanceData) {
+ if (it.real_time.count() > 0) {
+ totalTime += it.real_time;
+ }
+ }
+ if (totalTime.count() != 0) {
+ // sort perfcounter
+ std::vector<ov::ProfilingInfo> sortPerfCounts{std::begin(performanceData), std::end(performanceData)};
+ std::sort(sortPerfCounts.begin(), sortPerfCounts.end(), sort_pc_descend);
+
+ for (const auto& it : sortPerfCounts) {
+ if (it.status == ov::ProfilingInfo::Status::EXECUTED) {
+ std::string toPrint(it.node_name);
+ const int maxLayerName = 30;
+
+ if (it.node_name.length() >= maxLayerName) {
+ toPrint = it.node_name.substr(0, maxLayerName - 5);
+ toPrint += "...";
+ }
+
+ stream << std::setw(maxLayerName) << std::left << toPrint << " ";
+ stream << std::setw(15) << std::left << "EXECUTED ";
+ stream << std::setw(30) << std::left << "layerType: " + std::string(it.node_type) + " ";
+ stream << std::setw(30) << std::left << "execType: " + std::string(it.exec_type) + " ";
+ stream << std::setw(25) << std::left
+ << "realTime (ms): " + std::to_string(it.real_time.count() / 1000.0) + " ";
+ stream << std::setw(25) << std::left
+ << "cpuTime (ms): " + std::to_string(it.cpu_time.count() / 1000.0) + " ";
+
+ double opt_proportion = it.real_time.count() * 100.0 / totalTime.count();
+ std::stringstream opt_proportion_ss;
+ opt_proportion_ss << std::fixed << std::setprecision(2) << opt_proportion;
+ std::string opt_proportion_str = opt_proportion_ss.str();
+ if (opt_proportion_str == "0.00") {
+ opt_proportion_str = "N/A";
+ }
+ stream << std::setw(20) << std::left << "proportion: " + opt_proportion_str + "%";
+
+ stream << std::endl;
+ }
+ }
+ }
+ stream << std::setw(25) << std::left << "Total time: " + std::to_string(totalTime.count() / 1000.0)
+ << " milliseconds" << std::endl;
+ std::cout << std::endl;
+ std::cout << "Full device name: " << deviceName << std::endl;
+ std::cout << std::endl;
+ std::cout.flags(fmt);
+}
+
+static UNUSED void printPerformanceCountsSort(std::vector<ov::ProfilingInfo> performanceData,
+ std::ostream& stream,
+ std::string deviceName,
+ std::string sorttype,
+ bool bshowHeader = true) {
+ if (sorttype == pcNoSort) {
+ printPerformanceCountsNoSort(performanceData, stream, deviceName, bshowHeader);
+ } else if (sorttype == pcSort) {
+ printPerformanceCountsDescendSort(performanceData, stream, deviceName, bshowHeader);
+ } else if (sorttype == pcSimpleSort) {
+ printPerformanceCountsSimpleSort(performanceData, stream, deviceName, bshowHeader);
+ }
+}
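+
+// Illustrative usage sketch (assumes "core" is an ov::Core, the model was compiled with
+// ov::enable_profiling(true), and "request" has finished at least one inference; "CPU" is a
+// placeholder device name):
+//
+//     std::vector<ov::ProfilingInfo> perf = request.get_profiling_info();
+//     printPerformanceCountsSort(perf, std::cout, getFullDeviceName(core, "CPU"), pcSort);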
diff --git a/python/openvino/runtime/common/utils/include/samples/console_progress.hpp b/python/openvino/runtime/common/utils/include/samples/console_progress.hpp
new file mode 100644
index 0000000..f62aeed
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/console_progress.hpp
@@ -0,0 +1,107 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <cstdio>
+#include <iomanip>
+#include <sstream>
+
+/**
+ * @class ConsoleProgress
+ * @brief The ConsoleProgress class provides functionality for dynamically printing progress to the console
+ */
+class ConsoleProgress {
+ static const size_t DEFAULT_DETALIZATION = 20;
+ static const size_t DEFAULT_PERCENT_TO_UPDATE_PROGRESS = 1;
+
+ size_t total;
+ size_t cur_progress = 0;
+ size_t prev_progress = 0;
+ bool stream_output;
+ size_t detalization;
+ size_t percent_to_update;
+
+public:
+ /**
+ * @brief A constructor of ConsoleProgress class
+     * @param _total - maximum value that corresponds to 100%
+     * @param _stream_output - true to print each update on a new line instead of rewriting the current line
+     * @param _percent_to_update - minimum percentage change required to redraw the progress bar
+     * @param _detalization - number of dot symbols used to represent the progress bar
+ */
+ explicit ConsoleProgress(size_t _total,
+ bool _stream_output = false,
+ size_t _percent_to_update = DEFAULT_PERCENT_TO_UPDATE_PROGRESS,
+ size_t _detalization = DEFAULT_DETALIZATION)
+ : total(_total),
+ detalization(_detalization),
+ percent_to_update(_percent_to_update) {
+ stream_output = _stream_output;
+ if (total == 0) {
+ total = 1;
+ }
+ }
+
+ /**
+ * @brief Shows progress with current data. Progress is shown from the beginning of the current
+ * line.
+ */
+ void showProgress() const {
+ std::stringstream strm;
+ if (!stream_output) {
+ strm << '\r';
+ }
+ strm << "Progress: [";
+ size_t i = 0;
+ for (; i < detalization * cur_progress / total; i++) {
+ strm << ".";
+ }
+ for (; i < detalization; i++) {
+ strm << " ";
+ }
+ strm << "] " << std::setw(3) << 100 * cur_progress / total << "% done";
+ if (stream_output) {
+ strm << std::endl;
+ }
+ std::fputs(strm.str().c_str(), stdout);
+ std::fflush(stdout);
+ }
+
+ /**
+     * @brief Redraws the progress bar when the progress has changed enough since the last update
+ */
+ void updateProgress() {
+ if (cur_progress > total)
+ cur_progress = total;
+ size_t prev_percent = 100 * prev_progress / total;
+ size_t cur_percent = 100 * cur_progress / total;
+
+ if (prev_progress == 0 || cur_progress == total || prev_percent + percent_to_update <= cur_percent) {
+ showProgress();
+ prev_progress = cur_progress;
+ }
+ }
+
+ /**
+     * @brief Adds a value to the current progress and redraws the progress bar
+ * @param add - value to add
+ */
+ void addProgress(int add) {
+ if (add < 0 && -add > static_cast<int>(cur_progress)) {
+ add = -static_cast<int>(cur_progress);
+ }
+ cur_progress += add;
+ updateProgress();
+ }
+
+ /**
+     * @brief Outputs an end line and flushes stdout.
+ */
+ void finish() {
+ std::stringstream strm;
+ strm << std::endl;
+ std::fputs(strm.str().c_str(), stdout);
+ std::fflush(stdout);
+ }
+};
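A minimal ConsoleProgress sketch (illustrative only; the loop body is hypothetical), reporting progress over 100 items:

    ConsoleProgress progress(100);  // 100 items correspond to 100%
    for (size_t i = 0; i < 100; ++i) {
        // ... process one item ...
        progress.addProgress(1);
    }
    progress.finish();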
diff --git a/python/openvino/runtime/common/utils/include/samples/csv_dumper.hpp b/python/openvino/runtime/common/utils/include/samples/csv_dumper.hpp
new file mode 100644
index 0000000..5c80134
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/csv_dumper.hpp
@@ -0,0 +1,98 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <ctime>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+
+#include "samples/slog.hpp"
+
+/**
+ * @class CsvDumper
+ * @brief The CsvDumper class provides functionality for dumping values to CSV files
+ */
+class CsvDumper {
+ std::ofstream file;
+ std::string filename;
+ bool canDump = true;
+ char delimiter = ';';
+
+ std::string generateFilename() {
+ std::stringstream filename;
+ filename << "dumpfile-";
+ filename << time(nullptr);
+ filename << ".csv";
+ return filename.str();
+ }
+
+public:
+ /**
+     * @brief A constructor. Disables dumping if the dump file cannot be created
+     * @param enabled - true if dumping is enabled
+     * @param name - name of the file to dump to; a timestamped default name is generated if empty.
+     *               The file is not created if dumping is disabled.
+ */
+ explicit CsvDumper(bool enabled = true, const std::string& name = "") : canDump(enabled) {
+ if (!canDump) {
+ return;
+ }
+ filename = (name == "" ? generateFilename() : name);
+ file.open(filename, std::ios::out);
+ if (!file) {
+ slog::warn << "Cannot create dump file! Disabling dump." << slog::endl;
+ canDump = false;
+ }
+ }
+
+ /**
+     * @brief Sets the delimiter to use in the CSV file
+     * @param c - delimiter character
+ */
+ void setDelimiter(char c) {
+ delimiter = c;
+ }
+
+ /**
+     * @brief Overloads the stream operator to write values to the file. Does nothing if dumping is
+     * disabled. Appends the delimiter after the provided value
+     * @param add - value to add to the dump
+     * @return reference to the same object
+ */
+ template <class T>
+ CsvDumper& operator<<(const T& add) {
+ if (canDump) {
+ file << add << delimiter;
+ }
+ return *this;
+ }
+
+ /**
+ * @brief Finishes line in dump file. Does nothing if dumping is disabled
+ */
+ void endLine() {
+ if (canDump) {
+ file << "\n";
+ }
+ }
+
+ /**
+     * @brief Reports whether dumping is enabled.
+ * @return true if dump is enabled and file was successfully created
+ */
+ bool dumpEnabled() {
+ return canDump;
+ }
+
+ /**
+ * @brief Gets name of a dump file
+ * @return name of a dump file
+ */
+ std::string getFilename() const {
+ return filename;
+ }
+};
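Illustrative CsvDumper usage (the file name and values are hypothetical): each operator<< call writes one cell followed by the delimiter, and endLine() finishes the row.

    CsvDumper dumper(true, "report.csv");
    dumper.setDelimiter(',');
    dumper << "layer" << "realTime(ms)";
    dumper.endLine();
    dumper << "conv1" << 0.42;
    dumper.endLine();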
diff --git a/python/openvino/runtime/common/utils/include/samples/latency_metrics.hpp b/python/openvino/runtime/common/utils/include/samples/latency_metrics.hpp
new file mode 100644
index 0000000..bca39d0
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/latency_metrics.hpp
@@ -0,0 +1,42 @@
+// Copyright (C) 2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+// clang-format off
+#include "samples/common.hpp"
+#include "samples/slog.hpp"
+// clang-format on
+
+/// @brief Responsible for calculating different latency metrics
+class LatencyMetrics {
+public:
+ LatencyMetrics() {}
+
+ LatencyMetrics(const std::vector<double>& latencies,
+ const std::string& data_shape = "",
+ size_t percentile_boundary = 50)
+ : data_shape(data_shape),
+ percentile_boundary(percentile_boundary) {
+ fill_data(latencies, percentile_boundary);
+ }
+
+ void write_to_stream(std::ostream& stream) const;
+ void write_to_slog() const;
+
+ double median_or_percentile = 0;
+ double avg = 0;
+ double min = 0;
+ double max = 0;
+ std::string data_shape;
+
+private:
+ void fill_data(std::vector<double> latencies, size_t percentile_boundary);
+ size_t percentile_boundary = 50;
+};
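Illustrative LatencyMetrics usage (latency values are made up): with the default percentile boundary of 50 the reported value is the median of the per-iteration latencies.

    std::vector<double> latencies_ms = {4.1, 3.9, 4.4, 4.0};  // hypothetical per-iteration latencies in ms
    LatencyMetrics metrics(latencies_ms);                     // default boundary 50 -> median
    metrics.write_to_slog();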
diff --git a/python/openvino/runtime/common/utils/include/samples/ocv_common.hpp b/python/openvino/runtime/common/utils/include/samples/ocv_common.hpp
new file mode 100644
index 0000000..94f3b1f
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/ocv_common.hpp
@@ -0,0 +1,92 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief a header file with common samples functionality using OpenCV
+ * @file ocv_common.hpp
+ */
+
+#pragma once
+
+#include <opencv2/opencv.hpp>
+
+#include "openvino/openvino.hpp"
+#include "samples/common.hpp"
+
+/**
+ * @brief Copies image data stored in a cv::Mat object into a given Blob object, resizing the image if needed.
+ * @param orig_image - cv::Mat object with the image data.
+ * @param blob - Blob object to be filled with the image data.
+ * @param batchIndex - batch index of the image inside the blob.
+ */
+template <typename T>
+void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
+ InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
+ const size_t width = blobSize[3];
+ const size_t height = blobSize[2];
+ const size_t channels = blobSize[1];
+ InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
+ OPENVINO_ASSERT(mblob,
+ "We expect blob to be inherited from MemoryBlob in matU8ToBlob, "
+ "but by fact we were not able to cast inputBlob to MemoryBlob");
+ // locked memory holder should be alive all time while access to its buffer happens
+ auto mblobHolder = mblob->wmap();
+
+ T* blob_data = mblobHolder.as<T*>();
+
+ cv::Mat resized_image(orig_image);
+ if (static_cast<int>(width) != orig_image.size().width || static_cast<int>(height) != orig_image.size().height) {
+ cv::resize(orig_image, resized_image, cv::Size(width, height));
+ }
+
+ int batchOffset = batchIndex * width * height * channels;
+
+ for (size_t c = 0; c < channels; c++) {
+ for (size_t h = 0; h < height; h++) {
+ for (size_t w = 0; w < width; w++) {
+ blob_data[batchOffset + c * width * height + h * width + w] = resized_image.at<cv::Vec3b>(h, w)[c];
+ }
+ }
+ }
+}
+
+/**
+ * @brief Wraps the data stored inside a passed cv::Mat object with a new Blob pointer.
+ * @note No memory allocation happens. The blob just points to the already existing
+ * cv::Mat data.
+ * @param mat - cv::Mat object with the image data.
+ * @return resulting Blob pointer.
+ */
+static UNUSED InferenceEngine::Blob::Ptr wrapMat2Blob(const cv::Mat& mat) {
+ size_t channels = mat.channels();
+ size_t height = mat.size().height;
+ size_t width = mat.size().width;
+
+ size_t strideH = mat.step.buf[0];
+ size_t strideW = mat.step.buf[1];
+
+ bool is_dense = strideW == channels && strideH == channels * width;
+
+ OPENVINO_ASSERT(is_dense, "Doesn't support conversion from not dense cv::Mat");
+
+ InferenceEngine::TensorDesc tDesc(InferenceEngine::Precision::U8,
+ {1, channels, height, width},
+ InferenceEngine::Layout::NHWC);
+
+ return InferenceEngine::make_shared_blob<uint8_t>(tDesc, mat.data);
+}
+
+static UNUSED ov::Tensor wrapMat2Tensor(const cv::Mat& mat) {
+ const size_t channels = mat.channels();
+ const size_t height = mat.size().height;
+ const size_t width = mat.size().width;
+
+ const size_t strideH = mat.step.buf[0];
+ const size_t strideW = mat.step.buf[1];
+
+ const bool is_dense = strideW == channels && strideH == channels * width;
+ OPENVINO_ASSERT(is_dense, "Doesn't support conversion from not dense cv::Mat");
+
+ return ov::Tensor(ov::element::u8, ov::Shape{1, height, width, channels}, mat.data);
+}
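Illustrative wrapMat2Tensor usage (the image path and infer request are hypothetical): the cv::Mat must be dense (no row padding) and keeps ownership of the pixel data.

    cv::Mat frame = cv::imread("input.jpg");   // 8-bit BGR, HWC layout
    ov::Tensor input = wrapMat2Tensor(frame);  // zero-copy u8 tensor of shape {1, H, W, C}
    request.set_input_tensor(input);           // assuming an ov::InferRequest named request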
diff --git a/python/openvino/runtime/common/utils/include/samples/os/windows/w_dirent.h b/python/openvino/runtime/common/utils/include/samples/os/windows/w_dirent.h
new file mode 100644
index 0000000..40d1c5b
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/os/windows/w_dirent.h
@@ -0,0 +1,176 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#if defined(_WIN32)
+
+# ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN_UNDEF
+# endif
+
+# ifndef NOMINMAX
+# define NOMINMAX
+# define NOMINMAX_UNDEF
+# endif
+
+# if defined(_M_IX86) && !defined(_X86_) && !defined(_AMD64_)
+# define _X86_
+# endif
+
+# if defined(_M_X64) && !defined(_X86_) && !defined(_AMD64_)
+# define _AMD64_
+# endif
+
+# if defined(_M_ARM) && !defined(_ARM_) && !defined(_ARM64_)
+# define _ARM_
+# endif
+
+# if defined(_M_ARM64) && !defined(_ARM_) && !defined(_ARM64_)
+# define _ARM64_
+# endif
+
+// clang-format off
+ #include <string.h>
+ #include <windef.h>
+ #include <fileapi.h>
+ #include <Winbase.h>
+ #include <sys/stat.h>
+// clang-format on
+
+// Copied from linux libc sys/stat.h:
+# define S_ISREG(m) (((m)&S_IFMT) == S_IFREG)
+# define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR)
+
+/// @brief structure to store directory names
+struct dirent {
+ char* d_name;
+
+ explicit dirent(const wchar_t* wsFilePath) {
+ size_t i;
+ auto slen = wcslen(wsFilePath);
+ d_name = static_cast<char*>(malloc(slen + 1));
+ wcstombs_s(&i, d_name, slen + 1, wsFilePath, slen);
+ }
+ ~dirent() {
+ free(d_name);
+ }
+};
+
+/// @brief class to store directory data (file metadata)
+class DIR {
+ WIN32_FIND_DATAA FindFileData;
+ HANDLE hFind;
+ dirent* next;
+
+ static inline bool endsWith(const std::string& src, const char* with) {
+ int wl = static_cast<int>(strlen(with));
+ int so = static_cast<int>(src.length()) - wl;
+ if (so < 0)
+ return false;
+ return 0 == strncmp(with, &src[so], wl);
+ }
+
+public:
+ DIR(const DIR& other) = delete;
+ DIR(DIR&& other) = delete;
+ DIR& operator=(const DIR& other) = delete;
+ DIR& operator=(DIR&& other) = delete;
+
+ explicit DIR(const char* dirPath) : next(nullptr) {
+ std::string ws = dirPath;
+ if (endsWith(ws, "\\"))
+ ws += "*";
+ else
+ ws += "\\*";
+ hFind = FindFirstFileA(ws.c_str(), &FindFileData);
+ FindFileData.dwReserved0 = hFind != INVALID_HANDLE_VALUE;
+ }
+
+ ~DIR() {
+        if (next)
+            delete next;
+ next = nullptr;
+ FindClose(hFind);
+ }
+
+ /**
+     * @brief Checks whether the file search handle is valid
+     * @return true on success, false on failure
+ */
+ bool isValid() const {
+ return (hFind != INVALID_HANDLE_VALUE && FindFileData.dwReserved0);
+ }
+
+ /**
+     * @brief Reads the next directory entry
+     * @return pointer to a dirent structure, or nullptr when no entries are left
+ */
+ dirent* nextEnt() {
+ if (next != nullptr)
+ delete next;
+ next = nullptr;
+
+ if (!FindFileData.dwReserved0)
+ return nullptr;
+
+ wchar_t wbuf[4096];
+
+ size_t outSize;
+ mbstowcs_s(&outSize, wbuf, 4094, FindFileData.cFileName, 4094);
+ next = new dirent(wbuf);
+ FindFileData.dwReserved0 = FindNextFileA(hFind, &FindFileData);
+ return next;
+ }
+};
+
+/**
+ * @brief Opens a directory for reading
+ * @param dirPath directory path
+ * @return pointer to a DIR structure, or nullptr on failure
+ */
+static DIR* opendir(const char* dirPath) {
+ auto dp = new DIR(dirPath);
+ if (!dp->isValid()) {
+ delete dp;
+ return nullptr;
+ }
+ return dp;
+}
+
+/**
+ * @brief Walks through the directory entries
+ * @param dp pointer to a DIR structure
+ * @return pointer to the next directory entry, or nullptr when no entries are left
+ */
+static struct dirent* readdir(DIR* dp) {
+ return dp->nextEnt();
+}
+
+/**
+ * @brief Closes the directory and releases its resources
+ * @param dp pointer to a DIR structure
+ */
+static void closedir(DIR* dp) {
+ delete dp;
+}
+
+# ifdef WIN32_LEAN_AND_MEAN_UNDEF
+# undef WIN32_LEAN_AND_MEAN
+# undef WIN32_LEAN_AND_MEAN_UNDEF
+# endif
+
+# ifdef NOMINMAX_UNDEF
+# undef NOMINMAX_UNDEF
+# undef NOMINMAX
+# endif
+
+#else
+
+# include <dirent.h>
+# include <sys/types.h>
+
+#endif
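Illustrative usage of this dirent shim (the directory path is hypothetical): the same loop builds on Windows via this header and on other platforms via <dirent.h>.

    DIR* dir = opendir("C:\\images");
    if (dir != nullptr) {
        while (struct dirent* ent = readdir(dir)) {
            printf("%s\n", ent->d_name);
        }
        closedir(dir);
    }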
diff --git a/python/openvino/runtime/common/utils/include/samples/slog.hpp b/python/openvino/runtime/common/utils/include/samples/slog.hpp
new file mode 100644
index 0000000..3f237e5
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/slog.hpp
@@ -0,0 +1,102 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief a header file with a logging facility for common samples
+ * @file slog.hpp
+ */
+
+#pragma once
+
+#include <ostream>
+#include <string>
+#include <vector>
+
+namespace slog {
+/**
+ * @class LogStreamEndLine
+ * @brief The LogStreamEndLine class implements an end line marker for a log stream
+ */
+class LogStreamEndLine {};
+
+static constexpr LogStreamEndLine endl;
+
+/**
+ * @class LogStreamBoolAlpha
+ * @brief The LogStreamBoolAlpha class implements bool printing for a log stream
+ */
+class LogStreamBoolAlpha {};
+
+static constexpr LogStreamBoolAlpha boolalpha;
+
+/**
+ * @class LogStreamFlush
+ * @brief The LogStreamFlush class implements flushing for a log stream
+ */
+class LogStreamFlush {};
+
+static constexpr LogStreamFlush flush;
+
+/**
+ * @class LogStream
+ * @brief The LogStream class implements a stream for sample logging
+ */
+class LogStream {
+ std::string _prefix;
+ std::ostream* _log_stream;
+ bool _new_line;
+
+public:
+ /**
+     * @brief A constructor. Creates a LogStream object
+     * @param prefix The prefix to print
+     * @param log_stream The output stream to write log messages to
+ */
+ LogStream(const std::string& prefix, std::ostream& log_stream);
+
+ /**
+ * @brief A stream output operator to be used within the logger
+ * @param arg Object for serialization in the logger message
+ */
+ template <class T>
+ LogStream& operator<<(const T& arg) {
+ if (_new_line) {
+ (*_log_stream) << "[ " << _prefix << " ] ";
+ _new_line = false;
+ }
+
+ (*_log_stream) << arg;
+ return *this;
+ }
+
+ /**
+ * @brief Overload output stream operator to print vectors in pretty form
+ * [value1, value2, ...]
+ */
+ template <typename T>
+ LogStream& operator<<(const std::vector<T>& v) {
+ (*_log_stream) << "[ ";
+
+ for (auto&& value : v)
+ (*_log_stream) << value << " ";
+
+ (*_log_stream) << "]";
+
+ return *this;
+ }
+
+ // Specializing for LogStreamEndLine to support slog::endl
+ LogStream& operator<<(const LogStreamEndLine&);
+
+ // Specializing for LogStreamBoolAlpha to support slog::boolalpha
+ LogStream& operator<<(const LogStreamBoolAlpha&);
+
+ // Specializing for LogStreamFlush to support slog::flush
+ LogStream& operator<<(const LogStreamFlush&);
+};
+
+extern LogStream info;
+extern LogStream warn;
+extern LogStream err;
+
+} // namespace slog
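Illustrative slog usage (a sketch): each stream prints its prefix at the start of every new line, slog::endl terminates the line, and the vector overload prints values in "[ v1 v2 ... ]" form.

    slog::info << "Model is loaded" << slog::endl;
    slog::info << "Input shape: " << std::vector<size_t>{1, 3, 224, 224} << slog::endl;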
diff --git a/python/openvino/runtime/common/utils/include/samples/vpu/vpu_tools_common.hpp b/python/openvino/runtime/common/utils/include/samples/vpu/vpu_tools_common.hpp
new file mode 100644
index 0000000..ba0665f
--- /dev/null
+++ b/python/openvino/runtime/common/utils/include/samples/vpu/vpu_tools_common.hpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2022 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include <fstream>
+#include <map>
+#include <string>
+
+static std::map<std::string, std::string> parseConfig(const std::string& configName, char comment = '#') {
+ std::map<std::string, std::string> config = {};
+
+ std::ifstream file(configName);
+ if (!file.is_open()) {
+ return config;
+ }
+
+ std::string key, value;
+ while (file >> key >> value) {
+ if (key.empty() || key[0] == comment) {
+ continue;
+ }
+ config[key] = value;
+ }
+
+ return config;
+}
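Illustrative parseConfig usage (the config file name is hypothetical, and <iostream> is assumed to be included): each whitespace-separated KEY VALUE pair becomes one map entry, while pairs whose key starts with the comment character are skipped.

    std::map<std::string, std::string> config = parseConfig("myriad.conf");
    for (const auto& kv : config) {
        std::cout << kv.first << " = " << kv.second << std::endl;
    }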