Diffstat (limited to 'python/openvino/runtime/common/pipelines/include')
3 files changed, 239 insertions, 0 deletions
diff --git a/python/openvino/runtime/common/pipelines/include/pipelines/async_pipeline.h b/python/openvino/runtime/common/pipelines/include/pipelines/async_pipeline.h
new file mode 100644
index 0000000..6661c00
--- /dev/null
+++ b/python/openvino/runtime/common/pipelines/include/pipelines/async_pipeline.h
@@ -0,0 +1,121 @@
+/*
+// Copyright (C) 2020-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#pragma once
+#include <stdint.h>
+
+#include <condition_variable>
+#include <exception>
+#include <memory>
+#include <mutex>
+#include <unordered_map>
+
+#include <openvino/openvino.hpp>
+
+#include <models/results.h>
+#include <utils/performance_metrics.hpp>
+
+#include "pipelines/requests_pool.h"
+
+class ModelBase;
+struct InputData;
+struct MetaData;
+struct ModelConfig;
+
+/// This is the base class for asynchronous pipelines.
+/// Derived classes should add functions for data submission and output processing.
+class AsyncPipeline {
+public:
+    /// Loads the model and performs the required initialization.
+    /// @param modelInstance pointer to the model object. The object it points to should not be destroyed
+    /// manually after the pointer has been passed to this function.
+    /// @param config - fine-tuning configuration for the model
+    /// @param core - reference to the ov::Core instance to use.
+    /// If it is omitted, a new instance of ov::Core will be created inside.
+    AsyncPipeline(std::unique_ptr<ModelBase>&& modelInstance, const ModelConfig& config, ov::Core& core);
+    virtual ~AsyncPipeline();
+
+    /// Waits until either output data becomes available or the pipeline allows submitting more input data.
+    /// @param shouldKeepOrder if true, the function treats results as ready only if the next sequential result
+    /// (frame) is ready, so results can be extracted in the same order as they were submitted. Otherwise, the
+    /// function returns as soon as any result is ready.
+    void waitForData(bool shouldKeepOrder = true);
+
+    /// @returns true if there is an idle infer request in the pool
+    /// and the next frame can be submitted for processing, false otherwise.
+    bool isReadyToProcess() {
+        return requestsPool->isIdleRequestAvailable();
+    }
+
+    /// Waits for all currently submitted requests to be completed.
+    void waitForTotalCompletion() {
+        if (requestsPool)
+            requestsPool->waitForTotalCompletion();
+    }
+
+    /// Submits data to the model for inference.
+    /// @param inputData - input data to be submitted
+    /// @param metaData - shared pointer to the metadata container.
+    /// Might be null. This pointer is passed through the pipeline and put into the final result structure.
+    /// @returns -1 if the image cannot be scheduled for processing (no free InferRequest is available).
+    /// Otherwise returns a unique sequential frame ID for this request. The same frame ID is written into the
+    /// result structure.
+    virtual int64_t submitData(const InputData& inputData, const std::shared_ptr<MetaData>& metaData);
+
+    /// Gets available data from the queue.
+    /// @param shouldKeepOrder if true, the function treats results as ready only if the next sequential result
+    /// (frame) is ready, so results can be extracted in the same order as they were submitted. Otherwise, the
+    /// function returns as soon as any result is ready.
+    virtual std::unique_ptr<ResultBase> getResult(bool shouldKeepOrder = true);
+
+    PerformanceMetrics getInferenceMetrics() {
+        return inferenceMetrics;
+    }
+    PerformanceMetrics getPreprocessMetrics() {
+        return preprocessMetrics;
+    }
+    PerformanceMetrics getPostprocessMetrics() {
+        return postprocessMetrics;
+    }
+
+protected:
+    /// Returns a processed result, if available.
+    /// @param shouldKeepOrder if true, the function returns processed data sequentially, keeping the original
+    /// frame order (as submitted). Otherwise, the function returns processed data in arbitrary order.
+    /// @returns InferenceResult with processed information, or an empty InferenceResult (with a negative frame ID)
+    /// if there are no results yet.
+    virtual InferenceResult getInferenceResult(bool shouldKeepOrder);
+
+    std::unique_ptr<RequestsPool> requestsPool;
+    std::unordered_map<int64_t, InferenceResult> completedInferenceResults;
+
+    ov::CompiledModel compiledModel;
+
+    std::mutex mtx;
+    std::condition_variable condVar;
+
+    int64_t inputFrameId = 0;
+    int64_t outputFrameId = 0;
+
+    std::exception_ptr callbackException = nullptr;
+
+    std::unique_ptr<ModelBase> model;
+    PerformanceMetrics inferenceMetrics;
+    PerformanceMetrics preprocessMetrics;
+    PerformanceMetrics postprocessMetrics;
+};
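For orientation, the drive loop for this class typically looks like the minimal sketch below. It assumes a concrete ModelBase subclass (model), a populated ModelConfig (config), and the ImageInputData wrapper from the companion input-data header; haveFrames, nextFrame, and consume are hypothetical helpers, not part of this commit.

    ov::Core core;
    AsyncPipeline pipeline(std::move(model), config, core);

    while (haveFrames()) {  // hypothetical input-loop condition
        if (pipeline.isReadyToProcess()) {
            cv::Mat frame = nextFrame();  // hypothetical frame source
            pipeline.submitData(ImageInputData(frame),
                                std::make_shared<ImageMetaData>(frame, std::chrono::steady_clock::now()));
        }
        // Block until a result is ready or an infer request becomes free.
        pipeline.waitForData();
        while (std::unique_ptr<ResultBase> result = pipeline.getResult()) {
            consume(*result);  // hypothetical result sink
        }
    }
    pipeline.waitForTotalCompletion();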
diff --git a/python/openvino/runtime/common/pipelines/include/pipelines/metadata.h b/python/openvino/runtime/common/pipelines/include/pipelines/metadata.h
new file mode 100644
index 0000000..aca18ee
--- /dev/null
+++ b/python/openvino/runtime/common/pipelines/include/pipelines/metadata.h
@@ -0,0 +1,51 @@
+/*
+// Copyright (C) 2018-2020 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#pragma once
+#include <utils/ocv_common.hpp>
+
+struct MetaData {
+    virtual ~MetaData() {}
+
+    template <class T>
+    T& asRef() {
+        return dynamic_cast<T&>(*this);
+    }
+
+    template <class T>
+    const T& asRef() const {
+        return dynamic_cast<const T&>(*this);
+    }
+};
+
+struct ImageMetaData : public MetaData {
+    cv::Mat img;
+    std::chrono::steady_clock::time_point timeStamp;
+
+    ImageMetaData() {}
+
+    ImageMetaData(cv::Mat img, std::chrono::steady_clock::time_point timeStamp) : img(img), timeStamp(timeStamp) {}
+};
+
+struct ClassificationImageMetaData : public ImageMetaData {
+    unsigned int groundTruthId;
+
+    ClassificationImageMetaData(cv::Mat img,
+                                std::chrono::steady_clock::time_point timeStamp,
+                                unsigned int groundTruthId)
+        : ImageMetaData(img, timeStamp),
+          groundTruthId(groundTruthId) {}
+};
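The asRef helpers are checked downcasts for the type-erased MetaData carried through the pipeline: a failing dynamic_cast throws std::bad_cast instead of silently yielding a bad reference. A short sketch, assuming the result object exposes its metadata as a std::shared_ptr<MetaData> member named metaData (an assumption about the companion result types, not shown in this diff):

    std::shared_ptr<MetaData> meta = result->metaData;  // assumed field on the result structure
    const ImageMetaData& imgMeta = meta->asRef<ImageMetaData>();
    cv::Mat originalFrame = imgMeta.img;
    auto latency = std::chrono::steady_clock::now() - imgMeta.timeStamp;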
diff --git a/python/openvino/runtime/common/pipelines/include/pipelines/requests_pool.h b/python/openvino/runtime/common/pipelines/include/pipelines/requests_pool.h
new file mode 100644
index 0000000..d9b220e
--- /dev/null
+++ b/python/openvino/runtime/common/pipelines/include/pipelines/requests_pool.h
@@ -0,0 +1,67 @@
+/*
+// Copyright (C) 2020-2022 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+*/
+
+#pragma once
+
+#include <stddef.h>
+
+#include <mutex>
+#include <utility>
+#include <vector>
+
+#include <openvino/openvino.hpp>
+
+/// This class stores the pool of infer requests for the asynchronous pipeline.
+class RequestsPool {
+public:
+    RequestsPool(ov::CompiledModel& compiledModel, unsigned int size);
+    ~RequestsPool();
+
+    /// Returns an idle request from the pool. The returned request is automatically marked as in use (this status
+    /// is reset after request processing completes). This function is thread safe as long as the request is used
+    /// only until the setRequestIdle call.
+    /// @returns an idle request, or an empty ov::InferRequest if all requests are in use.
+    ov::InferRequest getIdleRequest();
+
+    /// Sets a particular request to the idle state.
+    /// This function is thread safe as long as the provided request is not used after the call.
+    /// @param request - request to be returned to the idle state
+    void setRequestIdle(const ov::InferRequest& request);
+
+    /// Returns the number of requests in use. This function is thread safe.
+    /// @returns number of requests in use
+    size_t getInUseRequestsCount();
+
+    /// Checks whether the pool contains at least one idle request. This function is thread safe.
+    /// @returns true if an idle request is available, false otherwise
+    bool isIdleRequestAvailable();
+
+    /// Waits for completion of every non-idle request in the pool.
+    /// getIdleRequest should not be called together with this function or after it, to avoid a race condition or
+    /// an invalid state.
+    void waitForTotalCompletion();
+
+    /// Returns the list of all infer requests in the pool.
+    /// @returns list of all infer requests in the pool
+    std::vector<ov::InferRequest> getInferRequestsList();
+
+private:
+    std::vector<std::pair<ov::InferRequest, bool>> requests;
+    size_t numRequestsInUse;
+    std::mutex mtx;
+};
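A sketch of the request lifecycle this pool is built around, assuming the completion callback is what hands the request back (the callback body and tensor setup are illustrative; set_callback and start_async are standard ov::InferRequest methods):

    RequestsPool pool(compiledModel, 4);  // e.g. four concurrent infer requests

    ov::InferRequest request = pool.getIdleRequest();
    if (request) {  // an empty request means the pool is exhausted
        // ... fill input tensors on `request` here ...
        request.set_callback([&pool, request](std::exception_ptr) {
            pool.setRequestIdle(request);  // return the request once inference finishes
        });
        request.start_async();
    }
    pool.waitForTotalCompletion();  // drain all in-flight requests before shutdown

Pairing each request with a bool "in use" flag keeps the pool a flat vector guarded by one mutex, which is why the thread-safety notes above hinge on not touching a request after handing it back.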
