diff --git a/cmake/developer_package/packaging/common-libraries.cmake b/cmake/developer_package/packaging/common-libraries.cmake index f00995872335d8..00516a4b2c4f6d 100644 --- a/cmake/developer_package/packaging/common-libraries.cmake +++ b/cmake/developer_package/packaging/common-libraries.cmake @@ -91,8 +91,8 @@ macro(ov_define_component_include_rules) set(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL EXCLUDE_FROM_ALL) # python set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL) - set(OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - set(OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) # we don't pack artifacts of setup.py install, because it's called explicitly in conda / brew # or not used at all like in cases with conan / vcpkg set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) diff --git a/cmake/developer_package/packaging/debian/debian.cmake b/cmake/developer_package/packaging/debian/debian.cmake index ab317b5eaca9f2..f8ce29330d2fb8 100644 --- a/cmake/developer_package/packaging/debian/debian.cmake +++ b/cmake/developer_package/packaging/debian/debian.cmake @@ -97,10 +97,10 @@ macro(ov_define_component_include_rules) else() set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL) endif() - set(OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL}) - set(OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL}) # we don't pack python components itself, we pack artifacts of setup.py install set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL) + set(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) # we don't need wheels in Debian packages set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL) # tools diff --git a/cmake/developer_package/packaging/nsis.cmake b/cmake/developer_package/packaging/nsis.cmake index dc2c4be32bc3c4..1a89f39344016c 100644 --- a/cmake/developer_package/packaging/nsis.cmake +++ b/cmake/developer_package/packaging/nsis.cmake @@ -63,8 +63,8 @@ macro(ov_define_component_include_rules) unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL) # python unset(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL) - set(OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) - set(OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL) set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL) # tools diff --git a/cmake/developer_package/packaging/packaging.cmake b/cmake/developer_package/packaging/packaging.cmake index 28601404c43765..e1d9b60f6079de 100644 --- a/cmake/developer_package/packaging/packaging.cmake +++ b/cmake/developer_package/packaging/packaging.cmake @@ -155,8 +155,8 @@ macro(ov_define_component_names) set(OV_CPACK_COMP_PYTHON_SAMPLES "python_samples") # python set(OV_CPACK_COMP_PYTHON_OPENVINO "pyopenvino") - 
set(OV_CPACK_COMP_PYTHON_BENCHMARK_APP "benchmark_app") - set(OV_CPACK_COMP_PYTHON_OVC "ovc") + set(OV_CPACK_COMP_BENCHMARK_APP "benchmark_app") + set(OV_CPACK_COMP_OVC "ovc") set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE "pyopenvino_package") set(OV_CPACK_COMP_PYTHON_WHEELS "python_wheels") # tools @@ -184,8 +184,8 @@ macro(ov_define_component_include_rules) unset(OV_CPACK_COMP_PYTHON_SAMPLES_EXCLUDE_ALL) # python unset(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL) - unset(OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL) - unset(OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL) + unset(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL) + unset(OV_CPACK_COMP_OVC_EXCLUDE_ALL) set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL) unset(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL) # tools diff --git a/cmake/developer_package/packaging/rpm/rpm.cmake b/cmake/developer_package/packaging/rpm/rpm.cmake index 22d9c17f6445ee..f683104a93b8c0 100644 --- a/cmake/developer_package/packaging/rpm/rpm.cmake +++ b/cmake/developer_package/packaging/rpm/rpm.cmake @@ -92,10 +92,10 @@ macro(ov_define_component_include_rules) else() set(OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL EXCLUDE_FROM_ALL) endif() - set(OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL}) - set(OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_PACKAGE_EXCLUDE_ALL}) # we don't pack python components itself, we pack artifacts of setup.py install set(OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL EXCLUDE_FROM_ALL) + set(OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) + set(OV_CPACK_COMP_OVC_EXCLUDE_ALL ${OV_CPACK_COMP_PYTHON_OPENVINO_EXCLUDE_ALL}) # we don't need wheels in RPM packages set(OV_CPACK_COMP_PYTHON_WHEELS_EXCLUDE_ALL EXCLUDE_FROM_ALL) # tools diff --git a/cmake/templates/OpenVINOConfig.cmake.in b/cmake/templates/OpenVINOConfig.cmake.in index 5036fbf2bc6132..3bf3aad14dd598 100644 --- a/cmake/templates/OpenVINOConfig.cmake.in +++ b/cmake/templates/OpenVINOConfig.cmake.in @@ -419,16 +419,18 @@ macro(_ov_find_onnx_frontend_dependencies) endmacro() function(_ov_target_no_deprecation_error) - if(NOT MSVC) + if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") + # older macOS x86_64 does not support this linker option + if(CMAKE_CROSSCOMPILING AND NOT APPLE) + set_target_properties(${ARGV} PROPERTIES + INTERFACE_LINK_OPTIONS "-Wl,--allow-shlib-undefined") + endif() + if(CMAKE_CXX_COMPILER_ID STREQUAL "Intel") set(flags "-diag-warning=1786") else() set(flags "-Wno-error=deprecated-declarations") endif() - if(CMAKE_CROSSCOMPILING) - set_target_properties(${ARGV} PROPERTIES - INTERFACE_LINK_OPTIONS "-Wl,--allow-shlib-undefined") - endif() set_target_properties(${ARGV} PROPERTIES INTERFACE_COMPILE_OPTIONS ${flags}) endif() diff --git a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt index fe628c9c70ab61..a8816b290f317b 100644 --- a/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt +++ b/src/bindings/python/src/compatibility/openvino/inference_engine/CMakeLists.txt @@ -21,9 +21,13 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") # 'argument': conversion from 'size_t' to 'int', possible loss of data ie_add_compiler_flags(/wd4267) ie_add_compiler_flags(/wd4244) + ie_add_compiler_flags(/wd4551) endif() -if (OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) +if(OV_COMPILER_IS_CLANG OR CMAKE_COMPILER_IS_GNUCXX) 
     ie_add_compiler_flags(-Wno-undef)
+    if(OV_COMPILER_IS_CLANG)
+        ie_add_compiler_flags(-Wno-parentheses-equality)
+    endif()
 endif()
 if(UNUSED_BUT_SET_VARIABLE_SUPPORTED)
     ie_add_compiler_flags(-Wno-unused-but-set-variable)
 endif()
diff --git a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt b/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
index 1b36134de0b285..aca50982d0dc53 100644
--- a/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
+++ b/src/bindings/python/src/compatibility/openvino/requirements-dev.txt
@@ -1 +1 @@
-cython>=0.29.32,<=0.29.36
+cython>=0.29.32
diff --git a/src/bindings/python/src/openvino/runtime/__init__.py b/src/bindings/python/src/openvino/runtime/__init__.py
index 9bfb5a64d6a05b..2755e0d59c4136 100644
--- a/src/bindings/python/src/openvino/runtime/__init__.py
+++ b/src/bindings/python/src/openvino/runtime/__init__.py
@@ -68,13 +68,6 @@ from openvino.runtime.ie_api import compile_model
 
-# Model Conversion API
-try:
-    from openvino.tools.ovc import convert_model, InputCutInfo, LayoutMap
-except ImportError:
-    pass
-
-
 # Extend Node class to support binary operators
 Node.__add__ = opset12.add
 Node.__sub__ = opset12.subtract
diff --git a/src/frontends/pytorch/src/op/quantized_add.cpp b/src/frontends/pytorch/src/op/quantized_add.cpp
new file mode 100644
index 00000000000000..2c6541e5962b5f
--- /dev/null
+++ b/src/frontends/pytorch/src/op/quantized_add.cpp
@@ -0,0 +1,46 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/relu.hpp"
+#include "utils_quantize.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_quantized_add(const NodeContext& context) {
+    num_inputs_check(context, 4, 4);
+    const auto x = context.get_input(0);
+    const auto y = context.get_input(1);
+    const auto scale = context.get_input(2);
+    const auto zero_point = context.get_input(3);
+
+    const auto quantized_add = context.mark_node(std::make_shared<v1::Add>(x, y));
+
+    return {quantize(context, quantized_add, scale, zero_point, x)};
+}
+
+OutputVector translate_quantized_add_relu(const NodeContext& context) {
+    num_inputs_check(context, 4, 4);
+    const auto x = context.get_input(0);
+    const auto y = context.get_input(1);
+    const auto scale = context.get_input(2);
+    const auto zero_point = context.get_input(3);
+
+    const auto quantized_add = context.mark_node(std::make_shared<v1::Add>(x, y));
+    const auto quantized_add_relu = context.mark_node(std::make_shared<v0::Relu>(quantized_add));
+
+    return {quantize(context, quantized_add_relu, scale, zero_point, x)};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
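For reference: all four quantized::* converters in this patch share one pattern, compute in float, then re-quantize the result with the supplied scale and zero-point via the quantize() helper from utils_quantize.hpp. A rough scalar sketch of what quantized::add means numerically in PyTorch, assuming per-tensor uint8 quantization (the helper name below is illustrative, not part of the patch):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // quantized::add as plain math: dequantize both operands, add in float,
    // then re-quantize into the output scale/zero-point and clamp to u8.
    inline uint8_t quantized_add_scalar(uint8_t a, float a_scale, int32_t a_zp,
                                        uint8_t b, float b_scale, int32_t b_zp,
                                        float out_scale, int32_t out_zp) {
        const float sum = (a - a_zp) * a_scale + (b - b_zp) * b_scale;
        const long q = std::lround(sum / out_scale) + out_zp;
        return static_cast<uint8_t>(std::min(255L, std::max(0L, q)));
    }

Only the middle operation changes for quantized::mul and quantized::hardswish in the files below.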
diff --git a/src/frontends/pytorch/src/op/quantized_hardswish.cpp b/src/frontends/pytorch/src/op/quantized_hardswish.cpp
new file mode 100644
index 00000000000000..3494ebe77a174e
--- /dev/null
+++ b/src/frontends/pytorch/src/op/quantized_hardswish.cpp
@@ -0,0 +1,31 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/hswish.hpp"
+#include "utils_quantize.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_quantized_hardswish(const NodeContext& context) {
+    num_inputs_check(context, 3, 3);
+    const auto x = context.get_input(0);
+    const auto scale = context.get_input(1);
+    const auto zero_point = context.get_input(2);
+
+    const auto quantized_hardswish = context.mark_node(std::make_shared<v4::HSwish>(x));
+
+    return {quantize(context, quantized_hardswish, scale, zero_point, x)};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/pytorch/src/op/quantized_mul.cpp b/src/frontends/pytorch/src/op/quantized_mul.cpp
new file mode 100644
index 00000000000000..81575a67c11937
--- /dev/null
+++ b/src/frontends/pytorch/src/op/quantized_mul.cpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/multiply.hpp"
+#include "utils_quantize.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+OutputVector translate_quantized_mul(const NodeContext& context) {
+    num_inputs_check(context, 4, 4);
+    const auto x = context.get_input(0);
+    const auto y = context.get_input(1);
+    const auto scale = context.get_input(2);
+    const auto zero_point = context.get_input(3);
+
+    const auto quantized_mul = context.mark_node(std::make_shared<v1::Multiply>(x, y));
+
+    return {quantize(context, quantized_mul, scale, zero_point, x)};
+}
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
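The new rand.cpp below derives normally distributed samples from two RandomUniform tensors via the Box-Muller transform, expressed as a graph of Log, Sqrt, Cos, Multiply, and Add nodes. A minimal scalar sketch of the same identity (plain C++, for illustration only):

    #include <cmath>

    // Box-Muller: maps two independent U(0,1] samples to one N(0,1) sample.
    // make_random_normal builds this same expression over whole tensors, with
    // mean and scale fixed to 0 and 1.
    double box_muller(double u1, double u2) {
        const double pi = 3.141592653589793;
        return std::sqrt(-2.0 * std::log(u1)) * std::cos(2.0 * pi * u2);
    }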
diff --git a/src/frontends/pytorch/src/op/rand.cpp b/src/frontends/pytorch/src/op/rand.cpp
new file mode 100644
index 00000000000000..9df9607c731d73
--- /dev/null
+++ b/src/frontends/pytorch/src/op/rand.cpp
@@ -0,0 +1,263 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+#include <random>
+
+#include "openvino/frontend/pytorch/node_context.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/constant.hpp"
+#include "openvino/op/convert.hpp"
+#include "openvino/op/convert_like.hpp"
+#include "openvino/op/cos.hpp"
+#include "openvino/op/log.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/random_uniform.hpp"
+#include "openvino/op/shape_of.hpp"
+#include "openvino/op/sqrt.hpp"
+#include "pt_framework_node.hpp"
+#include "utils.hpp"
+
+namespace ov {
+namespace frontend {
+namespace pytorch {
+namespace op {
+
+using namespace ov::op;
+
+namespace {
+OutputVector make_random_normal(const NodeContext& context, Output<Node> sizes, element::Type target_type) {
+    std::random_device rd;
+    std::mt19937 gen(rd());
+    std::uniform_int_distribution<uint64_t> distrib(0, 9999);
+
+    const uint64_t global_seed = 0;
+
+    const uint64_t seed_1 = distrib(gen);
+    const uint64_t seed_2 = distrib(gen);
+
+    auto min_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {0}));
+    auto max_val = context.mark_node(v0::Constant::create(target_type, Shape{1}, {1}));
+
+    auto uniform_1 = context.mark_node(
+        std::make_shared<v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_1));
+    auto uniform_2 = context.mark_node(
+        std::make_shared<v8::RandomUniform>(sizes, min_val, max_val, target_type, global_seed, seed_2));
+
+    // Compute Box-Muller transform:
+    // random_normal = scale * sqrt(-2.0 * log(uniform_1)) * cos(2.0 * pi * uniform_2) + mean
+    auto pi = context.mark_node(v0::Constant::create(target_type, Shape{1}, {3.141592653589793}));
+    auto minus_two = context.mark_node(v0::Constant::create(target_type, Shape{1}, {-2.0}));
+    auto two = context.mark_node(v0::Constant::create(target_type, Shape{1}, {2.0}));
+
+    auto log = context.mark_node(std::make_shared<v0::Log>(uniform_1));
+    auto multiply_minus_two_log = context.mark_node(std::make_shared<v1::Multiply>(log, minus_two));
+    auto sqrt = context.mark_node(std::make_shared<v0::Sqrt>(multiply_minus_two_log));
+
+    auto multiply_two_pi = context.mark_node(std::make_shared<v1::Multiply>(two, pi));
+    auto multiply_two_pi_uniform_2 = context.mark_node(std::make_shared<v1::Multiply>(multiply_two_pi, uniform_2));
+    auto cos = context.mark_node(std::make_shared<v0::Cos>(multiply_two_pi_uniform_2));
+
+    auto scale_const = context.mark_node(v0::Constant::create(target_type, Shape{1}, {1}));
+    auto mean_const = context.mark_node(v0::Constant::create(target_type, Shape{1}, {0}));
+    auto sqrt_x_cos = context.mark_node(std::make_shared<v1::Multiply>(sqrt, cos));
+    auto product = context.mark_node(std::make_shared<v1::Multiply>(scale_const, sqrt_x_cos));
+    auto sum = context.mark_node(std::make_shared<v1::Add>(product, mean_const));
+
+    return {sum};
+}
+};  // namespace
+
+OutputVector translate_rand(const NodeContext& context) {
+    num_inputs_check(context, 2, 6);
+    auto sizes = context.get_input(0);
+    if (context.get_input_type(0).is<type::List>()) {
+        sizes = concat_list_construct(sizes);
+    }
+    sizes = context.mark_node(std::make_shared<v0::Convert>(sizes, element::i32));
+    auto low = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {0}));
+    auto high = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {1}));
+    auto dtype = element::f32;
+    size_t out_id = 1;
+    if (context.get_input_size() == 3) {
+        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1),
+                                      "aten::rand conversion with generator is not supported");
+        out_id = 2;
+    }
+    // aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    // aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    if (context.get_input_size() == 2 || context.get_input_size() == 3) {
+        auto res = context.mark_node(std::make_shared<v8::RandomUniform>(sizes, low, high, dtype));
+        context.mutate_input(out_id, res);
+        return {res};
+    }
+    // aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
+    // pin_memory=None) -> Tensor
+    // aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None,
+    // Device? device=None, bool? pin_memory=None) -> Tensor
+    bool dtype_applied = true;
+    Output<Node> convert_like_out;
+    size_t dtype_id = 1;
+    if (context.get_input_size() == 6) {
+        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1),
+                                      "aten::rand conversion with generator is not supported");
+        dtype_id = 2;
+    }
+    if (!context.input_is_none(dtype_id)) {
+        if (std::dynamic_pointer_cast<v0::Constant>(
+                context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) {
+            dtype = convert_dtype(context.const_input<int64_t>(dtype_id));
+            low = context.mark_node(std::make_shared<v0::Convert>(low, dtype));
+            high = context.mark_node(std::make_shared<v0::Convert>(high, dtype));
+        } else if (const auto& fw_node = cast_fw_node(context.get_input(static_cast<int>(dtype_id)).get_node_shared_ptr(),
+                                                      "prim::dtype")) {
+            convert_like_out = fw_node->input_value(0);
+            dtype_applied = false;
+        } else {
+            FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
+        }
+    }
+    auto res = context.mark_node(std::make_shared<v8::RandomUniform>(sizes, low, high, dtype));
+    if (!dtype_applied) {
+        res = context.mark_node(std::make_shared<v1::ConvertLike>(res, convert_like_out));
+    }
+    return {res};
+};
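+
+// The *_like and generator-aware variants below follow the same dispatch scheme as
+// translate_rand: a 3-input signature is the .out overload (the result is written back
+// through mutate_input), a constant dtype argument is applied directly, and a dtype
+// supplied via prim::dtype is applied afterwards with ConvertLike.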
+
+OutputVector translate_rand_like(const NodeContext& context) {
+    // aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
+    // pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    // aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    num_inputs_check(context, 3, 6);
+    auto inp_tensor = context.get_input(0);
+    auto sizes = context.mark_node(std::make_shared<v0::ShapeOf>(inp_tensor, element::i32));
+    auto low = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {0}));
+    auto high = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {1}));
+    auto dtype = element::f32;
+    if (context.get_input_size() == 3) {
+        auto res = context.mark_node(std::make_shared<v8::RandomUniform>(sizes, low, high, dtype));
+        context.mutate_input(2, res);
+        return {res};
+    }
+    bool dtype_applied = true;
+    Output<Node> convert_like_out;
+    if (!context.input_is_none(1)) {
+        if (std::dynamic_pointer_cast<v0::Constant>(context.get_input_from_visible_context(1).get_node_shared_ptr())) {
+            dtype = convert_dtype(context.const_input<int64_t>(1));
+            low = context.mark_node(std::make_shared<v0::Convert>(low, dtype));
+            high = context.mark_node(std::make_shared<v0::Convert>(high, dtype));
+        } else if (const auto& fw_node =
+                       cast_fw_node(context.get_input(static_cast<int>(1)).get_node_shared_ptr(), "prim::dtype")) {
+            convert_like_out = fw_node->input_value(0);
+            dtype_applied = false;
+        } else {
+            FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
+        }
+    }
+    auto res = context.mark_node(std::make_shared<v8::RandomUniform>(sizes, low, high, dtype));
+    if (!dtype_applied) {
+        res = context.mark_node(std::make_shared<v1::ConvertLike>(res, convert_like_out));
+    }
+    return {res};
+};
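+
+// translate_randn and translate_randn_like reuse the same input handling as the two
+// converters above, but draw their samples from make_random_normal instead of a
+// single RandomUniform node.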
+
+OutputVector translate_randn(const NodeContext& context) {
+    num_inputs_check(context, 2, 6);
+    auto sizes = context.get_input(0);
+    if (context.get_input_type(0).is<type::List>()) {
+        sizes = concat_list_construct(sizes);
+    }
+    sizes = context.mark_node(std::make_shared<v0::Convert>(sizes, element::i32));
+    auto low = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {0}));
+    auto high = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {1}));
+    auto dtype = element::f32;
+    size_t out_id = 1;
+    if (context.get_input_size() == 3) {
+        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1),
+                                      "aten::randn conversion with generator is not supported");
+        out_id = 2;
+    }
+    // aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)
+    // aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)
+    if (context.get_input_size() == 2 || context.get_input_size() == 3) {
+        auto res = make_random_normal(context, sizes, dtype);
+        context.mutate_input(out_id, res[0]);
+        return res;
+    }
+    size_t dtype_id = 1;
+    if (context.get_input_size() == 6) {
+        FRONT_END_OP_CONVERSION_CHECK(context.input_is_none(1),
+                                      "aten::randn conversion with generator is not supported");
+        dtype_id = 2;
+    }
+    // aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
+    // pin_memory=None) -> Tensor
+    // aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None,
+    // Device? device=None, bool? pin_memory=None) -> Tensor
+    bool dtype_applied = true;
+    Output<Node> convert_like_out;
+    if (!context.input_is_none(dtype_id)) {
+        if (std::dynamic_pointer_cast<v0::Constant>(
+                context.get_input_from_visible_context(dtype_id).get_node_shared_ptr())) {
+            dtype = convert_dtype(context.const_input<int64_t>(dtype_id));
+            low = context.mark_node(std::make_shared<v0::Convert>(low, dtype));
+            high = context.mark_node(std::make_shared<v0::Convert>(high, dtype));
+        } else if (const auto& fw_node =
+                       cast_fw_node(context.get_input(static_cast<int>(dtype_id)).get_node_shared_ptr(),
+                                    "prim::dtype")) {
+            convert_like_out = fw_node->input_value(0);
+            dtype_applied = false;
+        } else {
+            FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
+        }
+    }
+    auto res = make_random_normal(context, sizes, dtype);
+    if (!dtype_applied) {
+        res[0] = context.mark_node(std::make_shared<v1::ConvertLike>(res[0], convert_like_out));
+    }
+    return res;
+};
+
+OutputVector translate_randn_like(const NodeContext& context) {
+    // aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool?
+    // pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
+    // aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
+    num_inputs_check(context, 3, 6);
+    auto inp_tensor = context.get_input(0);
+    auto sizes = context.mark_node(std::make_shared<v0::ShapeOf>(inp_tensor, element::i32));
+    auto low = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {0}));
+    auto high = context.mark_node(v0::Constant::create(element::f32, Shape{1}, {1}));
+    auto dtype = element::f32;
+    if (context.get_input_size() == 3) {
+        auto res = make_random_normal(context, sizes, dtype);
+        context.mutate_input(2, res[0]);
+        return res;
+    }
+    bool dtype_applied = true;
+    Output<Node> convert_like_out;
+    if (!context.input_is_none(1)) {
+        if (std::dynamic_pointer_cast<v0::Constant>(context.get_input_from_visible_context(1).get_node_shared_ptr())) {
+            dtype = convert_dtype(context.const_input<int64_t>(1));
+        } else if (const auto& fw_node =
+                       cast_fw_node(context.get_input(static_cast<int>(1)).get_node_shared_ptr(), "prim::dtype")) {
+            convert_like_out = fw_node->input_value(0);
+            dtype_applied = false;
+        } else {
+            FRONT_END_OP_CONVERSION_CHECK(false, "Couldn't get dtype input");
+        }
+    }
+    auto res = make_random_normal(context, sizes, dtype);
+    if (!dtype_applied) {
+        res[0] = context.mark_node(std::make_shared<v1::ConvertLike>(res[0], convert_like_out));
+    }
+    return res;
+};
+
+}  // namespace op
+}  // namespace pytorch
+}  // namespace frontend
+}  // namespace ov
diff --git a/src/frontends/pytorch/src/op_table.cpp b/src/frontends/pytorch/src/op_table.cpp
index 862d5485452b3b..e1b5f77a15f5db 100644
--- a/src/frontends/pytorch/src/op_table.cpp
+++ b/src/frontends/pytorch/src/op_table.cpp
@@ -109,7 +109,15 @@ OP_CONVERTER(translate_pow);
 OP_CONVERTER(translate_pythonop);
 OP_CONVERTER(translate_quantize_per_channel);
 OP_CONVERTER(translate_quantize_per_tensor);
+OP_CONVERTER(translate_quantized_add);
+OP_CONVERTER(translate_quantized_add_relu);
+OP_CONVERTER(translate_quantized_hardswish);
+OP_CONVERTER(translate_quantized_mul);
 OP_CONVERTER(translate_range_length);
+OP_CONVERTER(translate_rand);
+OP_CONVERTER(translate_randn);
+OP_CONVERTER(translate_rand_like);
+OP_CONVERTER(translate_randn_like);
 OP_CONVERTER(translate_reciprocal);
 OP_CONVERTER(translate_relu6);
 OP_CONVERTER(translate_remainder);
@@ -336,6 +344,10 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"aten::pow", op::translate_pow},
         {"aten::quantize_per_channel", op::translate_quantize_per_channel},
         {"aten::quantize_per_tensor", op::translate_quantize_per_tensor},
+        {"aten::rand", op::translate_rand},
+        {"aten::randn", op::translate_randn},
+        {"aten::rand_like", op::translate_rand_like},
+        {"aten::randn_like", op::translate_randn_like},
         {"aten::reciprocal", op::translate_reciprocal},
         {"aten::relu", op::translate_1to1_match_1_inputs<v0::Relu>},
         {"aten::relu_", op::inplace_op<op::translate_1to1_match_1_inputs<v0::Relu>>},
@@ -422,8 +434,12 @@ const std::map<std::string, CreatorFunction> get_supported_ops() {
         {"prim::requires_grad", op::return_false_scalar},
         {"prim::PythonOp", op::translate_pythonop},
         {"prim::type", op::skip_node},  // Used with prim::device, pass PtFrameworkNode.
+ {"quantized::add", op::translate_quantized_add}, + {"quantized::add_relu", op::translate_quantized_add_relu}, {"quantized::conv2d", op::translate_quantized_convnd}, {"quantized::conv2d_relu", op::translate_quantized_convnd_relu}, + {"quantized::hardswish", op::translate_quantized_hardswish}, + {"quantized::mul", op::translate_quantized_mul}, {"quantized::linear", op::translate_quantized_linear}, {"torchvision::deform_conv2d", op::translate_deform_conv}, {"torchvision::nms", op::translate_nms}, diff --git a/src/inference/dev_api/openvino/runtime/make_tensor.hpp b/src/inference/dev_api/openvino/runtime/make_tensor.hpp index 64c7ccee965304..434e72b5da5fe3 100644 --- a/src/inference/dev_api/openvino/runtime/make_tensor.hpp +++ b/src/inference/dev_api/openvino/runtime/make_tensor.hpp @@ -71,7 +71,9 @@ ov::SoPtr make_tensor(const std::shared_ptr& ten const InferenceEngine::Blob* get_hardware_blob(const InferenceEngine::Blob* blob); InferenceEngine::Blob* get_hardware_blob(InferenceEngine::Blob* blob); -std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, bool unwrap = true); +std::shared_ptr tensor_to_blob(const ov::SoPtr& tensor, + bool unwrap = true, + InferenceEngine::TensorDesc desc = {}); /** @endcond */ IE_SUPPRESS_DEPRECATED_END diff --git a/src/inference/src/dev/converter_utils.cpp b/src/inference/src/dev/converter_utils.cpp index 405225e463034d..a733ef7c13d5f2 100644 --- a/src/inference/src/dev/converter_utils.cpp +++ b/src/inference/src/dev/converter_utils.cpp @@ -544,14 +544,28 @@ class IInferRequestInternalWrapper : public InferenceEngine::IInferRequestIntern } InferenceEngine::Blob::Ptr GetBlob(const std::string& name) override { - return tensor_to_blob(m_request->get_tensor(find_port(name))); + auto port = find_port(name); + auto& rt_info = port.get_rt_info(); + auto it = rt_info.find("ie_legacy_td"); + InferenceEngine::TensorDesc desc; + if (it != rt_info.end()) { + desc = it->second.as(); + } + return tensor_to_blob(m_request->get_tensor(port), true, desc); } InferenceEngine::BatchedBlob::Ptr GetBlobs(const std::string& name) override { - auto tensors = m_request->get_tensors(find_port(name)); + auto port = find_port(name); + auto& rt_info = port.get_rt_info(); + auto it = rt_info.find("ie_legacy_td"); + InferenceEngine::TensorDesc desc; + if (it != rt_info.end()) { + desc = it->second.as(); + } + auto tensors = m_request->get_tensors(port); std::vector blobs; for (const auto& tensor : tensors) { - blobs.emplace_back(tensor_to_blob(tensor)); + blobs.emplace_back(tensor_to_blob(tensor, true, desc)); } return std::make_shared(blobs); } diff --git a/src/inference/src/dev/make_tensor.cpp b/src/inference/src/dev/make_tensor.cpp index 137f1412e08fde..2a7b28f64b79c7 100644 --- a/src/inference/src/dev/make_tensor.cpp +++ b/src/inference/src/dev/make_tensor.cpp @@ -441,11 +441,7 @@ class RemoteBlobTensor : public IRemoteTensor { */ class TensorRemoteBlob : public ie::RemoteBlob { public: - TensorRemoteBlob(const ov::SoPtr& tensor) - : ie::RemoteBlob{ie::TensorDesc{ie::details::convertPrecision(tensor->get_element_type()), - tensor->get_shape(), - ie::TensorDesc::getLayoutByRank(tensor->get_shape().size())}}, - tensor{tensor} { + TensorRemoteBlob(const ov::SoPtr& tensor, ie::TensorDesc desc) : ie::RemoteBlob{desc}, tensor{tensor} { OPENVINO_ASSERT(this->tensor); } std::shared_ptr cast_tensor() const { @@ -508,38 +504,8 @@ template class TensorMemoryBlob : public ie::TBlob { public: ~TensorMemoryBlob() override = default; - explicit TensorMemoryBlob(const ov::SoPtr& tensor_) try : ie - 
::TBlob{[&] { - auto element_type = tensor_->get_element_type(); - auto shape = tensor_->get_shape(); - ie::SizeVector blk_order(shape.size()); - std::iota(blk_order.begin(), blk_order.end(), 0); - ie::SizeVector dim_offset(shape.size(), 0); - ie::SizeVector blk_strides; - auto byte_strides = element_type.bitwidth() >= 8 ? tensor_->get_strides() : Strides{}; - if (byte_strides.empty()) { - blk_strides = ov::row_major_strides(shape); - } else { - blk_strides.resize(byte_strides.size()); - std::transform(byte_strides.begin(), - byte_strides.end(), - blk_strides.begin(), - [&element_type](size_t byte_stride) { - OPENVINO_ASSERT(byte_stride % element_type.size() == 0, - "Limitation: Stride in bytes ", - byte_stride, - " should be divisible by size of element ", - element_type.size()); - return byte_stride / element_type.size(); - }); - } - return ie::TensorDesc{ie::details::convertPrecision(element_type), - shape, - ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; - }(), - static_cast(tensor_->data()), - tensor_->get_byte_size()}, - tensor{tensor_} { + explicit TensorMemoryBlob(const ov::SoPtr& tensor_, ie::TensorDesc desc) try : ie + ::TBlob{desc, static_cast(tensor_->data()), tensor_->get_byte_size()}, tensor{tensor_} { OPENVINO_ASSERT(!std::dynamic_pointer_cast(tensor._ptr)); } catch (const std::exception& ex) { @@ -620,7 +586,40 @@ const ie::Blob* get_hardware_blob(const ie::Blob* blob) { return blob; } -ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap) { +ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap, InferenceEngine::TensorDesc desc) { + auto create_desc = [](const ov::SoPtr& tensor, + const InferenceEngine::TensorDesc& desc) -> InferenceEngine::TensorDesc { + if (desc.getLayout() != InferenceEngine::ANY || + desc.getPrecision() != InferenceEngine::Precision::UNSPECIFIED) { + return desc; + } + auto element_type = tensor->get_element_type(); + auto shape = tensor->get_shape(); + ie::SizeVector blk_order(shape.size()); + std::iota(blk_order.begin(), blk_order.end(), 0); + ie::SizeVector dim_offset(shape.size(), 0); + ie::SizeVector blk_strides; + auto byte_strides = element_type.bitwidth() >= 8 ? tensor->get_strides() : Strides{}; + if (byte_strides.empty()) { + blk_strides = ov::row_major_strides(shape); + } else { + blk_strides.resize(byte_strides.size()); + std::transform(byte_strides.begin(), + byte_strides.end(), + blk_strides.begin(), + [&element_type](size_t byte_stride) { + OPENVINO_ASSERT(byte_stride % element_type.size() == 0, + "Limitation: Stride in bytes ", + byte_stride, + " should be divisible by size of element ", + element_type.size()); + return byte_stride / element_type.size(); + }); + } + return ie::TensorDesc{ie::details::convertPrecision(element_type), + shape, + ie::BlockingDesc{shape, blk_order, 0, dim_offset, blk_strides}}; + }; #ifdef PROXY_PLUGIN_ENABLED const auto& tensor = unwrap ? 
ov::proxy::get_hardware_tensor(orig_tensor) : orig_tensor; #else @@ -633,11 +632,11 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap) } else if (auto blob_tensor = std::dynamic_pointer_cast(tensor._ptr)) { return blob_tensor->blob; } else if (std::dynamic_pointer_cast(tensor._ptr)) { - return std::make_shared(tensor); + return std::make_shared(tensor, create_desc(tensor, desc)); } else { #define CASE(precision, T) \ case element::precision: \ - return std::make_shared>(tensor); + return std::make_shared>(tensor, create_desc(tensor, desc)); switch (tensor->get_element_type()) { CASE(f32, float); CASE(f64, double); @@ -654,16 +653,16 @@ ie::Blob::Ptr tensor_to_blob(const ov::SoPtr& orig_tensor, bool unwrap) CASE(u1, int8_t); CASE(boolean, bool); case element::f16: - return std::make_shared>(tensor); + return std::make_shared>(tensor, create_desc(tensor, desc)); case element::bf16: - return std::make_shared>(tensor); + return std::make_shared>(tensor, create_desc(tensor, desc)); default: OPENVINO_THROW("Unsupported element type"); } #undef CASE } OPENVINO_THROW("Cannot convert tensor to blob!"); -} +} // namespace ov namespace util { diff --git a/src/inference/src/dev/preprocessing/preprocessing.cpp b/src/inference/src/dev/preprocessing/preprocessing.cpp index 7d062221ee98ee..c2447ed23421d1 100644 --- a/src/inference/src/dev/preprocessing/preprocessing.cpp +++ b/src/inference/src/dev/preprocessing/preprocessing.cpp @@ -14,6 +14,7 @@ #include "openvino/op/constant.hpp" #include "openvino/pass/graph_rewrite.hpp" #include "openvino/pass/manager.hpp" +#include "transformations/utils/utils.hpp" bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& model) { RUN_ON_MODEL_SCOPE(AddPreprocessing); @@ -111,8 +112,10 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& if (const_input.get_partial_shape().is_static() && const_input.get_shape().size() == 4) preproc.input(i).model().set_layout("NCHW"); } - for (size_t i = 0; i < model->outputs().size(); i++) { + std::vector legacy_names(model->get_output_size()); + for (size_t i = 0; i < model->get_output_size(); i++) { ov::Output const_output(model->output(i).get_node(), model->output(i).get_index()); + legacy_names[i] = ov::op::util::create_ie_output_name(const_output.get_node()->input_value(0)); InferenceEngine::DataPtr output_info; // I don't remove rt info to have information in InputsInfo about pre-processing in legacy // ExecutableNetwork @@ -122,6 +125,15 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& if (element_type != model->output(i).get_element_type()) { preproc.output(i).tensor().set_element_type(element_type); } + if (output_info->getLayout() != InferenceEngine::Layout::BLOCKED && + output_info->getLayout() != InferenceEngine::Layout::SCALAR) { + std::stringstream stream; + stream << output_info->getLayout(); + preproc.output(i).tensor().set_layout(ov::Layout{stream.str()}); + } + + if (const_output.get_partial_shape().is_static() && const_output.get_shape().size() == 4) + preproc.output(i).model().set_layout("NCHW"); } ov::pass::Manager manager(get_pass_config()); @@ -133,5 +145,10 @@ bool ov::pass::AddPreprocessing::run_on_model(const std::shared_ptr& preproc.build(); + for (size_t i = 0; i < model->get_output_size(); i++) { + ov::descriptor::set_ov_tensor_legacy_name(model->output(i).get_node()->input_value(0).get_tensor(), + legacy_names[i]); + } + return false; } diff --git a/src/plugins/intel_cpu/src/config.h b/src/plugins/intel_cpu/src/config.h index 
4be16563c8991c..65237c52e20138 100644 --- a/src/plugins/intel_cpu/src/config.h +++ b/src/plugins/intel_cpu/src/config.h @@ -90,6 +90,8 @@ struct Config { bool isLegacyApi = false; + int modelPreferThreads = -1; + #ifdef CPU_DEBUG_CAPS DebugCapsConfig debugCaps; void applyDebugCapsProperties(); diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp index 5c958e3eb76b2c..ff074c03d58b43 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.cpp @@ -353,25 +353,12 @@ std::vector> get_streams_info_table(const int input_streams, } int get_model_prefer_threads(const int num_streams, - const Config::LatencyThreadingMode latency_threading_mode, const std::vector> proc_type_table, const std::shared_ptr& ngraphFunc, - const ov::threading::IStreamsExecutor::Config streamExecutorConfig) { - const int sockets = get_default_latency_streams(latency_threading_mode); + Config& config) { + const int sockets = get_default_latency_streams(config.latencyThreadingMode); auto model_prefer = 0; - // latency - if (num_streams <= sockets && num_streams > 0) { - if (proc_type_table[0][EFFICIENT_CORE_PROC] > 0 && proc_type_table[0][MAIN_CORE_PROC] > 0) { - bool fp_intesive = !ov::op::util::has_op_with_type(ngraphFunc); - const int int8_threshold = 4; // ~relative efficiency of the VNNI-intensive code for Big vs Little cores; - const int fp32_threshold = 2; // ~relative efficiency of the AVX2 fp32 code for Big vs Little cores; - // by default the latency case uses (faster) Big cores only, depending on the compute ratio - model_prefer = proc_type_table[0][MAIN_CORE_PROC] > (proc_type_table[0][EFFICIENT_CORE_PROC] / - (fp_intesive ? fp32_threshold : int8_threshold)) - ? 
proc_type_table[0][MAIN_CORE_PROC] - : proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC]; - } - } else { // throughput + if (-1 == config.modelPreferThreads) { const auto isa = dnnl::get_effective_cpu_isa(); float isaSpecificThreshold = 1.0f; switch (isa) { @@ -397,23 +384,39 @@ int get_model_prefer_threads(const int num_streams, const float L2_cache_size = dnnl::utils::get_cache_size(2 /*level*/, true /*per core */); ov::MemBandwidthPressure networkToleranceForLowCache = ov::MemBandwidthPressureTolerance(ngraphFunc, L2_cache_size, memThresholdAssumeLimitedForISA); - model_prefer = ov::threading::IStreamsExecutor::Config::StreamMode::DEFAULT; + config.modelPreferThreads = ov::threading::IStreamsExecutor::Config::StreamMode::DEFAULT; if (networkToleranceForLowCache.max_mem_tolerance == ov::MemBandwidthPressure::UNKNOWN) { if ((networkToleranceForLowCache.ratio_compute_convs == ov::MemBandwidthPressure::ALL) || (networkToleranceForLowCache.ratio_compute_deconvs == ov::MemBandwidthPressure::ALL)) { // all relevant layers (convs, etc) are compute-limited, the most aggressive val for #streams - model_prefer = 1; + config.modelPreferThreads = 1; } // otherwise (no recognized layers) falling back to the default value } else if (networkToleranceForLowCache.max_mem_tolerance > memThresholdAssumeLimitedForISA) { // network is below the ISA-specific threshold - model_prefer = 1; + config.modelPreferThreads = 1; } else if (networkToleranceForLowCache.max_mem_tolerance > ov::MemBandwidthPressure::LIMITED) { // network is below general threshold - model_prefer = 2; + config.modelPreferThreads = 2; + } + if (config.modelPreferThreads == 1 && proc_type_table[0][EFFICIENT_CORE_PROC] == 0 && sockets == 1) { + config.modelPreferThreads = 2; } - if (model_prefer == 1 && proc_type_table[0][EFFICIENT_CORE_PROC] == 0 && sockets == 1) { - model_prefer = 2; + } + + // latency + if (num_streams <= sockets && num_streams > 0) { + if (proc_type_table[0][EFFICIENT_CORE_PROC] > 0 && proc_type_table[0][MAIN_CORE_PROC] > 0) { + bool fp_intesive = !ov::op::util::has_op_with_type(ngraphFunc); + const int int8_threshold = 4; // ~relative efficiency of the VNNI-intensive code for Big vs Little cores; + const int fp32_threshold = 2; // ~relative efficiency of the AVX2 fp32 code for Big vs Little cores; + // by default the latency case uses (faster) Big cores only, depending on the compute ratio + model_prefer = proc_type_table[0][MAIN_CORE_PROC] > (proc_type_table[0][EFFICIENT_CORE_PROC] / + (fp_intesive ? fp32_threshold : int8_threshold)) + ? 
proc_type_table[0][MAIN_CORE_PROC] + : proc_type_table[0][MAIN_CORE_PROC] + proc_type_table[0][EFFICIENT_CORE_PROC]; } + } else { // throughput + model_prefer = config.modelPreferThreads; } return model_prefer; @@ -439,11 +442,7 @@ std::vector> generate_stream_info(const int streams, config.latencyThreadingMode, proc_type_table); if (-1 == preferred_nthreads_per_stream) { - model_prefer_threads = get_model_prefer_threads(streams, - config.latencyThreadingMode, - proc_type_table, - ngraphFunc, - executor_config); + model_prefer_threads = get_model_prefer_threads(streams, proc_type_table, ngraphFunc, config); } executor_config._streams_info_table = get_streams_info_table(executor_config._streams, diff --git a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp index 4c425bb157066a..db8baba4a18874 100644 --- a/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp +++ b/src/plugins/intel_cpu/src/cpu_streams_calculation.hpp @@ -56,18 +56,16 @@ std::vector> get_streams_info_table(const int input_streams, * @param[in] num_streams is target streams set by user via NUM_STREAMS or hints. * - input "0" mean function generate the optimal number of streams * - LATENCY hint equals 1 stream. - * @param[in] latency_threading_mode is the scope of candidate processors per stream for latency hint - * - user can select all processors per numa node, per socket, or per platform. * @param[in] proc_type_table candidate processors available at this time * - candidate processors have benn updated based on properties like "Ecore only" in previous function * @param[in] ngraphFunc ngraph function + * @param[in] config intel cpu configuration * @return model_prefer_threads "0" means generating the optimal threads per stream based on platform */ int get_model_prefer_threads(const int num_streams, - const Config::LatencyThreadingMode latency_threading_mode, const std::vector> proc_type_table, const std::shared_ptr& ngraphFunc, - const ov::threading::IStreamsExecutor::Config streamExecutorConfig); + Config& config); /** * @brief Generate streams information according to processors type table diff --git a/src/plugins/intel_cpu/src/plugin.cpp b/src/plugins/intel_cpu/src/plugin.cpp index f303ca3eeeef1e..7bff483554ef44 100644 --- a/src/plugins/intel_cpu/src/plugin.cpp +++ b/src/plugins/intel_cpu/src/plugin.cpp @@ -283,11 +283,9 @@ void Engine::ApplyPerformanceHints(std::map &config, c void Engine::GetPerformanceStreams(Config& config, const std::shared_ptr& ngraphFunc) { const auto perf_hint_name = config.perfHintsConfig.ovPerfHint; - // save hints parameters to model rt_info - ov::AnyMap hints_props; - std::string hint_name; const int latency_streams = get_default_latency_streams(config.latencyThreadingMode); int streams; + if (config.streamExecutorConfig._streams_changed) { streams = config.streamExecutorConfig._streams; } else if (perf_hint_name == CONFIG_VALUE(LATENCY)) { @@ -298,22 +296,56 @@ void Engine::GetPerformanceStreams(Config& config, const std::shared_ptrset_rt_info(hints_props, "intel_cpu_hints_config"); config._config[CONFIG_KEY(CPU_THROUGHPUT_STREAMS)] = std::to_string(config.streamExecutorConfig._streams); } +void Engine::CalculateStreams(Config& conf, const std::shared_ptr& function, bool imported) { + // import config props from caching model + if (imported && !is_cpu_map_available()) { + if (function->has_rt_info("intel_cpu_hints_config") && !conf.perfHintsConfig.ovPerfHint.empty()) { + const auto mode_name = conf.perfHintsConfig.ovPerfHint; + if 
(mode_name == CONFIG_VALUE(LATENCY) || mode_name == CONFIG_VALUE(THROUGHPUT)) { + const auto& hints_config = function->get_rt_info("intel_cpu_hints_config"); + const auto hints_param_name = mode_name + "_" + std::string(ov::num_streams.name()); + const auto it = hints_config.find(hints_param_name); + if (it != hints_config.end()) { + conf.readProperties({{std::string(ov::num_streams.name()), it->second.as()}}); + } else { + IE_THROW() << "Cache file doesn't contain precalculated number of streams for mode " << mode_name; + } + } + } + } + + if (is_cpu_map_available()) { + const auto model_prefer_name = std::string("MODEL_PREFER_THREADS"); + if (imported && function->has_rt_info("intel_cpu_hints_config")) { + // load model_prefer_threads from cache + int cache_model_prefer; + const auto& hints_config = function->get_rt_info("intel_cpu_hints_config"); + const auto it_model_prefer = hints_config.find(model_prefer_name); + if (it_model_prefer != hints_config.end()) { + try { + cache_model_prefer = it_model_prefer->second.as(); + } catch (const std::exception&) { + OPENVINO_THROW("Cache file doesn't have valid value for " + model_prefer_name); + } + + conf.modelPreferThreads = cache_model_prefer; + } + } + GetPerformanceStreams(conf, function); + // save model_prefer_threads to model rt_info when loading network + if (!imported) { + ov::AnyMap hints_props; + hints_props.insert({model_prefer_name, std::to_string(conf.modelPreferThreads)}); + function->set_rt_info(hints_props, "intel_cpu_hints_config"); + } + } +} + StreamCfg Engine::GetNumStreams(InferenceEngine::IStreamsExecutor::ThreadBindingType thread_binding_type, int stream_mode, const bool enable_hyper_thread) const { @@ -493,9 +525,7 @@ Engine::LoadExeNetworkImpl(const InferenceEngine::CNNNetwork &network, const std Config conf = engConfig; conf.readProperties(config); - if (is_cpu_map_available()) { - GetPerformanceStreams(conf, nGraphFunc); - } + CalculateStreams(conf, nGraphFunc); // SSE runtime check is needed for some ATOM machine, which is x86-64 but w/o SSE static Xbyak::util::Cpu cpu; @@ -786,25 +816,9 @@ InferenceEngine::IExecutableNetworkInternal::Ptr Engine::ImportNetwork(std::istr Config conf = engConfig; conf.readProperties(config); - // import config props from caching model auto function = cnnnetwork.getFunction(); - if (function->has_rt_info("intel_cpu_hints_config") && !conf.perfHintsConfig.ovPerfHint.empty()) { - const auto mode_name = conf.perfHintsConfig.ovPerfHint; - if (mode_name == CONFIG_VALUE(LATENCY) || mode_name == CONFIG_VALUE(THROUGHPUT)) { - const auto& hints_config = function->get_rt_info("intel_cpu_hints_config"); - const auto hints_param_name = mode_name + "_" + std::string(ov::num_streams.name()); - const auto it = hints_config.find(hints_param_name); - if (it != hints_config.end()) { - conf.readProperties({{std::string(ov::num_streams.name()), it->second.as()}}); - } else { - IE_THROW() << "Cache file doesn't contain precalculated number of streams for mode " << mode_name; - } - } - } - if (is_cpu_map_available()) { - get_num_streams(conf.streamExecutorConfig._streams, function, conf); - } + CalculateStreams(conf, function, true); auto execNetwork = std::make_shared(cnnnetwork, conf, extensionManager, shared_from_this()); diff --git a/src/plugins/intel_cpu/src/plugin.h b/src/plugins/intel_cpu/src/plugin.h index 5f55640cf67bed..218fb1c73d040c 100644 --- a/src/plugins/intel_cpu/src/plugin.h +++ b/src/plugins/intel_cpu/src/plugin.h @@ -49,6 +49,8 @@ class Engine : public InferenceEngine::IInferencePlugin { 
void GetPerformanceStreams(Config &config, const std::shared_ptr& ngraphFunc); + void CalculateStreams(Config& conf, const std::shared_ptr& ngraphFunc, bool imported = false); + StreamCfg GetNumStreams(InferenceEngine::IStreamsExecutor::ThreadBindingType thread_binding_type, int stream_mode, const bool enable_hyper_thread = true) const; diff --git a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp index 335eb73ab307ea..92946bc1a0d804 100644 --- a/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp +++ b/src/plugins/intel_cpu/tests/functional/behavior/export_import.cpp @@ -15,7 +15,9 @@ namespace { -class ExportOptimalNumStreams : public ::testing::TestWithParam {}; +using PropertiesParams = std::tuple>; + +class ExportOptimalNumStreams : public ::testing::TestWithParam {}; std::shared_ptr MakeMatMulModel() { const ov::Shape input_shape = {1, 4096}; @@ -35,34 +37,102 @@ std::shared_ptr MakeMatMulModel() { TEST_P(ExportOptimalNumStreams, OptimalNumStreams) { auto original_model = MakeMatMulModel(); - std::string deviceName = GetParam(); ov::Core core; - auto tput_mode = ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT); - auto latency_mode = ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY); - - auto original_tp_network = core.compile_model(original_model, deviceName, tput_mode); - auto original_latency_network = core.compile_model(original_model, deviceName, latency_mode); - - auto nstreams_tp_original = original_tp_network.get_property(ov::num_streams.name()).as(); - auto nstreams_latency_original = original_latency_network.get_property(ov::num_streams.name()).as(); - - std::stringstream exported_stream; - original_tp_network.export_model(exported_stream); + std::string device_name; + std::vector properties; + std::tie(device_name, properties) = GetParam(); + auto original_properties_input = properties[0]; + auto new_properties_input = properties[1]; + + auto GetProperties = [&](ov::CompiledModel& network) { + std::vector properties; + properties.push_back(network.get_property(ov::hint::performance_mode.name()).as()); + properties.push_back(network.get_property(ov::num_streams.name()).as()); + properties.push_back(network.get_property(ov::hint::scheduling_core_type.name()).as()); + properties.push_back(network.get_property(ov::hint::enable_hyper_threading.name()).as()); + properties.push_back(network.get_property(ov::hint::enable_cpu_pinning.name()).as()); + properties.push_back(network.get_property(ov::inference_num_threads.name()).as()); + properties.push_back(network.get_property(ov::hint::num_requests.name()).as()); + return properties; + }; + + auto original_network = core.compile_model(original_model, device_name, original_properties_input); + auto original_properties_output = GetProperties(original_network); + + auto new_network = core.compile_model(original_model, device_name, new_properties_input); + auto new_properties_output = GetProperties(new_network); + + std::stringstream exported_model; + original_network.export_model(exported_model); + + // import_model with original config can create the same multi_thread setting as compile_model { - std::stringstream ss(exported_stream.str()); - auto imported_tp_network = core.import_model(ss, deviceName, tput_mode); - auto nstreams_tp_imported = imported_tp_network.get_property(ov::num_streams.name()).as(); - EXPECT_EQ(nstreams_tp_original, nstreams_tp_imported); + std::stringstream ss(exported_model.str()); + 
auto imported_network = core.import_model(ss, device_name, original_properties_input); + auto imported_properties_output = GetProperties(imported_network); + + EXPECT_EQ(original_properties_output[0], imported_properties_output[0]); + EXPECT_EQ(original_properties_output[1], imported_properties_output[1]); + EXPECT_EQ(original_properties_output[2], imported_properties_output[2]); + EXPECT_EQ(original_properties_output[3], imported_properties_output[3]); + EXPECT_EQ(original_properties_output[4], imported_properties_output[4]); + EXPECT_EQ(original_properties_output[5], imported_properties_output[5]); + EXPECT_EQ(original_properties_output[6], imported_properties_output[6]); } + // import_model with new properties can create the same multi_thread setting as compile_model with new properties { - std::stringstream ss(exported_stream.str()); - auto imported_latency_network = core.import_model(ss, deviceName, latency_mode); - auto nstreams_latency_imported = imported_latency_network.get_property(ov::num_streams.name()).as(); - EXPECT_EQ(nstreams_latency_original, nstreams_latency_imported); + std::stringstream ss(exported_model.str()); + auto imported_network = core.import_model(ss, device_name, new_properties_input); + auto imported_properties_output = GetProperties(imported_network); + + EXPECT_EQ(new_properties_output[0], imported_properties_output[0]); + EXPECT_EQ(new_properties_output[1], imported_properties_output[1]); + EXPECT_EQ(new_properties_output[2], imported_properties_output[2]); + EXPECT_EQ(new_properties_output[3], imported_properties_output[3]); + EXPECT_EQ(new_properties_output[4], imported_properties_output[4]); + EXPECT_EQ(new_properties_output[5], imported_properties_output[5]); + EXPECT_EQ(new_properties_output[6], imported_properties_output[6]); } } -INSTANTIATE_TEST_CASE_P(smoke_ExportImportTest, ExportOptimalNumStreams, ::testing::Values(std::string("CPU"))); +const std::vector testing_property_for_streams = {{ov::num_streams(1)}, {ov::num_streams(2)}}; + +const std::vector testing_property_for_threads = {{ov::inference_num_threads(1)}, + {ov::inference_num_threads(4)}}; + +const std::vector testing_property_for_performance_mode = { + {ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT)}, + {ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY)}}; + +const std::vector testing_property_for_scheduling_core_type_1 = { + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ANY_CORE)}, + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::PCORE_ONLY)}}; + +const std::vector testing_property_for_scheduling_core_type_2 = { + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::PCORE_ONLY)}, + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY)}}; + +const std::vector testing_property_for_scheduling_core_type_3 = { + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ANY_CORE)}, + {ov::hint::scheduling_core_type(ov::hint::SchedulingCoreType::ECORE_ONLY)}}; + +const std::vector testing_property_for_enable_hyper_threading = {{ov::hint::enable_hyper_threading(true)}, + {ov::hint::enable_hyper_threading(false)}}; + +const std::vector testing_property_for_enable_cpu_pinning = {{ov::hint::enable_cpu_pinning(true)}, + {ov::hint::enable_cpu_pinning(false)}}; + +INSTANTIATE_TEST_CASE_P(smoke_ExportImportTest, + ExportOptimalNumStreams, + ::testing::Combine(::testing::Values(std::string("CPU")), + ::testing::Values(testing_property_for_streams, + testing_property_for_threads, + 
testing_property_for_performance_mode, + testing_property_for_scheduling_core_type_1, + testing_property_for_scheduling_core_type_2, + testing_property_for_scheduling_core_type_3, + testing_property_for_enable_hyper_threading, + testing_property_for_enable_cpu_pinning))); } // namespace diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index 7785a006bb9caa..13a474da81bf8d 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -224,6 +224,7 @@ REGISTER_FACTORY(v7, Roll); REGISTER_FACTORY(v8, Slice); REGISTER_FACTORY(v8, Gather); REGISTER_FACTORY(v8, GatherND); +REGISTER_FACTORY(v8, DetectionOutput); REGISTER_FACTORY(v8, DeformableConvolution); REGISTER_FACTORY(v8, NV12toRGB); REGISTER_FACTORY(v8, NV12toBGR); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/detection_output.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/detection_output.hpp index 85f22401b2ff5f..743710642841cc 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/detection_output.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/detection_output.hpp @@ -39,15 +39,14 @@ struct detection_output : public primitive_base { input_height(-1), decrease_label_id(false), clip_before_nms(false), - clip_after_nms(false) {} + clip_after_nms(false), + objectness_score(0.0f) {} DECLARE_OBJECT_TYPE_SERIALIZATION /// @brief Constructs detection output primitive. /// @param id This primitive id. - /// @param input_location Input location primitive id. - /// @param input_confidence Input confidence primitive id. - /// @param input_prior_box Input prior-box primitive id. + /// @param inputs Inputs for primitive id. /// @param num_classes Number of classes to be predicted. /// @param keep_top_k Number of total bounding boxes to be kept per image after NMS step. /// @param share_location If true bounding box are shared among different classes. @@ -59,10 +58,8 @@ struct detection_output : public primitive_base { /// @param variance_encoded_in_target If true, variance is encoded in target; otherwise we need to adjust the predicted offset accordingly. /// @param confidence_threshold Only keep detections with confidences larger than this threshold. 
detection_output(const primitive_id& id, - const input_info& input_location, - const input_info& input_confidence, - const input_info& input_prior_box, - const uint32_t num_classes, + const std::vector& inputs, + const int32_t num_classes, const uint32_t keep_top_k, const bool share_location = true, const int background_label_id = 0, @@ -80,8 +77,9 @@ struct detection_output : public primitive_base { const bool decrease_label_id = false, const bool clip_before_nms = false, const bool clip_after_nms = false, + const float objectness_score = 0.0f, const padding& output_padding = padding()) - : primitive_base(id, {input_location, input_confidence, input_prior_box}, {output_padding}), + : primitive_base(id, inputs, {output_padding}), num_classes(num_classes), keep_top_k(keep_top_k), share_location(share_location), @@ -99,14 +97,15 @@ struct detection_output : public primitive_base { input_height(input_height), decrease_label_id(decrease_label_id), clip_before_nms(clip_before_nms), - clip_after_nms(clip_after_nms) { + clip_after_nms(clip_after_nms), + objectness_score(objectness_score) { if (decrease_label_id && background_label_id != 0) throw std::invalid_argument( "Cannot use decrease_label_id and background_label_id parameter simultaneously."); } /// @brief Number of classes to be predicted. - const uint32_t num_classes; + const int num_classes; /// @brief Number of total bounding boxes to be kept per image after NMS step. const int keep_top_k; /// @brief If true, bounding box are shared among different classes. @@ -141,6 +140,8 @@ struct detection_output : public primitive_base { const bool clip_before_nms; /// @brief Clip decoded boxes after nms step const bool clip_after_nms; + /// @brief Threshold to sort out condifence predictions + const float objectness_score; size_t hash() const override { size_t seed = primitive::hash(); @@ -162,6 +163,7 @@ struct detection_output : public primitive_base { seed = hash_combine(seed, decrease_label_id); seed = hash_combine(seed, clip_before_nms); seed = hash_combine(seed, clip_after_nms); + seed = hash_combine(seed, objectness_score); return seed; } @@ -189,7 +191,8 @@ struct detection_output : public primitive_base { cmp_fields(input_height) && cmp_fields(decrease_label_id) && cmp_fields(clip_before_nms) && - cmp_fields(clip_after_nms); + cmp_fields(clip_after_nms) && + cmp_fields(objectness_score); #undef cmp_fields } @@ -213,11 +216,12 @@ struct detection_output : public primitive_base { ob << decrease_label_id; ob << clip_before_nms; ob << clip_after_nms; + ob << objectness_score; } void load(BinaryInputBuffer& ib) override { primitive_base::load(ib); - ib >> *const_cast(&num_classes); + ib >> *const_cast(&num_classes); ib >> *const_cast(&keep_top_k); ib >> *const_cast(&share_location); ib >> *const_cast(&background_label_id); @@ -235,6 +239,7 @@ struct detection_output : public primitive_base { ib >> *const_cast(&decrease_label_id); ib >> *const_cast(&clip_before_nms); ib >> *const_cast(&clip_after_nms); + ib >> *const_cast(&objectness_score); } protected: diff --git a/src/plugins/intel_gpu/src/graph/detection_output.cpp b/src/plugins/intel_gpu/src/graph/detection_output.cpp index 09c876d43b7f22..e1d4b3b384e0c5 100644 --- a/src/plugins/intel_gpu/src/graph/detection_output.cpp +++ b/src/plugins/intel_gpu/src/graph/detection_output.cpp @@ -4,6 +4,7 @@ #include "detection_output_inst.h" #include "primitive_type_base.h" +#include "detection_output_shape_inference.hpp" #include "intel_gpu/graph/serialization/string_serializer.hpp" #include 
"intel_gpu/runtime/error_handler.hpp" #include "json_object.h" @@ -55,6 +56,58 @@ layout detection_output_inst::calc_output_layout(detection_output_node const& no cldnn::tensor(1, 1, DETECTION_OUTPUT_ROW_SIZE, desc->keep_top_k * input_layout.batch())}; } +template +std::vector detection_output_inst::calc_output_layouts(detection_output_node const& node, kernel_impl_params const& impl_param) { + auto desc = impl_param.typed_desc(); + auto input0_layout = impl_param.get_input_layout(0); + auto box_logits_shape = input0_layout.get(); + auto output_type = desc->output_data_types[0].value_or(input0_layout.data_type); + auto output_format = input0_layout.format; + + ShapeType class_preds_shape = impl_param.get_input_layout(1).get(); + ShapeType proposals_shape = impl_param.get_input_layout(2).get(); + std::vector output_shapes = { ShapeType() }; + std::vector input_shapes = { + box_logits_shape, + class_preds_shape, + proposals_shape + }; + + for (size_t i = 3; i < impl_param.input_layouts.size(); ++i) { + input_shapes.push_back(impl_param.input_layouts[i].get()); + } + + if (desc->num_classes == -1) { + ov::op::v8::DetectionOutput op; + ov::op::util::DetectionOutputBase::AttributesBase attrs; + attrs.top_k = desc->top_k; + attrs.variance_encoded_in_target = desc->variance_encoded_in_target; + attrs.keep_top_k = { desc->keep_top_k }; + attrs.share_location = desc->share_location; + attrs.normalized = desc->prior_is_normalized; + op.set_attrs(attrs); + + ov::op::v8::shape_infer(&op, input_shapes, output_shapes); + } else { + ov::op::v0::DetectionOutput op; + ov::op::v0::DetectionOutput::Attributes attrs; + attrs.num_classes = desc->num_classes; + attrs.top_k = desc->top_k; + attrs.variance_encoded_in_target = desc->variance_encoded_in_target; + attrs.keep_top_k = { desc->keep_top_k }; + attrs.share_location = desc->share_location; + attrs.normalized = desc->prior_is_normalized; + op.set_attrs(attrs); + + ov::op::v0::shape_infer(&op, input_shapes, output_shapes); + } + + return { layout{output_shapes[0], output_type, output_format} }; +} + +template std::vector detection_output_inst::calc_output_layouts(detection_output_node const& node, + const kernel_impl_params& impl_param); + std::string detection_output_inst::to_string(detection_output_node const& node) { auto node_info = node.desc_to_json(); auto desc = node.get_primitive(); @@ -108,6 +161,7 @@ std::string detection_output_inst::to_string(detection_output_node const& node) detec_out_info.add("decrease_label_id", decrease_label_id); detec_out_info.add("clip_before_nms", clip_before_nms); detec_out_info.add("clip_after_nms", clip_after_nms); + detec_out_info.add("objectness_score", desc->objectness_score); detec_out_info.dump(primitive_description); node_info->add("dection output info", detec_out_info); @@ -121,6 +175,9 @@ detection_output_inst::typed_primitive_inst(network& network, detection_output_n auto location_layout = node.location().get_output_layout(); auto confidence_layout = node.confidence().get_output_layout(); auto prior_box_layout = node.prior_box().get_output_layout(); + if (location_layout.is_dynamic() || confidence_layout.is_dynamic() || prior_box_layout.is_dynamic()) + return; + CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(), "Location memory format", location_layout.format.value, @@ -183,9 +240,7 @@ void detection_output_inst::save(cldnn::BinaryOutputBuffer& ob) const { // argument (struct detection_output) ob << argument->id; - ob << argument->input[0].pid; - ob << argument->input[1].pid; - ob << argument->input[2].pid; + ob << 
 std::string detection_output_inst::to_string(detection_output_node const& node) {
     auto node_info = node.desc_to_json();
     auto desc = node.get_primitive();
@@ -108,6 +161,7 @@ std::string detection_output_inst::to_string(detection_output_node const& node)
     detec_out_info.add("decrease_label_id", decrease_label_id);
     detec_out_info.add("clip_before_nms", clip_before_nms);
     detec_out_info.add("clip_after_nms", clip_after_nms);
+    detec_out_info.add("objectness_score", desc->objectness_score);
     detec_out_info.dump(primitive_description);
     node_info->add("dection output info", detec_out_info);

@@ -121,6 +175,9 @@ detection_output_inst::typed_primitive_inst(network& network, detection_output_node const& node)
     auto location_layout = node.location().get_output_layout();
     auto confidence_layout = node.confidence().get_output_layout();
     auto prior_box_layout = node.prior_box().get_output_layout();
+    if (location_layout.is_dynamic() || confidence_layout.is_dynamic() || prior_box_layout.is_dynamic())
+        return;
+
     CLDNN_ERROR_NOT_PROPER_FORMAT(node.id(),
                                   "Location memory format",
                                   location_layout.format.value,
@@ -183,9 +240,7 @@ void detection_output_inst::save(cldnn::BinaryOutputBuffer& ob) const {
     // argument (struct detection_output)
     ob << argument->id;
-    ob << argument->input[0].pid;
-    ob << argument->input[1].pid;
-    ob << argument->input[2].pid;
+    ob << argument->input;
     ob << make_data(&argument->output_paddings[0], sizeof(argument->output_paddings[0]));
     ob << argument->num_classes;
     ob << argument->keep_top_k;
@@ -205,15 +260,14 @@ void detection_output_inst::save(cldnn::BinaryOutputBuffer& ob) const {
     ob << argument->decrease_label_id;
     ob << argument->clip_before_nms;
     ob << argument->clip_after_nms;
+    ob << argument->objectness_score;
 }

 void detection_output_inst::load(cldnn::BinaryInputBuffer& ib) {
     parent::load(ib);
     primitive_id id;
-    primitive_id input_location;
-    primitive_id input_confidence;
-    primitive_id input_prior_box;
+    std::vector<input_info> input;
     uint32_t num_classes;
     uint32_t keep_top_k;
     bool share_location;
@@ -232,13 +286,11 @@ void detection_output_inst::load(cldnn::BinaryInputBuffer& ib) {
     bool decrease_label_id;
     bool clip_before_nms;
     bool clip_after_nms;
-    // primitive_id ext_prim_id;
+    float objectness_score;
     padding output_padding;

     ib >> id;
-    ib >> input_location;
-    ib >> input_confidence;
-    ib >> input_prior_box;
+    ib >> input;
     ib >> make_data(&output_padding, sizeof(output_padding));
     ib >> num_classes;
     ib >> keep_top_k;
@@ -258,12 +310,12 @@ void detection_output_inst::load(cldnn::BinaryInputBuffer& ib) {
     ib >> decrease_label_id;
     ib >> clip_before_nms;
     ib >> clip_after_nms;
+    ib >> objectness_score;

     argument = std::make_shared<detection_output>(
-        id, input_info(input_location), input_info(input_confidence), input_info(input_prior_box),
-        num_classes, keep_top_k, share_location, background_label_id, nms_threshold, top_k, eta, code_type,
-        variance_encoded_in_target, confidence_threshold, prior_info_size, prior_coordinates_offset,
-        prior_is_normalized, input_width, input_height, decrease_label_id, clip_before_nms, clip_after_nms,
-        output_padding);
+        id, input, num_classes, keep_top_k, share_location, background_label_id, nms_threshold, top_k,
+        eta, code_type, variance_encoded_in_target, confidence_threshold, prior_info_size,
+        prior_coordinates_offset, prior_is_normalized, input_width, input_height, decrease_label_id,
+        clip_before_nms, clip_after_nms, objectness_score, output_padding);
 }
 }  // namespace cldnn
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp
index 011b41f3cde9f2..d235c6657dd950 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/reorder_inputs.cpp
@@ -739,9 +739,10 @@ void reorder_inputs::run(program& p, layout_optimizer& lo, reorder_factory& rf)
     for (size_t i = 0; i < detection_output_node.get_dependencies().size(); i++) {
         auto& input = detection_output_node.get_dependency(i);
+        auto input_layout = input.get_output_layout();
         auto new_input = rf.get_reorder(input.id(),
-                                        input.get_output_layout(),
-                                        layout{ data_types::f32, format::bfyx, input.get_output_layout().get_tensor() });
+                                        input_layout,
+                                        layout{ input_layout.get_partial_shape(), data_types::f32, format::bfyx });

         if (new_input.first) {
             p.add_intermediate(new_input.first, detection_output_node, i, !new_input.second);
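
The reorder hunk above swaps a tensor-based target layout for a partial-shape
one; the difference matters because a static tensor cannot carry dynamic
dimensions. Schematically (variable names assumed for illustration):

    // Before: requires a fully static shape.
    layout old_target{ data_types::f32, format::bfyx, input_layout.get_tensor() };
    // After: partial shapes pass through, so dynamic dimensions survive the reorder.
    layout new_target{ input_layout.get_partial_shape(), data_types::f32, format::bfyx };
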
diff --git a/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp b/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp
index 27c9f3993e8e13..7fbc7f38801be4 100644
--- a/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/cpu/detection_output.cpp
@@ -855,10 +855,16 @@ struct detection_output_impl : typed_primitive_impl<detection_output> {
 namespace detail {

 attach_detection_output_impl::attach_detection_output_impl() {
-    implementation_map<detection_output>::add(impl_types::cpu, detection_output_impl::create, {
-        std::make_tuple(data_types::f32, format::bfyx),
-        std::make_tuple(data_types::f16, format::bfyx)
-    });
+    auto formats = {
+        format::bfyx
+    };
+
+    auto types = {
+        data_types::f32,
+        data_types::f16
+    };
+
+    implementation_map<detection_output>::add(impl_types::cpu, shape_types::any, detection_output_impl::create, types, formats);
 }

 }  // namespace detail
diff --git a/src/plugins/intel_gpu/src/graph/include/detection_output_inst.h b/src/plugins/intel_gpu/src/graph/include/detection_output_inst.h
index 99e53843966712..43ebffc8839a5f 100644
--- a/src/plugins/intel_gpu/src/graph/include/detection_output_inst.h
+++ b/src/plugins/intel_gpu/src/graph/include/detection_output_inst.h
@@ -25,6 +25,7 @@ class typed_program_node<detection_output> : public typed_program_node_base<detection_output> {
+    std::vector<size_t> get_shape_infer_dependencies() const override { return {}; }
 };

 using detection_output_node = typed_program_node<detection_output>;
@@ -35,6 +36,8 @@ class typed_primitive_inst<detection_output> : public typed_primitive_inst_base<detection_output> {
     using parent::parent;

 public:
+    template<typename ShapeType>
+    static std::vector<layout> calc_output_layouts(detection_output_node const& node, kernel_impl_params const& impl_param);
     static layout calc_output_layout(detection_output_node const& node, kernel_impl_params const& impl_param);
     static std::string to_string(detection_output_node const& node);
diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
index 0a10658419d987..f9a7bdc9037929 100644
--- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
+++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
@@ -1424,13 +1424,17 @@ impl_types layout_optimizer::get_preferred_impl_type(program_node& node, format preferred_format) {
         auto& detection_output_node = node.as<detection_output>();
         auto confidence_layout = detection_output_node.confidence().get_output_layout();
         auto prim = detection_output_node.get_primitive();
-        auto batch_size_limitations = (device_info.supports_immad && device_info.execution_units_count >= 256) ? true : confidence_layout.batch() >= 4;
-        if (confidence_layout.batch() <= lws_max && batch_size_limitations &&
-            prim->confidence_threshold >= 0.1 &&
-            prim->top_k <= 400 && prim->num_classes >= 16 && confidence_layout.feature() > 10000)
-            preferred_impl = impl_types::ocl;
-        else
+        if (confidence_layout.is_dynamic()) {
             preferred_impl = impl_types::cpu;
+        } else {
+            auto batch_size_limitations = (device_info.supports_immad && device_info.execution_units_count >= 256) ? true : confidence_layout.batch() >= 4;
+            auto can_use_ocl_impl = confidence_layout.batch() <= lws_max &&
+                                    batch_size_limitations &&
+                                    prim->confidence_threshold >= 0.1 &&
+                                    prim->top_k <= 400 && prim->num_classes >= 16 &&
+                                    confidence_layout.feature() > 10000;
+            preferred_impl = can_use_ocl_impl ? impl_types::ocl : impl_types::cpu;
+        }
     } else if (node.is_type()) {
         const std::set blocked_formats = { format::b_fs_yx_fsv16,
diff --git a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp
index 19cdb51ebf94d7..0b4d91fb3bc7b4 100644
--- a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp
+++ b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp
@@ -27,14 +27,13 @@ static cldnn::prior_box_code_type PriorBoxCodeFromString(const std::string& str)
     return cldnn::prior_box_code_type::corner;
 }

-static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::DetectionOutput>& op) {
-    validate_inputs_count(op, {3});
+static void CreateCommonDetectionOutputOp(Program& p,
+                                          const std::shared_ptr<ov::Node>& op,
+                                          const ov::op::util::DetectionOutputBase::AttributesBase& attrs,
+                                          int num_classes) {
     auto inputs = p.GetInputInfo(op);
     std::string layerName = layer_type_name_ID(op);

-    auto attrs = op->get_attrs();
-
-    uint32_t num_classes = attrs.num_classes;
     bool share_location = attrs.share_location;
     int background_label_id = attrs.background_label_id;
     float nms_threshold = attrs.nms_threshold;
@@ -50,15 +49,14 @@ static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::DetectionOutput>& op) {
+    validate_inputs_count(op, {3});
+
+    auto attrs = op->get_attrs();
+    CreateCommonDetectionOutputOp(p, op, attrs, attrs.num_classes);
+}
+
+static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ov::op::v8::DetectionOutput>& op) {
+    validate_inputs_count(op, {3});
+
+    CreateCommonDetectionOutputOp(p, op, op->get_attrs(), -1);
+}
+
 REGISTER_FACTORY_IMPL(v0, DetectionOutput);
+REGISTER_FACTORY_IMPL(v8, DetectionOutput);

 }  // namespace intel_gpu
 }  // namespace ov
diff --git a/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp
new file mode 100644
index 00000000000000..5dc1121dbffa3a
--- /dev/null
+++ b/src/plugins/intel_gpu/tests/functional/single_layer_tests/dynamic/detection_output.cpp
@@ -0,0 +1,370 @@
+// Copyright (C) 2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "shared_test_classes/single_layer/detection_output.hpp"
+#include "shared_test_classes/base/ov_subgraph.hpp"
+#include "ie_precision.hpp"
+#include "ngraph_functions/builders.hpp"
+#include "common_test_utils/ov_tensor_utils.hpp"
+#include
+
+using namespace ngraph;
+using namespace InferenceEngine;
+using namespace ov::test;
+
+namespace GPULayerTestsDefinitions {
+
+enum {
+    idxLocation,
+    idxConfidence,
+    idxPriors,
+    idxArmConfidence,
+    idxArmLocation,
+    numInputs
+};
+
+typedef std::tuple<
+    bool,                  // varianceEncodedInTarget
+    bool,                  // shareLocation
+    bool,                  // normalized
+    size_t,                // inputHeight
+    size_t,                // inputWidth
+    ov::test::InputShape,  // "Location" input
+    ov::test::InputShape,  // "Confidence" input
+    ov::test::InputShape,  // "Priors" input
+    ov::test::InputShape,  // "ArmConfidence" input
+    ov::test::InputShape   // "ArmLocation" input
+> ParamsWhichSizeDependsDynamic;
+
+typedef std::tuple<
+    int,               // numClasses
+    int,               // backgroundLabelId
+    int,               // topK
+    std::vector<int>,  // keepTopK
+    std::string,       // codeType
+    float,             // nmsThreshold
+    float,             // confidenceThreshold
+    bool,              // clip_afterNms
+    bool,              // clip_beforeNms
+    bool               // decreaseLabelId
+> DetectionOutputAttributes;
+
+typedef std::tuple<
+    DetectionOutputAttributes,
+    ParamsWhichSizeDependsDynamic,
+    size_t,  // Number of batch
+    float,   // objectnessScore
+    bool,    // replace dynamic shapes to
intervals + std::string // Device name +> DetectionOutputGPUTestParams; + +class DetectionOutputLayerGPUTest : public testing::WithParamInterface, + virtual public ov::test::SubgraphBaseTest { +public: + static std::string getTestCaseName(testing::TestParamInfo obj) { + DetectionOutputAttributes commonAttrs; + ParamsWhichSizeDependsDynamic specificAttrs; + ngraph::op::DetectionOutputAttrs attrs; + size_t batch; + bool replaceDynamicShapesToIntervals; + std::string targetDevice; + std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, replaceDynamicShapesToIntervals, targetDevice) = obj.param; + + std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, attrs.nms_threshold, attrs.confidence_threshold, + attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs; + + const size_t numInputs = 5; + std::vector inShapes(numInputs); + std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width, + inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs; + + if (inShapes[idxArmConfidence].first.rank().get_length() == 0ul) { + inShapes.resize(3); + } + + for (size_t i = 0; i < inShapes.size(); i++) { + inShapes[i].first[0] = batch; + } + + std::ostringstream result; + result << "IS = { "; + + using ov::test::operator<<; + result << "LOC=" << inShapes[0] << "_"; + result << "CONF=" << inShapes[1] << "_"; + result << "PRIOR=" << inShapes[2]; + if (inShapes.size() > 3) { + result << "_ARM_CONF=" << inShapes[3] << "_"; + result << "ARM_LOC=" << inShapes[4] << " }_"; + } else { + result << " }_"; + } + + using LayerTestsDefinitions::operator<<; + result << attrs; + result << "RDS=" << (replaceDynamicShapesToIntervals ? 
"true" : "false") << "_"; + result << "TargetDevice=" << targetDevice; + return result.str(); + } + + void generate_inputs(const std::vector& targetInputStaticShapes) override { + inputs.clear(); + const auto& funcInputs = function->inputs(); + for (auto i = 0ul; i < funcInputs.size(); ++i) { + const auto &funcInput = funcInputs[i]; + InferenceEngine::Blob::Ptr blob; + int32_t resolution = 1; + uint32_t range = 1; + if (i == 2) { + if (attrs.normalized) { + resolution = 100; + } else { + range = 10; + } + } else if (i == 1 || i == 3) { + resolution = 1000; + } else { + resolution = 10; + } + + auto tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), targetInputStaticShapes[i], range, 0, resolution); + inputs.insert({funcInput.get_node_shared_ptr(), tensor}); + } + } + + void compare(const std::vector& expectedTensors, const std::vector& actualTensors) override { + ASSERT_EQ(expectedTensors.size(), actualTensors.size()); + + for (size_t i = 0; i < expectedTensors.size(); ++i) { + auto expected = expectedTensors[i]; + auto actual = actualTensors[i]; + ASSERT_EQ(expected.get_size(), actual.get_size()); + + size_t expSize = 0; + const float* expBuf = expected.data(); + for (size_t i = 0; i < expected.get_size(); i+=7) { + if (expBuf[i] == -1) + break; + expSize += 7; + } + + size_t actSize = 0; + const float* actBuf = actual.data(); + for (size_t i = 0; i < actual.get_size(); i+=7) { + if (actBuf[i] == -1) + break; + actSize += 7; + } + + ASSERT_EQ(expSize, actSize); + } + + ov::test::SubgraphBaseTest::compare(expectedTensors, actualTensors); + } + +protected: + void SetUp() override { + DetectionOutputAttributes commonAttrs; + ParamsWhichSizeDependsDynamic specificAttrs; + size_t batch; + bool replaceDynamicShapesToIntervals; + std::tie(commonAttrs, specificAttrs, batch, attrs.objectness_score, replaceDynamicShapesToIntervals, targetDevice) = this->GetParam(); + + std::tie(attrs.num_classes, attrs.background_label_id, attrs.top_k, attrs.keep_top_k, attrs.code_type, attrs.nms_threshold, attrs.confidence_threshold, + attrs.clip_after_nms, attrs.clip_before_nms, attrs.decrease_label_id) = commonAttrs; + + inShapes.resize(numInputs); + std::tie(attrs.variance_encoded_in_target, attrs.share_location, attrs.normalized, attrs.input_height, attrs.input_width, + inShapes[idxLocation], inShapes[idxConfidence], inShapes[idxPriors], inShapes[idxArmConfidence], inShapes[idxArmLocation]) = specificAttrs; + + if (inShapes[idxArmConfidence].first.rank().get_length() == 0) { + inShapes.resize(3); + } + + if (replaceDynamicShapesToIntervals) { + set_dimension_intervals(inShapes); + } + + for (auto& value : inShapes) { + auto shapes = value.second; + for (auto& shape : shapes) { + shape[0] = batch; + } + } + + init_input_shapes({ inShapes }); + + auto params = ngraph::builder::makeDynamicParams(ngraph::element::f32, inputDynamicShapes); + auto paramOuts = ngraph::helpers::convert2OutputVector(ngraph::helpers::castOps2Nodes(params)); + auto detOut = ngraph::builder::makeDetectionOutput(paramOuts, attrs); + ngraph::ResultVector results{std::make_shared(detOut)}; + function = std::make_shared(results, params, "DetectionOutputDynamic"); + } + +private: + // define dynamic shapes dimension intervals + static void set_dimension_intervals(std::vector>>& inputShapes) { + for (auto& input_shape : inputShapes) { + const auto model_dynamic_shape = input_shape.first; + OPENVINO_ASSERT(model_dynamic_shape.is_dynamic(), "input shape is not dynamic"); + + const auto inputShapeRank = 
model_dynamic_shape.rank(); + OPENVINO_ASSERT(!inputShapeRank.is_dynamic(), "input shape rank is dynamic"); + + for (auto dimension = 0; dimension < inputShapeRank.get_length(); ++dimension) { + auto interval_min = -1; + auto interval_max = 0; + for (auto& input_static_shape : input_shape.second) { + if ((interval_min == -1) || (static_cast(interval_min) > input_static_shape[dimension])) { + interval_min = input_static_shape[dimension]; + } + if (static_cast(interval_max) < input_static_shape[dimension]) { + interval_max = input_static_shape[dimension]; + } + } + + input_shape.first[dimension] = { + interval_min, + interval_min == interval_max ? (interval_max + 1) : interval_max }; + } + } + } + ngraph::op::DetectionOutputAttrs attrs; + std::vector inShapes; +}; + +TEST_P(DetectionOutputLayerGPUTest, CompareWithRefs) { + SKIP_IF_CURRENT_TEST_IS_DISABLED() + + run(); +} + +namespace { + +const int numClasses = 11; +const int backgroundLabelId = 0; +const std::vector topK = {75}; +const std::vector> keepTopK = { {50}, {100} }; +const std::vector codeType = {"caffe.PriorBoxParameter.CORNER", "caffe.PriorBoxParameter.CENTER_SIZE"}; +const float nmsThreshold = 0.5f; +const float confidenceThreshold = 0.3f; +const std::vector clipAfterNms = {true, false}; +const std::vector clipBeforeNms = {true, false}; +const std::vector decreaseLabelId = {true, false}; +const float objectnessScore = 0.4f; +const std::vector numberBatch = {1, 2}; + +const auto commonAttributes = ::testing::Combine( + ::testing::Values(numClasses), + ::testing::Values(backgroundLabelId), + ::testing::ValuesIn(topK), + ::testing::ValuesIn(keepTopK), + ::testing::ValuesIn(codeType), + ::testing::Values(nmsThreshold), + ::testing::Values(confidenceThreshold), + ::testing::ValuesIn(clipAfterNms), + ::testing::ValuesIn(clipBeforeNms), + ::testing::ValuesIn(decreaseLabelId) +); + +/* =============== 3 inputs cases =============== */ + +const std::vector specificParams3InDynamic = { + // dynamic input shapes + ParamsWhichSizeDependsDynamic { + true, true, true, 1, 1, + { + // input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 60}, {1, 120}} + }, + { + // input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 165}, {1, 330}} + }, + { + // input model dynamic shapes + {ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, + // input tensor shapes + {{1, 1, 60}, {1, 1, 120}} + }, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + true, false, true, 1, 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 60}, {1, 1, 120}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + false, true, true, 1, 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, {1, 2, 120}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + false, false, true, 1, 1, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 60}, 
{1, 2, 120}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + true, true, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + true, false, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 1, 75}, {1, 1, 150}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + false, true, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 60}, {1, 120}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, + {}, + {} + }, + ParamsWhichSizeDependsDynamic { + false, false, false, 10, 10, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 660}, {1, 1320}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 165}, {1, 330}}}, + {{ov::Dimension::dynamic(), ov::Dimension::dynamic(), ov::Dimension::dynamic()}, {{1, 2, 75}, {1, 2, 150}}}, + {}, + {} + }, +}; + +const auto params3InputsDynamic = ::testing::Combine( + commonAttributes, + ::testing::ValuesIn(specificParams3InDynamic), + ::testing::ValuesIn(numberBatch), + ::testing::Values(objectnessScore), + ::testing::Values(false, true), + ::testing::Values(CommonTestUtils::DEVICE_GPU) +); + +INSTANTIATE_TEST_SUITE_P(smoke_GPUDetectionOutputDynamic3In, DetectionOutputLayerGPUTest, + params3InputsDynamic, + DetectionOutputLayerGPUTest::getTestCaseName); + +} // namespace +} // namespace GPULayerTestsDefinitions diff --git a/src/plugins/intel_gpu/tests/unit/shape_infer/detection_output_si_test.cpp b/src/plugins/intel_gpu/tests/unit/shape_infer/detection_output_si_test.cpp new file mode 100644 index 00000000000000..5beae0babf2eaa --- /dev/null +++ b/src/plugins/intel_gpu/tests/unit/shape_infer/detection_output_si_test.cpp @@ -0,0 +1,117 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "test_utils.h" + +#include +#include +#include + +#include "detection_output_inst.h" + +#include "program_wrapper.h" + +#include +#include + +using namespace cldnn; +using namespace ::tests; + +namespace shape_infer_tests { + +const int background_label_id = 0; +const float nms_threshold = 0.5f; +const float eta = 1.f; +const float confidence_threshold = 0.3f; +const prior_box_code_type code_type = prior_box_code_type::corner; +const int input_width = 1; +const int input_height = 1; +const bool decrease_label_id = false; +const bool clip_before_nms = false; +const bool clip_after_nms = false; +const float objectness_score = 0.0f; + +struct detection_output_test_params { + std::vector in_layouts; + int num_classes; + int top_k; + bool variance_encoded_in_target; + int keep_top_k; + bool share_location; + bool normalized; + layout expected_layout; +}; + +class detection_output_test : public testing::TestWithParam { }; + +TEST_P(detection_output_test, shape_infer) { + auto p = GetParam(); + + auto& engine = get_test_engine(); + + cldnn::program prog(engine); + std::vector> input_prims; + std::vector input_prim_ids; + for (size_t i = 0; i < 
p.in_layouts.size(); i++) {
+        auto prim_id = "input" + std::to_string(i);
+        auto input_layout_prim = std::make_shared<input_layout>(prim_id, p.in_layouts[i]);
+        input_prims.push_back(input_layout_prim);
+        input_prim_ids.push_back(input_info(prim_id));
+    }
+
+    int prior_info_size = p.normalized != 0 ? 4 : 5;
+    int prior_coordinates_offset = p.normalized != 0 ? 0 : 1;
+
+    auto detection_output_prim = std::make_shared<detection_output>("detection_output",
+                                                                    input_prim_ids,
+                                                                    p.num_classes,
+                                                                    p.keep_top_k,
+                                                                    p.share_location,
+                                                                    background_label_id,
+                                                                    nms_threshold,
+                                                                    p.top_k,
+                                                                    eta,
+                                                                    code_type,
+                                                                    p.variance_encoded_in_target,
+                                                                    confidence_threshold,
+                                                                    prior_info_size,
+                                                                    prior_coordinates_offset,
+                                                                    p.normalized,
+                                                                    input_width,
+                                                                    input_height,
+                                                                    decrease_label_id,
+                                                                    clip_before_nms,
+                                                                    clip_after_nms,
+                                                                    objectness_score);
+    auto& detection_output_node = prog.get_or_create(detection_output_prim);
+    for (auto& prim : input_prims) {
+        auto& input_layout_node = prog.get_or_create(prim);
+        program_wrapper::add_connection(prog, input_layout_node, detection_output_node);
+    }
+
+    auto res = detection_output_inst::calc_output_layouts<ov::PartialShape>(detection_output_node, *detection_output_node.get_kernel_impl_params());
+
+    ASSERT_EQ(res.size(), 1);
+    ASSERT_EQ(res[0], p.expected_layout);
+}
+
+INSTANTIATE_TEST_SUITE_P(smoke, detection_output_test,
+    testing::ValuesIn(std::vector<detection_output_test_params>{
+        {
+            {layout{ov::PartialShape{1, 60}, data_types::f32, format::bfyx},
+             layout{ov::PartialShape{1, 165}, data_types::f32, format::bfyx},
+             layout{ov::PartialShape{1, 1, 60}, data_types::f32, format::bfyx}},
+            11, 75, true, 50, true, true,
+            layout{ov::PartialShape{1, 1, 50, 7}, data_types::f32, format::bfyx}
+        },
+        {
+            {layout{ov::PartialShape::dynamic(2), data_types::f32, format::bfyx},
+             layout{ov::PartialShape::dynamic(2), data_types::f32, format::bfyx},
+             layout{ov::PartialShape::dynamic(3), data_types::f32, format::bfyx}},
+            11, 75, true, 50, true, true,
+            layout{ov::PartialShape{1, 1, ov::Dimension::dynamic(), 7}, data_types::f32, format::bfyx}
+        }
+    }));
+
+} // shape_infer_tests
diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp
index 08fa4e65c5b1ec..c00252dfbb19a7 100644
--- a/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp
+++ b/src/plugins/intel_gpu/tests/unit/test_cases/detection_output_test.cpp
@@ -146,7 +146,7 @@ class detection_output_test : public ::testing::Test {
         topology.add(input_layout("input_confidence", input_confidence->get_layout()));
         topology.add(input_layout("input_prior_box", input_prior_box->get_layout()));

-        topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k));
+        topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k));

         cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test);

@@ -180,8 +180,8 @@ class detection_output_test : public ::testing::Test {
         topology.add(input_layout("input_confidence", input_confidence->get_layout()));
         topology.add(input_layout("input_prior_box", input_prior_box->get_layout()));

-        topology.add(detection_output("detection_output_1", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k));
-
topology.add(detection_output("detection_output_2", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k)); + topology.add(detection_output("detection_output_1", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k)); + topology.add(detection_output("detection_output_2", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -223,7 +223,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box")}, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -271,7 +271,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -313,7 +313,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -366,7 +366,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), 
input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -425,7 +425,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, nms_threshold, top_k, eta, code_type, variance_encoded_in_target, confidence_threshold, prior_info_size, prior_coordinates_offset, prior_is_normalized, input_width, input_height, decrease_label_id @@ -483,7 +483,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -544,7 +544,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -592,7 +592,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); + topology.add(detection_output("detection_output", { input_info("input_location"), 
input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -643,7 +643,7 @@ class detection_output_test : public ::testing::Test { topology.add(input_layout("input_confidence", input_confidence->get_layout())); topology.add(input_layout("input_prior_box", input_prior_box->get_layout())); - topology.add(detection_output("detection_output", input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); + topology.add(detection_output("detection_output", { input_info("input_location"), input_info("input_confidence"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); cldnn::network::ptr network = get_network(engine, topology, get_test_default_config(engine), get_test_stream_ptr(), is_caching_test); @@ -689,7 +689,7 @@ class detection_output_test : public ::testing::Test { topology.add(reorder("input_location_padded", input_info("input_location"), input_location->get_layout().with_padding(padding{ { 0, 0, 12, 3 },{ 0, 0, 5, 11 } }))); topology.add(reorder("input_confidence_padded", input_info("input_confidence"), input_location->get_layout().with_padding(padding{ { 0, 0, 2, 7 },{ 0, 0, 13, 1 } }))); - topology.add(detection_output("detection_output", input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box"), this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); + topology.add(detection_output("detection_output", { input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k)); topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, type_to_data_type::value)); auto config = get_test_default_config(engine); @@ -753,7 +753,7 @@ class detection_output_test : public ::testing::Test { topology.add(reorder("input_confidence_padded", input_info("input_confidence"), input_location->get_layout().with_padding(padding{ { 0, 0, 2, 7 },{ 0, 0, 13, 1 } }))); topology.add(reorder("output_reorder", input_info("detection_output"), format::bfyx, type_to_data_type::value)); - topology.add(detection_output("detection_output", input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box"), + topology.add(detection_output("detection_output", { input_info("input_location_padded"), input_info("input_confidence_padded"), input_info("input_prior_box") }, this->num_classes, keep_top_k, share_location, background_label_id, this->nms_threshold, top_k, eta, code_type, variance_encoded_in_target, confidence_threshold, prior_info_size, prior_coordinates_offset, prior_is_normalized, this->img_size, this->img_size diff --git a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp index e63375775363ba..86c8debb96d51c 100644 --- a/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp +++ 
b/src/plugins/template/tests/functional/shared_tests_instances/behavior/plugin/set_preprocess.cpp @@ -57,6 +57,7 @@ const std::vector netLayouts = { const std::vector ioLayouts = { InferenceEngine::Layout::NCHW, + InferenceEngine::Layout::NHWC, }; INSTANTIATE_TEST_SUITE_P(smoke_BehaviorTests, InferRequestPreprocessConversionTest, diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/cache.hpp index 43ac92d1b71be5..1ed336b3a4ae5a 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/cache.hpp @@ -9,6 +9,7 @@ #include "openvino/openvino.hpp" #include "cache/meta/meta_info.hpp" +#include "matchers/single_op/manager.hpp" namespace ov { namespace tools { @@ -19,6 +20,7 @@ class ICache { virtual void update_cache(const std::shared_ptr& model, const std::string& source_model, bool extract_body = true) {}; virtual void serialize_cache() {}; + virtual void reset_cache() {}; void set_serialization_dir(const std::string& serialization_dir) { m_serialization_dir = serialization_dir; @@ -26,7 +28,6 @@ class ICache { protected: size_t m_serialization_timeout = 60; - // NOLINT Static/global string variables are not permitted std::string m_serialization_dir = "."; ICache() = default; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/graph_cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/graph_cache.hpp index d5dbdceeb1986b..335863d7e611f4 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/graph_cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/graph_cache.hpp @@ -6,13 +6,18 @@ #include "cache/cache.hpp" +#include "cache/meta/input_info.hpp" +#include "matchers/subgraph/manager.hpp" +#include "matchers/subgraph/subgraph.hpp" + namespace ov { namespace tools { namespace subgraph_dumper { -class GraphCache final : public virtual ICache { +class GraphCache : public virtual ICache { public: - void update_cache(const std::shared_ptr& model, const std::string& model_meta_data, + void update_cache(const std::shared_ptr& model, + const std::string& model_meta_data, bool extract_body = true) override; void serialize_cache() override; @@ -28,10 +33,22 @@ class GraphCache final : public virtual ICache { m_cache_instance = nullptr; } -private: + void reset_cache() override { + reset(); + }; + +protected: std::map, MetaInfo> m_graph_cache; + ExtractorsManager m_manager = ExtractorsManager(); static std::shared_ptr m_cache_instance; - GraphCache() = default; + + GraphCache() { + ExtractorsManager::ExtractorsMap matchers = {}; + m_manager.set_extractors(matchers); + } + + void update_cache(const std::shared_ptr& model, const std::string& model_path, + const std::map& input_info, size_t model_op_cnt); }; } // namespace subgraph_dumper diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/input_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/input_info.hpp index 9f127e91b775cb..9cd79a3f1b1990 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/input_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/input_info.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include 
"openvino/openvino.hpp" namespace ov { namespace tools { @@ -51,6 +52,8 @@ struct InputInfo { } }; +using ExtractedPattern = std::pair, std::map>; + } // namespace subgraph_dumper } // namespace tools } // namespace ov diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/meta_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/meta_info.hpp index 3e64de503fb7da..1ded7e0044d16f 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/meta_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/meta_info.hpp @@ -26,11 +26,10 @@ class MetaInfo { // { model_name: model_paths, this_op/graph_cnt, total_op_cnt, model_priority} std::map model_info; - // to store model priority ranges to normilize graph_priority static unsigned long MAX_MODEL_PRIORITY; static unsigned long MIN_MODEL_PRIORITY; - + double get_graph_priority(); std::string get_model_name_by_path(const std::string& model_path); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/model_info.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/model_info.hpp index 3ed0012b37fe6b..408c6cdcc603b8 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/model_info.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/meta/model_info.hpp @@ -17,7 +17,7 @@ struct ModelInfo { ModelInfo(const std::string& model_path = "", size_t total_ops_in_model = 1, size_t _model_priority = 1) : total_op_cnt(total_ops_in_model), model_paths({model_path}), - this_op_cnt(1), model_priority(_model_priority) {}; + this_op_cnt(1), model_priority(_model_priority) {} bool operator==(const ModelInfo& model_info_ref) const { if (this->model_priority != model_info_ref.model_priority || this->this_op_cnt != model_info_ref.this_op_cnt || diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/op_cache.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/op_cache.hpp index 24af36dbba548a..feca24414d63cb 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/op_cache.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/cache/op_cache.hpp @@ -6,9 +6,8 @@ #include "cache/cache.hpp" -#include "single_op_matchers/manager.hpp" -#include "single_op_matchers/base.hpp" -#include "single_op_matchers/convolutions.hpp" +#include "matchers/single_op/single_op.hpp" +#include "matchers/single_op/convolutions.hpp" namespace ov { namespace tools { @@ -32,6 +31,10 @@ class OpCache : public ICache { m_cache_instance = nullptr; } + void reset_cache() override { + reset(); + }; + protected: std::map, MetaInfo> m_ops_cache; static std::shared_ptr m_cache_instance; @@ -39,7 +42,7 @@ class OpCache : public ICache { OpCache() { MatchersManager::MatchersMap matchers = { - { "generic_single_op", BaseMatcher::Ptr(new BaseMatcher) }, + { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, }; m_manager.set_matchers(matchers); diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/config.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/config.hpp similarity index 100% rename from 
src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/config.hpp
rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/config.hpp
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/convolutions.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/convolutions.hpp
similarity index 86%
rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/convolutions.hpp
rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/convolutions.hpp
index 890bfaeae90a68..900d969083eb7a 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/convolutions.hpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/convolutions.hpp
@@ -4,13 +4,13 @@

 #pragma once

-#include "single_op_matchers/base.hpp"
+#include "matchers/single_op/single_op.hpp"

 namespace ov {
 namespace tools {
 namespace subgraph_dumper {

-class ConvolutionsMatcher : public BaseMatcher {
+class ConvolutionsMatcher : public SingleOpMatcher {
 public:
     ConvolutionsMatcher();
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/manager.hpp
new file mode 100644
index 00000000000000..d8895ca4a527f3
--- /dev/null
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/manager.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "matchers/single_op/single_op.hpp"
+#include "cache/meta/input_info.hpp"
+
+namespace ov {
+namespace tools {
+namespace subgraph_dumper {
+
+class MatchersManager {
+public:
+    using MatchersMap = std::map<std::string, SingleOpMatcher::Ptr>;
+    explicit MatchersManager(const MatchersMap& matchers = {}) : m_matchers(matchers) {}
+
+    bool match(const std::shared_ptr<ov::Node> &node,
+               const std::shared_ptr<ov::Node> &ref);
+
+    void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; }
+    const MatchersMap& get_matchers() { return m_matchers; }
+    iMatcherConfig::Ptr get_config(const std::shared_ptr<ov::Node> &node) const;
+
+protected:
+    MatchersMap m_matchers = {};
+};
+
+}  // namespace subgraph_dumper
+}  // namespace tools
+}  // namespace ov
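
A rough usage sketch of the new manager (the matcher map mirrors the one
OpCache registers earlier in this patch; `node` and `ref_node` are
hypothetical):

    MatchersManager::MatchersMap matchers = {
        { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) },
        { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) },
    };
    MatchersManager manager(matchers);
    bool is_same_op = manager.match(node, ref_node);  // true if any registered matcher matches
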
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/base.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/single_op.hpp
similarity index 90%
rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/base.hpp
rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/single_op.hpp
index 26fbba7ba0ce55..d6ef95ad99864c 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/base.hpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/single_op/single_op.hpp
@@ -1,23 +1,21 @@
 // Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-
 #pragma once
-
 #include
 #include "pugixml.hpp"

-#include "single_op_matchers/config.hpp"
+#include "matchers/single_op/config.hpp"

 namespace ov {
 namespace tools {
 namespace subgraph_dumper {

-class BaseMatcher {
+class SingleOpMatcher {
 public:
-    using Ptr = std::shared_ptr<BaseMatcher>;
-    BaseMatcher();
+    using Ptr = std::shared_ptr<SingleOpMatcher>;
+    SingleOpMatcher();

     virtual bool match(const std::shared_ptr<ov::Node> &node,
                        const std::shared_ptr<ov::Node> &ref) const;
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/manager.hpp
new file mode 100644
index 00000000000000..e9dad1b9adc0e2
--- /dev/null
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/manager.hpp
@@ -0,0 +1,32 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "matchers/subgraph/subgraph.hpp"
+
+namespace ov {
+namespace tools {
+namespace subgraph_dumper {
+
+class ExtractorsManager {
+public:
+    using ExtractorsMap = std::map<std::string, SubgraphExtractor::Ptr>;
+    explicit ExtractorsManager(const ExtractorsMap& extractors = {}) : m_extractors(extractors) {}
+
+    bool match(const std::shared_ptr<ov::Model> &model,
+               const std::shared_ptr<ov::Model> &ref);
+    std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
+                                        bool is_extract_body = true);
+
+    void set_extractors(const ExtractorsMap& extractors = {}) { m_extractors = extractors; }
+    ExtractorsMap get_extractors() { return m_extractors; }
+
+protected:
+    ExtractorsMap m_extractors = {};
+};
+
+}  // namespace subgraph_dumper
+}  // namespace tools
+}  // namespace ov
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/subgraph.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/subgraph.hpp
new file mode 100644
index 00000000000000..b0ab5b79c9e813
--- /dev/null
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/matchers/subgraph/subgraph.hpp
@@ -0,0 +1,44 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include
+
+#include "openvino/op/util/op_types.hpp"
+#include "common_test_utils/graph_comparator.hpp"
+#include "cache/meta/input_info.hpp"
+
+namespace ov {
+namespace tools {
+namespace subgraph_dumper {
+
+class SubgraphExtractor {
+public:
+    using Ptr = std::shared_ptr<SubgraphExtractor>;
+
+    bool match(const std::shared_ptr<ov::Model> &model,
+               const std::shared_ptr<ov::Model> &ref_model) const;
+
+    virtual std::list<ExtractedPattern> extract(const std::shared_ptr<ov::Model> &model,
+                                                bool is_extract_body = true) {
+        return std::list<ExtractedPattern>{};
+    };
+
+protected:
+    FunctionsComparator comparator = FunctionsComparator::no_default()
+        .enable(FunctionsComparator::ATTRIBUTES)
+        .enable(FunctionsComparator::NODES)
+        .enable(FunctionsComparator::PRECISIONS);
+
+    inline bool is_node_to_skip(const std::shared_ptr<ov::Node>& node) const {
+        return ov::op::util::is_parameter(node) ||
+               ov::op::util::is_constant(node) ||
+               ov::op::util::is_output(node);
+    }
+};
+
+}  // namespace subgraph_dumper
+}  // namespace tools
+}  // namespace ov
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/manager.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/manager.hpp
deleted file mode 100644
index 6c14353f7eae9e..00000000000000
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/single_op_matchers/manager.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (C) 2018-2023 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-//
-
-#pragma once
-
-// #include "pugixml.hpp"
-// #include "ngraph/node.hpp"
-// #include "single_op_matchers/single_op.hpp"
-// #include
"single_op_matchers/convolutions.hpp" - -#include "single_op_matchers/base.hpp" - -namespace ov { -namespace tools { -namespace subgraph_dumper { - -class Matcher; - -class MatchersManager { -public: - using MatchersMap = std::map; - - explicit MatchersManager(const MatchersMap& matchers = {}) : m_matchers(matchers) {} - - bool match_all(const std::shared_ptr &node, - const std::shared_ptr &ref); - bool match_any(const std::shared_ptr &node, - const std::shared_ptr &ref); - void set_matchers(const MatchersMap& matchers = {}) { m_matchers = matchers; } - iMatcherConfig::Ptr get_config(const std::shared_ptr &node) const; - -private: - std::vector run_matchers(const std::shared_ptr &node, - const std::shared_ptr &ref); - - MatchersMap m_matchers = {}; -}; - -} // namespace subgraph_dumper -} // namespace tools -} // namespace ov diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/model.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/model.hpp index 460de75c178473..abaeb3b036e1d6 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/model.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/model.hpp @@ -7,6 +7,7 @@ #include #include #include +#include #include "openvino/util/file_util.hpp" @@ -15,6 +16,7 @@ #include "functional_test_utils/ov_plugin_cache.hpp" #include "cache/cache.hpp" +#include "utils/node.hpp" namespace ov { namespace tools { @@ -54,81 +56,21 @@ static std::map model_cache_status_to_str = { { ModelCacheStatus::NOT_READ, "not_read_models" }, }; -inline std::vector find_models(const std::vector &dirs, const std::string& regexp = ".*") { - std::vector models, full_content; - for (const auto& dir : dirs) { - std::vector dir_content; - if (ov::util::directory_exists(dir)) { - dir_content = CommonTestUtils::getFileListByPatternRecursive({dir}, FROTEND_REGEXP); - } else if (ov::util::file_exists(dir) && std::regex_match(dir, std::regex(".*" + std::string(CommonTestUtils::LST_EXTENSION)))) { - dir_content = CommonTestUtils::readListFiles({dir}); - } else { - std::string msg = "Input directory (" + dir + ") doesn't not exist!"; - throw std::runtime_error(msg); - } - if (!dir_content.empty()) { - full_content.insert(full_content.end(), dir_content.begin(), dir_content.end()); - } - } - auto in_regex = std::regex(regexp); - for (const auto& file : full_content) { - if (std::regex_match(file, in_regex)) { - try { - models.emplace_back(file); - } catch (std::exception& e) { - std::cout << "Impossible to read model: " << file << std::endl << "Exception: " << e.what(); - } - } - } - return models; -} +std::vector find_models(const std::vector &dirs, const std::string& regexp = ".*"); // model_cache_status: model_list -inline std::map> cache_models( +std::map> cache_models( std::vector>& caches, const std::vector& models, - bool extract_body) { - std::map> cache_status = { - { ModelCacheStatus::SUCCEED, {} }, - { ModelCacheStatus::NOT_FULLY_CACHED, {} }, - { ModelCacheStatus::NOT_READ, {} } - }; - auto core = ov::test::utils::PluginCache::get().core(); + bool extract_body); - for (const auto& model : models) { - if (ov::util::file_exists(model)) { - std::cout << "Processing model: " << model << std::endl; - ModelCacheStatus model_status = ModelCacheStatus::SUCCEED; - try { - std::shared_ptr function = core->read_model(model); - try { - for (auto& cache : caches) { - cache->update_cache(function, model, extract_body); - } - } catch (std::exception &e) { - 
std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl; - model_status = ModelCacheStatus::NOT_FULLY_CACHED; - } - } catch (std::exception &e) { - model_status = ModelCacheStatus::NOT_READ; - std::cout << "Model reading failed with exception:" << std::endl << e.what() << std::endl; - } - cache_status[model_status].push_back(model); - } - } - return cache_status; -} +void save_model_status_to_file(const std::map>& caching_status, + const std::string& output_dir); -inline void save_model_status_to_file(const std::map>& caching_status, const std::string& output_dir) { - std::string cache_status_path = ov::util::path_join({output_dir, "model_caching_status"}); - if (!ov::util::directory_exists(cache_status_path)) { - ov::util::create_directory_recursive(cache_status_path); - } - for (const auto& status_info : caching_status) { - std::string output_file_path = ov::util::path_join({ cache_status_path, model_cache_status_to_str[status_info.first] + CommonTestUtils::LST_EXTENSION}); - CommonTestUtils::vec2File(status_info.second, output_file_path); - } -} +std::pair, std::map> +generate_model(const std::set>& nodes, + const std::shared_ptr& start_node, + std::unordered_set& checked_ops); } // namespace subgraph_dumper } // namespace tools diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/node.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/node.hpp index 426ff7de0245b3..b567c8363a868d 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/node.hpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include/utils/node.hpp @@ -7,7 +7,11 @@ #include "cache/meta/input_info.hpp" #include "functional_test_utils/summary/op_info.hpp" + #include "openvino/openvino.hpp" +#include "openvino/pass/manager.hpp" +#include "openvino/pass/constant_folding.hpp" +#include "openvino/op/util/op_types.hpp" namespace ov { namespace tools { @@ -22,136 +26,18 @@ inline InputInfo::Range get_const_ranges(const std::shared_ptr(min), static_cast(max)); } -inline std::map get_input_info_by_node(const std::shared_ptr& node) { - std::map input_info; - for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) { - InputInfo in_info; - std::shared_ptr input_node = node->input_value(port_id).get_node_shared_ptr(); - std::string input_name = input_node->get_friendly_name(); - if (std::dynamic_pointer_cast(input_node)) { - auto const_node = - std::dynamic_pointer_cast(input_node); - in_info.is_const = true; - switch (node->get_output_element_type(0)) { - case ov::element::Type_t::boolean: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::bf16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::f64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i8: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::i64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - // TODO cast_vector doesn't support u1 now - // case 
ov::element::Type_t::u1: - // return get_const_ranges(const_node); - case ov::element::Type_t::u8: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u16: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u32: { - in_info.ranges = get_const_ranges(const_node); - break; - } - case ov::element::Type_t::u64: { - in_info.ranges = get_const_ranges(const_node); - break; - } - default: { - std::cout << "Can't get ranges.. Unsupported data type" << std::endl; - break; - } - } - } - input_info.insert({ input_name, in_info }); - } - return input_info; -} +std::map get_input_info_by_node(const std::shared_ptr& node); // replace all input node by parameters and constants instead of non input mode types // if `!is_save_const` replace only by parameters // if `!is_copy_const_node` do not create new node with constants only as inputs -inline std::shared_ptr clone_node(std::shared_ptr node, - bool is_save_const = false, - bool is_copy_const_node = false) { - bool has_parameters = false; - ov::OutputVector inputs; - inputs.resize(node->get_input_size()); - std::string in_name_base = ov::test::functional::get_node_version(node); - for (size_t i = 0; i < node->get_input_size(); ++i) { - std::string node_name = in_name_base + "_" + std::to_string(i); - if (is_save_const) { - // todo: replace deprecated code - OPENVINO_SUPPRESS_DEPRECATED_START - const auto constant_input = ov::get_constant_from_source(node->input(i).get_source_output()); - OPENVINO_SUPPRESS_DEPRECATED_END - if (constant_input) { - auto in_const = std::make_shared(constant_input->get_element_type(), - constant_input->get_shape(), - constant_input->get_data_ptr()); - in_const->set_friendly_name(node_name); - inputs[i] = in_const; - continue; - } - } - has_parameters = true; - auto param = - std::make_shared(node->get_input_element_type(i), node->get_input_partial_shape(i)); - param->set_friendly_name(node_name); - inputs[i] = param; - } - if (!has_parameters && !is_copy_const_node) { - std::cout << "The operation: " + node->get_friendly_name() + " does not have parameters!" 
<< std::endl;
-        return nullptr;
-    }
-    std::shared_ptr<ov::Node> cloned_node = node->clone_with_new_inputs(inputs);
-    cloned_node->set_friendly_name(in_name_base);
-    return cloned_node;
-}
+std::shared_ptr<ov::Node> clone_node(std::shared_ptr<ov::Node> node,
+                                     bool is_save_const = false,
+                                     bool is_copy_const_node = false,
+                                     std::string node_name = "");
 
 // all inputs are defined as parameters and contains detailed info in meta
-inline std::shared_ptr<ov::Model> generate_model_by_node(const std::shared_ptr<ov::Node>& node) {
-    static size_t model_cnt = 0;
-    auto cloned_node = clone_node(node);
-    ov::OutputVector results;
-    for (auto& out : cloned_node->outputs()) {
-        results.push_back(std::make_shared<ov::op::v0::Result>(out));
-    }
-    auto model = std::make_shared<ov::Model>(results);
-    model->set_friendly_name(cloned_node->get_friendly_name() + "_" + std::to_string(model_cnt++));
-    return model;
-}
+std::shared_ptr<ov::Model> generate_model_by_node(const std::shared_ptr<ov::Node>& node);
 
 inline std::string get_node_type(const std::shared_ptr<ov::Node>& node) {
     for (size_t i = 0; i < node->get_input_size(); ++i) {
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/cache.cpp
index ab85fdfe495d03..c6284f5c01e06d
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/cache.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/cache.cpp
@@ -6,6 +6,7 @@
 #include "openvino/util/file_util.hpp"
 #include "openvino/pass/manager.hpp"
+#include "openvino/op/util/op_types.hpp"
 
 #include "common_test_utils/file_utils.hpp"
 
@@ -19,6 +20,31 @@ bool ICache::serialize_model(const std::pair<std::shared_ptr<ov::Model>, MetaInfo>& graph_info,
                              const std::string& rel_serialization_dir) {
     std::shared_ptr<ov::Model> model = graph_info.first;
     MetaInfo meta = graph_info.second;
+    std::map<std::shared_ptr<ov::Node>, std::shared_ptr<ov::Node>> nodes;
+    ov::ParameterVector param_vector;
+    for (const auto& op : model->get_ordered_ops()) {
+        if (ov::op::util::is_parameter(op)) {
+            auto param = std::dynamic_pointer_cast<ov::op::v0::Parameter>(op);
+            param_vector.push_back(param);
+        }
+        if (ov::op::util::is_constant(op)) {
+            auto op_to_replace = std::dynamic_pointer_cast<ov::op::v0::Constant>(op);
+            if (op_to_replace->get_byte_size() > 1024) {
+                auto param = std::make_shared<ov::op::v0::Parameter>(
+                    op_to_replace->get_output_element_type(0), op_to_replace->get_output_partial_shape(0));
+                nodes.insert({ op_to_replace, param });
+                param->set_friendly_name(op_to_replace->get_friendly_name());
+                param_vector.push_back(param);
+            }
+        }
+    }
+    for (const auto& node : nodes) {
+        ov::replace_node(node.first, node.second);
+    }
+    if (!nodes.empty()) {
+        model = std::make_shared<ov::Model>(model->get_results(), param_vector);
+    }
+
     std::string model_name = model->get_friendly_name();
     std::string abs_serialization_dir = ov::util::path_join({ m_serialization_dir, rel_serialization_dir });
     std::string xml_path = ov::util::path_join({ abs_serialization_dir, model_name + ".xml" });
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/graph_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/graph_cache.cpp
index 6832568729d9d0..dcbb9f61d04fb7
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/graph_cache.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/graph_cache.cpp
@@ -2,7 +2,13 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
+#include "openvino/op/util/op_types.hpp"
+
+#include "functional_test_utils/ov_plugin_cache.hpp"
+#include "common_test_utils/graph_comparator.hpp"
+
 #include "cache/graph_cache.hpp"
+#include
"utils/node.hpp" namespace ov { namespace tools { @@ -10,8 +16,51 @@ namespace subgraph_dumper { std::shared_ptr GraphCache::m_cache_instance = nullptr; -void GraphCache::update_cache(const std::shared_ptr& model, const std::string& model_meta_data, bool extract_body) {} -void GraphCache::serialize_cache() {} +void GraphCache::update_cache(const std::shared_ptr& model, + const std::string& model_meta_data, + bool extract_body) { + auto model_total_op = model->get_ops().size() - model->get_output_size() - model->inputs().size(); + auto extracted_patterns = m_manager.extract(model, extract_body); + if (extracted_patterns.empty()) { + return; + } + while (!extracted_patterns.empty()) { + auto it = extracted_patterns.begin(); + update_cache(it->first, model_meta_data, it->second, model_total_op); + extracted_patterns.pop_front(); + } + return; +} + +void GraphCache::update_cache(const std::shared_ptr& extracted_model, const std::string& model_path, + const std::map& input_info, size_t model_op_cnt) { + std::shared_ptr model_to_update = nullptr; + for (const auto& cached_model : m_graph_cache) { + if (m_manager.match(cached_model.first, extracted_model)) { + model_to_update = cached_model.first; + break; + } + } + if (model_to_update == nullptr) { + auto meta = MetaInfo(model_path, input_info, model_op_cnt); + m_graph_cache.insert({ extracted_model, meta }); + return; + } + m_graph_cache[model_to_update].update(model_path, input_info, model_op_cnt); + auto cached_model_size = model_to_update->get_graph_size(); + auto pattern_model_size = extracted_model->get_graph_size(); + if (pattern_model_size < cached_model_size) { + auto meta = m_graph_cache[model_to_update]; + m_graph_cache.erase(model_to_update); + m_graph_cache.insert({extracted_model, meta}); + } +} + +void GraphCache::serialize_cache() { + for (const auto& cache_item : m_graph_cache) { + serialize_model(cache_item, "subgraph"); + } +} } // namespace subgraph_dumper } // namespace tools diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/meta/meta_info.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/meta/meta_info.cpp index fb7bdc035cb2bf..74e532dd392619 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/meta/meta_info.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/meta/meta_info.cpp @@ -53,7 +53,7 @@ void MetaInfo::serialize(const std::string& serialization_path) { model_node.append_attribute("this_op_count").set_value(static_cast(model.second.this_op_cnt)); model_node.append_attribute("total_op_count").set_value(static_cast(model.second.total_op_cnt)); for (const auto& model_path : model.second.model_paths) { - model_node.append_child("path").append_child(model_path.c_str()); + model_node.append_child("path").append_child("model").append_attribute("path").set_value(model_path.c_str()); } } double graph_priority = get_graph_priority(); @@ -82,7 +82,7 @@ void MetaInfo::update(const std::string& _model_path, size_t _total_op_cnt, const std::vector& ignored_inputs) { if (input_info.size() != _input_info.size()) { - throw std::runtime_error("Uncompatible input info!"); + throw std::runtime_error("Incompatible input info!"); } std::string model_name = get_model_name_by_path(_model_path); if (model_info.find(model_name) != model_info.end()) { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/op_cache.cpp 
b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/op_cache.cpp index e4e1d8dd88d8c2..486f669dbd08ab 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/op_cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/cache/op_cache.cpp @@ -62,7 +62,7 @@ void OpCache::update_cache(const std::shared_ptr& node, return; cloned_node->set_friendly_name(ov::test::functional::get_node_version(cloned_node)); for (auto &&it : m_ops_cache) { - if (m_manager.match_any(it.first, cloned_node)) { + if (m_manager.match(it.first, cloned_node)) { std::cout << "Match " << cloned_node->get_type_info().name << " " << cloned_node->get_friendly_name() << " with " << it.first->get_friendly_name() << std::endl; find_op_in_cache = it.first; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp index 2b19e50e1318e1..bd258252ede9a4 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp @@ -15,7 +15,6 @@ int main(int argc, char *argv[]) { showUsage(); return 0; } - // SubgraphsDumper::ClonersMap::constant_size_threshold_mb = FLAGS_constants_size_threshold; std::vector local_cache_dirs = CommonTestUtils::splitStringByDelimiter(FLAGS_local_cache); std::vector dirs = CommonTestUtils::splitStringByDelimiter(FLAGS_input_folders); @@ -41,6 +40,9 @@ int main(int argc, char *argv[]) { caches.push_back(GraphCache::get()); } + for (auto& cache : caches) { + cache->set_serialization_dir(FLAGS_output_folder); + } std::map> cache_model_status; // Upload previously cached graphs to cache if (!FLAGS_local_cache.empty()) { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/convolutions.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/convolutions.cpp similarity index 95% rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/convolutions.cpp rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/convolutions.cpp index 7bc6ec1108403c..30b99c580df314 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/convolutions.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/convolutions.cpp @@ -4,7 +4,7 @@ #include "openvino/op/ops.hpp" -#include "single_op_matchers/convolutions.hpp" +#include "matchers/single_op/convolutions.hpp" using namespace ov::tools::subgraph_dumper; @@ -27,7 +27,6 @@ bool ConvolutionsMatcher::match(const std::shared_ptr &node, if (cfg->ignore_matching) { return false; } - if (!same_op_type(node, ref)) { return false; } @@ -45,7 +44,7 @@ bool ConvolutionsMatcher::match(const std::shared_ptr &node, bool ConvolutionsMatcher::match_inputs(const std::shared_ptr &node, const std::shared_ptr &ref) const { - if (!BaseMatcher::match_inputs(node, ref)) { + if (!SingleOpMatcher::match_inputs(node, ref)) { return false; } bool has_groups = std::dynamic_pointer_cast(node) || diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/manager.cpp new file mode 100644 index 00000000000000..a274d0d7786701 --- /dev/null +++ 
b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/manager.cpp
@@ -0,0 +1,28 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "matchers/single_op/manager.hpp"
+
+using namespace ov::tools::subgraph_dumper;
+
+iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr<ov::Node> &node) const {
+    if (node == nullptr) return nullptr;
+    for (const auto &it : m_matchers) {
+        auto default_config = it.second->get_config(node);
+        if (default_config->op_in_config(node)) {
+            return default_config;
+        }
+    }
+    return nullptr;
+}
+
+bool MatchersManager::match(const std::shared_ptr<ov::Node> &node,
+                            const std::shared_ptr<ov::Node> &ref) {
+    for (const auto &it : m_matchers) {
+        if (it.second->match(node, ref)) {
+            return true;
+        }
+    }
+    return false;
+}
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/base.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/single_op.cpp
similarity index 79%
rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/base.cpp
rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/single_op.cpp
index 50cc71633fad58..437b59b1ba78d4
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/base.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/single_op/single_op.cpp
@@ -1,20 +1,17 @@
- // Copyright (C) 2018-2023 Intel Corporation
+// Copyright (C) 2018-2023 Intel Corporation
 // SPDX-License-Identifier: Apache-2.0
 //
-// #include "matchers/single_op.hpp"
-// #include "ngraph/ops.hpp"
-// #include "ngraph/validation_util.hpp"
-// #include
-
-#include "single_op_matchers/base.hpp"
-#include "common_test_utils/graph_comparator.hpp"
 #include "openvino/op/convolution.hpp"
 #include "openvino/op/group_conv.hpp"
+#include "common_test_utils/graph_comparator.hpp"
+
+#include "matchers/single_op/single_op.hpp"
+
 using namespace ov::tools::subgraph_dumper;
 
-iMatcherConfig::Ptr BaseMatcher::get_config(const std::shared_ptr<ov::Node> &node) const {
+iMatcherConfig::Ptr SingleOpMatcher::get_config(const std::shared_ptr<ov::Node> &node) const {
     for (const auto &cfg : default_configs) {
         if (cfg->op_in_config(node)) {
             return cfg;
@@ -28,8 +25,8 @@ iMatcherConfig::Ptr BaseMatcher::get_config(const std::shared_ptr<ov::Node> &node) const {
     return std::make_shared<MatcherConfig<>>();
 }
 
-bool BaseMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
-                               const std::shared_ptr<ov::Node> &ref) const {
+bool SingleOpMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
+                                   const std::shared_ptr<ov::Node> &ref) const {
     if (node->get_input_size() != ref->get_input_size()) {
         return false;
     }
@@ -58,8 +55,8 @@ bool BaseMatcher::match_inputs(const std::shared_ptr<ov::Node> &node,
 }
 
 bool
-BaseMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
-                           const std::shared_ptr<ov::Node> &ref) const {
+SingleOpMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
+                               const std::shared_ptr<ov::Node> &ref) const {
     if (node->get_output_size() != ref->get_output_size()) {
         return false;
     }
@@ -77,14 +74,14 @@ BaseMatcher::match_outputs(const std::shared_ptr<ov::Node> &node,
     return true;
 }
 
-bool BaseMatcher::match_attrs(const std::shared_ptr<ov::Node> &node,
-                              const std::shared_ptr<ov::Node> &ref) const {
+bool SingleOpMatcher::match_attrs(const std::shared_ptr<ov::Node> &node,
+                                  const std::shared_ptr<ov::Node> &ref) const {
     // todo: iefode: provide correct comparison with ignored attributes
     return attributes::compare(node.get(), ref.get(), Comparator::CmpValues::ATTRIBUTES).valid;
 }
 
-bool
BaseMatcher::match(const std::shared_ptr &node, - const std::shared_ptr &ref) const { +bool SingleOpMatcher::match(const std::shared_ptr &node, + const std::shared_ptr &ref) const { const auto &cfg = get_config(node); if (match_only_configured_ops() && cfg->is_fallback_config) { return false; @@ -108,12 +105,12 @@ bool BaseMatcher::match(const std::shared_ptr &node, return true; } -bool BaseMatcher::same_op_type(const std::shared_ptr &node, - const std::shared_ptr &ref) const { +bool SingleOpMatcher::same_op_type(const std::shared_ptr &node, + const std::shared_ptr &ref) const { return node->get_type_info() == ref->get_type_info(); } -BaseMatcher::BaseMatcher() { +SingleOpMatcher::SingleOpMatcher() { default_configs = { // std::make_shared>(std::vector{}, std::vector{0}), // std::make_shared>(std::vector{}, diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/manager.cpp new file mode 100644 index 00000000000000..2964cbcebcfa25 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/manager.cpp @@ -0,0 +1,27 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "matchers/subgraph/manager.hpp" + +using namespace ov::tools::subgraph_dumper; + +bool ExtractorsManager::match(const std::shared_ptr &model, + const std::shared_ptr &ref) { + for (const auto &it : m_extractors) { + if (it.second->match(model, ref)) { + return true; + } + } + return false; +} + +std::list +ExtractorsManager::extract(const std::shared_ptr &model, bool is_extract_body) { + std::list result; + for (const auto &it : m_extractors) { + auto extracted_patterns = it.second->extract(model, is_extract_body); + result.insert(result.end(), extracted_patterns.begin(), extracted_patterns.end()); + } + return result; +} diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/subgraph.cpp new file mode 100644 index 00000000000000..95e706e88787d8 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/matchers/subgraph/subgraph.cpp @@ -0,0 +1,36 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// +#include "matchers/single_op/single_op.hpp" +#include "matchers/single_op/convolutions.hpp" +#include "matchers/single_op/manager.hpp" +#include "matchers/subgraph/subgraph.hpp" + +using namespace ov::tools::subgraph_dumper; + +bool +SubgraphExtractor::match(const std::shared_ptr &model, + const std::shared_ptr &ref_model) const { + bool res = comparator.compare(model, ref_model).valid; + if (res) { + return res; + } + std::vector> ordered_ops = model->get_ordered_ops(), + ref_ordered_ops = ref_model->get_ordered_ops(); + if (ordered_ops.size() != ref_ordered_ops.size()) + return false; + + MatchersManager::MatchersMap matchers = { + { "generic_single_op", SingleOpMatcher::Ptr(new SingleOpMatcher) }, + { "convolutions", ConvolutionsMatcher::Ptr(new ConvolutionsMatcher) }, + }; + MatchersManager manager(matchers); + for (size_t i = 0; i < ordered_ops.size(); ++i) { + if (is_node_to_skip(ordered_ops[i]) && is_node_to_skip(ref_ordered_ops[i])) + continue; + if (!manager.match(ordered_ops[i], ref_ordered_ops[i])) { + return false; + } + } + return true; +} diff --git 
a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/manager.cpp deleted file mode 100644 index 86eeed549a03c1..00000000000000 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/single_op_matchers/manager.cpp +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2018-2023 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 -// - -#include "single_op_matchers/manager.hpp" - -using namespace ov::tools::subgraph_dumper; - -iMatcherConfig::Ptr MatchersManager::get_config(const std::shared_ptr &node) const { - if (node == nullptr) return nullptr; - for (const auto &it : m_matchers) { - auto default_config = it.second->get_config(node); - if (default_config->op_in_config(node)) { - return default_config; - } - } - return nullptr; -} - -bool MatchersManager::match_any(const std::shared_ptr &node, - const std::shared_ptr &ref) { - for (const auto &it : m_matchers) { - if (it.second->match(node, ref)) return true; - } - return false; -} - -bool MatchersManager::match_all(const std::shared_ptr &node, - const std::shared_ptr &ref) { - const auto matches = this->run_matchers(node, ref); - return std::all_of(matches.begin(), matches.end(), [](bool i) { return i; }); -} - -std::vector MatchersManager::run_matchers(const std::shared_ptr &node, - const std::shared_ptr &ref) { - std::vector matches; - for (const auto &it : m_matchers) { - matches.push_back(it.second->match(node, ref)); - } - return matches; -} diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/model.cpp new file mode 100644 index 00000000000000..e30a2ff2be523c --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/model.cpp @@ -0,0 +1,173 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "utils/model.hpp" + +namespace ov { +namespace tools { +namespace subgraph_dumper { + +inline std::unordered_map> +update_nodes(const std::set>& nodes, + const std::shared_ptr& start_node) { + std::unordered_map> model_map; + auto cloned_op = clone_node(start_node, true, false, "Op_" + std::to_string(model_map.size())); + model_map.insert({ start_node->get_friendly_name(), cloned_op }); + + for (const auto& op : nodes) { + if (ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || + ov::op::util::is_output(op) || op == start_node) { + continue; + } + cloned_op = clone_node(op, true, false, "Op_" + std::to_string(model_map.size())); + model_map.insert({ op->get_friendly_name(), cloned_op }); + } + + for (const auto& op : nodes) { + if (ov::op::util::is_parameter(op) || ov::op::util::is_constant(op) || + ov::op::util::is_output(op) || op == start_node) { + continue; + } + auto op_name = op->get_friendly_name(); + cloned_op = model_map[op->get_friendly_name()]; + size_t inputs_size = op->inputs().size(); + ov::OutputVector in_out_vector(inputs_size); + bool is_input_filled = false; + for (size_t in_idx = 0; in_idx < inputs_size; ++in_idx) { + bool is_this_input_filled = false; + auto in_node = op->get_input_node_ptr(in_idx)->shared_from_this(); + for (size_t in_out_idx = 0; in_out_idx < in_node->outputs().size(); ++in_out_idx) { + for (const auto& target_input : in_node->output(in_out_idx).get_target_inputs()) { + auto out_in_node = target_input.get_node()->shared_from_this(); + if (out_in_node 
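+                        /* Resolve which output of the producer feeds input `in_idx` of the
+                           current op: if the producer was cloned into `model_map`, reuse its
+                           matching output port; otherwise fall back to the parameter/constant
+                           that clone_node() created for this input. */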
== op) {
+                        auto in_node_name = in_node->get_friendly_name();
+                        in_out_vector[in_idx] = model_map.count(in_node_name) ?
+                                                model_map.at(in_node_name)->output(in_out_idx) :
+                                                cloned_op->get_input_node_ptr(in_idx)->output(0);
+                        is_this_input_filled = true;
+                        break;
+                    }
+                }
+                if (is_this_input_filled) {
+                    is_input_filled = true;
+                    break;
+                }
+            }
+        }
+        if (!is_input_filled && op_name != start_node->get_friendly_name()) {
+            model_map.erase(op_name);
+        } else {
+            model_map[op_name] = cloned_op->clone_with_new_inputs(in_out_vector);
+        }
+    }
+    return model_map;
+}
+
+std::pair<std::shared_ptr<ov::Model>, std::map<std::string, InputInfo>>
+generate_model(const std::set<std::shared_ptr<ov::Node>>& nodes,
+               const std::shared_ptr<ov::Node>& start_node,
+               std::unordered_set<std::string>& checked_ops) {
+    auto model_map = update_nodes(nodes, start_node);
+    if (model_map.size() < 2) {
+        throw std::runtime_error("Incorrect node number to create model");
+    }
+
+    ov::OutputVector results;
+    std::map<std::string, InputInfo> input_info;
+    for (const auto& op : model_map) {
+        checked_ops.insert(op.first);
+        auto this_input_info = get_input_info_by_node(op.second);
+        input_info.insert(this_input_info.begin(), this_input_info.end());
+        for (size_t j = 0; j < op.second->outputs().size(); ++j) {
+            if (op.second->output(j).get_target_inputs().empty()) {
+                results.push_back(std::make_shared<ov::op::v0::Result>(op.second->output(j)));
+            }
+        }
+    }
+    return { std::make_shared<ov::Model>(results), input_info };
+}
+
+void save_model_status_to_file(const std::map<ModelCacheStatus, std::vector<std::string>>& caching_status,
+                               const std::string& output_dir) {
+    std::string cache_status_path = ov::util::path_join({output_dir, "model_caching_status"});
+    if (!ov::util::directory_exists(cache_status_path)) {
+        ov::util::create_directory_recursive(cache_status_path);
+    }
+    for (const auto& status_info : caching_status) {
+        std::string output_file_path = ov::util::path_join({ cache_status_path, model_cache_status_to_str[status_info.first] + CommonTestUtils::LST_EXTENSION});
+        CommonTestUtils::vec2File(status_info.second, output_file_path);
+    }
+}
+
+std::vector<std::string> find_models(const std::vector<std::string> &dirs, const std::string& regexp) {
+    std::vector<std::string> models, full_content;
+    for (const auto& dir : dirs) {
+        std::vector<std::string> dir_content;
+        if (ov::util::directory_exists(dir)) {
+            dir_content = CommonTestUtils::getFileListByPatternRecursive({dir}, FROTEND_REGEXP);
+        } else if (ov::util::file_exists(dir) && std::regex_match(dir, std::regex(".*" + std::string(CommonTestUtils::LST_EXTENSION)))) {
+            dir_content = CommonTestUtils::readListFiles({dir});
+        } else {
+            std::string msg = "Input directory (" + dir + ") doesn't exist!";
+            throw std::runtime_error(msg);
+        }
+        if (!dir_content.empty()) {
+            full_content.insert(full_content.end(), dir_content.begin(), dir_content.end());
+        }
+    }
+    auto in_regex = std::regex(regexp);
+    for (const auto& file : full_content) {
+        if (std::regex_match(file, in_regex)) {
+            try {
+                models.emplace_back(file);
+            } catch (std::exception& e) {
+                std::cout << "Impossible to read model: " << file << std::endl << "Exception: " << e.what();
+            }
+        }
+    }
+    return models;
+}
+
+std::map<ModelCacheStatus, std::vector<std::string>> cache_models(
+    std::vector<std::shared_ptr<ICache>>& caches,
+    const std::vector<std::string>& models,
+    bool extract_body) {
+    std::map<ModelCacheStatus, std::vector<std::string>> cache_status = {
+        { ModelCacheStatus::SUCCEED, {} },
+        { ModelCacheStatus::NOT_FULLY_CACHED, {} },
+        { ModelCacheStatus::NOT_READ, {} }
+    };
+    auto core = ov::test::utils::PluginCache::get().core();
+
+    for (auto& cache : caches) {
+        for (const auto& model : models) {
+            if (ov::util::file_exists(model)) {
+                std::cout << "Processing model: " << model << std::endl;
+                ModelCacheStatus model_status = ModelCacheStatus::SUCCEED;
+                try {
+
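+                    // Two failure tiers are tracked per model: an exception from
+                    // read_model() marks it NOT_READ, while an exception from any
+                    // cache update marks it NOT_FULLY_CACHED.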
std::shared_ptr function = core->read_model(model); + try { + for (auto& cache : caches) { + cache->update_cache(function, model, extract_body); + } + } catch (std::exception &e) { + std::cout << "Model processing failed with exception:" << std::endl << e.what() << std::endl; + model_status = ModelCacheStatus::NOT_FULLY_CACHED; + } + } catch (std::exception &e) { + model_status = ModelCacheStatus::NOT_READ; + std::cout << "Model reading failed with exception:" << std::endl << e.what() << std::endl; + } + cache_status[model_status].push_back(model); + } + } + cache->serialize_cache(); + cache->reset_cache(); + } + return cache_status; +} + +} // namespace subgraph_dumper +} // namespace tools +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/node.cpp new file mode 100644 index 00000000000000..22ea97a37fbcbb --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/utils/node.cpp @@ -0,0 +1,155 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "utils/node.hpp" + +namespace ov { +namespace tools { +namespace subgraph_dumper { + +std::map get_input_info_by_node(const std::shared_ptr& node) { + std::map input_info; + for (size_t port_id = 0; port_id < node->get_input_size(); ++port_id) { + InputInfo in_info; + std::shared_ptr input_node = node->get_input_node_shared_ptr(port_id); + std::string input_name = input_node->get_friendly_name(); + if (std::dynamic_pointer_cast(input_node)) { + auto const_node = + std::dynamic_pointer_cast(input_node); + in_info.is_const = true; + switch (node->get_output_element_type(0)) { + case ov::element::Type_t::boolean: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::bf16: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f16: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f32: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::f64: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i8: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i16: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i32: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::i64: { + in_info.ranges = get_const_ranges(const_node); + break; + } + // TODO cast_vector doesn't support u1 now + // case ov::element::Type_t::u1: + // return get_const_ranges(const_node); + case ov::element::Type_t::u8: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u16: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u32: { + in_info.ranges = get_const_ranges(const_node); + break; + } + case ov::element::Type_t::u64: { + in_info.ranges = get_const_ranges(const_node); + break; + } + default: { + std::cout << "Can't get ranges.. 
Unsupported data type" << std::endl;
+                break;
+            }}
+        }
+        if (ov::op::util::is_parameter(input_node) || ov::op::util::is_constant(input_node)) {
+            input_info.insert({ input_name, in_info });
+        }
+    }
+    return input_info;
+}
+
+std::shared_ptr<ov::Node> clone_node(std::shared_ptr<ov::Node> node,
+                                     bool is_save_const,
+                                     bool is_copy_const_node,
+                                     std::string node_name) {
+    // pass::Manager pass_manager;
+    // pass_manager.register_pass<ov::pass::ConstantFolding>();
+    // auto model = std::make_shared<ov::Model>(node);
+    // pass_manager.run_passes(model, node);
+
+    bool has_parameters = false;
+    ov::OutputVector inputs;
+    inputs.resize(node->get_input_size());
+    if (node_name.empty()) {
+        node_name = ov::test::functional::get_node_version(node);
+    }
+    for (size_t i = 0; i < node->get_input_size(); ++i) {
+        std::string input_name = node_name + "_" + std::to_string(i);
+        // todo: replace deprecated code && remove this w/a for constant size
+        OPENVINO_SUPPRESS_DEPRECATED_START
+        const auto constant_input = ov::get_constant_from_source(node->input(i).get_source_output());
+        OPENVINO_SUPPRESS_DEPRECATED_END
+        if (constant_input) {
+            if (is_save_const || constant_input->get_byte_size() <= 1024) {
+                auto in_const = std::make_shared<ov::op::v0::Constant>(constant_input->get_element_type(),
+                                                                       constant_input->get_shape(),
+                                                                       constant_input->get_data_ptr());
+                in_const->set_friendly_name(input_name);
+                inputs[i] = in_const;
+                continue;
+            }
+        }
+        has_parameters = true;
+        auto param =
+            std::make_shared<ov::op::v0::Parameter>(node->get_input_element_type(i), node->get_input_partial_shape(i));
+        param->set_friendly_name(input_name);
+        inputs[i] = param;
+    }
+    if (!has_parameters && !is_copy_const_node) {
+        auto cloned_node = clone_node(node, true, true);
+        std::cout << "The operation: " + node->get_friendly_name() + " does not have parameters! Replacing its first input with a parameter!"
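+        /* Constant-only fallback: re-clone the op keeping its constants
+           (is_save_const and is_copy_const_node set), then swap the first input
+           for a Parameter so the extracted subgraph still has a feedable input. */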
<< std::endl; + auto param = + std::make_shared(cloned_node->get_input_element_type(0), cloned_node->get_input_partial_shape(0)); + std::string param_name = node_name + "_0"; + param->set_friendly_name(param_name); + auto node_to_replace = cloned_node->get_input_node_shared_ptr(0); + ov::replace_node(node_to_replace, param); + return cloned_node; + } + std::shared_ptr cloned_node = node->clone_with_new_inputs(inputs); + cloned_node->set_friendly_name(node_name); + return cloned_node; +} + +std::shared_ptr generate_model_by_node(const std::shared_ptr& node) { + static size_t model_cnt = 0; + auto cloned_node = clone_node(node); + ov::OutputVector results; + for (auto& out : cloned_node->outputs()) { + results.push_back(std::make_shared(out)); + } + auto model = std::make_shared(results); + model->set_friendly_name(cloned_node->get_friendly_name() + "_" + std::to_string(model_cnt++)); + return model; +} + +} // namespace subgraph_dumper +} // namespace tools +} // namespace ov \ No newline at end of file diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/CMakeLists.txt b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/CMakeLists.txt index ef22845bf907bb..62724fef8b2c82 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/CMakeLists.txt +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/CMakeLists.txt @@ -13,6 +13,7 @@ addIeTargetTest( ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/src/main.cpp INCLUDES ${OpenVINO_SOURCE_DIR}/src/tests/functional/plugin/conformance/subgraphs_dumper_new/include + ${CMAKE_CURRENT_SOURCE_DIR}/ LINK_LIBRARIES PRIVATE func_test_utils diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/cache.cpp index 7ada50c0eff43e..1d2e0c4e35a934 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/cache.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/cache.cpp @@ -28,7 +28,7 @@ class ICacheUnitTest : public ::testing::Test, void SetUp() override { model_name = "test_model"; - test_artifacts_dir = ov::util::path_join({CommonTestUtils::getCurrentWorkingDir(), "test_artifacts"}); + test_artifacts_dir = "test_artifacts"; test_model_path = ov::util::path_join({ test_artifacts_dir, model_name + ".xml" }); ov::util::create_directory_recursive(test_artifacts_dir); { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/graph_cache.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/graph_cache.cpp new file mode 100644 index 00000000000000..e029c5d8d71f3e --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/graph_cache.cpp @@ -0,0 +1,103 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include "gtest/gtest.h" + +#include "openvino/op/ops.hpp" +#include "openvino/util/file_util.hpp" +#include "openvino/op/util/op_types.hpp" + +#include "common_test_utils/file_utils.hpp" +#include "common_test_utils/graph_comparator.hpp" + +#include "cache/graph_cache.hpp" +#include "utils/node.hpp" +#include "test_models/model_0.hpp" +#include "test_models/model_1.hpp" +#include "test_models/model_2.hpp" + +namespace { + +using namespace ov::tools::subgraph_dumper; + +// ====================== Graph Cache Functional tests 
============================== + +class GraphCacheFuncTest : public ::testing::Test { +protected: + std::shared_ptr test_model; + std::string test_artifacts_dir, test_model_name, test_model_path; + + void SetUp() override { + test_model_name = "test_model_name"; + test_artifacts_dir = ov::util::path_join({CommonTestUtils::getCurrentWorkingDir(), "test_artifacts"}); + test_model_path = ov::util::path_join({test_artifacts_dir, test_model_name + ".xml"}); + ov::util::create_directory_recursive(test_artifacts_dir); + { + Model_0 test; + test_model = test.get(); + test_model->set_friendly_name(test_model_name); + } + }; + + void TearDown() override { + CommonTestUtils::removeDir(test_artifacts_dir); + GraphCache::reset(); + } +}; + +TEST_F(GraphCacheFuncTest, get_graph_cache) { + std::shared_ptr graph_cache = nullptr; + EXPECT_NO_THROW(graph_cache = ov::tools::subgraph_dumper::GraphCache::get()); + ASSERT_NE(graph_cache, nullptr); +} + +TEST_F(GraphCacheFuncTest, get_graph_cache_twice) { + std::shared_ptr graph_cache_0 = nullptr, graph_cache_1 = nullptr; + graph_cache_0 = ov::tools::subgraph_dumper::GraphCache::get(); + graph_cache_1 = ov::tools::subgraph_dumper::GraphCache::get(); + ASSERT_EQ(graph_cache_0, graph_cache_1); +} + +TEST_F(GraphCacheFuncTest, update_cache) { + auto graph_cache = ov::tools::subgraph_dumper::GraphCache::get(); + ASSERT_NO_THROW(graph_cache->update_cache(test_model, test_model_path, true)); + ASSERT_NO_THROW(graph_cache->update_cache(test_model, test_model_path, true)); +} + +TEST_F(GraphCacheFuncTest, serialize_cache) { + auto graph_cache = ov::tools::subgraph_dumper::GraphCache::get(); + graph_cache->set_serialization_dir(test_artifacts_dir); + ASSERT_NO_THROW(graph_cache->serialize_cache()); +} + +// ====================== Graph Cache Unit tests ============================== + +class GraphCacheUnitTest : public GraphCacheFuncTest, + public virtual GraphCache { +protected: + std::shared_ptr convert_node; + MetaInfo test_meta; + + void SetUp() override { + GraphCacheFuncTest::SetUp(); + } +}; + +TEST_F(GraphCacheUnitTest, update_cache_by_graph) { + // const std::shared_ptr& model, const std::string& model_path, + // const std::map& input_info, size_t model_op_cnt + Model_2 test; + auto model_to_cache = test.get(); + std::map in_info; + for (const auto& op : model_to_cache->get_ordered_ops()) { + if (ov::op::util::is_parameter(op)) { + in_info.insert({ op->get_friendly_name(), InputInfo()}); + } + } + this->update_cache(model_to_cache, test_model_path, in_info, model_to_cache->get_ordered_ops().size()); + ASSERT_EQ(m_graph_cache.size(), 1); +} +} // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/meta.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/meta.cpp index 154d2a76e721e3..6bf60c76f4fc48 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/meta.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/cache/meta.cpp @@ -5,7 +5,6 @@ #include #include "gtest/gtest.h" - #include "pugixml.hpp" #include "openvino/openvino.hpp" @@ -59,7 +58,7 @@ TEST_F(ModelInfoFuncTest, constructor) { // ======================== Meta Info Functional tests ============================================= -class MetaInfoFuncTest : public ::testing::Test{ +class MetaInfoFuncTest : public ::testing::Test { protected: std::string test_model_path, test_model_name; std::map test_in_info; @@ -148,7 +147,7 @@ TEST_F(MetaInfoUnitTest, serialize) { 
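+    // With the serialization change above, each path is now stored as
+    // <path><model path="..."/></path>, so the loop below reads the "path"
+    // attribute of the nested "model" element instead of the element name.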
ASSERT_EQ(model_info[model_name_xml].total_op_cnt, model_xml.attribute("total_op_count").as_uint()); auto paths = model_info[model_name_xml].model_paths; for (const auto& path_xml : model_xml.child("path")) { - auto path_xml_value = std::string(path_xml.name()); + auto path_xml_value = std::string(path_xml.attribute("path").value()); ASSERT_NE(std::find(paths.begin(), paths.end(), path_xml_value), paths.end()); } } diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/convolutions_matcher.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/convolutions_matcher.cpp similarity index 99% rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/convolutions_matcher.cpp rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/convolutions_matcher.cpp index cf147caccb9f2a..df1c85a990427e 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/convolutions_matcher.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/convolutions_matcher.cpp @@ -4,7 +4,7 @@ #include "gtest/gtest.h" -#include "single_op_matchers/convolutions.hpp" +#include "matchers/single_op/convolutions.hpp" #include "openvino/op/ops.hpp" namespace { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/generic_single_op.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/generic_single_op.cpp similarity index 97% rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/generic_single_op.cpp rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/generic_single_op.cpp index c322b1bc39bd14..b2c9b3e57499d6 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/generic_single_op.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/generic_single_op.cpp @@ -6,7 +6,7 @@ #include "openvino/op/ops.hpp" -#include "single_op_matchers/base.hpp" +#include "matchers/single_op/single_op.hpp" namespace { @@ -15,10 +15,10 @@ using namespace ov::tools::subgraph_dumper; class SingleOpMatcherTest : public ::testing::Test { protected: void SetUp() override { - matcher = BaseMatcher(); + matcher = SingleOpMatcher(); } - BaseMatcher matcher; + SingleOpMatcher matcher; }; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/manager.cpp new file mode 100644 index 00000000000000..26bf8ab94058ef --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/manager.cpp @@ -0,0 +1,65 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "openvino/op/abs.hpp" +#include "openvino/op/parameter.hpp" + +#include "matchers/single_op/manager.hpp" +#include "matchers/single_op/single_op.hpp" + +namespace { + +using namespace ov::tools::subgraph_dumper; + +// ======================= MatcherManager Unit tests ======================= +class MatchersManagerTest : public MatchersManager, + public ::testing::Test { +protected: + MatchersManager::MatchersMap test_map; + std::shared_ptr test_abs; + std::shared_ptr test_parameter; + + void SetUp() override { + test_map = { 
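+        // The map key is only a diagnostic label; matchers are tried one by one
+        // until the first acceptance. A minimal usage sketch (names illustrative):
+        //   MatchersManager manager({{ "generic", SingleOpMatcher::Ptr(new SingleOpMatcher) }});
+        //   bool same = manager.match(node_a, node_b);  // true if any matcher accepts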
+ { "test_matcher", SingleOpMatcher::Ptr(new SingleOpMatcher) }, + }; + test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_abs = + std::make_shared(test_parameter); + } +}; + +TEST_F(MatchersManagerTest, constructor) { + ASSERT_NO_THROW(auto m = MatchersManager()); + ASSERT_NO_THROW(auto m = MatchersManager(test_map)); +} + +TEST_F(MatchersManagerTest, set_matchers) { + ASSERT_NO_THROW(this->set_matchers(test_map)); + ASSERT_EQ(this->m_matchers, test_map); +} + +TEST_F(MatchersManagerTest, get_matchers) { + ASSERT_NO_THROW(this->set_matchers(test_map)); + ASSERT_NO_THROW(this->get_matchers()); + ASSERT_EQ(this->m_matchers, this->get_matchers()); +} + +TEST_F(MatchersManagerTest, get_config) { + ASSERT_NO_THROW(this->get_config(test_abs)); +} + +TEST_F(MatchersManagerTest, match) { + this->set_matchers(test_map); + ASSERT_NO_THROW(this->match(test_parameter, test_abs)); + ASSERT_NO_THROW(this->match(test_abs, test_abs)); + ASSERT_TRUE(this->match(test_abs, test_abs)); + ASSERT_TRUE(this->match(test_parameter, test_parameter)); + ASSERT_FALSE(this->match(test_parameter, test_abs)); +} + +} // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/matchers_config.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/matchers_config.cpp similarity index 98% rename from src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/matchers_config.cpp rename to src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/matchers_config.cpp index b88a60c71ab263..91a625541d7ff4 100644 --- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/matchers_config.cpp +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/single_op/matchers_config.cpp @@ -3,7 +3,7 @@ // #include "gtest/gtest.h" -#include "single_op_matchers/base.hpp" +#include "matchers/single_op/single_op.hpp" #include "openvino/op/ops.hpp" namespace { diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/manager.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/manager.cpp new file mode 100644 index 00000000000000..5107b20bdc2674 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/manager.cpp @@ -0,0 +1,95 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "matchers/subgraph/manager.hpp" +#include "matchers/subgraph/subgraph.hpp" + + +#include "openvino/op/abs.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace { + +using namespace ov::tools::subgraph_dumper; + +// ======================= ExtractorsManagerTest Unit tests ======================= +class ExtractorsManagerTest : public ExtractorsManager, + public ::testing::Test { +protected: + void SetUp() override { + test_map = { + { "test_matcher", SubgraphExtractor::Ptr(new SubgraphExtractor) }, + }; + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, 
ov::Shape{2, 5}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{2, 5}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + } + + ExtractorsManager::ExtractorsMap test_map; + std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; +}; + +TEST_F(ExtractorsManagerTest, constructor) { + ASSERT_NO_THROW(auto m = ExtractorsManager()); + ASSERT_NO_THROW(auto m = ExtractorsManager(test_map)); +} + +TEST_F(ExtractorsManagerTest, set_extractors) { + ASSERT_NO_THROW(this->set_extractors(test_map)); + ASSERT_EQ(this->m_extractors, test_map); +} + +TEST_F(ExtractorsManagerTest, get_extractors) { + ASSERT_NO_THROW(this->set_extractors(test_map)); + ASSERT_NO_THROW(this->get_extractors()); + ASSERT_EQ(this->m_extractors, this->get_extractors()); +} + +TEST_F(ExtractorsManagerTest, match) { + this->set_extractors(test_map); + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); + ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); + ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); +} + +TEST_F(ExtractorsManagerTest, extract) { + this->set_extractors(test_map); + ASSERT_NO_THROW(this->extract(test_model_0_0)); +} + +} // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/subgraph.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/subgraph.cpp new file mode 100644 index 00000000000000..417d47e95944be --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/matchers/subgraph/subgraph.cpp @@ -0,0 +1,73 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "gtest/gtest.h" + +#include "matchers/subgraph/subgraph.hpp" + +#include "openvino/op/abs.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +namespace { + +using namespace ov::tools::subgraph_dumper; + +// ======================= ExtractorsManagerTest Unit tests ======================= +class SubgraphExtractorTest : public SubgraphExtractor, + public ::testing::Test { +protected: + void SetUp() override { + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_0 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{2, 5}); + std::shared_ptr test_abs = + std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_0_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + { + std::shared_ptr test_parameter = + std::make_shared(ov::element::f32, ov::Shape{2, 5}); + std::shared_ptr test_abs = + 
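+        /* test_model_1 is intentionally built so that it does not match
+           test_model_0_0 / test_model_0_1; the pair-wise match() checks below
+           expect false for it. */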
std::make_shared(test_parameter); + std::shared_ptr test_res = + std::make_shared(test_abs); + test_model_1 = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter}); + } + } + + std::shared_ptr test_model_0_0, test_model_0_1, test_model_1; +}; + +TEST_F(SubgraphExtractorTest, match) { + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_0_1)); + ASSERT_TRUE(this->match(test_model_0_0, test_model_0_1)); + ASSERT_NO_THROW(this->match(test_model_0_0, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_0, test_model_1)); + ASSERT_NO_THROW(this->match(test_model_0_1, test_model_1)); + ASSERT_FALSE(this->match(test_model_0_1, test_model_1)); +} + +TEST_F(SubgraphExtractorTest, extract) { + ASSERT_NO_THROW(this->extract(test_model_0_0)); + ASSERT_NO_THROW(this->extract(test_model_0_1)); + ASSERT_NO_THROW(this->extract(test_model_1)); +} + +} // namespace diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_0.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_0.hpp new file mode 100644 index 00000000000000..43f49506ee40c8 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_0.hpp @@ -0,0 +1,77 @@ +// Copyright (C) 2018-2023 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/abs.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +class Model_0 { +public: + Model_0() { + // param param + // | | + // abs abs + // | | + // relu relu + // | | + // ------------ + // | + // add + // | + // result + size_t op_idx = 0; + + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter_0); + std::shared_ptr test_relu_0 = + std::make_shared(test_abs_0); + + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{2, 1}); + std::shared_ptr test_abs_1 = + std::make_shared(test_parameter_1); + std::shared_ptr test_relu_1 = + std::make_shared(test_abs_1); + + std::shared_ptr test_add_0 = + std::make_shared(test_relu_0, test_relu_1); + std::shared_ptr test_res = + std::make_shared(test_add_0); + model = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter_0, test_parameter_1}); + } + + std::shared_ptr get() { + return model; + } + + std::vector> get_repeat_pattern_ref() { + std::vector> ref; + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter_0); + std::shared_ptr test_relu_0 = + std::make_shared(test_abs_0); + std::shared_ptr res = + std::make_shared(test_relu_0); + auto ref_model = std::make_shared(ov::ResultVector{res}, + ov::ParameterVector{test_parameter_0}); + ref.push_back(ref_model); + } + return ref; + } + +protected: + std::shared_ptr model; +}; diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_1.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_1.hpp new file mode 100644 index 00000000000000..2af5be4cea1301 --- /dev/null +++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_1.hpp @@ -0,0 +1,227 @@ +// Copyright (C) 2018-2023 Intel Corporation 
+// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/abs.hpp" +#include "openvino/op/add.hpp" +#include "openvino/op/clamp.hpp" +#include "openvino/op/multiply.hpp" +#include "openvino/op/relu.hpp" +#include "openvino/op/parameter.hpp" +#include "openvino/op/result.hpp" + +class Model_1 { +public: + Model_1() { + // param param param param + // | | | | + // abs abs abs abs + // | | | | + // relu clamp relu clamp + // | | | | + // ------------ ------------ + // | | + // add add param param + // | | | | + // ------------------------------- -------------- + // | | + // Multiply multiply + // | | + // Relu Relu + // | | + // ------------------------------------------- + // | + // Multiply + // | + // result + size_t op_idx = 0; + + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_parameter_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter_0); + test_abs_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_relu_0 = + std::make_shared(test_abs_0); + test_relu_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_parameter_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_abs_1 = + std::make_shared(test_parameter_1); + test_abs_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_clamp_1 = + std::make_shared(test_abs_1, 0, 10); + test_clamp_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_add_0 = + std::make_shared(test_relu_0, test_clamp_1); + test_add_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_parameter_0_0 = + std::make_shared(ov::element::f32, ov::Shape{2, 1}); + test_parameter_0_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_abs_0_0 = + std::make_shared(test_parameter_0_0); + test_abs_0_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_relu_0_0 = + std::make_shared(test_abs_0_0); + test_relu_0_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_parameter_0_1 = + std::make_shared(ov::element::f32, ov::Shape{2, 1}); + test_parameter_0_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_abs_0_1 = + std::make_shared(test_parameter_0_1); + test_abs_0_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_clamp_0_1 = + std::make_shared(test_abs_0_1, 0, 10); + test_clamp_0_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_add_0_0 = + std::make_shared(test_relu_0_0, test_clamp_0_1); + test_add_0_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_multiply_0_0 = + std::make_shared(test_add_0, test_add_0_0); + test_multiply_0_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_relu_0_1 = + std::make_shared(test_multiply_0_0); + test_relu_0_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_parameter_1_0 = + std::make_shared(ov::element::f32, ov::Shape{2, 1}); + test_parameter_1_0->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_parameter_1_1 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + test_parameter_1_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_multiply_1_1 = + 
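+        /* Second multiply->relu branch: it repeats the multiply->relu pattern of
+           the branch above so repeat-pattern extraction can find it twice
+           (compare get_repeat_pattern_ref() below). */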
std::make_shared(test_parameter_1_0, test_parameter_1_1); + test_multiply_1_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + std::shared_ptr test_relu_1_1 = + std::make_shared(test_multiply_1_1); + test_relu_1_1->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_add = + std::make_shared(test_relu_0_1, test_relu_1_1); + test_add->set_friendly_name("Op_" + std::to_string(op_idx++)); + + std::shared_ptr test_res = + std::make_shared(test_add); + test_res->set_friendly_name("Op_" + std::to_string(op_idx++)); + model = std::make_shared(ov::ResultVector{test_res}, + ov::ParameterVector{test_parameter_0, test_parameter_1, + test_parameter_0_0, test_parameter_0_1, + test_parameter_1_0, test_parameter_1_1}); + } + + std::shared_ptr get() { + return model; + } + + std::vector> get_repeat_pattern_ref() { + std::vector> ref; + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter_0); + std::shared_ptr test_relu_0 = + std::make_shared(test_abs_0); + std::shared_ptr res = + std::make_shared(test_relu_0); + auto ref_model = std::make_shared(ov::ResultVector{res}, + ov::ParameterVector{test_parameter_0}); + ref.push_back(ref_model); + } + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_0 = + std::make_shared(test_parameter_0); + std::shared_ptr test_relu_0 = + std::make_shared(test_abs_0); + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{2, 1}); + std::shared_ptr test_add = + std::make_shared(test_relu_0, test_parameter_1); + std::shared_ptr res = + std::make_shared(test_add); + auto ref_model = std::make_shared(ov::ResultVector{res}, + ov::ParameterVector{test_parameter_0, test_parameter_1}); + ref.push_back(ref_model); + } + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_1 = + std::make_shared(test_parameter_1); + std::shared_ptr test_clamp_1 = + std::make_shared(test_abs_1, 0, 10); + std::shared_ptr res = + std::make_shared(test_clamp_1); + auto ref_model = std::make_shared(ov::ResultVector{res}, + ov::ParameterVector{test_parameter_0, test_parameter_1}); + ref.push_back(ref_model); + } + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_1 = + std::make_shared(test_parameter_1); + std::shared_ptr test_clamp_1 = + std::make_shared(test_abs_1, 0, 10); + std::shared_ptr test_add = + std::make_shared(test_parameter_0, test_clamp_1); + std::shared_ptr res = + std::make_shared(test_add); + auto ref_model = std::make_shared(ov::ResultVector{res}, + ov::ParameterVector{test_parameter_0, test_parameter_1}); + ref.push_back(ref_model); + } + { + std::shared_ptr test_parameter_0 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_parameter_1 = + std::make_shared(ov::element::f32, ov::Shape{1, 2}); + std::shared_ptr test_abs_1 = + std::make_shared(test_parameter_1); + std::shared_ptr test_clamp_1 = + std::make_shared(test_abs_1, 0, 10); + std::shared_ptr test_add = + std::make_shared(test_parameter_0, test_clamp_1); + std::shared_ptr res = + std::make_shared(test_add); + auto ref_model = 
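Note: Model_1 contains two structurally identical add-branches plus a multiply-branch,
so several of the reference patterns above occur more than once in the model, which is
why two of the reference blocks are duplicates. A hedged Python sketch of the second
reference pattern, the relu chain joined with a free parameter through add
(illustrative, not part of the diff):

    import numpy as np
    from openvino.runtime import Model, opset10 as ops

    p0 = ops.parameter([1, 2], np.float32)
    p1 = ops.parameter([2, 1], np.float32)
    add = ops.add(ops.relu(ops.abs(p0)), p1)    # numpy-style broadcast to [2, 2]
    ref_model = Model([ops.result(add)], [p0, p1])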
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_2.hpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_2.hpp
new file mode 100644
index 00000000000000..72d28924354595
--- /dev/null
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/test_models/model_2.hpp
@@ -0,0 +1,63 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#pragma once
+
+#include "openvino/op/abs.hpp"
+#include "openvino/op/add.hpp"
+#include "openvino/op/clamp.hpp"
+#include "openvino/op/multiply.hpp"
+#include "openvino/op/relu.hpp"
+#include "openvino/op/parameter.hpp"
+#include "openvino/op/result.hpp"
+
+class Model_2 {
+public:
+    Model_2() {
+        //    param
+        //      |
+        //     abs
+        //      |
+        //    clamp
+        //      |
+        //     relu
+        //      |
+        //     abs      param
+        //      |         |
+        //      -----------
+        //           |
+        //          add
+        //           |
+        //         result
+        std::shared_ptr<ov::op::v0::Parameter> test_parameter =
+            std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5});
+        std::shared_ptr<ov::op::v0::Parameter> test_parameter_0 =
+            std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2, 5});
+        std::shared_ptr<ov::op::v0::Abs> test_abs =
+            std::make_shared<ov::op::v0::Abs>(test_parameter_0);
+        std::shared_ptr<ov::op::v0::Clamp> test_clamp =
+            std::make_shared<ov::op::v0::Clamp>(test_abs, 0, 10);
+        std::shared_ptr<ov::op::v0::Relu> test_relu =
+            std::make_shared<ov::op::v0::Relu>(test_clamp);
+        std::shared_ptr<ov::op::v0::Abs> test_abs_1 =
+            std::make_shared<ov::op::v0::Abs>(test_relu);
+        std::shared_ptr<ov::op::v1::Add> test_add =
+            std::make_shared<ov::op::v1::Add>(test_abs_1, test_parameter_0);
+        std::shared_ptr<ov::op::v0::Result> test_res =
+            std::make_shared<ov::op::v0::Result>(test_add);
+        model = std::make_shared<ov::Model>(ov::ResultVector{test_res},
+                                            ov::ParameterVector{test_parameter, test_parameter_0});
+    }
+
+    std::shared_ptr<ov::Model> get() {
+        return model;
+    }
+
+    std::vector<std::shared_ptr<ov::Model>> get_repeat_pattern_ref() {
+        return {};
+    }
+
+protected:
+    std::shared_ptr<ov::Model> model;
+};
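Note: Model_2 is the negative case, a single non-repeating chain, so its
get_repeat_pattern_ref() returns an empty vector. It also lists test_parameter in the
ParameterVector without giving it any consumer; the add reuses test_parameter_0. A
small sketch of how such dangling inputs could be detected from Python (an assumption
for illustration, not part of the diff):

    # Flag parameters that no operation consumes (Model_2 has exactly one).
    def dangling_parameters(model):
        return [p.get_friendly_name()
                for p in model.get_parameters()
                if all(len(out.get_target_inputs()) == 0 for out in p.outputs())]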
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/model.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/model.cpp
new file mode 100644
index 00000000000000..3f8c93e5afb4a9
--- /dev/null
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/model.cpp
@@ -0,0 +1,121 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "openvino/op/util/op_types.hpp"
+#include "utils/model.hpp"
+#include "matchers/subgraph/subgraph.hpp"
+#include "test_models/model_0.hpp"
+#include "test_models/model_1.hpp"
+#include "test_models/model_2.hpp"
+
+namespace {
+
+using namespace ov::tools::subgraph_dumper;
+
+inline std::pair<std::shared_ptr<ov::Node>, std::set<std::shared_ptr<ov::Node>>>
+get_functional_ops(const std::shared_ptr<ov::Model>& model) {
+    std::shared_ptr<ov::Node> start_node = nullptr;
+    // todo: check get_ordered_ops (it works differently from compilation to compilation) and remove this code after
+    std::set<std::shared_ptr<ov::Node>> nodes;
+    std::vector<std::shared_ptr<ov::Node>> nodes_tmp;
+    std::vector<std::shared_ptr<ov::Node>> layer;
+
+    for (const auto& res : model->get_results()) {
+        for (size_t i = 0; i < res->inputs().size(); ++i) {
+            layer.push_back(res->get_input_node_shared_ptr(i));
+        }
+    }
+    while (!layer.empty()) {
+        std::vector<std::shared_ptr<ov::Node>> prev_layer;
+        nodes_tmp.insert(nodes_tmp.begin(), layer.begin(), layer.end());
+        for (const auto& op : layer) {
+            for (size_t i = 0; i < op->inputs().size(); ++i)
+                prev_layer.push_back(op->get_input_node_shared_ptr(i));
+        }
+        layer = prev_layer;
+    }
+    for (const auto& node : nodes_tmp) {
+        if (ov::op::util::is_parameter(node) || ov::op::util::is_output(node)) {
+            continue;
+        }
+        if (start_node == nullptr) {
+            start_node = node;
+        }
+        nodes.insert(node);
+    }
+
+    // for (const auto& op : model->get_ordered_ops()) {
+    //     if (ov::op::util::is_parameter(op) || ov::op::util::is_output(op)) {
+    //         continue;
+    //     }
+    //     if (start_node == nullptr) {
+    //         start_node = op;
+    //     }
+    //     nodes.insert(op);
+    // }
+    return { start_node, nodes };
+}
+
+TEST(ModelUtilsTest, generate_0) {
+    Model_0 test;
+    std::shared_ptr<ov::Model> test_model = test.get(), recovered_model;
+    {
+        std::unordered_set<std::string> checked_ops;
+        auto func_ops = get_functional_ops(test_model);
+        auto model_with_in_info = generate_model(func_ops.second, func_ops.first, checked_ops);
+        recovered_model = model_with_in_info.first;
+        for (const auto& op : recovered_model->get_ordered_ops()) {
+            if (ov::op::util::is_parameter(op) || ov::op::util::is_constant(op)) {
+                ASSERT_TRUE(model_with_in_info.second.count(op->get_friendly_name()));
+            }
+        }
+    }
+    {
+        SubgraphExtractor extractor;
+        ASSERT_TRUE(extractor.match(test_model, recovered_model));
+    }
+}
+
+TEST(ModelUtilsTest, generate_1) {
+    Model_1 test;
+    std::shared_ptr<ov::Model> test_model = test.get(), recovered_model;
+    {
+        std::unordered_set<std::string> checked_ops;
+        auto func_ops = get_functional_ops(test_model);
+        auto model_with_in_info = generate_model(func_ops.second, func_ops.first, checked_ops);
+        recovered_model = model_with_in_info.first;
+        for (const auto& op : recovered_model->get_ordered_ops()) {
+            if (ov::op::util::is_parameter(op) || ov::op::util::is_constant(op)) {
+                ASSERT_TRUE(model_with_in_info.second.count(op->get_friendly_name()));
+            }
+        }
+    }
+    {
+        SubgraphExtractor extractor;
+        ASSERT_TRUE(extractor.match(test_model, recovered_model));
+    }
+}
+
+TEST(ModelUtilsTest, generate_2) {
+    Model_2 test;
+    std::shared_ptr<ov::Model> test_model = test.get(), recovered_model;
+    {
+        std::unordered_set<std::string> checked_ops;
+        auto func_ops = get_functional_ops(test_model);
+        auto model_with_in_info = generate_model(func_ops.second, func_ops.first, checked_ops);
+        recovered_model = model_with_in_info.first;
+        for (const auto& op : recovered_model->get_ordered_ops()) {
+            if (ov::op::util::is_parameter(op) || ov::op::util::is_constant(op)) {
+                ASSERT_TRUE(model_with_in_info.second.count(op->get_friendly_name()));
+            }
+        }
+    }
+    {
+        SubgraphExtractor extractor;
+        ASSERT_TRUE(extractor.match(test_model, recovered_model));
+    }
+}
+
+}  // namespace
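Note: get_functional_ops() walks the graph backwards from the Results layer by layer
instead of relying on get_ordered_ops(), because (per the TODO above) the latter's
ordering was observed to vary between compilations. The commented-out block shows the
intended end state; a Python analogue of that simpler filter (illustrative only):

    # Collect functional ops, skipping graph inputs/outputs, in topological order.
    def functional_ops(model):
        return [op for op in model.get_ordered_ops()
                if op.get_type_name() not in ("Parameter", "Result")]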
diff --git a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/node.cpp b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/node.cpp
index 5f09dda454402c..0fa65b123418c2 100644
--- a/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/node.cpp
+++ b/src/tests/functional/plugin/conformance/subgraphs_dumper_new/tests/utils/node.cpp
@@ -37,10 +37,10 @@ TEST(NodeUtilsTest, get_input_info_by_node) {
 }
 
 TEST(NodeUtilsTest, clone_node) {
-    std::vector<float> values = {-1, -2.05, -3.65, 0, 5, 7};
-    auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 3}), values);
+    std::vector<float> values(512, 1.f);
+    auto const_node = std::make_shared<ov::op::v0::Constant>(ov::element::Type_t::f32, ov::Shape({2, 256}), values);
     const_node->set_friendly_name("const_0");
-    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({2, 3}));
+    auto param = std::make_shared<ov::op::v0::Parameter>(ov::element::Type_t::f32, ov::Shape({2, 256}));
     param->set_friendly_name("param_0");
     auto add_node_0 = std::make_shared<ov::op::v1::Add>(param, const_node);
     auto erf_node_0 = std::make_shared<ov::op::v0::Erf>(add_node_0);
diff --git a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp
index 5bc0d10899635d..50632a0a3b8581 100644
--- a/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp
+++ b/src/tests/functional/plugin/conformance/test_runner/op_conformance_runner/src/read_ir/read_ir.cpp
@@ -448,4 +448,3 @@ std::vector<ov::Tensor> ReadIRTest::calculate_refs() {
 }  // namespace test
 }  // namespace ov
-
diff --git a/tests/layer_tests/common/mo_convert_test_class.py b/tests/layer_tests/common/mo_convert_test_class.py
index f49e9d2fb43a1e..bbcece398af43a 100644
--- a/tests/layer_tests/common/mo_convert_test_class.py
+++ b/tests/layer_tests/common/mo_convert_test_class.py
@@ -3,7 +3,8 @@
 
 from pathlib import Path
 
-from openvino.runtime import serialize, convert_model
+from openvino.runtime import serialize
+from openvino.tools.ovc import convert_model
 from openvino.tools.mo import convert_model as legacy_convert_model
 from openvino.test_utils import compare_functions
 
@@ -16,7 +17,10 @@ def generate_ir_python_api(**kwargs):
     output_dir = kwargs['output_dir']
     model_name = kwargs['model_name']
     del kwargs['output_dir']
-    if 'use_legacy_frontend' in kwargs:
+    del kwargs['model_name']
+    if 'use_legacy_frontend' in kwargs or 'use_convert_model_from_mo' in kwargs:
+        if 'use_convert_model_from_mo' in kwargs:
+            del kwargs['use_convert_model_from_mo']
         model = legacy_convert_model(**kwargs)
     else:
         model = convert_model(**kwargs)
diff --git a/tests/layer_tests/common/utils/common_utils.py b/tests/layer_tests/common/utils/common_utils.py
index 33f3bc30b6291e..a2571ad3519146 100644
--- a/tests/layer_tests/common/utils/common_utils.py
+++ b/tests/layer_tests/common/utils/common_utils.py
@@ -7,14 +7,13 @@
 import subprocess
 import sys
 from pathlib import Path
-
 import numpy as np
-from openvino.tools.mo import mo
 
 logger = logging.getLogger(__name__)
 
 
 def generate_ir(coverage=False, **kwargs):
+    from openvino.tools.mo import mo
     mo_path = Path(mo.__file__).parent
     mo_runner = mo_path.joinpath('main.py').as_posix()
     if coverage:
@@ -48,15 +47,13 @@ def generate_ir(coverage=False, **kwargs):
 
 
 def generate_ir_python_api(coverage=False, **kwargs):
-    from openvino.runtime import convert_model, serialize
-    from openvino.tools.mo import convert_model as legacy_convert_model
-
-    if "use_legacy_frontend" in kwargs and kwargs['use_legacy_frontend']:
-        ov_model = legacy_convert_model(**kwargs)
-    else:
-        ov_model = convert_model(**kwargs)
+    from openvino.runtime import serialize
+    from openvino.tools.mo import convert_model
 
     out_dir = kwargs['output_dir'] + os.sep + kwargs['model_name'] + ".xml"
+
+    # TODO: Remove usage of legacy params from layer tests and switch to convert_model from tools.ovc
+    ov_model = convert_model(**kwargs)
     serialize(ov_model, out_dir)
 
     return 0, ""
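Note: the two helpers above encode the new dispatch rule for these layer tests:
parameters that only the legacy API understands (input_shape, mean/scale values,
layout, flagged via use_convert_model_from_mo or use_legacy_frontend) go to
openvino.tools.mo.convert_model, everything else to the new
openvino.tools.ovc.convert_model. A condensed sketch of that rule (a paraphrase of
generate_ir_python_api above, not a verbatim extract):

    def pick_converter(kwargs):
        if kwargs.pop('use_convert_model_from_mo', False) or kwargs.get('use_legacy_frontend'):
            from openvino.tools.mo import convert_model   # legacy converter, accepts mo-only params
        else:
            from openvino.tools.ovc import convert_model  # new converter
        return convert_model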
diff --git a/tests/layer_tests/mo_python_api_tests/mo_convert_help.py b/tests/layer_tests/mo_python_api_tests/mo_convert_help.py index 639e2478819be2..ff9c334b61461b 100644 --- a/tests/layer_tests/mo_python_api_tests/mo_convert_help.py +++ b/tests/layer_tests/mo_python_api_tests/mo_convert_help.py @@ -1,7 +1,7 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from openvino.runtime import convert_model +from openvino.tools.mo import convert_model if __name__ == "__main__": convert_model(help=True)
diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py index c2400a5abc0123..75cb3c52b99b32 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_complex_params.py @@ -4,7 +4,9 @@ import numpy as np import os import pytest -from openvino.runtime import Model, Layout, PartialShape, Shape, layout_helpers, Type, Dimension, InputCutInfo, LayoutMap +from openvino.runtime import Model, Layout, PartialShape, Shape, layout_helpers, Type, Dimension +from openvino.tools.ovc import InputCutInfo +from openvino.tools.mo import LayoutMap from common.mo_convert_test_class import CommonMOConvertTest from common.tf_layer_test_class import save_to_pb @@ -132,11 +134,11 @@ def create_tf_param_res_model(self, tmp_dir): {'params_test': {'input_shape': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]], - 'input':['Input1', 'Input2', 'Relu3']}, + 'input':['Input1', 'Input2', 'Relu3'], 'use_convert_model_from_mo': True}, 'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Relu3'}}, {'params_test': {'input_shape': [PartialShape([Dimension(), Dimension(1, 3), Dimension(4, -1), Dimension(-1, 5)]), [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)], [Dimension(), 3, Dimension(4, -1), Dimension(-1, 5)]], 'use_convert_model_from_mo': True, 'input':['Input1', 'Input2', 'Relu3']}, 'params_ref': {'input_shape': "[?,1..3,4..,..5],[?,1..3,4,..5],[?,3,4..,..5]", 'input': 'Input1,Input2,Relu3'}}, {'params_test': {'input': [InputCutInfo("Relu1", Shape([3, 2]), Type(np.int32)), @@ -149,26 +151,28 @@ def create_tf_param_res_model(self, tmp_dir): 'params_ref': {'input': "Relu1[3 2]{i32},Relu2[3..10 2..]{i32},Relu3[3 2]{i32}"}}, {'params_test': {'output': ["Sigmoid_0", "Sigmoid_2"]}, 'params_ref': {'output': "Sigmoid_0,Sigmoid_2"}}, - {'params_test': {'mean_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}}, + {'params_test': {'mean_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}, + 'use_convert_model_from_mo': True}, 'params_ref': {'mean_values': "Input1[0.5,1.3,0.67],Input2[4.2,6.7,3.15],Input3[0.757,4.6,7.3]"}}, {'params_test': { - 'mean_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]]}, + 'mean_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'use_convert_model_from_mo': True}, 'params_ref': {'mean_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}}, - {'params_test': {'scale_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}}, + {'params_test': {'scale_values': {'Input1': [0.5,1.3,0.67], 'Input2':[4.2, 6.7, 3.15], 'Input3':[0.757, 4.6, 7.3]}, + 'use_convert_model_from_mo': True}, 'params_ref': {'scale_values': "Input1[0.5,1.3,0.67],Input2[4.2,6.7,3.15],Input3[0.757,4.6,7.3]"}}, {'params_test': { - 
'scale_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]]}, + 'scale_values': [[0.5, 1.3, 0.67], [4.2, 6.7, 3.15], [0.757, 4.6, 7.3]], 'use_convert_model_from_mo': True}, 'params_ref': {'scale_values': "[0.5,1.3,0.67],[4.2,6.7,3.15],[0.757,4.6,7.3]"}}, {'params_test': { - 'source_layout': {'Input1': Layout("nchw"), 'Input2': "nchw", 'Input3': "nc??"}}, + 'source_layout': {'Input1': Layout("nchw"), 'Input2': "nchw", 'Input3': "nc??"}, 'use_convert_model_from_mo': True}, 'params_ref': {'source_layout': "Input1(nchw),Input2(nchw),Input3(nc??)"}}, {'params_test': { - 'target_layout': {'Input1': Layout("nhwc"), 'Input2': "nhwc", 'Input3': "n??c"}}, + 'target_layout': {'Input1': Layout("nhwc"), 'Input2': "nhwc", 'Input3': "n??c"}, 'use_convert_model_from_mo': True}, 'params_ref': {'target_layout': "Input1(nhwc),Input2(nhwc),Input3(n??c)"}}, {'params_test': { 'layout': {'Input1': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"), 'Input2': LayoutMap(source_layout="nc??", target_layout=Layout("n??c")), - 'Input3': LayoutMap(source_layout="abcd", target_layout="acdb")}}, + 'Input3': LayoutMap(source_layout="abcd", target_layout="acdb")}, 'use_convert_model_from_mo': True}, 'params_ref': {'layout': "Input1(nchw->nhwc),Input2(nc??->n??c),Input3(abcd->acdb)"}}, {'params_test': {'input': [PartialShape([2, 3, 4]), [2, 3, 4], [Dimension(2), Dimension(3), Dimension(4)]]}, 'params_ref': {'input_shape': "[2,3,4],[2,3,4],[2,3,4]", 'input': 'Input1,Input2,Input3'}}, @@ -222,13 +226,14 @@ def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_ve test_params = params['params_test'] ref_params = params['params_ref'] test_params.update({'input_model': tf_net_path}) + test_params.update({'use_convert_model_from_mo': True}) ref_params.update({'input_model': tf_net_path}) self._test(temp_dir, test_params, ref_params) test_data = [ - {'params_test': {'input_shape': PartialShape([2, 3, 4])}, + {'params_test': {'input_shape': PartialShape([2, 3, 4]), 'use_convert_model_from_mo': True}, 'params_ref': {'input_shape': "[2,3,4]"}}, - {'params_test': {'input_shape': [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)]}, + {'params_test': {'input_shape': [Dimension(), Dimension(1, 3), 4, Dimension(-1, 5)], 'use_convert_model_from_mo': True}, 'params_ref': {'input_shape': "[?,1..3,4,..5]"}}, {'params_test': {'input': InputCutInfo("Relu", [3, 2], Type(np.int32), [1, 2, 3, 4, 5, 6])}, 'params_ref': {'input': "Relu[3 2]{i32}->[1 2 3 4 5 6]"}}, @@ -240,17 +245,17 @@ def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_ve 'params_ref': {'input': "Relu[3 2]"}}, {'params_test': {'input': ("Relu")}, 'params_ref': {'input': "Relu"}}, - {'params_test': {'mean_values': [0.5, 1.3, 0.67]}, + {'params_test': {'mean_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True}, 'params_ref': {'mean_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'scale_values': [0.5, 1.3, 0.67]}, + {'params_test': {'scale_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True}, 'params_ref': {'scale_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'source_layout': Layout("nchw")}, + {'params_test': {'source_layout': Layout("nchw"), 'use_convert_model_from_mo': True}, 'params_ref': {'source_layout': "nchw"}}, - {'params_test': {'target_layout': Layout("nchw")}, + {'params_test': {'target_layout': Layout("nchw"), 'use_convert_model_from_mo': True}, 'params_ref': {'target_layout': "nchw"}}, - {'params_test': {'layout': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc")}, + 
{'params_test': {'layout': LayoutMap(source_layout=Layout("nchw"), target_layout="nhwc"), 'use_convert_model_from_mo': True}, 'params_ref': {'layout': "nchw->nhwc"}}, - {'params_test': {'layout': Layout("nchw")}, + {'params_test': {'layout': Layout("nchw"), 'use_convert_model_from_mo': True}, 'params_ref': {'layout': "nchw"}}, {'params_test': {'input': [3, 2]}, 'params_ref': {'input': "Input[3 2]"}}, @@ -266,13 +271,13 @@ def test_mo_convert_tf_model_no_concat(self, params, ie_device, precision, ir_ve 'params_ref': {'input': "Input[1]{i32}->[10]"}}, {'params_test': {'input': (np.int32, [1, 2, 3])}, 'params_ref': {'input': "Input[1,2,3]{i32}"}}, - {'params_test': {'input_shape': [Dimension(3, 10), 10, -1]}, + {'params_test': {'input_shape': [Dimension(3, 10), 10, -1], 'use_convert_model_from_mo': True}, 'params_ref': {'input_shape': '[3..10,10,?]'}}, {'params_test': {'input': [Dimension(3, 10), 10, -1]}, 'params_ref': {'input': 'Input[3..10,10,?]'}}, - {'params_test': {'input': PartialShape([1, 100, 100, 3]), 'mean_values': [0.5, 1.3, 0.67]}, + {'params_test': {'input': PartialShape([1, 100, 100, 3]), 'mean_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True}, 'params_ref': {'input': "Input[1,100,100,3]", 'mean_values': "[0.5,1.3,0.67]"}}, - {'params_test': {'input': [1, 100, 100, 3], 'scale_values': [0.5, 1.3, 0.67]}, + {'params_test': {'input': [1, 100, 100, 3], 'scale_values': [0.5, 1.3, 0.67], 'use_convert_model_from_mo': True}, 'params_ref': {'input': "Input[1,100,100,3]", 'scale_values': "[0.5,1.3,0.67]"}}, ] @@ -289,24 +294,6 @@ def test_mo_convert_tf_model_single_input_output(self, params, ie_device, precis ref_params.update({'input_model': tf_net_path}) self._test(temp_dir, test_params, ref_params) - test_data = [ - { - 'params_test': {'transform': ('MakeStateful', {'param_res_names': {'Input:0': 'Identity:0'}})}, - 'params_ref': {'transform': "MakeStateful[param_res_names={\'Input:0\':\'Identity:0\'}]"}} - ] - - @pytest.mark.parametrize("params", test_data) - @pytest.mark.nightly - def test_mo_convert_transform(self, params, ie_device, precision, ir_version, - temp_dir, use_new_frontend, use_old_api): - tf_net_path = self.create_tf_param_res_model(temp_dir) - - test_params = params['params_test'] - ref_params = params['params_ref'] - test_params.update({'input_model': tf_net_path}) - ref_params.update({'input_model': tf_net_path}) - self._test(temp_dir, test_params, ref_params) - @pytest.mark.nightly @pytest.mark.precommit def test_mo_convert_clearing_transformation_registry(self, ie_device, precision, ir_version, diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py index 6dd309c768fbd9..1f088808872039 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_pytorch.py @@ -10,8 +10,8 @@ import pytest import torch import unittest -from openvino.runtime import PartialShape, Dimension, Model, Type, InputCutInfo - +from openvino.runtime import PartialShape, Dimension, Model, Type +from openvino.tools.ovc import InputCutInfo from common.mo_convert_test_class import CommonMOConvertTest @@ -159,7 +159,7 @@ def create_pytorch_nn_module_case2(tmp_dir): sample_input2 = torch.zeros(1, 3, 10, 10) sample_input = sample_input1, sample_input2 - return pt_model, ref_model, {'input_shape': ["[?,3,?,?]", PartialShape([-1, 3, -1, -1])], + return pt_model, ref_model, {'input': [PartialShape("[?,3,?,?]"), PartialShape([-1, 
3, -1, -1])], 'example_input': sample_input} @@ -171,7 +171,7 @@ def create_pytorch_nn_module_with_scalar_input(tmp_dir): sample_input2 = torch.zeros(1, 3, 10, 10) sample_input = sample_input1, sample_input2 - return pt_model, ref_model, {'input_shape': ["[]", PartialShape([-1, 3, -1, -1])], + return pt_model, ref_model, {'input': ["[]", PartialShape([-1, 3, -1, -1])], 'example_input': sample_input} @@ -183,7 +183,7 @@ def create_pytorch_nn_module_case3(tmp_dir): sample_input2 = torch.zeros(1, 3, 10, 10) sample_input = tuple([sample_input1, sample_input2]) - return pt_model, ref_model, {'input_shape': "[?,3,?,?],[?,3,?,?]", + return pt_model, ref_model, {'input': "[?,3,?,?],[?,3,?,?]", 'example_input': sample_input} @@ -194,7 +194,7 @@ def create_pytorch_nn_module_case4(tmp_dir): ref_model = make_ref_pt_model_one_input(PartialShape([1, 3, 20, 20])) - return pt_model, ref_model, {'example_input': sample_input, "input_shape": [1, 3, 20, 20]} + return pt_model, ref_model, {'example_input': sample_input, "input": [1, 3, 20, 20]} def create_pytorch_nn_module_case5(tmp_dir): @@ -247,7 +247,7 @@ def create_pytorch_nn_module_sample_input_int32(tmp_dir): def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir): pt_model = make_pt_model_two_inputs() - inp_shapes = ["[?,3,?,?]", PartialShape([-1, 3, -1, -1])] + inp_shapes = [PartialShape("[?,3,?,?]"), PartialShape([-1, 3, -1, -1])] sample_input1 = torch.zeros(1, 3, 10, 10, dtype=torch.int32) sample_input2 = torch.zeros(1, 3, 10, 10, dtype=torch.int32) @@ -255,8 +255,7 @@ def create_pytorch_nn_module_sample_input_int32_two_inputs(tmp_dir): ref_model = make_ref_pt_model_two_inputs( [PartialShape([-1, 3, -1, -1]), inp_shapes[1]], dtype=np.int32) - return pt_model, ref_model, {'input_shape': inp_shapes, - 'input': [np.int32, np.int32], + return pt_model, ref_model, {'input': [(np.int32, inp_shapes[0]), (np.int32, inp_shapes[1])], 'example_input': sample_input} @@ -293,7 +292,7 @@ def create_pytorch_nn_module_layout_list(tmp_dir): ref_model.inputs[1].node.layout = Layout('nhwc') return pt_model, ref_model, { - 'input_shape': [shape, shape], 'layout': ['nchw', Layout('nhwc')], + 'input_shape': [shape, shape], 'layout': ['nchw', Layout('nhwc')], 'use_convert_model_from_mo': True } @@ -308,7 +307,7 @@ def create_pytorch_nn_module_layout_list_case2(tmp_dir): ref_model.inputs[1].node.layout = Layout('nhwc') return pt_model, ref_model, { - 'input_shape': [shape, shape], 'layout': ('nchw', Layout('nhwc'))} + 'input_shape': [shape, shape], 'layout': ('nchw', Layout('nhwc')), 'use_convert_model_from_mo': True} def create_pytorch_nn_module_mean_list(tmp_dir): @@ -330,7 +329,8 @@ def create_pytorch_nn_module_mean_list(tmp_dir): ref_model = Model([sigm], parameter_list, "test") return pt_model, ref_model, { - 'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'compress_to_fp16': False} + 'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'compress_to_fp16': False, + 'use_convert_model_from_mo': True} def create_pytorch_nn_module_mean_list_default_no_compression(tmp_dir): @@ -352,7 +352,7 @@ def create_pytorch_nn_module_mean_list_default_no_compression(tmp_dir): parameter_list = [param1, param2] ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]]} + return pt_model, ref_model, {'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], 'use_convert_model_from_mo': True} def 
create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir): @@ -375,7 +375,7 @@ def create_pytorch_nn_module_mean_list_compression_enabled(tmp_dir): return pt_model, ref_model, { 'input_shape': [shape, shape], 'mean_values': [[0, 0, 0], [0, 0, 0]], - 'compress_to_fp16': False} + 'compress_to_fp16': False, 'use_convert_model_from_mo': True} def create_pytorch_nn_module_scale_list(tmp_dir): @@ -396,7 +396,8 @@ def create_pytorch_nn_module_scale_list(tmp_dir): parameter_list = [param1, param2] ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'compress_to_fp16': False} + return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'compress_to_fp16': False, + 'use_convert_model_from_mo': True} def create_pytorch_nn_module_scale_list_default_no_compression(tmp_dir): @@ -418,7 +419,7 @@ def create_pytorch_nn_module_scale_list_default_no_compression(tmp_dir): parameter_list = [param1, param2] ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]]} + return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], 'use_convert_model_from_mo': True} def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): @@ -444,14 +445,14 @@ def create_pytorch_nn_module_scale_list_compression_enabled(tmp_dir): ref_model = Model([sigm], parameter_list, "test") return pt_model, ref_model, {'input_shape': [shape, shape], 'scale_values': [[1, 1, 1], [1, 1, 1]], - 'compress_to_fp16': True} + 'compress_to_fp16': True, 'use_convert_model_from_mo': True} def create_pytorch_nn_module_shapes_list_static(tmp_dir): pt_model = make_pt_model_two_inputs() ref_model = make_ref_pt_model_two_inputs([1, 3, 20, 20]) - return pt_model, ref_model, {'input_shape': [[1, 3, 20, 20], [1, 3, 20, 20]]} + return pt_model, ref_model, {'input': [[1, 3, 20, 20], [1, 3, 20, 20]]} def create_pytorch_nn_module_shapes_list_static_via_input(tmp_dir): @@ -476,7 +477,7 @@ def create_pytorch_nn_module_shapes_list_dynamic(tmp_dir): parameter_list = [param1, param2] ref_model = Model([sigm], parameter_list, "test") - return pt_model, ref_model, {'input_shape': inp_shapes} + return pt_model, ref_model, {'input': inp_shapes} def create_pytorch_nn_module_shapes_list_dynamic_via_input(tmp_dir): @@ -501,7 +502,7 @@ def create_pytorch_nn_module_shapes_list_dynamic_single_input(tmp_dir): pt_model = make_pt_model_one_input() inp_shapes = [[Dimension(-1), 3, 20, Dimension(20, -1)]] ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input_shape': inp_shapes} + return pt_model, ref_model, {'input': inp_shapes} def create_pytorch_nn_module_shapes_list_dynamic_single_input_via_input(tmp_dir): @@ -515,7 +516,7 @@ def create_pytorch_nn_module_shapes_list_static_single_input(tmp_dir): pt_model = make_pt_model_one_input() inp_shapes = [[1, 3, 20, 20]] ref_model = make_ref_pt_model_one_input(inp_shapes[0]) - return pt_model, ref_model, {'input_shape': inp_shapes} + return pt_model, ref_model, {'input': inp_shapes} def create_pytorch_nn_module_shapes_list_static_single_input_via_input(tmp_dir): @@ -677,7 +678,7 @@ def create_pytorch_module_with_optional_inputs_case3(tmp_dir): (1, 3, 10, 10)), "z": torch.ones((1, 3, 10, 10))} ref_model = make_ref_pt_model_with_optional_inputs( [3, 3, 3, 3], z_exist=True) - return net, ref_model, {"example_input": 
example_input, "input_shape": [[3, 3, 3, 3], [3, 3, 3, 3]]} + return net, ref_model, {"example_input": example_input, "input": [[3, 3, 3, 3], [3, 3, 3, 3]]} def create_pytorch_module_with_optional_inputs_case4(tmp_dir): @@ -691,7 +692,7 @@ def create_pytorch_module_with_optional_inputs_case5(tmp_dir): net = make_pt_model_with_optional_input() ref_model = make_ref_pt_model_with_optional_inputs( [1, 3, -1, -1], z_exist=True) - return net, ref_model, {"input": ["x", "z"], "input_shape": [[1, 3, -1, -1], [1, 3, -1, -1]]} + return net, ref_model, {"input": [("x",[1, 3, -1, -1]), ("z", [1, 3, -1, -1])]} def create_pytorch_module_with_compressed_int8_constant(tmp_dir): @@ -956,11 +957,11 @@ def forward(self, x): class ConvertRaises(unittest.TestCase): def test_example_inputs(self): - from openvino.runtime import convert_model + from openvino.tools.ovc import convert_model pytorch_model = create_pt_model_with_custom_op() # Check that mo raises error message of wrong argument. - with self.assertRaisesRegex(AssertionError, ".*argument is not recognized.*"): + with self.assertRaisesRegex(TypeError, ".*got an unexpected keyword argument 'example_inputs'.*"): convert_model(pytorch_model, example_inputs=(torch.tensor(1),)) def test_failed_extension(self): diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py b/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py index 1548a16c6c8f00..9a507300551663 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_convert_tf.py @@ -139,7 +139,7 @@ def __call__(self, x, y): model_ref = Model([sigm], parameter_list, "test") net = Net() - return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]} + return net, model_ref, {'input': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]} def create_tf_module_layout_list(tmp_dir): @@ -166,7 +166,8 @@ def __call__(self, x, y): model_ref.inputs[1].node.layout = Layout('NHC') net = Net() - return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])], 'layout': ["NCH", "NHC"]} + return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])], 'layout': ["NCH", "NHC"], + 'use_convert_model_from_mo': True} def create_tf_module_dynamic(tmp_dir): @@ -192,7 +193,7 @@ def __call__(self, x, y): model_ref = Model([sigm], parameter_list, "test") net = Net() - return net, model_ref, {'input_shape': input_shapes} + return net, model_ref, {'input': input_shapes} def create_keras_layer(tmp_dir): @@ -216,7 +217,7 @@ def call(self, x, y): model_ref = Model([sigm], parameter_list, "test") net = LayerModel() - return net, model_ref, {'input_shape': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]} + return net, model_ref, {'input': [PartialShape([1, 2, 3]), PartialShape([1, 2, 3])]} def create_keras_layer_dynamic(tmp_dir): @@ -242,7 +243,7 @@ def call(self, x, y): model_ref = Model([sigm], parameter_list, "test") net = LayerModel() - return net, model_ref, {'input_shape': input_shapes} + return net, model_ref, {'input': input_shapes} def create_tf_checkpoint(tmp_dir): @@ -518,7 +519,7 @@ def create_keras_layer_with_example_input_2(tmp_dir): def create_keras_layer_with_input_shapes_case1(tmp_dir): model, model_ref = create_keras_layer_input_list() - return model, model_ref, {'input_shape': [[1, 2, 3], [1, 2, 3]]} + return model, model_ref, {'input': [[1, 2, 3], [1, 2, 3]]} def create_keras_layer_with_input_shapes_case2(tmp_dir): @@ -528,7 +529,7 @@ def 
create_keras_layer_with_input_shapes_case2(tmp_dir): def create_keras_layer_with_input_shapes_case3(tmp_dir): model, model_ref = create_keras_layer_input_dict_one_inp() - return model, model_ref, {'input': ['args'], 'input_shape': [1, 2, 3]} + return model, model_ref, {'input': [('args', [1, 2, 3])]} def create_keras_layer_with_input_shapes_case4(tmp_dir): @@ -669,7 +670,7 @@ def test_mo_import_from_memory_tf_fe(self, create_model, ie_device, precision, i temp_dir): fw_model, graph_ref, mo_params = create_model(temp_dir) - test_params = {'input_model': fw_model, 'use_new_frontend': True} + test_params = {'input_model': fw_model} if mo_params is not None: test_params.update(mo_params) self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) @@ -679,10 +680,10 @@ def test_mo_import_from_memory_tf_fe(self, create_model, ie_device, precision, i def test_unnamed_saved_model_dir(self, ie_device, precision, ir_version, temp_dir): saved_model_dir, graph_ref = create_tf_saved_model_dir(temp_dir) - test_params = {'input_model': saved_model_dir, 'use_new_frontend': True} + test_params = {'input_model': saved_model_dir} self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) - test_params = {'input_model': saved_model_dir, 'use_new_frontend': False} + test_params = {'input_model': saved_model_dir} self._test_by_ref_graph(temp_dir, test_params, graph_ref, compare_tensor_names=False) def test_zero_copy(self, ie_device, precision, ir_version, temp_dir): @@ -741,7 +742,7 @@ def test_memory_loss(self, ie_device, precision, ir_version, temp_dir): import tensorflow as tf tf.compat.v1.reset_default_graph() - from openvino.tools.mo import convert_model + from openvino.tools.ovc import convert_model from openvino.runtime import compile_model import gc @@ -795,7 +796,7 @@ class TFConvertTest(unittest.TestCase): @pytest.mark.precommit def test_tf_function_no_signature(self): import tensorflow as tf - from openvino.runtime import convert_model + from openvino.tools.ovc import convert_model @tf.function() def function(x1, x2): diff --git a/tests/layer_tests/mo_python_api_tests/test_mo_help.py b/tests/layer_tests/mo_python_api_tests/test_mo_help.py index e10ba33505d689..33219093c3831e 100644 --- a/tests/layer_tests/mo_python_api_tests/test_mo_help.py +++ b/tests/layer_tests/mo_python_api_tests/test_mo_help.py @@ -5,8 +5,9 @@ import os import sys import unittest -from openvino.tools.mo import mo +from openvino.tools.ovc import ovc from openvino.tools.ovc.cli_parser import get_mo_convert_params +from openvino.tools.mo.utils.cli_parser import get_mo_convert_params as legacy_mo_params from pathlib import Path from common.utils.common_utils import shell @@ -16,8 +17,8 @@ class TestSubprocessMoConvert(unittest.TestCase): def test_mo_convert(self): mo_convert_params = get_mo_convert_params() - # Test cli tool help - mo_path = Path(mo.__file__).parent + # Test ovc tool help + mo_path = Path(ovc.__file__).parent mo_runner = mo_path.joinpath('main.py').as_posix() params = [sys.executable, mo_runner, "--help"] _, mo_output, _ = shell(params) @@ -29,11 +30,12 @@ def test_mo_convert(self): for param_name in group: assert param_name in mo_output - # Test Python API help + # Test Python API help, applicable for convert_model from tools.mo only mo_help_file = os.path.join(os.path.dirname(__file__), "mo_convert_help.py") params = [sys.executable, mo_help_file] _, mo_output, _ = shell(params) - for group in mo_convert_params: + legacy_params = legacy_mo_params() + for group 
in legacy_params: for param_name in group: assert param_name in mo_output
diff --git a/tests/layer_tests/mo_python_api_tests/test_ovc_cli_tool.py b/tests/layer_tests/mo_python_api_tests/test_ovc_cli_tool.py index af363c6e71bc73..0ef3b08a4ba6b8 100644 --- a/tests/layer_tests/mo_python_api_tests/test_ovc_cli_tool.py +++ b/tests/layer_tests/mo_python_api_tests/test_ovc_cli_tool.py @@ -27,7 +27,9 @@ def generate_ir_ovc(coverage=False, **kwargs): else: params = [sys.executable, ovc_runner] for key, value in kwargs.items(): - if key == "batch": + if key == "input_model": + params.append((str(value))) + elif key == "batch": params.extend(("-b", str(value))) elif key == "k": params.extend(("-k", str(value))) @@ -81,7 +83,7 @@ def test_ovc_tool(self, ie_device, precision, ir_version, temp_dir, use_new_fron core = Core() # tests for MO cli tool - exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_dir": temp_dir}) + exit_code, stderr = generate_ir_ovc(coverage=False, **{"input_model": model_path, "output_model": temp_dir + os.sep + "model"}) assert not exit_code ov_model = core.read_model(os.path.join(temp_dir, "model.xml"))
diff --git a/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py index d69a5c33dbadaa..838cce35460bbc 100644 --- a/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py +++ b/tests/layer_tests/pytorch_tests/pytorch_layer_test_class.py @@ -125,21 +125,34 @@ def use_ts_backend(): # Compare Ie results with Framework results fw_eps = custom_eps if precision == 'FP32' else 5e-2 is_ok = True + quantized_ops = False + if 'quantized_ops' in kwargs and kwargs['quantized_ops'] is not None: + quantized_ops = kwargs['quantized_ops'] + if quantized_ops: + assert 'quant_size' in kwargs, "quant size must be specified for quantized_ops flag" + quant_size = kwargs['quant_size'] for i in range(len(infer_res)): cur_fw_res = flatten_fw_res[i].to(memory_format=torch.contiguous_format).numpy( ) if isinstance(flatten_fw_res[i], torch.Tensor) else flatten_fw_res[i] + if np.array(cur_fw_res).size == 0: + continue cur_ov_res = infer_res[compiled.output(i)] - print(f"fw_re: {cur_fw_res};\n ov_res: {cur_ov_res}") - if not np.allclose(cur_ov_res, cur_fw_res, - atol=fw_eps, - rtol=fw_eps, equal_nan=True): + print(f"fw_res: {cur_fw_res};\n ov_res: {cur_ov_res}") + n_is_not_close = np.array(cur_fw_res).size - np.isclose(cur_ov_res, cur_fw_res, + atol=fw_eps, + rtol=fw_eps, equal_nan=True).sum() + max_diff = np.array(abs(np.array(cur_ov_res, dtype=np.float32) - np.array(cur_fw_res, dtype=np.float32))).max() + if not quantized_ops and n_is_not_close > 0: + is_ok = False + print("Max diff is {}".format(max_diff)) + elif quantized_ops and (n_is_not_close > int(np.log10(cur_fw_res.size)) or max_diff > np.array(quant_size + fw_eps).max()): is_ok = False - print("Max diff is {}".format( - np.array( - abs(cur_ov_res - cur_fw_res)).max())) + print("Errors outside threshold range: {} with max diff {}, expected at most {} with max diff {}".format( + n_is_not_close, max_diff, int(np.log10(cur_fw_res.size)), quant_size + fw_eps)) else: print("Accuracy validation successful!\n") - print("absolute eps: {}, relative eps: {}".format(fw_eps, fw_eps)) + print("absolute eps: {}, relative eps: {}".format( + fw_eps, fw_eps)) assert is_ok, "Accuracy validation failed" # Each model should specify inputs @@ -148,7 +161,7 @@ def _prepare_input(self): def convert_via_mo(self, model, example_input, trace_model, dynamic_shapes, ov_inputs): import torch - from openvino.runtime import convert_model + from openvino.tools.ovc import convert_model kwargs = {"example_input": example_input if len( example_input) > 1 else example_input[0], "compress_to_fp16": False} with torch.no_grad():
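Note: the updated comparison above relaxes strict np.allclose checking for quantized
ops: up to int(log10(size)) elements may fall outside atol/rtol, as long as the largest
deviation stays within quant_size + fw_eps. A worked numeric illustration of that
acceptance rule (not part of the diff):

    import numpy as np

    fw = np.zeros((100, 100), dtype=np.float32)   # 10000 elements
    ov = fw.copy()
    ov.flat[:3] += 0.2                            # 3 outliers, each 0.2 off
    n_not_close = fw.size - np.isclose(ov, fw, atol=5e-2, rtol=5e-2).sum()
    max_diff = np.abs(ov - fw).max()
    quant_size, fw_eps = 0.21, 5e-2
    # passes: 3 <= int(log10(10000)) == 4 and 0.2 <= 0.21 + 0.05
    assert n_not_close <= int(np.log10(fw.size)) and max_diff <= quant_size + fw_eps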
diff --git a/tests/layer_tests/pytorch_tests/test_quantize.py b/tests/layer_tests/pytorch_tests/test_quantize.py index ecd06792925456..f1a7522159090e 100644 --- a/tests/layer_tests/pytorch_tests/test_quantize.py +++ b/tests/layer_tests/pytorch_tests/test_quantize.py @@ -48,11 +48,11 @@ def _prepare_input(self): reason="Not supported with FakeQuantize.")) ]) @pytest.mark.nightly - # @pytest.mark.precommit - sporadic issue + @pytest.mark.precommit def test_quantize_per_tensor_dequantize(self, scale, zero_point, dtype, ie_device, precision, ir_version): if dtype == torch.quint8: zero_point = abs(zero_point) self._test(aten_quantize_per_tensor_aten_dequantize(scale, zero_point, dtype), None, ["aten::quantize_per_tensor", "aten::dequantize"], - ie_device, precision, ir_version, ) + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale) class TestQuantizePerChannelDequantize(PytorchLayerTest): def _prepare_input(self): @@ -87,9 +87,9 @@ def _prepare_input(self): reason="Not supported with FakeQuantize.")) ]) @pytest.mark.nightly - # @pytest.mark.precommit - sporadic issue - def test_quantize_per_channel_dequantize(self, scale, zero_point, axis, dtype, ie_device, precision, ir_version): + @pytest.mark.precommit + def test_quantize_per_channel_dequantize(self, scale, zero_point, dtype, axis, ie_device, precision, ir_version): np.random.shuffle(scale), np.random.shuffle(zero_point) if dtype == torch.quint8: zero_point = abs(zero_point) - self._test(aten_quantize_per_channel_aten_dequantize(scale, zero_point, axis, dtype), None, ["aten::quantize_per_channel", "aten::dequantize"], - ie_device, precision, ir_version, ) + self._test(aten_quantize_per_channel_aten_dequantize(scale, zero_point, dtype, axis), None, ["aten::quantize_per_channel", "aten::dequantize"], + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale)
diff --git a/tests/layer_tests/pytorch_tests/test_quantized_add.py b/tests/layer_tests/pytorch_tests/test_quantized_add.py new file mode 100644 index 00000000000000..ba0776fa19a2c6 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_quantized_add.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + +class quantized_add(torch.nn.Module): + def __init__(self, scale, zero_point, dtype) -> None: + torch.nn.Module.__init__(self) + self.scale = scale + self.zero_point = zero_point + self.dtype = dtype + + def forward(self, input_tensor1, input_tensor2): + quantized_tensor1 = torch.quantize_per_tensor(input_tensor1, self.scale, self.zero_point, self.dtype) + quantized_tensor2 = torch.quantize_per_tensor(input_tensor2, self.scale, self.zero_point, self.dtype) + quantized_add = torch.ops.quantized.add(quantized_tensor1, quantized_tensor2, self.scale, self.zero_point) + dequantized_tensor = torch.dequantize(quantized_add) + return dequantized_tensor + +class TestQuantizedAdd(PytorchLayerTest): + def _prepare_input(self): + return (np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32), + np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32)) + + @pytest.mark.parametrize("scale", [ + 1.0, 0.21, 0.62 + ]) + 
@pytest.mark.parametrize("zero_point", [ + 0, 4, -7 + ]) + @pytest.mark.parametrize("dtype", [ + torch.quint8, + torch.qint8 + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_quantized_add(self, scale, zero_point, dtype, ie_device, precision, ir_version): + if dtype == torch.quint8: zero_point = abs(zero_point) + self._test(quantized_add(scale, zero_point, dtype), None, ["quantized::add"], + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale) diff --git a/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py b/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py new file mode 100644 index 00000000000000..4502c0c1973d29 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_quantized_add_relu.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + +class quantized_add_relu(torch.nn.Module): + def __init__(self, scale, zero_point, dtype) -> None: + torch.nn.Module.__init__(self) + self.scale = scale + self.zero_point = zero_point + self.dtype = dtype + + def forward(self, input_tensor1, input_tensor2): + quantized_tensor1 = torch.quantize_per_tensor(input_tensor1, self.scale, self.zero_point, self.dtype) + quantized_tensor2 = torch.quantize_per_tensor(input_tensor2, self.scale, self.zero_point, self.dtype) + quantized_add_relu = torch.ops.quantized.add_relu(quantized_tensor1, quantized_tensor2, self.scale, self.zero_point) + dequantized_tensor = torch.dequantize(quantized_add_relu) + return dequantized_tensor + +class TestQuantizedAddReLU(PytorchLayerTest): + def _prepare_input(self): + return (np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32), + np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32)) + + @pytest.mark.parametrize("scale", [ + 1.0, 0.21, 0.62 + ]) + @pytest.mark.parametrize("zero_point", [ + 0, 4, -7 + ]) + @pytest.mark.parametrize("dtype", [ + torch.quint8, + torch.qint8 + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_quantized_add_relu(self, scale, zero_point, dtype, ie_device, precision, ir_version): + if dtype == torch.quint8: zero_point = abs(zero_point) + self._test(quantized_add_relu(scale, zero_point, dtype), None, ["quantized::add_relu"], + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale) diff --git a/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py b/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py new file mode 100644 index 00000000000000..254b43818e4a34 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_quantized_hardswish.py @@ -0,0 +1,42 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + +class quantized_hardswish(torch.nn.Module): + def __init__(self, scale, zero_point, dtype) -> None: + torch.nn.Module.__init__(self) + self.scale = scale + self.zero_point = zero_point + self.dtype = dtype + + def forward(self, input_tensor1): + quantized_tensor1 = torch.quantize_per_tensor(input_tensor1, self.scale, self.zero_point, self.dtype) + quantized_hardswish = torch.ops.quantized.hardswish(quantized_tensor1, self.scale, self.zero_point) + dequantized_tensor = torch.dequantize(quantized_hardswish) + return dequantized_tensor + +class TestQuantizedHardswish(PytorchLayerTest): + def _prepare_input(self): + return (np.array(5.00 * 
np.random.rand(100, 100) + 5.00, dtype=np.float32),) + + @pytest.mark.parametrize("scale", [ + 1.0, 0.21, 0.62, + ]) + @pytest.mark.parametrize("zero_point", [ + 0, 4, -7 + ]) + @pytest.mark.parametrize("dtype", [ + torch.quint8, + torch.qint8 + ]) + @pytest.mark.nightly + @pytest.mark.precommit + def test_quantized_hardswish(self, scale, zero_point, dtype, ie_device, precision, ir_version): + if dtype == torch.quint8: zero_point = abs(zero_point) + self._test(quantized_hardswish(scale, zero_point, dtype), None, ["quantized::hardswish"], + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale) diff --git a/tests/layer_tests/pytorch_tests/test_quantized_mul.py b/tests/layer_tests/pytorch_tests/test_quantized_mul.py new file mode 100644 index 00000000000000..ab6418ed449206 --- /dev/null +++ b/tests/layer_tests/pytorch_tests/test_quantized_mul.py @@ -0,0 +1,44 @@ +# Copyright (C) 2018-2023 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import numpy as np +import pytest +import torch + +from pytorch_layer_test_class import PytorchLayerTest + +class quantized_mul(torch.nn.Module): + def __init__(self, scale, zero_point, dtype) -> None: + torch.nn.Module.__init__(self) + self.scale = scale + self.zero_point = zero_point + self.dtype = dtype + + def forward(self, input_tensor1, input_tensor2): + quantized_tensor1 = torch.quantize_per_tensor(input_tensor1, self.scale, self.zero_point, self.dtype) + quantized_tensor2 = torch.quantize_per_tensor(input_tensor2, self.scale, self.zero_point, self.dtype) + quantized_mul = torch.ops.quantized.mul(quantized_tensor1, quantized_tensor2, self.scale, self.zero_point) + dequantized_tensor = torch.dequantize(quantized_mul) + return dequantized_tensor + +class TestQuantizedMul(PytorchLayerTest): + def _prepare_input(self): + return (np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32), + np.array(5.00 * np.random.rand(100, 100) + 5.00, dtype=np.float32)) + + @pytest.mark.parametrize("scale", [ + 1.0, 0.21, 0.62 + ]) + @pytest.mark.parametrize("zero_point", [ + 0, 4, -7 + ]) + @pytest.mark.parametrize("dtype", [ + torch.quint8, + torch.qint8 + ]) + @pytest.mark.nightly + # @pytest.mark.precommit - accuracy problem + def test_quantized_mul(self, scale, zero_point, dtype, ie_device, precision, ir_version): + if dtype == torch.quint8: zero_point = abs(zero_point) + self._test(quantized_mul(scale, zero_point, dtype), None, ["quantized::mul"], + ie_device, precision, ir_version, quantized_ops=True, quant_size=scale) diff --git a/tests/layer_tests/tensorflow_tests/test_tf_LRN.py b/tests/layer_tests/tensorflow_tests/test_tf_LRN.py index fca388312767de..ee4e344a1d24d6 100644 --- a/tests/layer_tests/tensorflow_tests/test_tf_LRN.py +++ b/tests/layer_tests/tensorflow_tests/test_tf_LRN.py @@ -26,7 +26,7 @@ def create_lrn_net(self, input_shape, depth_radius, bias, alpha, beta): ] @pytest.mark.parametrize("params", test_data_basic) - @pytest.mark.precommit_tf_fe + #@pytest.mark.precommit_tf_fe - ticket 116032 @pytest.mark.nightly @pytest.mark.skipif(os.getenv("GITHUB_ACTIONS") == 'true', reason="Ticket - 113362") def test_lrn_basic(self, params, ie_device, precision, ir_version, temp_dir, diff --git a/tools/benchmark_tool/CMakeLists.txt b/tools/benchmark_tool/CMakeLists.txt index b41aca487997f6..959e4c9dda96c5 100644 --- a/tools/benchmark_tool/CMakeLists.txt +++ b/tools/benchmark_tool/CMakeLists.txt @@ -26,11 +26,11 @@ endif() # ov_get_pyversion(pyversion) -ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_BENCHMARK_APP} 
+ov_cpack_add_component(${OV_CPACK_COMP_BENCHMARK_APP} HIDDEN) install(DIRECTORY ${OpenVINOBenchmarkTool_SOURCE_DIR}/openvino DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_BENCHMARK_APP} - ${OV_CPACK_COMP_PYTHON_BENCHMARK_APP_EXCLUDE_ALL} + COMPONENT ${OV_CPACK_COMP_BENCHMARK_APP} + ${OV_CPACK_COMP_BENCHMARK_APP_EXCLUDE_ALL} USE_SOURCE_PERMISSIONS)
diff --git a/tools/mo/openvino/tools/mo/__init__.py b/tools/mo/openvino/tools/mo/__init__.py index 84d80c1010ae45..9fc75af1fdd609 100644 --- a/tools/mo/openvino/tools/mo/__init__.py +++ b/tools/mo/openvino/tools/mo/__init__.py @@ -1,5 +1,4 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: Apache-2.0 -from openvino.tools.mo.convert import convert_model -from openvino.tools.ovc import InputCutInfo, LayoutMap # pylint: disable=no-name-in-module,import-error +from openvino.tools.mo.convert import convert_model, LayoutMap, InputCutInfo
diff --git a/tools/mo/openvino/tools/mo/convert.py b/tools/mo/openvino/tools/mo/convert.py index f40476743f7b30..78a4d5233ec03f 100644 --- a/tools/mo/openvino/tools/mo/convert.py +++ b/tools/mo/openvino/tools/mo/convert.py @@ -2,14 +2,17 @@ # SPDX-License-Identifier: Apache-2.0 import os import pathlib +from collections import namedtuple from typing import Any from openvino.runtime import PartialShape, Shape, Layout, Model from openvino.tools.mo.convert_impl import _convert -from openvino.tools.ovc import InputCutInfo, LayoutMap # pylint: disable=no-name-in-module,import-error from openvino.tools.mo.utils.cli_parser import get_all_cli_parser # pylint: disable=no-name-in-module,import-error from openvino.tools.mo.utils.logger import get_logger_state, restore_logger_state # pylint: disable=no-name-in-module,import-error +LayoutMap = namedtuple("LayoutMap", ["source_layout", "target_layout"], defaults=[None, None]) +InputCutInfo = namedtuple("InputInfo", ["name", "shape", "type", "value"], defaults=[None, None, None, None]) + def convert_model( input_model: [str, pathlib.Path, Any] = None,
diff --git a/tools/mo/openvino/tools/mo/utils/cli_parser.py b/tools/mo/openvino/tools/mo/utils/cli_parser.py index 3ed441745318ca..b66377fd038934 100644 --- a/tools/mo/openvino/tools/mo/utils/cli_parser.py +++ b/tools/mo/openvino/tools/mo/utils/cli_parser.py @@ -147,13 +147,13 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type, if isinstance(input, str): # Parse params from string node_name, shape, value, data_type = parse_input_value(input) - return openvino.runtime.InputCutInfo(node_name, + return openvino.tools.mo.InputCutInfo(node_name, PartialShape(shape) if shape is not None else None, data_type, value) - if isinstance(input, openvino.runtime.InputCutInfo): + if isinstance(input, openvino.tools.mo.InputCutInfo): # Wrap input.shape to PartialShape if possible and wrap to InputCutInfo - return openvino.runtime.InputCutInfo(input.name, + return openvino.tools.mo.InputCutInfo(input.name, PartialShape(input.shape) if input.shape is not None else None, input.type, input.value) @@ -183,18 +183,18 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type, else: raise Exception("Incorrect input parameters provided. Expected tuple with input name, " "input type or input shape. Got unknown object: {}".format(val)) - return openvino.runtime.InputCutInfo(name, + return openvino.tools.mo.InputCutInfo(name, PartialShape(shape) if shape is not None else None, inp_type, None) # Case when only type is set if isinstance(input, (type, Type)): - return openvino.runtime.InputCutInfo(None, None, input, None) + return openvino.tools.mo.InputCutInfo(None, None, input, None) # We don't expect here single unnamed value. If list of int is set it is considered as shape. # Setting of value is expected only using InputCutInfo or string analog. - raise Exception("Unexpected object provided for input. Expected openvino.runtime.InputCutInfo " + raise Exception("Unexpected object provided for input. Expected openvino.tools.mo.InputCutInfo " "or tuple or str. Got {}".format(type(input))) @@ -213,12 +213,12 @@ def input_to_input_cut_info(input: [str, tuple, list]): # Parse string with parameters for single input node_name, shape, value, data_type = parse_input_value(input_value) - inputs.append(openvino.runtime.InputCutInfo(node_name, + inputs.append(openvino.tools.mo.InputCutInfo(node_name, PartialShape(shape) if shape is not None else None, data_type, value)) return inputs - if isinstance(input, openvino.runtime.InputCutInfo): + if isinstance(input, openvino.tools.mo.InputCutInfo): # Wrap to list and return return [input] if isinstance(input, tuple): @@ -269,11 +269,11 @@ def input_shape_to_input_cut_info(input_shape: [str, Shape, PartialShape, list, shape = PartialShape(shape) assert inputs[idx].shape is None, "Shape was set in both \"input\" and in \"input_shape\" parameter." \ "Please use either \"input\" or \"input_shape\" for shape setting." - inputs[idx] = openvino.runtime.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value) + inputs[idx] = openvino.tools.mo.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value) else: for shape in input_shape: - inputs.append(openvino.runtime.InputCutInfo(None, PartialShape(shape), None, None)) + inputs.append(openvino.tools.mo.InputCutInfo(None, PartialShape(shape), None, None)) return raise Exception("Unexpected object provided for input_shape. Expected PartialShape, Shape, tuple, list or str. " @@ -375,7 +375,7 @@ def source_target_layout_to_str(value): def layoutmap_to_str(value): if isinstance(value, str): return value - if isinstance(value, openvino.runtime.LayoutMap): + if isinstance(value, openvino.tools.mo.LayoutMap): assert value.source_layout is not None, "Incorrect layout map. 'source_layout' should be set." source_layout = layout_to_str(value.source_layout) if value.target_layout is not None: @@ -400,7 +400,7 @@ def layout_param_to_str(value): raise Exception("Incorrect operation name type. 
Expected string, got {}".format(type(op_name))) values_str.append(op_name + "(" + layoutmap_to_str(layout) + ")") return ",".join(values_str) - if isinstance(value, openvino.runtime.LayoutMap): + if isinstance(value, openvino.tools.mo.LayoutMap): return layoutmap_to_str(value) if isinstance(value, list) or isinstance(value, tuple): values_str = [] @@ -490,7 +490,7 @@ def transform_param_to_str(value): def get_mo_convert_params(): - mo_convert_docs = openvino.runtime.convert_model.__doc__ + mo_convert_docs = openvino.tools.mo.convert_model.__doc__ mo_convert_params = {} group = "Optional parameters:" mo_convert_params[group] = {} @@ -784,7 +784,7 @@ def writable_dir(path: str): def add_args_by_description(args_group, params_description): - signature = inspect.signature(openvino.runtime.convert_model) + signature = inspect.signature(openvino.tools.mo.convert_model) filepath_args = get_params_with_paths_list() cli_tool_specific_descriptions = get_convert_model_help_specifics() for param_name, param_description in params_description.items(): diff --git a/tools/mo/unit_tests/mo/utils/args_to_string_test.py b/tools/mo/unit_tests/mo/utils/args_to_string_test.py index e9776b7c2ec577..7e0679c51e4d3e 100644 --- a/tools/mo/unit_tests/mo/utils/args_to_string_test.py +++ b/tools/mo/unit_tests/mo/utils/args_to_string_test.py @@ -2,9 +2,9 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -from openvino.runtime import Layout, PartialShape, Dimension, Shape, Type +from openvino.runtime import Layout, Dimension -from openvino.runtime import InputCutInfo, LayoutMap +from openvino.tools.mo import LayoutMap from openvino.tools.mo.utils.cli_parser import mean_scale_value_to_str, \ transform_param_to_str, str_list_to_str, source_target_layout_to_str, layout_param_to_str from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry diff --git a/tools/mo/unit_tests/mo/utils/cli_parser_test.py b/tools/mo/unit_tests/mo/utils/cli_parser_test.py index d014e93c838bab..1f2cce048aba07 100644 --- a/tools/mo/unit_tests/mo/utils/cli_parser_test.py +++ b/tools/mo/unit_tests/mo/utils/cli_parser_test.py @@ -20,7 +20,8 @@ from openvino.tools.mo.convert_impl import pack_params_to_args_namespace from openvino.tools.mo.utils.error import Error from unit_tests.mo.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from openvino.runtime import PartialShape, Dimension, Layout, InputCutInfo, LayoutMap +from openvino.runtime import PartialShape, Dimension, Layout +from openvino.tools.mo import LayoutMap, InputCutInfo class TestingMeanScaleGetter(UnitTestWithMockedTelemetry): diff --git a/tools/ovc/CMakeLists.txt b/tools/ovc/CMakeLists.txt index 300526a9cc2e38..6220c0e50848f3 100644 --- a/tools/ovc/CMakeLists.txt +++ b/tools/ovc/CMakeLists.txt @@ -26,11 +26,11 @@ endif() # ov_get_pyversion(pyversion) -ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OVC} +ov_cpack_add_component(${OV_CPACK_COMP_OVC} HIDDEN) install(DIRECTORY ${OpenVINOConverter_SOURCE_DIR}/openvino DESTINATION ${OV_CPACK_PYTHONDIR} - COMPONENT ${OV_CPACK_COMP_PYTHON_OVC} - ${OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL} + COMPONENT ${OV_CPACK_COMP_OVC} + ${OV_CPACK_COMP_OVC_EXCLUDE_ALL} USE_SOURCE_PERMISSIONS) diff --git a/tools/ovc/openvino/tools/ovc/__init__.py b/tools/ovc/openvino/tools/ovc/__init__.py index 9292269a0a51e6..c55c861e448318 100644 --- a/tools/ovc/openvino/tools/ovc/__init__.py +++ b/tools/ovc/openvino/tools/ovc/__init__.py @@ -1,13 +1,4 @@ # Copyright (C) 2018-2023 Intel Corporation # SPDX-License-Identifier: 
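Reviewer note: the updated unit-test imports above pin down the new surface — the helper types come from `openvino.tools.mo`, and the cli_parser isinstance checks now accept them. A hedged sketch of the round trip:

```python
# Sketch only; assumes the patched openvino.tools.mo package is installed.
from openvino.runtime import PartialShape
from openvino.tools.mo import InputCutInfo
from openvino.tools.mo.utils.cli_parser import input_to_input_cut_info

cut = InputCutInfo(name="data", shape=PartialShape([1, 3, 224, 224]))
print(input_to_input_cut_info(cut))  # an InputCutInfo is wrapped into [cut]
```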
diff --git a/tools/ovc/CMakeLists.txt b/tools/ovc/CMakeLists.txt
index 300526a9cc2e38..6220c0e50848f3 100644
--- a/tools/ovc/CMakeLists.txt
+++ b/tools/ovc/CMakeLists.txt
@@ -26,11 +26,11 @@ endif()

 # ov_get_pyversion(pyversion)
-ov_cpack_add_component(${OV_CPACK_COMP_PYTHON_OVC}
+ov_cpack_add_component(${OV_CPACK_COMP_OVC}
                        HIDDEN)

 install(DIRECTORY ${OpenVINOConverter_SOURCE_DIR}/openvino
         DESTINATION ${OV_CPACK_PYTHONDIR}
-        COMPONENT ${OV_CPACK_COMP_PYTHON_OVC}
-        ${OV_CPACK_COMP_PYTHON_OVC_EXCLUDE_ALL}
+        COMPONENT ${OV_CPACK_COMP_OVC}
+        ${OV_CPACK_COMP_OVC_EXCLUDE_ALL}
         USE_SOURCE_PERMISSIONS)
diff --git a/tools/ovc/openvino/tools/ovc/__init__.py b/tools/ovc/openvino/tools/ovc/__init__.py
index 9292269a0a51e6..c55c861e448318 100644
--- a/tools/ovc/openvino/tools/ovc/__init__.py
+++ b/tools/ovc/openvino/tools/ovc/__init__.py
@@ -1,13 +1,4 @@
 # Copyright (C) 2018-2023 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
-from openvino.tools.ovc.convert import convert_model, InputCutInfo, LayoutMap
-
-# pylint: disable=no-name-in-module,import-error,no-member
-try:
-    import openvino.runtime
-    openvino.runtime.convert_model = convert_model
-    openvino.runtime.InputCutInfo = InputCutInfo
-    openvino.runtime.LayoutMap = LayoutMap
-except:
-    pass
\ No newline at end of file
+from openvino.tools.ovc.convert import convert_model, InputCutInfo
\ No newline at end of file
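Reviewer note: the deleted try/except block used to inject `convert_model`, `InputCutInfo` and `LayoutMap` into `openvino.runtime` at import time. After this patch the explicit import is the only supported entry point; a quick way to confirm (sketch, assuming the patched package):

```python
# Sketch only; assumes a build that contains this patch.
import openvino.runtime
from openvino.tools.ovc import convert_model, InputCutInfo

# The monkey-patched aliases are gone:
print(hasattr(openvino.runtime, "convert_model"))  # expected: False
print(hasattr(openvino.runtime, "InputCutInfo"))   # expected: False
```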
diff --git a/tools/ovc/openvino/tools/ovc/cli_parser.py b/tools/ovc/openvino/tools/ovc/cli_parser.py
index 4326c523b8d645..68b1fcf1406de4 100644
--- a/tools/ovc/openvino/tools/ovc/cli_parser.py
+++ b/tools/ovc/openvino/tools/ovc/cli_parser.py
@@ -21,7 +21,7 @@
 import openvino
 from openvino.tools.ovc.convert_data_type import destination_type_to_np_data_type
 from openvino.tools.ovc.error import Error
-from openvino.tools.ovc.utils import refer_to_faq_msg, get_mo_root_dir
+from openvino.tools.ovc.utils import get_mo_root_dir
 from openvino.tools.ovc.help import get_convert_model_help_specifics, get_to_string_methods_for_params

@@ -36,12 +36,6 @@ def extension_path_to_str_or_extensions_class(extension):
     return extension

-def transformations_config_to_str(value):
-    if value is None:
-        return value
-    return extension_path_to_str_or_extensions_class(value)
-
-
 def extensions_to_str_or_extensions_class(extensions):
     if extensions is None:
         return None
@@ -148,14 +142,14 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type,
         # Parse params from string
         node_name, shape, value, data_type = parse_input_value(input)
         # pylint: disable=no-member
-        return openvino.runtime.InputCutInfo(node_name,
+        return openvino.tools.ovc.InputCutInfo(node_name,
                                              PartialShape(shape) if shape is not None else None,
                                              data_type,
                                              value)
-    if isinstance(input, openvino.runtime.InputCutInfo):  # pylint: disable=no-member
+    if isinstance(input, openvino.tools.ovc.InputCutInfo):  # pylint: disable=no-member
         # Wrap input.shape to PartialShape if possible and wrap to InputCutInfo
         # pylint: disable=no-member
-        return openvino.runtime.InputCutInfo(input.name,
+        return openvino.tools.ovc.InputCutInfo(input.name,
                                              PartialShape(input.shape) if input.shape is not None else None,
                                              input.type,
                                              input.value)
@@ -186,18 +180,18 @@ def single_input_to_input_cut_info(input: [str, tuple, list, PartialShape, Type,
             raise Exception("Incorrect input parameters provided. Expected tuple with input name, "
                             "input type or input shape. Got unknown object: {}".format(val))
         # pylint: disable=no-member
-        return openvino.runtime.InputCutInfo(name,
+        return openvino.tools.ovc.InputCutInfo(name,
                                              PartialShape(shape) if shape is not None else None,
                                              inp_type,
                                              None)
     # Case when only type is set
     if isinstance(input, (type, Type)):
-        return openvino.runtime.InputCutInfo(None, None, input, None)  # pylint: disable=no-member
+        return openvino.tools.ovc.InputCutInfo(None, None, input, None)  # pylint: disable=no-member
     # We don't expect here single unnamed value. If list of int is set it is considered as shape.
     # Setting of value is expected only using InputCutInfo or string analog.
-    raise Exception("Unexpected object provided for input. Expected openvino.runtime.InputCutInfo "
+    raise Exception("Unexpected object provided for input. Expected openvino.tools.ovc.InputCutInfo "
                     "or tuple or str. Got {}".format(type(input)))
@@ -217,13 +211,13 @@ def input_to_input_cut_info(input: [str, tuple, list]):
             # Parse string with parameters for single input
             node_name, shape, value, data_type = parse_input_value(input_value)
             # pylint: disable=no-member
-            inputs.append(openvino.runtime.InputCutInfo(node_name,
+            inputs.append(openvino.tools.ovc.InputCutInfo(node_name,
                                                         PartialShape(shape) if shape is not None else None,
                                                         data_type,
                                                         value))
         return inputs
     # pylint: disable=no-member
-    if isinstance(input, openvino.runtime.InputCutInfo):
+    if isinstance(input, openvino.tools.ovc.InputCutInfo):
         # Wrap to list and return
         return [input]
     if isinstance(input, tuple):
@@ -245,60 +239,15 @@ def input_to_input_cut_info(input: [str, tuple, list]):
     return [single_input_to_input_cut_info(input)]


-def input_shape_to_input_cut_info(input_shape: [str, Shape, PartialShape, list, tuple], inputs: list):
+def freeze_placeholder_to_input_cut_info(inputs: list):
     """
-    Parses 'input_shape' to list of PartialShape and updates 'inputs'.
-    :param input_shape: input shapes passed by user
-    :param inputs: list of InputCutInfo with information from 'input' parameter
-    """
-    if input_shape is None:
-        return
-    if isinstance(input_shape, str):
-        # Split input_shape to list of string
-        input_shape = split_shapes(input_shape)
-    if isinstance(input_shape, (Shape, PartialShape)):
-        # Whap single shape to list
-        input_shape = [input_shape]
-    if isinstance(input_shape, (list, tuple)):
-        # Check case when single shape is passed as list or tuple
-        if len(input_shape) > 0 and isinstance(input_shape[0], (int, Dimension)):
-            input_shape = [input_shape]
-
-        if len(inputs) > 0 and len(input_shape) > 0:
-            assert len(inputs) == len(input_shape), "Different numbers of inputs were specified in \"input\" parameter " \
-                "and \"input_shapes\". \"input\" has {} items, \"input_shape\" has {} item.".format(len(inputs), len(input_shape))
-
-        # Update inputs with information from 'input_shape'
-        if len(inputs) > 0:
-            for idx, shape in enumerate(input_shape):
-                shape = PartialShape(shape)
-                assert inputs[idx].shape is None, "Shape was set in both \"input\" and in \"input_shape\" parameter." \
-                    "Please use either \"input\" or \"input_shape\" for shape setting."
-                # pylint: disable=no-member
-                inputs[idx] = openvino.runtime.InputCutInfo(inputs[idx].name, shape, inputs[idx].type, inputs[idx].value)
-
-        else:
-            for shape in input_shape:
-                # pylint: disable=no-member
-                inputs.append(openvino.runtime.InputCutInfo(None, PartialShape(shape), None, None))
-            return
-
-    raise Exception("Unexpected object provided for input_shape. Expected PartialShape, Shape, tuple, list or str. "
-                    "Got {}".format(type(input_shape)))
-
-
-def freeze_placeholder_to_input_cut_info(argv_freeze_placeholder_with_value: str, inputs: list):
-    """
-    Parses 'argv_freeze_placeholder_with_value' to dictionary and collects unnamed inputs from 'inputs' to list.
-    :param argv_freeze_placeholder_with_value: string set by user.
-    As it was planned to be deprecated no Python analogs were made.
+    Parses freezing parts from input list.
     :param inputs: list of InputCutInfo with information from 'input' parameter
     :returns (placeholder_values, unnamed_placeholder_values), where
     placeholder_values - dictionary where key is node name, value is node value,
     unnamed_placeholder_values - list with unnamed node values
     """
-    # Parse argv_freeze_placeholder_with_value to dictionary with names and values
-    placeholder_values = parse_freeze_placeholder_values(argv_freeze_placeholder_with_value)
+    placeholder_values = {}
    unnamed_placeholder_values = []

     # Collect values for freezing from 'inputs'
@@ -322,36 +271,6 @@ def freeze_placeholder_to_input_cut_info(argv_freeze_placeholder_with_value: str
     return placeholder_values, unnamed_placeholder_values


-def mean_scale_value_to_str(value):
-    # default empty value
-    if isinstance(value, tuple) and len(value) == 0:
-        return value
-
-    if isinstance(value, str):
-        return value
-    if isinstance(value, dict):
-        values_str = []
-        for op_name, val in value.items():
-            if not isinstance(op_name, str):
-                raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name)))
-            values_str.append(op_name + value_to_str(val, ","))
-        return ",".join(values_str)
-    if isinstance(value, list) or isinstance(value, tuple):
-        list_of_lists = False
-        for val in value:
-            if isinstance(val, list) or isinstance(val, tuple):
-                list_of_lists = True
-                break
-        if list_of_lists:
-            values_str = []
-            for val in value:
-                values_str.append(value_to_str(val, ","))
-            return ",".join(values_str)
-        else:
-            return value_to_str(value, ",")
-    return value_to_str(value, ",")
-
-
 def layout_to_str(layout):
     if isinstance(layout, str):
         return layout
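Reviewer note: with `input_shape_to_input_cut_info` removed and the freeze string argument dropped, value freezing is expressed entirely through the `input` specification. A hedged sketch of the new flow (node names are illustrative):

```python
# Sketch only; relies on the patched openvino.tools.ovc.cli_parser.
from openvino.tools.ovc.cli_parser import (freeze_placeholder_to_input_cut_info,
                                           input_to_input_cut_info)

inputs = input_to_input_cut_info("is_training->False,data[1,3,224,224]")
placeholder_values, unnamed_values = freeze_placeholder_to_input_cut_info(inputs)
print(placeholder_values)  # expected to map 'is_training' to its frozen value
```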
@@ -360,146 +279,14 @@
     raise Exception("Incorrect layout type. Expected Layout or string or dictionary, "
                     "where key is operation name and value is layout or list of layouts, got {}".format(type(layout)))

-
-def source_target_layout_to_str(value):
-    # default empty value
-    if isinstance(value, tuple) and len(value) == 0:
-        return value
-
-    if isinstance(value, str):
-        return value
-    if isinstance(value, dict):
-        values_str = []
-        for op_name, layout in value.items():
-            if not isinstance(op_name, str):
-                raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name)))
-            values_str.append(op_name + "(" + layout_to_str(layout) + ")")
-        return ",".join(values_str)
-
-    return layout_to_str(value)
-
-
-def layoutmap_to_str(value):
-    if isinstance(value, str):
-        return value
-    if isinstance(value, openvino.runtime.LayoutMap):  # pylint: disable=no-member
-        assert value.source_layout is not None, "Incorrect layout map. 'source_layout' should be set."
-        source_layout = layout_to_str(value.source_layout)
-        if value.target_layout is not None:
-            target_layout = layout_to_str(value.target_layout)
-            source_layout += "->" + target_layout
-        return source_layout
-    return layout_to_str(value)
-
-
-def layout_param_to_str(value):
-    # default empty value
-    if isinstance(value, tuple) and len(value) == 0:
-        return value
-
-    if isinstance(value, str):
-        return value
-
-    if isinstance(value, dict):
-        values_str = []
-        for op_name, layout in value.items():
-            if not isinstance(op_name, str):
-                raise Exception("Incorrect operation name type. Expected string, got {}".format(type(op_name)))
-            values_str.append(op_name + "(" + layoutmap_to_str(layout) + ")")
-        return ",".join(values_str)
-    if isinstance(value, openvino.runtime.LayoutMap):  # pylint: disable=no-member
-        return layoutmap_to_str(value)
-    if isinstance(value, list) or isinstance(value, tuple):
-        values_str = []
-        for layout in value:
-            values_str.append(layoutmap_to_str(layout))
-        return ",".join(values_str)
-
-    return layoutmap_to_str(value)
-
-
-def batch_to_int(value):
-    if value is None or isinstance(value, int):
-        return value
-    if isinstance(value, Dimension):
-        if not value.is_static:
-            # TODO: Ticket 88676
-            raise Exception("Dynamic batch for \"batch\" parameter is not supported.")
-        else:
-            return value.get_length()
-    raise Exception("Incorrect batch value. Expected int, got {}.".format(type(value)))
-
-
-def transform_param_value_to_str(value):
-    # This function supports parsing of parameters of MakeStateful, LowLatency2, Pruning.
-    # If available transforms list is extended this method should be extended for new transforms.
-    if isinstance(value, str):
-        return value
-    if isinstance(value, bool):
-        return str(value)
-    if isinstance(value, dict):
-        # param_res_names dictionary for MakeStateful transform
-        values_str = []
-        for input_name, output_name in value.items():
-            assert isinstance(input_name, str), "Incorrect input name. " \
-                                                "Expected string, got {}".format(type(input_name))
-            assert isinstance(output_name, str), "Incorrect output name. " \
-                                                 "Expected string, got {}".format(type(output_name))
-            values_str.append("\'{}\':\'{}\'".format(input_name, output_name))
-        return "{" + ','.join(values_str) + "}"
-    raise Exception("Unknown parameter type.")
-
-
-def transform_to_str(value):
-    from openvino.tools.ovc.moc_frontend.offline_transformations import get_available_transformations
-
-    if isinstance(value, str):
-        return value
-
-    if isinstance(value, tuple):
-        assert 1 <= len(value) <= 2, "Incorrect definition of transformation in transform argument: " \
-                                     "expected two elements in tuple, provided {}. " \
-                                     "Supported transforms are: {}".format(
-                                         len(value),
-                                         list(get_available_transformations().keys()))
-        transform_name = value[0]
-        assert isinstance(transform_name, str), "Incorrect transform name type. " \
-                                                "Expected string, got {}".format(type(transform_name))
-        if len(value) == 2:
-            params = value[1]
-            assert isinstance(params, dict), "Incorrect transform params type. " \
-                                             "Expected dictionary, got {}".format(type(params))
-            params_str_list = []
-            for param_name, val in params.items():
-                assert isinstance(param_name, str), "Incorrect transform parameter name type. " \
-                                                    "Expected string, got {}".format(type(param_name))
-                val_str = transform_param_value_to_str(val)
-                params_str_list.append(param_name + "=" + val_str)
-            transform_name += '[' + ','.join(params_str_list) + ']'
-        return transform_name
-    raise Exception("Incorrect transform type. Expected tuple with transform name and "
-                    "dictionary with transform parameters. Got object of type {}".format(type(value)))
-
-
-def transform_param_to_str(value):
-    if value is None or isinstance(value, str):
-        return value
-    if isinstance(value, list):
-        transforms_str = []
-        for transform in value:
-            transforms_str.append(transform_to_str(transform))
-        return ','.join(transforms_str)
-    return transform_to_str(value)
-
-
 ParamDescription = namedtuple("ParamData", ["description", "cli_tool_description", "to_string"])

 def get_mo_convert_params():
-    mo_convert_docs = openvino.runtime.convert_model.__doc__  # pylint: disable=no-member
+    mo_convert_docs = openvino.tools.ovc.convert_model.__doc__  # pylint: disable=no-member
     mo_convert_params = {}
-    group = "Optional parameters:"
+    group = "Framework-agnostic parameters:"  # FIXME: WA for unknown bug in this function
     mo_convert_params[group] = {}

     mo_convert_docs = mo_convert_docs[:mo_convert_docs.find('Returns:')]
@@ -530,7 +317,7 @@ def get_mo_convert_params():
             mo_convert_params[group_name] = {}
             group = group_name

-    # TODO: remove this when internal converting of params to string is removed
+    # TODO: remove this when internal converting of params to string is removed (high priority)
     params_converted_to_string = get_to_string_methods_for_params()

     params_with_paths = get_params_with_paths_list()
@@ -562,8 +349,6 @@ def __init__(self, nargs=0, **kw):
     def __call__(self, parser, namespace, values, option_string=None):
         dep_msg = "Use of deprecated cli option {} detected. Option use in the following releases will be fatal. ".format(option_string)
-        if 'fusing' in option_string:
-            dep_msg += 'Please use --finegrain_fusing cli option instead'
         log.error(dep_msg, extra={'is_warning': True})
         setattr(namespace, self.dest, True)
@@ -585,7 +370,7 @@ def __call__(self, parser, namespace, values, option_string=None):
         setattr(namespace, self.dest, True)


-def canonicalize_and_check_paths(values: Union[str, List[str]], param_name,
+def canonicalize_and_check_paths(values: Union[str, List[str], None], param_name,
                                  try_mo_root=False, check_existence=True) -> List[str]:
     if values is not None:
         list_of_values = list()
@@ -626,7 +411,7 @@ class CanonicalizePathAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
         list_of_paths = canonicalize_and_check_paths(values, param_name=option_string,
                                                      try_mo_root=False, check_existence=False)
-        setattr(namespace, self.dest, ','.join(list_of_paths))
+        setattr(namespace, self.dest, list_of_paths)


 class CanonicalizeTransformationPathCheckExistenceAction(argparse.Action):
@@ -650,7 +435,7 @@ class CanonicalizePathCheckExistenceAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
         list_of_paths = canonicalize_and_check_paths(values, param_name=option_string,
                                                      try_mo_root=False, check_existence=True)
-        setattr(namespace, self.dest, ','.join(list_of_paths))
+        setattr(namespace, self.dest, list_of_paths)


 class CanonicalizeExtensionsPathCheckExistenceAction(argparse.Action):
@@ -791,13 +576,17 @@ def writable_dir(path: str):

 def add_args_by_description(args_group, params_description):
-    signature = inspect.signature(openvino.runtime.convert_model)  # pylint: disable=no-member
+    signature = inspect.signature(openvino.tools.ovc.convert_model)  # pylint: disable=no-member
     filepath_args = get_params_with_paths_list()
     cli_tool_specific_descriptions = get_convert_model_help_specifics()
     for param_name, param_description in params_description.items():
-        if param_name == 'help':
+        if param_name in ['share_weights', 'example_input']:
             continue
-        cli_param_name = "--" + param_name
+        if param_name == 'input_model':
+            # input_model is a positional argument for the tool: it collects all untagged command-line values
+            cli_param_name = param_name
+        else:
+            cli_param_name = '--' + param_name
         if cli_param_name not in args_group._option_string_actions:
             # Get parameter specifics
             param_specifics = cli_tool_specific_descriptions[param_name] if param_name in \
@@ -806,7 +595,7 @@ def add_args_by_description(args_group, params_description):
                 else param_description.description
             action = param_specifics['action'] if 'action' in param_specifics else None
             param_type = param_specifics['type'] if 'type' in param_specifics else None
-            param_alias = param_specifics['aliases'] if 'aliases' in param_specifics else {}
+            param_alias = param_specifics['aliases'] if 'aliases' in param_specifics and param_name != 'input_model' else {}
             param_version = param_specifics['version'] if 'version' in param_specifics else None
             param_choices = param_specifics['choices'] if 'choices' in param_specifics else None

@@ -814,9 +603,7 @@ def add_args_by_description(args_group, params_description):
             if signature.parameters[param_name].annotation == bool and param_name != 'version':
                 args_group.add_argument(
                     cli_param_name, *param_alias,
-                    type=check_bool if param_type is None else param_type,
-                    nargs="?",
-                    const=True,
+                    action='store_true',
                     help=help_text,
                     default=signature.parameters[param_name].default)
             # File paths common setting
@@ -825,9 +612,10 @@ def add_args_by_description(args_group, params_description):
                 args_group.add_argument(
                     cli_param_name, *param_alias,
                     type=str if param_type is None else param_type,
+                    nargs='*' if param_name == 'input_model' else '?',
                     action=action,
                     help=help_text,
-                    default=signature.parameters[param_name].default)
+                    default=None if param_name == 'input_model' else signature.parameters[param_name].default)
             # Other params
             else:
                 additional_params = {}
@@ -853,41 +641,16 @@ def get_common_cli_parser(parser: argparse.ArgumentParser = None):
     mo_convert_params = get_mo_convert_params()
     mo_convert_params_common = mo_convert_params['Framework-agnostic parameters:']

+    from openvino.tools.ovc.version import VersionChecker
+
     # Command line tool specific params
-    common_group.add_argument('--model_name', '-n',
-                              help='Model_name parameter passed to the final create_ir transform. ' +
-                                   'This parameter is used to name ' +
-                                   'a network in a generated IR and output .xml/.bin files.')
-    common_group.add_argument('--output_dir', '-o',
-                              help='Directory that stores the generated IR. ' +
-                                   'By default, it is the directory from where the Model Conversion is launched.',
-                              default=get_absolute_path('.'),
-                              action=CanonicalizePathAction,
-                              type=writable_dir)
-
-    # Deprecated params
-    common_group.add_argument('--freeze_placeholder_with_value',
-                              help='Replaces input layer with constant node with '
-                                   'provided value, for example: "node_name->True". '
-                                   'It will be DEPRECATED in future releases. '
-                                   'Use "input" option to specify a value for freezing.',
-                              default=None)
-    common_group.add_argument('--static_shape',
-                              help='Enables IR generation for fixed input shape (folding `ShapeOf` operations and '
-                                   'shape-calculating sub-graphs to `Constant`). Changing model input shape using '
-                                   'the OpenVINO Runtime API in runtime may fail for such an IR.',
-                              action='store_true', default=False)
-    common_group.add_argument("--use_new_frontend",
-                              help='Force the usage of new Frontend for model conversion into IR. '
-                                   'The new Frontend is C++ based and is available for ONNX* and PaddlePaddle* models. '
-                                   'Model Conversion API uses new Frontend for ONNX* and PaddlePaddle* by default that means '
-                                   '`use_new_frontend` and `use_legacy_frontend` options are not specified.',
-                              action='store_true', default=False)
-    common_group.add_argument("--use_legacy_frontend",
-                              help='Force the usage of legacy Frontend for model conversion into IR. '
-                                   'The legacy Frontend is Python based and is available for TensorFlow*, ONNX*, MXNet*, '
-                                   'Caffe*, and Kaldi* models.',
-                              action='store_true', default=False)
+    common_group.add_argument('--output_model',
+                              help='Name for the output .xml/.bin files of the converted model.')
+    common_group.add_argument('--compress_to_fp16', action='store_true',
+                              help='Compress weights in output IR .xml/bin files to FP16.')
+    common_group.add_argument('--version', action='version',
+                              help='Print ovc version and exit.',
+                              version='OpenVINO Model Converter (ovc) {}'.format(VersionChecker().get_ie_version()))
     add_args_by_description(common_group, mo_convert_params_common)
     return parser
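Reviewer note: taken together, the argparse changes above reshape the tool's surface — `input_model` becomes a positional `nargs='*'` argument, boolean options turn into plain `store_true` flags, and path-valued options now keep a list instead of a re-joined comma-separated string. A hedged sketch of the resulting CLI ("model.onnx" is illustrative):

```python
# Sketch only; assumes the patched openvino.tools.ovc package.
from openvino.tools.ovc.cli_parser import get_all_cli_parser

args = get_all_cli_parser().parse_args(
    ['model.onnx', '--output_model', 'model_ir', '--compress_to_fp16'])
print(args.input_model)       # ['model.onnx'] - collected positionally
print(args.compress_to_fp16)  # True - store_true flag, no value needed
```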
@@ -895,110 +658,16 @@
 def get_common_cli_options(model_name):
     d = OrderedDict()
     d['input_model'] = '- Path to the Input Model'
-    d['output_dir'] = ['- Path for generated IR', lambda x: x if x != '.' else os.getcwd()]
-    d['model_name'] = ['- IR output name', lambda x: x if x else model_name]
+    d['output_dir'] = ['- Path for generated IR', lambda x: x if x != '.' else os.getcwd()]  # TODO: Consider removing
+    d['output_model'] = ['- IR output name', lambda x: x if x else model_name]
     d['log_level'] = '- Log level'
-    d['batch'] = ['- Batch', lambda x: x if x else 'Not specified, inherited from the model']
     d['input'] = ['- Input layers', lambda x: x if x else 'Not specified, inherited from the model']
     d['output'] = ['- Output layers', lambda x: x if x else 'Not specified, inherited from the model']
-    d['input_shape'] = ['- Input shapes', lambda x: x if x else 'Not specified, inherited from the model']
-    d['source_layout'] = ['- Source layout', lambda x: x if x else 'Not specified']
-    d['target_layout'] = ['- Target layout', lambda x: x if x else 'Not specified']
-    d['layout'] = ['- Layout', lambda x: x if x else 'Not specified']
-    d['mean_values'] = ['- Mean values', lambda x: x if x else 'Not specified']
-    d['scale_values'] = ['- Scale values', lambda x: x if x else 'Not specified']
-    d['scale'] = ['- Scale factor', lambda x: x if x else 'Not specified']
-    d['transform'] = ['- User transformations', lambda x: x if x else 'Not specified']
-    d['reverse_input_channels'] = '- Reverse input channels'
-    d['static_shape'] = '- Enable IR generation for fixed input shape'
-    d['transformations_config'] = '- Use the transformations config file'
     return d


-def get_advanced_cli_options():
-    d = OrderedDict()
-    d['use_legacy_frontend'] = '- Force the usage of legacy Frontend for model conversion into IR'
-    d['use_new_frontend'] = '- Force the usage of new Frontend for model conversion into IR'
-    return d
-
-
-def get_caffe_cli_options():
-    d = {
-        'input_proto': ['- Path to the Input prototxt', lambda x: x],
-        'caffe_parser_path': ['- Path to Python Caffe* parser generated from caffe.proto', lambda x: x],
-        'k': '- Path to CustomLayersMapping.xml',
-    }
-
-    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
-
-
-def get_tf_cli_options():
-    d = {
-        'input_model_is_text': '- Input model in text protobuf format',
-        'tensorflow_custom_operations_config_update': '- Update the configuration file with input/output node names',
-        'tensorflow_object_detection_api_pipeline_config': '- Use configuration file used to generate the model with '
-                                                           'Object Detection API',
-        'tensorflow_custom_layer_libraries': '- List of shared libraries with TensorFlow custom layers implementation',
-        'tensorboard_logdir': '- Path to model dump for TensorBoard'
-    }
-
-    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
-
-
-def get_mxnet_cli_options():
-    d = {
-        'input_symbol': '- Deploy-ready symbol file',
-        'nd_prefix_name': '- Prefix name for args.nd and argx.nd files',
-        'pretrained_model_name': '- Pretrained model to be merged with the .nd files',
-        'save_params_from_nd': '- Enable saving built parameters file from .nd files',
-        'legacy_mxnet_model': '- Enable MXNet loader for models trained with MXNet version lower than 1.0.0',
-    }
-
-    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
-
-
-def get_kaldi_cli_options():
-    d = {
-        'counts': '- A file name with full path to the counts file or empty string if you want to use counts from model',
-        'remove_output_softmax': '- Removes the SoftMax layer that is the output layer',
-        'remove_memory': '- Removes the Memory layer and use additional inputs and outputs instead'
-    }
-
-    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
-
-
-def get_onnx_cli_options():
-    d = {
-    }
-
-    return OrderedDict(sorted(d.items(), key=lambda t: t[0]))
-
-
 def get_params_with_paths_list():
-    return ['input_model', 'output_dir', 'caffe_parser_path', 'extensions', 'k', 'output_dir',
-            'input_checkpoint', 'input_meta_graph', 'input_proto', 'input_symbol',
-            'pretrained_model_name', 'saved_model_dir', 'tensorboard_logdir',
-            'tensorflow_custom_layer_libraries', 'tensorflow_custom_operations_config_update',
-            'tensorflow_object_detection_api_pipeline_config',
-            'transformations_config']
-
-
-def get_caffe_cli_parser(parser: argparse.ArgumentParser = None):
-    """
-    Specifies cli arguments for Model Conversion for Caffe*
-
-    Returns
-    -------
-    ArgumentParser instance
-    """
-    if not parser:
-        parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-        get_common_cli_parser(parser=parser)
-
-    caffe_group = parser.add_argument_group('Caffe*-specific parameters')
-    mo_convert_params_caffe = get_mo_convert_params()['Caffe*-specific parameters:']
-    add_args_by_description(caffe_group, mo_convert_params_caffe)
-    return parser
+    return ['input_model', 'output_model', 'extensions']


 def get_tf_cli_parser(parser: argparse.ArgumentParser = None):
@@ -1009,9 +678,6 @@ def get_tf_cli_parser(parser: argparse.ArgumentParser = None):
     -------
     ArgumentParser instance
     """
-    if not parser:
-        parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-        get_common_cli_parser(parser=parser)

     mo_convert_params_tf = get_mo_convert_params()['TensorFlow*-specific parameters:']
     tf_group = parser.add_argument_group('TensorFlow*-specific parameters')
@@ -1019,43 +685,6 @@ def get_tf_cli_parser(parser: argparse.ArgumentParser = None):
     return parser


-def get_mxnet_cli_parser(parser: argparse.ArgumentParser = None):
-    """
-    Specifies cli arguments for Model Conversion for MXNet*
-
-    Returns
-    -------
-    ArgumentParser instance
-    """
-    if not parser:
-        parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-        get_common_cli_parser(parser=parser)
-
-    mx_group = parser.add_argument_group('MXNet-specific parameters')
-    mo_convert_params_mxnet = get_mo_convert_params()['MXNet-specific parameters:']
-    add_args_by_description(mx_group, mo_convert_params_mxnet)
-
-    return parser
-
-
-def get_kaldi_cli_parser(parser: argparse.ArgumentParser = None):
-    """
-    Specifies cli arguments for Model Conversion for MXNet*
-
-    Returns
-    -------
-    ArgumentParser instance
-    """
-    if not parser:
-        parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-        get_common_cli_parser(parser=parser)
-
-    kaldi_group = parser.add_argument_group('Kaldi-specific parameters')
-    mo_convert_params_kaldi = get_mo_convert_params()['Kaldi-specific parameters:']
-    add_args_by_description(kaldi_group, mo_convert_params_kaldi)
-    return parser
-
-
 def get_onnx_cli_parser(parser: argparse.ArgumentParser = None):
     """
     Specifies cli arguments for Model Conversion for ONNX

@@ -1064,10 +693,6 @@ def get_onnx_cli_parser(parser: argparse.ArgumentParser = None):
     -------
     ArgumentParser instance
     """
-    if not parser:
-        parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-        get_common_cli_parser(parser=parser)
-
     return parser


@@ -1079,15 +704,10 @@ def get_all_cli_parser():
     -------
     ArgumentParser instance
     """
-    parser = argparse.ArgumentParser(usage='%(prog)s [options]')
-    mo_convert_params_optional = get_mo_convert_params()['Optional parameters:']
-    add_args_by_description(parser, mo_convert_params_optional)
+    parser = argparse.ArgumentParser()

     get_common_cli_parser(parser=parser)
     get_tf_cli_parser(parser=parser)
-    get_caffe_cli_parser(parser=parser)
-    get_mxnet_cli_parser(parser=parser)
-    get_kaldi_cli_parser(parser=parser)
     get_onnx_cli_parser(parser=parser)

     return parser
@@ -1224,7 +844,7 @@ def parse_input_value(input_value: str):
     if shape is not None and value is not None and partial_shape_prod(shape) != value_size:
         raise Error("The shape '{}' of the input node '{}' does not correspond to the number of elements '{}' in the "
                     "value: {}".format(shape, node_name, value_size, value))
-    return node_name, shape, value, data_type
+    return node_name if node_name else None, shape, value, data_type


 def split_str_avoiding_square_brackets(s: str) -> list:
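Reviewer note: the one-line change to `parse_input_value` normalizes an empty node name to None, so unnamed input specifications no longer leak an empty string downstream. Sketch:

```python
# Sketch only; assumes the patched openvino.tools.ovc.cli_parser.
from openvino.tools.ovc.cli_parser import parse_input_value

name, shape, value, data_type = parse_input_value("[1,3,224,224]")
print(name)  # expected: None (previously an empty string)
```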
@@ -1407,6 +1027,7 @@
     return res_list


+# TODO: Should be removed?
 def parse_freeze_placeholder_values(argv_freeze_placeholder_with_value: str):
     """
     Parses parse_freeze_placeholder_values string.
@@ -1431,7 +1052,7 @@
     return placeholder_values


-def get_freeze_placeholder_values(argv_input: str, argv_freeze_placeholder_with_value: str):
+def get_freeze_placeholder_values(argv_input: str):
     """
     Parses values for placeholder freezing and input node names

     Parameters
     ----------
     argv_input
         string with a list of input layers: either an empty string, or strings separated with comma.
         'node_name1[shape1]->value1,node_name2[shape2]->value2,...'
-    argv_freeze_placeholder_with_value
-        string with a list of input shapes: either an empty string, or tuples separated with comma.
-        'placeholder_name1->value1, placeholder_name2->value2,...'

     Returns
     -------
     parsed placeholders with values for freezing
     input nodes cleaned from shape info
     """
-    placeholder_values = parse_freeze_placeholder_values(argv_freeze_placeholder_with_value)
+    placeholder_values = {}
     input_node_names = None

     if argv_input is not None:
@@ -1493,208 +1111,6 @@ def split_inputs(input_str):
     return inputs


-def split_shapes(argv_input_shape: str):
-    range_reg = r'([0-9]*\.\.[0-9]*)'
-    first_digit_reg = r'([0-9 ]*|-1|\?|{})'.format(range_reg)
-    next_digits_reg = r'(,{})*'.format(first_digit_reg)
-    tuple_reg = r'((\({}{}\))|(\[{}{}\]))'.format(first_digit_reg, next_digits_reg,
-                                                  first_digit_reg, next_digits_reg)
-
-    full_reg = r'^{}(\s*,\s*{})*$|^$'.format(tuple_reg, tuple_reg)
-    if not re.match(full_reg, argv_input_shape):
-        raise Error('Input shape "{}" cannot be parsed. ' + refer_to_faq_msg(57), argv_input_shape)
-    return re.findall(r'[(\[]([0-9,\.\? -]*)[)\]]', argv_input_shape)
-
-
-def get_placeholder_shapes(argv_input: str, argv_input_shape: str, argv_batch=None):
-    """
-    Parses input layers names and input shapes from the cli and returns the parsed object.
-    All shapes are specified only through one command line option either "input" or "input_shape".
-
-    Parameters
-    ----------
-    argv_input
-        string with a list of input layers: either an empty string, or strings separated with comma.
-        E.g. 'inp1,inp2', 'node_name1[shape1]->value1,node_name2[shape2]->value2'
-    argv_input_shape
-        string with a list of input shapes: either an empty string, or tuples separated with comma.
-        E.g. '[1,2],[3,4]'.
-        Only positive integers are accepted.
-        '?' marks dynamic dimension.
-        Partial shape is specified with ellipsis. E.g. '[1..10,2,3]'
-    argv_batch
-        integer that overrides batch size in input shape
-
-    Returns
-    -------
-    parsed shapes in form of {'name of input':tuple} if names of inputs are provided with shapes
-    parsed shapes in form of {'name of input':None} if names of inputs are provided without shapes
-    tuple if only one shape is provided and no input name
-    None if neither shape nor input were provided
-    """
-    if argv_input_shape and argv_batch:
-        raise Error("Both \"input_shape\" and \"batch\" were provided. Please provide only one of them. " +
-                    refer_to_faq_msg(56))
-
-    # attempt to extract shapes from "input" parameters
-    placeholder_shapes = dict()
-    placeholder_data_types = dict()
-    are_shapes_specified_through_input = False
-    inputs_list = list()
-    if argv_input:
-        for input_value in split_inputs(argv_input):
-            node_name, shape, _, data_type = parse_input_value(input_value)
-            placeholder_shapes[node_name] = shape
-            inputs_list.append(node_name)
-            if data_type is not None:
-                placeholder_data_types[node_name] = data_type
-            if shape is not None:
-                are_shapes_specified_through_input = True
-
-    if argv_input_shape and are_shapes_specified_through_input:
-        raise Error("Shapes are specified using both \"input\" and \"input_shape\" command-line parameters, but only one "
-                    "parameter is allowed.")
-
-    if argv_batch and are_shapes_specified_through_input:
-        raise Error("Shapes are specified using both \"input\" and \"batch\" command-line parameters, but only one "
-                    "parameter is allowed.")
-
-    if are_shapes_specified_through_input:
-        return inputs_list, placeholder_shapes, placeholder_data_types
-
-    shapes = list()
-    inputs = list()
-    inputs_list = list()
-    placeholder_shapes = None
-
-
-    if argv_input_shape:
-        shapes = split_shapes(argv_input_shape)
-
-    if argv_input:
-        inputs = split_inputs(argv_input)
-        inputs = [remove_data_type_from_input_value(inp) for inp in inputs]
-
-    # check number of shapes with no input provided
-    if argv_input_shape and not argv_input:
-        placeholder_shapes = [PartialShape(shape) for shape in shapes]
-        if len(placeholder_shapes) == 1:
-            placeholder_shapes = PartialShape(placeholder_shapes[0])
-    # check if number of shapes does not match number of passed inputs
-    elif argv_input and (len(shapes) == len(inputs) or len(shapes) == 0):
-        # clean inputs from values for freezing
-        inputs_without_value = list(map(lambda x: x.split('->')[0], inputs))
-        placeholder_shapes = dict(zip_longest(inputs_without_value,
-                                              map(lambda x: PartialShape(x) if x is not None else None, shapes)))
-        for inp in inputs:
-            if '->' not in inp:
-                inputs_list.append(inp)
-                continue
-            shape = placeholder_shapes[inp.split('->')[0]]
-            inputs_list.append(inp.split('->')[0])
-
-            if shape is None:
-                continue
-            for dim in shape:
-                if isinstance(dim, Dimension) and not dim.is_static:
-                    raise Error("Cannot freeze input with dynamic shape: {}".format(shape))
-
-    elif argv_input:
-        raise Error('Please provide each input layers with an input layer shape. ' + refer_to_faq_msg(58))
-
-    return inputs_list, placeholder_shapes, placeholder_data_types
-
-
-def parse_tuple_pairs(argv_values: str):
-    """
-    Gets mean/scale values from the given string parameter
-    Parameters
-    ----------
-    argv_values
-        string with a specified input name and list of mean values: either an empty string, or a tuple
-        in a form [] or ().
-        E.g. 'data(1,2,3)' means 1 for the RED channel, 2 for the GREEN channel, 3 for the BLUE channel for the data
-        input layer, or tuple of values in a form [] or () if input is specified separately, e.g. (1,2,3),[4,5,6].
-
-    Returns
-    -------
-    dictionary with input name and tuple of values or list of values if mean/scale value is specified with input,
-    e.g.:
-    "data(10,20,30),info(11,22,33)" -> { 'data': [10,20,30], 'info': [11,22,33] }
-    "(10,20,30),(11,22,33)" -> [mo_array(10,20,30), mo_array(11,22,33)]
-    """
-    res = {}
-    if not argv_values:
-        return res
-
-    matches = [m for m in re.finditer(r'[(\[]([0-9., -]+)[)\]]', argv_values, re.IGNORECASE)]
-
-    error_msg = 'Mean/scale values should consist of name and values specified in round or square brackets '\
-                'separated by comma, e.g. data(1,2,3),info[2,3,4],egg[255] or data(1,2,3). Or just plain set of '\
-                'values without names: (1,2,3),(2,3,4) or [1,2,3],[2,3,4].' + refer_to_faq_msg(101)
-    if not matches:
-        raise Error(error_msg, argv_values)
-
-    name_start_idx = 0
-    name_was_present = False
-    for idx, match in enumerate(matches):
-        input_name = argv_values[name_start_idx:match.start(0)]
-        name_start_idx = match.end(0) + 1
-        tuple_value = np.fromstring(match.groups()[0], dtype=float, sep=',')
-
-        if idx != 0 and (name_was_present ^ bool(input_name)):
-            # if node name firstly was specified and then subsequently not or vice versa
-            # e.g. (255),input[127] or input(255),[127]
-            raise Error(error_msg, argv_values)
-
-        name_was_present = True if input_name != "" else False
-        if name_was_present:
-            res[input_name] = tuple_value
-        else:
-            res[idx] = tuple_value
-
-    if not name_was_present:
-        # return a list instead of a dictionary
-        res = sorted(res.values(), key=lambda v: v[0])
-
-    return res
-
-
-def get_tuple_values(argv_values: str or tuple, num_exp_values: int = 3, t=float or int):
-    """
-    Gets mean values from the given string parameter
-    Args:
-        argv_values: string with list of mean values: either an empty string, or a tuple in a form [] or ().
-        E.g. '(1,2,3)' means 1 for the RED channel, 2 for the GREEN channel, 4 for the BLUE channel.
-        t: either float or int
-        num_exp_values: number of values in tuple
-
-    Returns:
-        tuple of values
-    """
-
-    digit_reg = r'(-?[0-9. ]+)' if t == float else r'(-?[0-9 ]+)'
-
-    assert num_exp_values > 1, 'Can not parse tuple of size 1'
-    content = r'{0}\s*,{1}\s*{0}'.format(digit_reg, (digit_reg + ',') * (num_exp_values - 2))
-    tuple_reg = r'((\({0}\))|(\[{0}\]))'.format(content)
-
-    if isinstance(argv_values, tuple) and not len(argv_values):
-        return argv_values
-
-    if not len(argv_values) or not re.match(tuple_reg, argv_values):
-        raise Error('Values "{}" cannot be parsed. ' +
-                    refer_to_faq_msg(59), argv_values)
-
-    mean_values_matches = re.findall(r'[(\[]([0-9., -]+)[)\]]', argv_values)
-
-    for mean in mean_values_matches:
-        if len(mean.split(',')) != num_exp_values:
-            raise Error('{} channels are expected for given values. ' +
-                        refer_to_faq_msg(60), num_exp_values)
-
-    return mean_values_matches
-
-
 def split_node_in_port(node_id: str):
     """Split node_id in form port:node to separate node and port, where port is converted to int"""
     if isinstance(node_id, str):
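Reviewer note: the two big removals around this point (split_shapes/get_placeholder_shapes above, the mean/scale dictionary builder in the next hunk) reflect that ovc no longer owns shape overriding or mean/scale preprocessing — shapes travel inside `input`, and mean/scale belongs to the runtime preprocessing API. A hedged sketch of both replacements (file name, tensor name, and values are illustrative):

```python
# Sketch only; assumes the patched ovc plus OpenVINO's preprocessing API.
from openvino.preprocess import PrePostProcessor
from openvino.tools.ovc import convert_model

ov_model = convert_model("model.onnx", input=[("data", [1, 3, 224, 224])])

# Mean/scale are applied on the runtime side instead of at conversion time.
ppp = PrePostProcessor(ov_model)
ppp.input("data").preprocess().mean([127.5, 127.5, 127.5]).scale([127.5, 127.5, 127.5])
ov_model = ppp.build()
```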
@@ -1721,129 +1137,6 @@ def split_node_in_port(node_id: str):
     return node_id, None


-def get_mean_scale_dictionary(mean_values, scale_values, argv_input: list):
-    """
-    This function takes mean_values and scale_values, checks and processes them into convenient structure
-
-    Parameters
-    ----------
-    mean_values dictionary, contains input name and mean values passed py user (e.g. {data: np.array[102.4, 122.1, 113.9]}),
-    or list containing values (e.g. np.array[102.4, 122.1, 113.9])
-    scale_values dictionary, contains input name and scale values passed py user (e.g. {data: np.array[102.4, 122.1, 113.9]})
-    or list containing values (e.g. np.array[102.4, 122.1, 113.9])
-
-    Returns
-    -------
-    The function returns a dictionary e.g.
-    mean = { 'data': np.array, 'info': np.array }, scale = { 'data': np.array, 'info': np.array }, input = "data, info" ->
-    { 'data': { 'mean': np.array, 'scale': np.array }, 'info': { 'mean': np.array, 'scale': np.array } }
-
-    """
-    res = {}
-    # collect input names
-    if argv_input:
-        inputs = [get_node_name_with_port_from_input_value(input_value) for input_value in split_inputs(argv_input)]
-    else:
-        inputs = []
-        if type(mean_values) is dict:
-            inputs = list(mean_values.keys())
-        if type(scale_values) is dict:
-            for name in scale_values.keys():
-                if name not in inputs:
-                    inputs.append(name)
-
-    # create unified object containing both mean and scale for input
-    if type(mean_values) is dict and type(scale_values) is dict:
-        if not mean_values and not scale_values:
-            return res
-
-        for inp_scale in scale_values.keys():
-            if inp_scale not in inputs:
-                raise Error("Specified scale_values name '{}' do not match to any of inputs: {}. "
-                            "Please set 'scale_values' that correspond to values from input.".format(inp_scale, inputs))
-
-        for inp_mean in mean_values.keys():
-            if inp_mean not in inputs:
-                raise Error("Specified mean_values name '{}' do not match to any of inputs: {}. "
-                            "Please set 'mean_values' that correspond to values from input.".format(inp_mean, inputs))
-
-        for inp in inputs:
-            inp, port = split_node_in_port(inp)
-            if inp in mean_values or inp in scale_values:
-                res.update(
-                    {
-                        inp: {
-                            'mean':
-                                mean_values[inp] if inp in mean_values else None,
-                            'scale':
-                                scale_values[inp] if inp in scale_values else None
-                        }
-                    }
-                )
-        return res
-
-    # user specified input and mean/scale separately - we should return dictionary
-    if inputs:
-        if mean_values and scale_values:
-            if len(inputs) != len(mean_values):
-                raise Error('Numbers of inputs and mean values do not match. ' +
-                            refer_to_faq_msg(61))
-            if len(inputs) != len(scale_values):
-                raise Error('Numbers of inputs and scale values do not match. ' +
-                            refer_to_faq_msg(62))
-
-            data = list(zip(mean_values, scale_values))
-
-            for i in range(len(data)):
-                res.update(
-                    {
-                        inputs[i]: {
-                            'mean':
-                                data[i][0],
-                            'scale':
-                                data[i][1],
-
-                        }
-                    }
-                )
-            return res
-        # only mean value specified
-        if mean_values:
-            data = list(mean_values)
-            for i in range(len(data)):
-                res.update(
-                    {
-                        inputs[i]: {
-                            'mean':
-                                data[i],
-                            'scale':
-                                None
-
-                        }
-                    }
-                )
-            return res
-
-        # only scale value specified
-        if scale_values:
-            data = list(scale_values)
-            for i in range(len(data)):
-                res.update(
-                    {
-                        inputs[i]: {
-                            'mean':
-                                None,
-                            'scale':
-                                data[i]
-
-                        }
-                    }
-                )
-            return res
-    # mean and/or scale are specified without inputs
-    return list(zip_longest(mean_values, scale_values))
-
-
 def get_model_name(path_input_model: str) -> str:
     """
     Deduces model name by a given path to the input model
@@ -1858,19 +1151,12 @@ def get_model_name(path_input_model: str) -> str:


 def get_model_name_from_args(argv: argparse.Namespace):
-    model_name = ""
-    if hasattr(argv, 'model_name'):
-        if argv.model_name:
-            model_name = argv.model_name
-        elif argv.input_model:
-            model_name = get_model_name(argv.input_model)
-        elif argv.saved_model_dir:
-            model_name = "saved_model"
-        elif argv.input_meta_graph:
-            model_name = get_model_name(argv.input_meta_graph)
-        elif argv.input_symbol:
-            model_name = get_model_name(argv.input_symbol)
-        argv.model_name = model_name
+    if hasattr(argv, 'output_model') and argv.output_model:
+        model_name = argv.output_model
+    else:
+        model_name = argv.input_model
+        if isinstance(model_name, (tuple, list)) and len(model_name) > 0:
+            model_name = model_name[0]
     return model_name
@@ -1930,70 +1216,6 @@ def convert_string_to_real_type(value: str):
     return values[0] if len(values) == 1 else values


-def parse_transform(transform: str) -> list:
-    transforms = []
-
-    if len(transform) == 0:
-        return transforms
-
-    all_transforms = re.findall(r"([a-zA-Z0-9]+)(\[([^\]]+)\])*(,|$)", transform)
-
-    # Check that all characters were matched otherwise transform key value is invalid
-    key_len = len(transform)
-    for transform in all_transforms:
-        # In regexp we have 4 groups where
-        # 1st group - transformation_name,
-        # 2nd group - [args],
-        # 3rd group - args, <-- nested group
-        # 4th group - EOL
-        # And to check that regexp matched all string we decrease total length by the length of matched groups (1,2,4)
-        # In case if no arguments were given to transformation then 2nd and 3rd groups will be empty.
-        if len(transform) != 4:
-            raise Error("Unexpected transform key structure: {}".format(transform))
-        key_len -= len(transform[0]) + len(transform[1]) + len(transform[3])
-
-    if key_len != 0:
-        raise Error("Unexpected transform key structure: {}".format(transform))
-
-    for transform in all_transforms:
-        name = transform[0]
-        args = transform[2]
-
-        args_dict = {}
-
-        if len(args) != 0:
-            for arg in args.split(';'):
-                m = re.match(r"^([_a-zA-Z]+)=(.+)$", arg)
-                if not m:
-                    raise Error("Unrecognized attributes for transform key: {}".format(transform))
-
-                args_dict[m.group(1)] = convert_string_to_real_type(m.group(2))
-
-        transforms.append((name, args_dict))
-
-    return transforms
-
-
-def check_available_transforms(transforms: list):
-    """
-    This function check that transformations specified by user are available.
-    :param transforms: list of user specified transformations
-    :return: raises an Error if transformation is not available
-    """
-    from openvino.tools.ovc.moc_frontend.offline_transformations import get_available_transformations
-    available_transforms = get_available_transformations()
-
-    missing_transformations = []
-    for name, _ in transforms:
-        if name not in available_transforms.keys():
-            missing_transformations.append(name)
-
-    if len(missing_transformations) != 0:
-        raise Error('Following transformations ({}) are not available. '
-                    'List with available transformations ({})'.format(','.join(missing_transformations),
-                                                                      ','.join(available_transforms.keys())))
-    return True
-
-
 def check_positive(value):
     try:
         int_value = int(value)
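Reviewer note: output naming now resolves in `get_model_name_from_args` from `--output_model` first, then falls back to the first positional `input_model` entry. Sketch:

```python
# Sketch only; assumes the patched openvino.tools.ovc.cli_parser.
import argparse
from openvino.tools.ovc.cli_parser import get_model_name_from_args

ns = argparse.Namespace(output_model=None, input_model=['model.onnx'])
print(get_model_name_from_args(ns))  # model.onnx

ns.output_model = 'my_ir'
print(get_model_name_from_args(ns))  # my_ir
```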
pathlib.Path] = None, - nd_prefix_name: str = None, - pretrained_model_name: str = None, - save_params_from_nd: bool = None, - legacy_mxnet_model: bool = None, - enable_ssd_gluoncv: bool = False, - - # Caffe*-specific parameters: - input_proto: [str, pathlib.Path] = None, - caffe_parser_path: [str, pathlib.Path] = None, - k: [str, pathlib.Path] = None, - disable_omitting_optional: bool = False, - enable_flattening_nested_params: bool = False, - - # Kaldi-specific parameters: - counts: [str, pathlib.Path] = None, - remove_output_softmax: bool = False, - remove_memory: bool = False, - - **args + saved_model_tags: [str, list] = None, # TODO: Consider removing ) -> Model: """ Converts the model from original framework to OpenVino Model. - Args: - :param help: - Print available parameters. - :param framework: - Name of the framework used to train the input model. - Framework-agnostic parameters: :param input_model: Model object in original framework (PyTorch, Tensorflow) or path to model file. - Tensorflow*: a file with a pre-trained model (binary or text .pb file after freezing). - Caffe*: a model proto file with model weights Supported formats of input model: - PaddlePaddle - paddle.hapi.model.Model - paddle.fluid.dygraph.layers.Layer - paddle.fluid.executor.Executor - PyTorch torch.nn.Module torch.jit.ScriptModule @@ -123,6 +60,11 @@ def convert_model( tf.Module tf.train.checkpoint + PaddlePaddle + paddle.hapi.model.Model + paddle.fluid.dygraph.layers.Layer + paddle.fluid.executor.Executor + :param input: Input can be set by passing a list of InputCutInfo objects or by a list of tuples. Each tuple can contain optionally input name, input @@ -149,94 +91,11 @@ def convert_model( The name of the output operation of the model or list of names. For TensorFlow*, do not add :0 to this name.The order of outputs in converted model is the same as order of specified operation names. - :param input_shape: - Input shape(s) that should be fed to an input node(s) of the model. Input - shapes can be defined by passing a list of objects of type PartialShape, - Shape, [Dimension, ...] or [int, ...] or by a string of the following - format. Shape is defined as a comma-separated list of integer numbers - enclosed in parentheses or square brackets, for example [1,3,227,227] - or (1,227,227,3), where the order of dimensions depends on the framework - input layout of the model. For example, [N,C,H,W] is used for ONNX* models - and [N,H,W,C] for TensorFlow* models. The shape can contain undefined - dimensions (? or -1) and should fit the dimensions defined in the input - operation of the graph. Boundaries of undefined dimension can be specified - with ellipsis, for example [1,1..10,128,128]. One boundary can be - undefined, for example [1,..100] or [1,3,1..,1..]. If there are multiple - inputs in the model, "input_shape" should contain definition of shape - for each input separated by a comma, for example: [1,3,227,227],[2,4] - for a model with two inputs with 4D and 2D shapes. Alternatively, specify - shapes with the "input" option. :param example_input: Sample of model input in original framework. For PyTorch it can be torch.Tensor. For Tensorflow it can be tf.Tensor or numpy.ndarray. For PaddlePaddle it can be Paddle Variable. - :param batch: - Set batch size. It applies to 1D or higher dimension inputs. - The default dimension index for the batch is zero. - Use a label 'n' in "layout" or "source_layout" option to set the batch dimension. - For example, "x(hwnc)" defines the third dimension to be the batch. 
- :param mean_values: - Mean values to be used for the input image per channel. Mean values can - be set by passing a dictionary, where key is input name and value is mean - value. For example mean_values={'data':[255,255,255],'info':[255,255,255]}. - Or mean values can be set by a string of the following format. Values to - be provided in the (R,G,B) or [R,G,B] format. Can be defined for desired - input of the model, for example: mean_values="data[255,255,255],info[255,255,255]". - The exact meaning and order of channels depend on how the original model - was trained. - :param scale_values: - Scale values to be used for the input image per channel. Scale values - can be set by passing a dictionary, where key is input name and value is - scale value. For example scale_values={'data':[255,255,255],'info':[255,255,255]}. - Or scale values can be set by a string of the following format. Values - are provided in the (R,G,B) or [R,G,B] format. Can be defined for desired - input of the model, for example: scale_values="data[255,255,255],info[255,255,255]". - The exact meaning and order of channels depend on how the original model - was trained. If both "mean_values" and "scale_values" are specified, - the mean is subtracted first and then scale is applied regardless of - the order of options in command line. - :param scale: - All input values coming from original network inputs will be divided - by this value. When a list of inputs is overridden by the "input" parameter, - this scale is not applied for any input that does not match with the original - input of the model. If both "mean_values" and "scale" are specified, - the mean is subtracted first and then scale is applied regardless of - the order of options in command line. - :param reverse_input_channels: - Switch the input channels order from RGB to BGR (or vice versa). Applied - to original inputs of the model if and only if a number of channels equals - 3. When "mean_values"/"scale_values" are also specified, reversing - of channels will be applied to user's input data first, so that numbers - in "mean_values" and "scale_values" go in the order of channels used - in the original model. In other words, if both options are specified, - then the data flow in the model looks as following: Parameter -> ReverseInputChannels - -> Mean apply-> Scale apply -> the original body of the model. - :param source_layout: - Layout of the input or output of the model in the framework. Layout can - be set by passing a dictionary, where key is input name and value is LayoutMap - object. Or layout can be set by string of the following format. Layout - can be specified in the short form, e.g. nhwc, or in complex form, e.g. - "[n,h,w,c]". Example for many names: "in_name1([n,h,w,c]),in_name2(nc),out_name1(n),out_name2(nc)". - Layout can be partially defined, "?" can be used to specify undefined - layout for one dimension, "..." can be used to specify undefined layout - for multiple dimensions, for example "?c??", "nc...", "n...c", etc. - :param target_layout: - Same as "source_layout", but specifies target layout that will be in - the model after processing by ModelOptimizer. - :param layout: - Combination of "source_layout" and "target_layout". Can't be used - with either of them. If model has one input it is sufficient to specify - layout of this input, for example "layout" nhwc. To specify layouts - of many tensors, names must be provided, for example: layout="name1(nchw),name2(nc)". 
-            It is possible to instruct ModelOptimizer to change layout, for example:
-            layout="name1(nhwc->nchw),name2(cn->nc)".
-            Also "*" in long layout form can be used to fuse dimensions, for example "[n,c,...]->[n*c,...]".
-        :param compress_to_fp16:
-            If the original model has FP32 weights or biases, they are compressed
-            to FP16. All intermediate data is kept in original precision. Option
-            can be specified alone as "compress_to_fp16", or explicit True/False
-            values can be set, for example: "compress_to_fp16=False", or "compress_to_fp16=True"
         :param extensions:
             Paths to libraries (.so or .dll) with extensions, comma-separated
             list of paths, objects derived from BaseExtension class or lists of
@@ -244,124 +103,28 @@ def convert_model(
             a directory or a comma-separated list of directories with extensions
             are supported. To disable all extensions including those that are placed
             at the default location, pass an empty string.
-        :param transform:
-            Apply additional transformations. 'transform' can be set by a list
-            of tuples, where the first element is transform name and the second element
-            is transform parameters. For example: [('LowLatency2', {{'use_const_initializer':
-            False}}), ...] transform="transformation_name1[args],transformation_name2..."
-            where [args] is key=value pairs separated by semicolon. Examples:
-            transform="LowLatency2" or
-            transform="Pruning" or
-            transform="LowLatency2[use_const_initializer=False]" or
-            transform="MakeStateful[param_res_names=
-            {'input_name_1':'output_name_1','input_name_2':'output_name_2'}]"
-            Available transformations: "LowLatency2", "MakeStateful", "Pruning"
-        :param transformations_config:
-            Use the configuration file with transformations description or pass
-            object derived from BaseExtension class. Transformations file can
-            be specified as relative path from the current directory, as absolute
-            path or as relative path from the mo root directory.
-        :param silent:
-            Prevent any output messages except those that correspond to log level
-            equals ERROR, that can be set with the following option: "log_level".
-            By default, log level is already ERROR.
-        :param log_level:
-            Logger level of logging massages from MO.
-            Expected one of ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'].
-        :param version:
-            Version of Model Conversion API
-        :param progress:
-            Enable model conversion progress display.
-        :param stream_output:
-            Switch model conversion progress display to a multiline mode.
+        :param verbose:
+            Print detailed information about conversion.
         :param share_weights:
-            Map memory of weights instead reading files or share memory from input model.
-            Currently, mapping feature is provided only for ONNX models
-            that do not require fallback to the legacy ONNX frontend for the conversion.
+            Reuse weights allocated in the original model. If the input model is a
+            file, then mmap is used to map weights directly from the file. If the
+            input model is a runtime object, then the memory regions allocated in
+            the original model are reused for weights in the converted model.

     PaddlePaddle-specific parameters:
         :param example_output:
             Sample of model output in original framework. For PaddlePaddle it can be Paddle Variable.

     TensorFlow*-specific parameters:
-        :param input_model_is_text:
-            TensorFlow*: treat the input model file as a text protobuf format. If
-            not specified, the convert_model() treats it as a binary file by default.
-        :param input_checkpoint:
-            TensorFlow*: variables file to load.
-        :param input_meta_graph:
-            Tensorflow*: a file with a meta-graph of the model before freezing
-        :param saved_model_dir:
-            TensorFlow*: directory with a model in SavedModel format of TensorFlow
-            1.x or 2.x version.
         :param saved_model_tags:
             Group of tag(s) of the MetaGraphDef to load, in string format, separated
             by ','. If a tag-set contains multiple tags, all tags must be passed in.
-        :param tensorflow_custom_operations_config_update:
-            TensorFlow*: update the configuration file with node name patterns
-            with input/output nodes information.
-        :param tensorflow_object_detection_api_pipeline_config:
-            TensorFlow*: path to the pipeline configuration file used to generate
-            model created with help of Object Detection API.
-        :param tensorboard_logdir:
-            TensorFlow*: dump the input graph to a given directory that should be
-            used with TensorBoard.
-        :param tensorflow_custom_layer_libraries:
-            TensorFlow*: comma separated list of shared libraries with TensorFlow*
-            custom operations implementation.
-
-    MXNet-specific parameters:
-        :param input_symbol:
-            Symbol file (for example, model-symbol.json) that contains a topology
-            structure and layer attributes
-        :param nd_prefix_name:
-            Prefix name for args.nd and argx.nd files.
-        :param pretrained_model_name:
-            Name of a pretrained MXNet model without extension and epoch number.
-            This model will be merged with args.nd and argx.nd files
-        :param save_params_from_nd:
-            Enable saving built parameters file from .nd files
-        :param legacy_mxnet_model:
-            Enable MXNet loader to make a model compatible with the latest MXNet
-            version. Use only if your model was trained with MXNet version lower
-            than 1.0.0
-        :param enable_ssd_gluoncv:
-            Enable pattern matchers replacers for converting gluoncv ssd topologies.
-
-    Caffe*-specific parameters:
-        :param input_proto:
-            Deploy-ready prototxt file that contains a topology structure and
-            layer attributes
-        :param caffe_parser_path:
-            Path to Python Caffe* parser generated from caffe.proto
-        :param k:
-            Path to CustomLayersMapping.xml to register custom layers
-        :param disable_omitting_optional:
-            Disable omitting optional attributes to be used for custom layers.
-            Use this option if you want to transfer all attributes of a custom layer
-            to IR. Default behavior is to transfer the attributes with default values
-            and the attributes defined by the user to IR.
-        :param enable_flattening_nested_params:
-            Enable flattening optional params to be used for custom layers. Use
-            this option if you want to transfer attributes of a custom layer to IR
-            with flattened nested parameters. Default behavior is to transfer
-            the attributes without flattening nested parameters.
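        For illustration, a minimal sketch combining the parameters kept above
        ("saved_model_tags", "verbose", "share_weights"); the SavedModel path and
        tag names are placeholders, not part of this patch:

            from openvino.tools.ovc import convert_model

            # A tag-set with several tags is passed as one comma-separated string.
            ov_model = convert_model('my_saved_model_dir',
                                     saved_model_tags='serve,gpu',
                                     verbose=True,
                                     share_weights=False)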
- - Kaldi-specific parameters: - :param counts: - Path to the counts file - :param remove_output_softmax: - Removes the SoftMax layer that is the output layer - :param remove_memory: - Removes the Memory layer and use additional inputs outputs instead Returns: openvino.runtime.Model """ params = locals() logger_state = get_logger_state() - del params['args'] - params.update(args) cli_parser = get_all_cli_parser() ov_model, _ = _convert(cli_parser, params, True) restore_logger_state(logger_state) diff --git a/tools/ovc/openvino/tools/ovc/convert_impl.py b/tools/ovc/openvino/tools/ovc/convert_impl.py index aff6907228e2dd..6fd6124fabcb9e 100644 --- a/tools/ovc/openvino/tools/ovc/convert_impl.py +++ b/tools/ovc/openvino/tools/ovc/convert_impl.py @@ -15,40 +15,31 @@ except ImportError: import openvino.tools.ovc.telemetry_stub as tm -from openvino.tools.ovc.moc_frontend.check_config import legacy_transformations_config_used, \ - tensorflow_custom_operations_config_update_used, new_extensions_used +from openvino.tools.ovc.moc_frontend.check_config import new_extensions_used from openvino.tools.ovc.moc_frontend.pipeline import moc_pipeline from openvino.tools.ovc.moc_frontend.moc_emit_ir import moc_emit_ir from openvino.tools.ovc.convert_data_type import destination_type_to_np_data_type -from openvino.tools.ovc.cli_parser import check_available_transforms, \ - get_advanced_cli_options, get_available_front_ends, get_caffe_cli_options, \ - get_common_cli_options, get_kaldi_cli_options, get_layout_values, get_freeze_placeholder_values, \ - get_mean_scale_dictionary, get_mxnet_cli_options, get_onnx_cli_options, \ - get_placeholder_shapes, get_tf_cli_options, parse_transform, parse_tuple_pairs, \ - get_model_name_from_args, depersonalize, get_mo_convert_params, input_to_input_cut_info, \ - input_shape_to_input_cut_info, freeze_placeholder_to_input_cut_info - -from openvino.tools.ovc.error import Error, FrameworkError, legacy_path_error -from openvino.tools.ovc.get_ov_update_message import get_ov_update_message, get_ov_api20_message, \ - get_tf_fe_message, get_try_legacy_fe_message, get_compression_message +from openvino.tools.ovc.cli_parser import get_available_front_ends, \ + get_common_cli_options, get_model_name_from_args, depersonalize, get_mo_convert_params, \ + input_to_input_cut_info, freeze_placeholder_to_input_cut_info + +from openvino.tools.ovc.error import Error, FrameworkError +from openvino.tools.ovc.get_ov_update_message import get_ov_update_message, get_compression_message from openvino.tools.ovc.version import VersionChecker -from openvino.tools.ovc.utils import deduce_legacy_frontend_by_namespace, refer_to_faq_msg, check_values_equal -from openvino.tools.ovc.logger import init_logger, progress_printer +from openvino.tools.ovc.utils import check_values_equal +from openvino.tools.ovc.logger import init_logger from openvino.tools.ovc.telemetry_utils import send_params_info, send_conversion_result, \ get_tid -from openvino.tools.ovc.moc_frontend.check_config import legacy_extensions_used -from openvino.tools.ovc.moc_frontend.check_config import default_path as extensions_default_path from openvino.tools.ovc.moc_frontend.pytorch_frontend_utils import get_pytorch_decoder, extract_input_info_from_example from openvino.tools.ovc.moc_frontend.paddle_frontend_utils import paddle_frontend_converter -from openvino.tools.ovc.moc_frontend.shape_utils import parse_input_shapes # pylint: disable=no-name-in-module,import-error -from openvino.frontend import FrontEndManager, OpConversionFailure, 
ProgressReporterExtension, TelemetryExtension +from openvino.frontend import FrontEndManager, OpConversionFailure, TelemetryExtension from openvino.runtime import get_version as get_rt_version from openvino.runtime import Type, PartialShape try: - from openvino.frontend.tensorflow.utils import type_supported_by_tf_fe, create_tf_graph_iterator, \ + from openvino.frontend.tensorflow.utils import create_tf_graph_iterator, type_supported_by_tf_fe, \ extract_model_graph # pylint: disable=no-name-in-module,import-error tf_frontend_with_python_bindings_installed = True @@ -63,31 +54,13 @@ def replace_ext(name: str, old: str, new: str): return base + new -def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet: bool, is_kaldi: bool, is_onnx: bool, - model_name: str): +def print_argv(argv: argparse.Namespace, model_name: str): print('Model Conversion arguments:') props = OrderedDict() props['common_args'] = get_common_cli_options(model_name) - props['advanced_args'] = get_advanced_cli_options() - if is_caffe: - props['caffe_args'] = get_caffe_cli_options() - if is_tf: - props['tf_args'] = get_tf_cli_options() - if is_mxnet: - props['mxnet_args'] = get_mxnet_cli_options() - if is_kaldi: - props['kaldi_args'] = get_kaldi_cli_options() - if is_onnx: - props['onnx_args'] = get_onnx_cli_options() framework_specifics_map = { - 'common_args': 'Common parameters:', - 'advanced_args': 'Advanced parameters:', - 'caffe_args': 'Caffe specific parameters:', - 'tf_args': 'TensorFlow specific parameters:', - 'mxnet_args': 'MXNet specific parameters:', - 'kaldi_args': 'Kaldi specific parameters:', - 'onnx_args': 'ONNX specific parameters:', + 'common_args': 'Common parameters:' } lines = [] @@ -97,233 +70,50 @@ def print_argv(argv: argparse.Namespace, is_caffe: bool, is_tf: bool, is_mxnet: if isinstance(desc, list): lines.append('\t{}: \t{}'.format(desc[0], desc[1](getattr(argv, op, 'NONE')))) else: - if op == 'k': - default_path = os.path.join(os.path.dirname(sys.argv[0]), - 'openvino/tools/mo/front/caffe/CustomLayersMapping.xml') - if getattr(argv, op, 'NONE') == default_path: - lines.append('\t{}: \t{}'.format(desc, 'Default')) - continue lines.append('\t{}: \t{}'.format(desc, getattr(argv, op, 'NONE'))) print('\n'.join(lines), flush=True) -def legacy_framework_check(is_caffe, is_mxnet, is_kaldi): - if is_caffe: - legacy_path_error("The provided model is from Caffe framework. This is legacy functionality. ") - if is_mxnet: - legacy_path_error("The provided model is from MxNet framework. This is legacy functionality. ") - if is_kaldi: - legacy_path_error("The provided model is from Kaldi framework. This is legacy functionality. ") - - -def check_legacy_args(non_default_params, python_api_used): - ignored_cli_options = ["output_dir", "model_name"] - legacy_groups = ['Kaldi-specific parameters:', 'Caffe*-specific parameters:', 'MXNet-specific parameters:'] - tf_legacy_args = ['tensorflow_custom_operations_config_update', 'tensorflow_object_detection_api_pipeline_config', - 'tensorboard_logdir', 'tensorflow_custom_layer_libraries', 'saved_model_tags'] - mo_convert_params = get_mo_convert_params() - - for key, value in non_default_params.items(): - if key in ignored_cli_options: - if python_api_used: - print("The provided option \"{}\" is applicable in command line tool only. The option will be ignored.".format(key)) - for group in legacy_groups: - if key in mo_convert_params[group]: - legacy_path_error("The provided option \"{}\" refers to legacy functionality. 
".format(key)) - if key in tf_legacy_args: - legacy_path_error("The provided option \"{}\" refers to legacy functionality. ".format(key)) - - - def arguments_post_parsing(argv: argparse.Namespace): - use_legacy_frontend = argv.use_legacy_frontend - use_new_frontend = argv.use_new_frontend - if argv.extensions is None: - argv.extensions = [extensions_default_path()] - - if use_new_frontend and use_legacy_frontend: - raise Error('Options "use_new_frontend" and "use_legacy_frontend" must not be used simultaneously.') - - if use_legacy_frontend: - legacy_path_error('Option "use_legacy_frontend" was used, but legacy frontends are not available. ') - - moc_front_end, available_moc_front_ends = get_moc_frontends(argv) - - if not moc_front_end and use_new_frontend: - raise Error('Option "use_new_frontend" is specified but the Model Conversion API is unable to find new frontend. ' - 'Please ensure that your environment contains new frontend for the input model format or ' - 'try to install openvino-dev and convert the model using convert_model() from openvino.tools.mo.') - - is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx = \ - deduce_legacy_frontend_by_namespace(argv) if not moc_front_end else [False, False, False, False, False] - - legacy_framework_check(is_caffe, is_mxnet, is_kaldi) - - is_legacy_frontend = any([is_tf, is_caffe, is_mxnet, is_kaldi, is_onnx]) - - # handle a default case, i.e. use_new_frontend and use_legacy_frontend are not specified, when no frontend is found - if not is_legacy_frontend and not moc_front_end: - legacy_frameworks = ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'] - frameworks = list(set(legacy_frameworks + available_moc_front_ends)) - if not argv.framework: - raise Error('Framework name can not be deduced from the given options: {}={}. ' - 'Please use "framework" with one from the list: {}.', - '"input_model="', argv.input_model, frameworks) - elif argv.framework not in frameworks: - if argv.framework == 'ir': - raise Error('OpenVINO IR is passed as input_model in convert_model/mo, the IR doesn\'t need ' - 'conversion, please use it in runtime for inference with read_model/compile_model.') - raise Error('Framework {} is not a valid target. Please use "framework" with one from the list: {}. ' + - refer_to_faq_msg(15), argv.framework, frameworks) - - if is_tf and not argv.input_model and not argv.saved_model_dir and not argv.input_meta_graph: - raise Error('Path to input model or saved model dir is required: use "input_model", "saved_model_dir" or ' - '"input_meta_graph"') - elif is_onnx and not argv.input_model: - raise Error('Path to input model is required: use "input_model".') - + # TODO: This function looks similar to another one. Check for code duplicates. log.debug("Model Conversion API started") + log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.output_model)) - log.debug('Output model name would be {}{{.xml, .bin}}'.format(argv.model_name)) - - if not argv.silent: - print_argv(argv, is_caffe, is_tf, is_mxnet, is_kaldi, is_onnx, argv.model_name) - - argv.data_type = 'FP32' # if compression was enabled will be restored back to 'FP16' after apply_offline_transformations - - # This is just to check that transform key is valid and transformations are available - check_available_transforms(parse_transform(argv.transform)) + if argv.verbose: + print_argv(argv, argv.output_model) - if argv.scale and argv.scale_values: - raise Error( - 'Both "scale" and "scale_values" are defined. Specify either scale factor or scale values per input ' + - 'channels. 
' + refer_to_faq_msg(19)) - - if argv.scale and argv.scale < 1.0: - log.error("The scale value is less than 1.0. This is most probably an issue because the scale value specifies " - "floating point value which all input values will be *divided*.", extra={'is_warning': True}) - - if argv.input_model and (is_tf and argv.saved_model_dir): - raise Error('Both "input_model" and "saved_model_dir" are defined. ' - 'Specify either input model or saved model directory.') - if is_tf: - if argv.saved_model_tags is not None: - if ' ' in argv.saved_model_tags: - raise Error('Incorrect saved model tag was provided. Specify "saved_model_tags" with no spaces in it') - argv.saved_model_tags = argv.saved_model_tags.split(',') - - if hasattr(argv, 'is_python_api_used') and argv.is_python_api_used: - python_api_params_parsing(argv) - else: - argv.inputs_list, argv.placeholder_shapes, argv.placeholder_data_types = get_placeholder_shapes( - argv.input, argv.input_shape, argv.batch) - argv.freeze_placeholder_with_value, argv.input = get_freeze_placeholder_values( - argv.input, - argv.freeze_placeholder_with_value) - argv.unnamed_freeze_placeholder_with_value = {} + params_parsing(argv) argv.output = argv.output.split(',') if argv.output else None - argv.layout_values = get_layout_values(argv.layout, argv.source_layout, argv.target_layout) - mean_values = parse_tuple_pairs(argv.mean_values) - scale_values = parse_tuple_pairs(argv.scale_values) - mean_scale = get_mean_scale_dictionary(mean_values, scale_values, argv.input) - argv.mean_scale_values = mean_scale log.debug("Placeholder shapes : {}".format(argv.placeholder_shapes)) return argv -def check_fallback(argv: argparse.Namespace): - fallback_reasons = {} - - # Some frontend such as PDPD does not have legacy path so it has no reasons to fallback - if not any(deduce_legacy_frontend_by_namespace(argv)): - return fallback_reasons - - if argv.use_new_frontend: - return fallback_reasons - - fallback_reasons['extensions'] = legacy_extensions_used - fallback_reasons['transformations_config'] = legacy_transformations_config_used - fallback_reasons['tensorflow_custom_operations_config_update'] = tensorflow_custom_operations_config_update_used - - reasons = [reason for reason, is_applicable in fallback_reasons.items() if is_applicable(argv)] - return reasons - - -def update_fallback_with_conversion_error(use_new_frontend: bool, is_tf: bool, ex_msg: str, fallback_reasons: list): - import re - if not is_tf: - # this sort of fallback is only used by TensorFlow Frontend - return False - - if use_new_frontend: - # this option forces to use new TensorFlow Frontend - # so it is not possible for the fallback - return False - - # for TensorFlow FE we have a set of operations that should lead to the fallback to the legacy - conversion_error_re = r"^(\[TensorFlow\ Frontend\]\ Internal\ error\,\ no\ translator\ found\ for\ operation\(s\)\:\ )((\w+)(\,\ \w+)*)$" - conversion_error_match = re.findall(conversion_error_re, ex_msg, re.MULTILINE) - all_fallback_operations = [ - # corresponds to TF1 While operation - "TensorArrayScatterV3", "TensorArrayV3", "TensorArraySizeV3", "TensorArrayGatherV3", - "LoopCond", "Enter", "NextIteration", "Exit", - # corresponds to operations with complex tensors - "FFT", "FFT2D", "FFT3D", "IFFT", "IFFT2D", "IFFT3D", - "RFFT", "RFFT2D", "RFFT3D", "IRFFT", "IRFFT2D", "IRFFT3D", - "Complex", "ComplexAbs", "Real", "Imag", - ] - if len(conversion_error_match) < 1 or len(conversion_error_match[0]) != 4: - # no match for the fallback by unsupported operation - 
return False - - unsupported_operations = conversion_error_match[0][1].replace(" ", "").split(",") - fallback_operations = [operation for operation in unsupported_operations if operation in all_fallback_operations] - - if len(fallback_operations) == 0: - return False - - fallback_reasons.append("Fallback to the legacy TF FE due to operation(s): " + ', '.join(fallback_operations)) - return True - - -def get_default_frontends(): - # Set which frontend to use by default, values should be 'new' or 'legacy' - default_frontends = { - 'onnx': 'new', - 'tf': 'new' - } - return default_frontends - - def get_moc_frontends(argv: argparse.Namespace): fem = argv.feManager - # Read user flags: - use_legacy_frontend = argv.use_legacy_frontend - use_new_frontend = argv.use_new_frontend - - if not fem or use_legacy_frontend: + if not fem: return None, [] available_moc_front_ends = get_available_front_ends(fem) - - if not argv.framework and argv.input_model: - moc_front_end = fem.load_by_model(argv.input_model) + if argv.framework: + moc_front_end = fem.load_by_framework(argv.framework) # WA to prevent process hanging. Need to remove when 115994 fixed. + moc_front_end = fem.load_by_framework(argv.framework) + return moc_front_end, available_moc_front_ends + if argv.input_model: + if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2: + moc_front_end = fem.load_by_model([argv.input_model[0], argv.input_model[1]]) # WA to prevent process hanging. Need to remove when 115994 fixed. + moc_front_end = fem.load_by_model([argv.input_model[0], argv.input_model[1]]) # TODO: Pass all input model parts + else: + moc_front_end = fem.load_by_model(argv.input_model) # WA to prevent process hanging. Need to remove when 115994 fixed. + moc_front_end = fem.load_by_model(argv.input_model) if not moc_front_end: return None, available_moc_front_ends argv.framework = moc_front_end.get_name() - elif argv.framework in available_moc_front_ends: - moc_front_end = fem.load_by_framework(argv.framework) else: return None, [] - default_frontends = get_default_frontends() - # Disable MOC frontend if default is set to legacy and no user override - if default_frontends.get(moc_front_end.get_name()) == 'legacy' and not use_new_frontend: - return None, available_moc_front_ends - # This check as a workaround to skip IR frontend if not moc_front_end.get_name() in available_moc_front_ends: return None, available_moc_front_ends @@ -332,59 +122,32 @@ def get_moc_frontends(argv: argparse.Namespace): def prepare_ir(argv: argparse.Namespace): - # TODO: remove this workaround once new TensorFlow frontend supports non-frozen formats: checkpoint, MetaGraph, and SavedModel - # Now it converts all TensorFlow formats to the frozen .pb format in case new TensorFlow frontend - is_tf, _, _, _, _ = deduce_legacy_frontend_by_namespace(argv) argv = arguments_post_parsing(argv) t = tm.Telemetry() - graph = None - fallback_reasons = [] + if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 1: + argv.input_model = argv.input_model[0] + moc_front_end, available_moc_front_ends = get_moc_frontends(argv) if moc_front_end: - fallback_reasons = check_fallback(argv) - if len(fallback_reasons) == 0: - if is_tf and tf_frontend_with_python_bindings_installed and \ - type_supported_by_tf_fe(argv.input_model): - argv.input_model = create_tf_graph_iterator(argv.input_model, - argv.placeholder_shapes, - argv.placeholder_data_types, - getattr(argv, "example_input", None)) - try: - t.send_event("mo", "conversion_method", 
moc_front_end.get_name() + "_frontend")
-                moc_front_end.add_extension(TelemetryExtension("mo", t.send_event, t.send_error, t.send_stack_trace))
-                moc_front_end.add_extension(ProgressReporterExtension(progress_printer(argv)))
-                if legacy_transformations_config_used(argv):
-                    raise Error('Legacy extensions are not supported for the new frontend')
-                if legacy_extensions_used(argv):
-                    raise Error('Legacy transformations configuration is not supported for the new frontend')
-                if tensorflow_custom_operations_config_update_used(argv) and is_tf:
-                    raise Error('TensorFlow custom operation config is not supported for the new frontend')
-                if new_extensions_used(argv):
-                    for extension in argv.extensions:
-                        moc_front_end.add_extension(extension)
-                ngraph_function = moc_pipeline(argv, moc_front_end)
-                return graph, ngraph_function
-            except OpConversionFailure as ex:
-                # in some set of operations (TF1 While), we have to fallback to the Legacy TensorFlow Frontend
-                # this is the second attempt for the fallback
-                if not update_fallback_with_conversion_error(argv.use_new_frontend, is_tf, str(ex), fallback_reasons):
-                    # re-throw exception for all frontends except TensorFlow FE
-                    # and in case unexpected conversion failures
-                    raise
-
-    if len(fallback_reasons) > 0:
-        reasons_message = ", ".join(fallback_reasons)
-        t.send_event("mo", "fallback_reason", reasons_message)
-        log.warning("The IR preparation cannot be executed with new frontend. "
-                    f"The detailed reason why fallback to legacy is needed: not supported {reasons_message} were used. " +
-                    refer_to_faq_msg(105))
-        assert not hasattr(argv, 'is_fallback'), '`is_fallback` argument must not exist.'
-        argv.is_fallback = True
-
-    t.send_event("mo", "conversion_method", "mo_legacy")
-    legacy_path_error("The provided model cannot be converted with new frontend, as fallback to legacy is needed. ")
-    return None, None
+        # TODO: Should be moved to the same place where paddle and pytorch handle their objects
+        if argv.framework == 'tf' and argv.is_python_object and type_supported_by_tf_fe(argv.input_model):
+            argv.input_model = create_tf_graph_iterator(argv.input_model,
+                                                        argv.placeholder_shapes,
+                                                        argv.placeholder_data_types,
+                                                        getattr(argv, "example_input", None))
+        t.send_event("mo", "conversion_method", moc_front_end.get_name() + "_frontend")
+        moc_front_end.add_extension(TelemetryExtension("mo", t.send_event, t.send_error, t.send_stack_trace))
+        if new_extensions_used(argv):
+            for extension in argv.extensions:
+                moc_front_end.add_extension(extension)
+        ov_model = moc_pipeline(argv, moc_front_end)
+        return ov_model
+
+    if not argv.input_model:
+        raise Error('No input model is provided')
+
+    raise Error('Cannot recognize input model.')


 def check_model_object(argv):
@@ -405,6 +168,10 @@ def check_model_object(argv):
         pass

     import io
+    # FIXME: Consuming any io.BytesIO object as an ONNX model is too dangerous and
+    # can conflict with other input types in the future (not future-proof).
+    # TODO: Refer to https://onnx.ai/onnx/intro/python.html to find examples with
+    # real ONNX python objects. ONNX model has onnx.onnx_ml_pb2.ModelProto type.
     if isinstance(model, io.BytesIO):
         return 'onnx'

@@ -419,24 +186,18 @@ def check_model_object(argv):


 def driver(argv: argparse.Namespace, non_default_params: dict):
-    init_logger(argv.log_level.upper(), argv.silent)
+    if not hasattr(argv, 'log_level'):
+        argv.log_level = 'ERROR'
+    init_logger(argv.log_level.upper(), argv.verbose)

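    # A minimal sketch of the logging contract relied on here, assuming
    # init_logger as patched in openvino/tools/ovc/logger.py further below:
    # with verbose=False the effective level is pinned to ERROR regardless of
    # the requested one, e.g.:
    #
    #     from openvino.tools.ovc.logger import init_logger
    #     init_logger('DEBUG', False)  # non-verbose: only ERROR messages pass
    #     init_logger('DEBUG', True)   # verbose: DEBUG and above are emitted

     # Log dictionary with non-default cli parameters where complex classes are excluded.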
log.debug(str(non_default_params)) start_time = datetime.datetime.now() - graph, ngraph_function = prepare_ir(argv) - legacy_path = False - if graph is not None: - legacy_path_error("") - else: - res_ngraph_function = moc_emit_ir(ngraph_function, argv) - - if res_ngraph_function is None: - return res_ngraph_function + ov_model = moc_emit_ir(prepare_ir(argv), argv) - if not argv.silent: + if argv.verbose: elapsed_time = datetime.datetime.now() - start_time print('[ SUCCESS ] Total execution time: {:.2f} seconds. '.format(elapsed_time.total_seconds())) try: @@ -448,7 +209,7 @@ def driver(argv: argparse.Namespace, non_default_params: dict): except ImportError: pass - return res_ngraph_function, legacy_path + return ov_model def args_dict_to_list(cli_parser, **kwargs): @@ -536,19 +297,17 @@ def show_mo_convert_help(): print() -def input_model_is_object(argv): - # Input model can be set as object only for "input_model" parameter. - # "saved_model_dir" or meta specific options are only used to store paths to the input model. - if 'input_model' not in argv: - return False - if isinstance(argv['input_model'], (str, Path)): +def input_model_is_object(input_model): + if input_model == (): return False - if argv['input_model'] is None: + if isinstance(input_model, (str, Path)): return False + if isinstance(input_model, (tuple, list)): + return all(input_model_is_object(part) for part in input_model) return True -def python_api_params_parsing(argv: argparse.Namespace): +def params_parsing(argv: argparse.Namespace): """ Parses params passed to convert_model and wraps resulting values into dictionaries or lists. After working of this method following values are set in argv: @@ -583,15 +342,12 @@ def python_api_params_parsing(argv: argparse.Namespace): argv.inputs_list = input_names_list argv.input = ','.join(input_names_list) - # Parse input_shape param and update InputCutInfo list - input_shape_to_input_cut_info(argv.input_shape, inputs) - # Parse freeze_placeholder_with_value. # values for freezing can be set both by named and unnamed approach if # 'input' was used without names and 'freeze_placeholder_with_value' was used with names. # So named and unnamed values are stored separately. argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value = \ - freeze_placeholder_to_input_cut_info(argv.freeze_placeholder_with_value, inputs) + freeze_placeholder_to_input_cut_info(inputs) if len(input_names_list) > 0: # Named inputs case @@ -638,7 +394,7 @@ def python_api_params_parsing(argv: argparse.Namespace): def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParser): if len(args) > 0: args_string = params_to_string(**args) - argv, _ = cli_parser.parse_known_args(args_dict_to_list(cli_parser, **args_string)) + argv, _ = cli_parser.parse_known_args(args_dict_to_list(cli_parser, **args_string)) # FIXME: input_model_args can be a list # get list of all available params for convert_model() all_params = {} @@ -659,36 +415,12 @@ def pack_params_to_args_namespace(args: dict, cli_parser: argparse.ArgumentParse return argv -def update_args_for_saved_model_dir(args: dict): - """ - If directory is set in 'input_model' argument, the directory is considered as TF saved model. - In this case this method updates args and moves saved model directory to 'saved_model_dir' param. 
- :param args: dictionary with arguments from user - """ - if 'saved_model_dir' in args and args['saved_model_dir'] is not None and \ - 'input_model' in args and args['input_model'] is not None: - raise Error("Both \"input_model\" and \"saved_model_dir\" are defined. " - "Please specify either \"input_model\" or \"saved_model_dir\" directory.") - - if 'input_model' in args and isinstance(args['input_model'], (str, Path)) and os.path.isdir(args['input_model']): - args['saved_model_dir'] = args['input_model'] - args['input_model'] = None - - -def silent_is_false(argv: argparse.Namespace): - return argv is not None and hasattr(argv, 'silent') and argv.silent is False - - -def framework_is_tf(args, argv): - if input_model_is_object(args) and check_model_object(args) == "tf": - return True - if argv is not None: - is_tf, _, _, _, _ = deduce_legacy_frontend_by_namespace(argv) - return is_tf - return False +def is_verbose(argv: argparse.Namespace): + return argv is not None and hasattr(argv, 'verbose') and argv.verbose def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): + # FIXME: It doesn't work when -h is passed if 'help' in args and args['help']: show_mo_convert_help() return None, None @@ -701,9 +433,16 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): # before arg parser deliver log_level requested by user init_logger('ERROR', False) argv = None + # Minimize modifications among other places in case if multiple pieces are passed as input_model + if python_api_used: + if 'input_model' not in args: + args['input_model'] = () + if isinstance(args['input_model'], (tuple, list)) and len(args['input_model']) == 1: + args['input_model'] = args['input_model'][0] try: model_framework = None - inp_model_is_object = input_model_is_object(args) + inp_model_is_object = input_model_is_object(args['input_model']) if python_api_used else False + if inp_model_is_object: model_framework = check_model_object(args) if model_framework == "pytorch": @@ -714,12 +453,13 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): raise AssertionError( "'example_inputs' argument is not recognized, maybe you meant to provide 'example_input'?") - decoder = get_pytorch_decoder(args['input_model'], parse_input_shapes(args), example_inputs, args) + get_pytorch_decoder(args['input_model'], example_inputs, args) if model_framework == "paddle": example_inputs = None if 'example_input' in args and args['example_input'] is not None: example_inputs = args['example_input'] + #TODO: Check what example_outputs is and remove if not needed example_outputs = None if 'example_output' in args and args['example_output'] is not None: example_outputs = args['example_output'] @@ -727,45 +467,27 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): example_outputs) pdmodel = paddle_runtime_converter.convert_paddle_to_pdmodel() args['input_model'] = pdmodel - args['framework'] = model_framework - - update_args_for_saved_model_dir(args) argv = pack_params_to_args_namespace(args, cli_parser) + argv.framework = model_framework + argv.is_python_object = inp_model_is_object argv.feManager = FrontEndManager() - frameworks = list(set(['tf', 'caffe', 'mxnet', 'kaldi', 'onnx'] + (get_available_front_ends(argv.feManager) - if argv.feManager else []))) - framework = argv.framework if hasattr(argv, 'framework') and argv.framework is not None else framework - if framework is not None: - assert framework in frameworks, "error: argument \"framework\": invalid choice: 
'{}'. " \ - "Expected one of {}.".format(framework, frameworks) - setattr(argv, 'framework', framework) # send telemetry with params info send_params_info(argv, cli_parser) non_default_params = get_non_default_params(argv, cli_parser) - check_legacy_args(non_default_params, python_api_used) argv.is_python_api_used = python_api_used if inp_model_is_object: - argv.model_name = "model" - if not hasattr(argv, "model_name") or argv.model_name is None: - argv.model_name = get_model_name_from_args(argv) - - if model_framework is not None: - if argv.framework is not None: - if argv.framework != model_framework: - raise Error("Provided model does not correspond to provided framework. The provided " - "framework is {}, the model type is {} which is expected to be {} framework.".format( - argv.framework, - type(argv.input_model), - model_framework)) - else: - argv.framework = model_framework + argv.output_model = "model" # TODO: Consider removing + if not hasattr(argv, "output_model") or argv.output_model is None: + argv.output_model = get_model_name_from_args(argv) + + argv.framework = model_framework - ov_model, legacy_path = driver(argv, {"conversion_parameters": non_default_params}) + ov_model = driver(argv, {"conversion_parameters": non_default_params}) if inp_model_is_object and model_framework == "paddle": if paddle_runtime_converter: @@ -773,30 +495,22 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): # add MO meta data to model ov_model.set_rt_info(get_rt_version(), "Runtime_version") - ov_model.set_rt_info(str(legacy_path), "legacy_frontend") for key, value in non_default_params.items(): ov_model.set_rt_info(str(value), ["conversion_parameters", str(key)]) - if silent_is_false(argv) or not python_api_used: + if is_verbose(argv) or not python_api_used: if 'compress_to_fp16' in argv and argv.compress_to_fp16: print(get_compression_message()) ov_update_message = get_ov_update_message() - ov_api20_message = get_ov_api20_message() if ov_update_message is not None: print(ov_update_message) - if ov_api20_message is not None and ov_model is not None: - print(ov_api20_message) - is_fallback = getattr(argv, 'is_fallback', False) - if not argv.use_legacy_frontend and framework_is_tf(args, argv) and not is_fallback: - # now TF FE is default frontend for TensorFlow models conversion - print(get_tf_fe_message()) send_conversion_result('success') return ov_model, argv except Exception as e: - if silent_is_false(argv) or not python_api_used: + if is_verbose(argv) or not python_api_used: if isinstance(e, (FileNotFoundError, NotADirectoryError)): log.error('File {} was not found'.format(str(e).split('No such file or directory:')[1])) log.debug(traceback.format_exc()) @@ -810,17 +524,17 @@ def _convert(cli_parser: argparse.ArgumentParser, args, python_api_used): log.error("-------------------------------------------------") log.error("----------------- INTERNAL ERROR ----------------") log.error("Unexpected exception happened.") - log.error("Please contact Model Conversion API developers and forward the following information:") + log.error("Please verify parameters and environment.") + log.error("If you think this is a bug, please create new ticket here: ") + log.error("https://github.com/openvinotoolkit/openvino/issues.") + log.error("-------------- DETAILED INFORMATION -------------") log.error(str(e)) log.error(traceback.format_exc()) - log.error("---------------- END OF BUG REPORT --------------") + log.error("----------------- END OF REPORT -----------------") 
log.error("-------------------------------------------------") - is_fallback = getattr(argv, 'is_fallback', False) if argv is not None else False - if not argv.use_legacy_frontend and framework_is_tf(args, argv) and not is_fallback: - print(get_try_legacy_fe_message()) send_conversion_result('fail') if python_api_used: - raise e.with_traceback(None) + raise e else: return None, argv diff --git a/tools/ovc/openvino/tools/ovc/error.py b/tools/ovc/openvino/tools/ovc/error.py index a58eb2202d40a8..ec549aa32658f5 100644 --- a/tools/ovc/openvino/tools/ovc/error.py +++ b/tools/ovc/openvino/tools/ovc/error.py @@ -46,8 +46,3 @@ def classify_error_type(e): if m: return m.group(0) return "undefined" - - -def legacy_path_error(functionality_description): - raise Exception("{}Please try to install openvino-dev and use convert_model() " - "from openvino.tools.mo.".format(functionality_description)) diff --git a/tools/ovc/openvino/tools/ovc/get_ov_update_message.py b/tools/ovc/openvino/tools/ovc/get_ov_update_message.py index afb1ba9b482e7f..eb94f4e5f7e3b2 100644 --- a/tools/ovc/openvino/tools/ovc/get_ov_update_message.py +++ b/tools/ovc/openvino/tools/ovc/get_ov_update_message.py @@ -16,24 +16,6 @@ def get_ov_update_message(): return msg_fmt.format(link) if current_date >= expected_update_date else None -def get_ov_api20_message(): - link = "https://docs.openvino.ai/2023.0/openvino_2_0_transition_guide.html" - message = '[ INFO ] The model was converted to IR v11, the latest model format that corresponds to the source DL framework ' \ - 'input/output format. While IR v11 is backwards compatible with OpenVINO Inference Engine API v1.0, ' \ - 'please use API v2.0 (as of 2022.1) to take advantage of the latest improvements in IR v11.\n' \ - 'Find more information about API v2.0 and IR v11 at {}'.format(link) - - return message - - -def get_tf_fe_message(): - link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_TensorFlow_Frontend.html" - message = '[ INFO ] IR generated by new TensorFlow Frontend is compatible only with API v2.0. Please make sure to use API v2.0.\n' \ - 'Find more information about new TensorFlow Frontend at {}'.format(link) - - return message - - def get_compression_message(): link = "https://docs.openvino.ai/2023.0/openvino_docs_MO_DG_FP16_Compression.html" message = '[ INFO ] Generated IR will be compressed to FP16. 
' \
@@ -41,8 +23,3 @@ def get_compression_message():
               'by removing argument "compress_to_fp16" or set it to false "compress_to_fp16=False".\n' \
               'Find more information about compression to FP16 at {}'.format(link)
     return message
-
-
-def get_try_legacy_fe_message():
-    message = '[ INFO ] You can also try to install openvino-dev and use convert_model from openvino.tools.mo.\n'
-    return message
diff --git a/tools/ovc/openvino/tools/ovc/help.py b/tools/ovc/openvino/tools/ovc/help.py
index 3f65be41c0f3f8..6ea8b27c7dd813 100644
--- a/tools/ovc/openvino/tools/ovc/help.py
+++ b/tools/ovc/openvino/tools/ovc/help.py
@@ -2,19 +2,18 @@
 # SPDX-License-Identifier: Apache-2.0

 def get_convert_model_help_specifics():
-    from openvino.tools.ovc.cli_parser import CanonicalizeTransformationPathCheckExistenceAction, \
-        CanonicalizePathCheckExistenceAction, CanonicalizeExtensionsPathCheckExistenceAction, \
-        CanonicalizePathCheckExistenceIfNeededAction, readable_file_or_dir, readable_dirs_or_files_or_empty, \
-        check_positive
+    from openvino.tools.ovc.cli_parser import CanonicalizePathCheckExistenceAction, CanonicalizeExtensionsPathCheckExistenceAction, \
+        readable_file_or_dir, readable_dirs_or_files_or_empty
     from openvino.tools.ovc.version import VersionChecker
     return {
         'input_model':
             {'description':
-                 'Tensorflow*: a file with a pre-trained model '
-                 '(binary or text .pb file after freezing). '
-                 'Caffe*: a model proto file with model weights.',
-             'action': CanonicalizePathCheckExistenceAction,
+                 'Input model file(s) from TensorFlow, ONNX, PaddlePaddle. '
+                 'Use openvino.convert_model in Python to convert models from PyTorch.'
+                 '',
+             'action': CanonicalizePathCheckExistenceAction,
              'type': readable_file_or_dir,
-             'aliases': {'-w', '-m'}},
+             'aliases': {}},
         'input_shape':
             {'description':
                  'Input shape(s) that should be fed to an input node(s) '
@@ -38,10 +37,11 @@ def get_convert_model_help_specifics():
                  'Alternatively, specify shapes with the --input option.'},
         'input':
             {'description':
-                 'Quoted list of comma-separated input nodes names with '
-                 'shapes, data types, and values for freezing. The order '
-                 'of inputs in converted model is the same as order of '
-                 'specified operation names. The shape and value are '
+                 'Information for model input required for model conversion. '
+                 'This is a comma-separated list with optional '
+                 'input names, shapes, data types, and values for freezing. '
+                 'The order of inputs in converted model will match the order of '
+                 'specified inputs. The shape and value are '
                  'specified as comma-separated lists. The data type of '
                  'input node is specified in braces and can have one of '
                  'the values: f64 (float64), f32 (float32), f16 '
@@ -62,38 +62,6 @@ def get_convert_model_help_specifics():
                  '\"node_name2\" with the value [20,15] of the int32 type '
                  'and shape [2]: \n '
                  '\"0:node_name1[3,4],node_name2:1[2]{i32}->[20,15]\".'},
-        'mean_values':
-            {'description':
-                 'Mean values to be used for the input image per '
-                 'channel. Values to be provided in the (R,G,B) or '
-                 '[R,G,B] format. Can be defined for desired input of '
-                 'the model, for example: "--mean_values '
-                 'data[255,255,255],info[255,255,255]". The exact '
-                 'meaning and order of channels depend on how the '
-                 'original model was trained.'},
-        'scale_values':
-            {'description':
-                 'Scale values to be used for the input image per '
-                 'channel. Values are provided in the (R,G,B) or [R,G,B] '
-                 'format. Can be defined for desired input of the model, '
-                 'for example: "--scale_values '
-                 'data[255,255,255],info[255,255,255]". 
The exact ' - 'meaning and order of channels depend on how the ' - 'original model was trained. If both --mean_values and ' - '--scale_values are specified, the mean is subtracted ' - 'first and then scale is applied regardless of the ' - 'order of options in command line.'}, - 'source_layout': - {'description': - 'Layout of the input or output of the model in the ' - 'framework. Layout can be specified in the short form, ' - 'e.g. nhwc, or in complex form, e.g. \"[n,h,w,c]\". ' - 'Example for many names: \"in_name1([n,h,w,c]),in_name2(' - 'nc),out_name1(n),out_name2(nc)\". Layout can be ' - 'partially defined, \"?\" can be used to specify ' - 'undefined layout for one dimension, \"...\" can be used ' - 'to specify undefined layout for multiple dimensions, ' - 'for example \"?c??\", \"nc...\", \"n...c\", etc.'}, 'transform': {'description': 'Apply additional transformations. Usage: \"--transform ' @@ -115,47 +83,19 @@ def get_convert_model_help_specifics(): 'those that are placed at the default location, pass an empty string.', 'action': CanonicalizeExtensionsPathCheckExistenceAction, 'type': readable_dirs_or_files_or_empty}, - 'transformations_config': - {'description': - 'Use the configuration file with transformations ' - 'description. Transformations file can be specified as ' - 'relative path from the current directory, as absolute ' - 'path or as arelative path from the mo root directory.', - 'action': CanonicalizeTransformationPathCheckExistenceAction}, - 'counts': - {'action': CanonicalizePathCheckExistenceIfNeededAction}, 'version': {'action': 'version', - 'version': 'Version of Model Optimizer is: {}'.format(VersionChecker().get_ie_version())}, - 'scale': - {'type': float, - 'aliases': {'-s'}}, - 'batch': - {'type': check_positive, - 'aliases': {'-b'}}, - 'input_proto': - {'aliases': {'-d'}}, - 'log_level': - {'choices': ['CRITICAL', 'ERROR', 'WARN', 'WARNING', 'INFO', 'DEBUG', 'NOTSET']} + #FIXME: Why the following is not accessible from arg parser? 
+ 'version': 'OpenVINO Model Converter (ovc) {}'.format(VersionChecker().get_ie_version())}, } -# TODO: remove this when internal converting of params to string is removed +# TODO: remove this when internal converting of params to string is removed <-- DO IT def get_to_string_methods_for_params(): - from openvino.tools.ovc.cli_parser import path_to_str_or_object, str_list_to_str, \ - mean_scale_value_to_str, source_target_layout_to_str, layout_param_to_str, transform_param_to_str, \ - extensions_to_str_or_extensions_class, batch_to_int, transformations_config_to_str + from openvino.tools.ovc.cli_parser import path_to_str_or_object, str_list_to_str, extensions_to_str_or_extensions_class return { 'input_model': path_to_str_or_object, 'output': str_list_to_str, - 'mean_values': mean_scale_value_to_str, - 'scale_values': mean_scale_value_to_str, - 'source_layout': source_target_layout_to_str, - 'target_layout': source_target_layout_to_str, - 'layout': layout_param_to_str, - 'transform': transform_param_to_str, 'extensions': extensions_to_str_or_extensions_class, - 'batch': batch_to_int, - 'transformations_config': transformations_config_to_str, 'saved_model_tags': str_list_to_str } diff --git a/tools/ovc/openvino/tools/ovc/logger.py b/tools/ovc/openvino/tools/ovc/logger.py index 643d4d89191312..4795e690599588 100644 --- a/tools/ovc/openvino/tools/ovc/logger.py +++ b/tools/ovc/openvino/tools/ovc/logger.py @@ -64,10 +64,10 @@ def filter(self, record: log.LogRecord): return True # if regex wasn't set print all logs -def init_logger(lvl: str, silent: bool): +def init_logger(lvl: str, verbose: bool): global handler_num log_exp = os.environ.get('MO_LOG_PATTERN') - if silent: + if not verbose: lvl = 'ERROR' fmt = LvlFormatter(lvl=lvl) handler = log.StreamHandler() @@ -89,72 +89,3 @@ def restore_logger_state(state: tuple): logger.setLevel(level) logger.filters = filters logger.handlers = handlers - - -def progress_bar(function: callable): - """ - Decorator for model conversion pipeline progress display - Works in combination with function: mo.utils.class_registration.apply_transform - """ - - def wrapper(*args, **kwargs): - for arg in ['graph', 'curr_transform_num', 'num_transforms']: - msg = 'Progress bar decorator is enabled for Model Conversion API transformation applying cycle only. ' \ - 'Argument `{}` {}' - - assert arg in kwargs, msg.format(arg, 'is missing') - assert kwargs[arg] is not None, msg.format(arg, 'should not be None') - - if 'progress' in kwargs['graph'].graph['cmd_params'] and kwargs['graph'].graph['cmd_params'].progress: - bar_len = 20 - total_replacers_count = kwargs['num_transforms'] - - def progress(i): - return int((i + 1) / total_replacers_count * bar_len) - - def percent(i): - return (i + 1) / total_replacers_count * 100 - - end = '' if not kwargs['graph'].graph['cmd_params'].stream_output else '\n' - curr_i = kwargs['curr_transform_num'] - print('\rProgress: [{:{}}]{:>7.2f}% done'.format('.' * progress(curr_i), bar_len, percent(curr_i)), end=end) - - sys.stdout.flush() - - function(*args, **kwargs) - - return wrapper - -def progress_printer(argv: Namespace): - """ - A higher-order factory function returning a configurable callback displaying a progress bar - Depending on the configuration stored in 'argv' the progress bar can be one-line, multi-line, or silent. - """ - def _progress_bar(progress, total, completed, endline): - bar_len = 20 - - def dots(): - return '.' 
* int(progress * bar_len) - - print('\rProgress: [{:{}}]{:>7.2f}% done'.format(dots(), bar_len, progress*100), end=endline) - sys.stdout.flush() - - def no_progress_bar(progress, total, completed): - """ A 'dummy' progressbar which doesn't print anything """ - pass - - def oneline_progress_bar(progress, total, completed): - """ A callback that always prints the progress in the same line (mimics real GUI progress bar)""" - _progress_bar(progress, total, completed, '') - - def newline_progress_bar(progress, total, completed): - """ A callback that prints an updated progress bar in separate lines """ - _progress_bar(progress, total, completed, '\n') - - if "progress" in argv and argv.progress: - if "stream_output" in argv and argv.stream_output: - return newline_progress_bar - else: - return oneline_progress_bar - else: - return no_progress_bar diff --git a/tools/ovc/openvino/tools/ovc/main.py b/tools/ovc/openvino/tools/ovc/main.py index c345c05fd03587..efd672c5e8b57f 100644 --- a/tools/ovc/openvino/tools/ovc/main.py +++ b/tools/ovc/openvino/tools/ovc/main.py @@ -21,13 +21,12 @@ def main(): if ngraph_function is None: return 1 - output_dir = argv.output_dir if argv.output_dir != '.' else os.getcwd() - model_path_no_ext = os.path.normpath(os.path.join(output_dir, argv.model_name)) + output_dir = os.getcwd() + model_path_no_ext = os.path.normpath(os.path.join(output_dir, argv.output_model)) model_path = model_path_no_ext + '.xml' serialize(ngraph_function, model_path.encode('utf-8'), model_path.replace('.xml', '.bin').encode('utf-8')) - print('[ SUCCESS ] Generated IR version {} model.'.format(get_ir_version())) print('[ SUCCESS ] XML file: {}'.format(model_path)) print('[ SUCCESS ] BIN file: {}'.format(model_path.replace('.xml', '.bin'))) return 0 diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/moc_emit_ir.py b/tools/ovc/openvino/tools/ovc/moc_frontend/moc_emit_ir.py index c716c5141ca903..fb74143acc837c 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/moc_emit_ir.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/moc_emit_ir.py @@ -4,7 +4,6 @@ import argparse from openvino.runtime import Model # pylint: disable=no-name-in-module,import-error -from openvino.tools.ovc.cli_parser import parse_transform from openvino.tools.ovc.moc_frontend.preprocessing import apply_preprocessing @@ -21,15 +20,14 @@ def moc_emit_ir(ngraph_function: Model, argv: argparse.Namespace): from openvino._offline_transformations import compress_quantize_weights_transformation # pylint: disable=no-name-in-module,import-error compress_quantize_weights_transformation(ngraph_function) - if argv.framework == "onnx": + if argv.framework == "onnx": # TODO: Consider removing # set OldApi map in IR to be executed via OV API 1.x and for parity with legacy MO params_with_custom_types = [] if argv.placeholder_data_types is None \ else list(argv.placeholder_data_types.keys()) apply_moc_legacy_transformations(ngraph_function, params_with_custom_types) - apply_user_transformations(ngraph_function, parse_transform(argv.transform)) - - if argv.compress_to_fp16: + # TODO: Move compression to save_model at the level of main function where serialize is called + if not argv.is_python_api_used and argv.compress_to_fp16: from openvino.tools.ovc.moc_frontend.offline_transformations import compress_model compress_model(ngraph_function) diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/offline_transformations.py b/tools/ovc/openvino/tools/ovc/moc_frontend/offline_transformations.py index 16749584bf905b..afa7a45aa2846a 100644 --- 
a/tools/ovc/openvino/tools/ovc/moc_frontend/offline_transformations.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/offline_transformations.py @@ -4,7 +4,6 @@ import argparse from typing import List -from openvino.tools.ovc.cli_parser import parse_transform from openvino.tools.ovc.error import Error from openvino.runtime import Model # pylint: disable=no-name-in-module,import-error @@ -117,7 +116,6 @@ def apply_offline_transformations(func: Model, argv: argparse.Namespace): params_with_custom_types = create_params_with_custom_types(argv.packed_user_shapes) apply_moc_legacy_transformations(func, params_with_custom_types) - apply_user_transformations(func, parse_transform(argv.transform)) if "compress_to_fp16" in argv and argv.compress_to_fp16: compress_model(func) diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py b/tools/ovc/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py index 85ed861bccddec..bc5af1c49e5b95 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/paddle_frontend_utils.py @@ -34,7 +34,7 @@ def destroy(self): # remove the *.pdiparams.info if os.path.exists(self.pdiparams_info): os.remove(self.pdiparams_info) - + def convert_paddle_to_pdmodel(self): ''' There are three paddle model categories: diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py b/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py index 7e47ae2c904298..78c270f1e9c489 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/pipeline.py @@ -45,19 +45,18 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd): :param: moc_front_end: Loaded Frontend for converting input model :return: converted nGraph function ready for serialization """ - input_checkpoint = getattr(argv, 'input_checkpoint', None) - share_weights = getattr(argv, 'share_weights', True) - if argv.input_model and input_checkpoint: + + share_weights = getattr(argv, 'share_weights', True) #FIXME: Should be controlled by default value + if isinstance(argv.input_model, (tuple, list)) and len(argv.input_model) == 2: # frozen format with v1 checkpoints - input_model = moc_front_end.load([argv.input_model, argv.input_checkpoint], share_weights) - elif argv.input_model: + assert not hasattr(argv, 'saved_model_tags') or not argv.saved_model_tags + input_model = moc_front_end.load([part for part in argv.input_model], share_weights) + elif hasattr(argv, 'saved_model_tags') and argv.saved_model_tags: + input_model = moc_front_end.load([argv.input_model, argv.saved_model_tags], share_weights) + else: input_model = moc_front_end.load(argv.input_model, share_weights) - elif argv.saved_model_dir: - if argv.saved_model_tags: - input_model = moc_front_end.load([argv.saved_model_dir, argv.saved_model_tags], share_weights) - else: - input_model = moc_front_end.load(argv.saved_model_dir, share_weights) - elif argv.input_meta_graph: + + '''elif argv.input_meta_graph: # TODO: Cover this case input_model = moc_front_end.load(argv.input_meta_graph, share_weights) if argv.output: # Simulate original behavior with freezing model @@ -65,7 +64,7 @@ def moc_pipeline(argv: argparse.Namespace, moc_front_end: FrontEnd): # need to simulate similar behavior with natively supported model outputs = fe_output_user_data_repack(input_model, argv.output, moc_front_end.get_name()) input_model.override_all_outputs([x['node'] for x in outputs]) - + ''' argv.placeholder_shapes, 
argv.placeholder_data_types, argv.freeze_placeholder_with_value = convert_params_lists_to_dicts( input_model, argv.placeholder_shapes, argv.placeholder_data_types, argv.freeze_placeholder_with_value, argv.unnamed_freeze_placeholder_with_value) @@ -236,83 +235,6 @@ def shape_to_array(shape: PartialShape): layout_values = update_layout_to_dict(model_inputs, argv.layout_values, lambda input_place: input_place.get_names()) - deferred_batch_names = [] - # set batch size for inputs with a static rank - # for all other inputs, set it after shape deduction is performed during model conversion - if argv.batch is not None and argv.batch > 0: - log.debug('Setting batch size to {}'.format(argv.batch)) - frozen_input_names = list(freeze_placeholder.keys()) if freeze_placeholder else [] - for place in model_inputs: - input_partial_shape = input_model.get_partial_shape(place) - input_names = place.get_names() - joined_name = ' '.join(place.get_names()) - assert len(input_names) > 0, "One input place has no names" - - # if this input is frozen, there is no need to set the batch - is_frozen_input = len([name for name in input_names if name in frozen_input_names]) > 0 - if is_frozen_input: - # skip the frozen input - continue - - if not input_partial_shape.rank.is_static: - # found input with dynamic rank, so have to repeat the batch setting after the model conversion - deferred_batch_names += input_names - continue - - batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape, - place.get_names(), layout_values, 'N', 0) - if batch_dim is None: - # skip because no batch dimension exists in the input - continue - - if is_default_index: - # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1 - validate_batch_in_shape(shape_to_array(input_partial_shape), joined_name) - - assert batch_dim < input_partial_shape.rank.get_length(), \ - "Incorrect layout is specified for {}:" \ - " index of the batch dimension is out of range.".format(input_names[0]) - - new_partial_shape = copy(input_partial_shape) - new_partial_shape[batch_dim] = argv.batch - - log.debug('Input: {}, Old shape: {}, New shape: {}'.format( - joined_name, input_partial_shape, new_partial_shape)) - input_model.set_partial_shape(place, new_partial_shape) - ov_model = moc_front_end.convert(input_model) - if argv.batch is not None and argv.batch > 0 and len(deferred_batch_names) > 0: - # Frontend convert method can include reverse infer functionality that can deduce undefined input shapes - # so try to repeat batch setting again - reshape_dict = {} - log.debug('Deferred batch setting to size {}'.format(argv.batch)) - is_batch_clarified = False - for model_input in ov_model.inputs: - input_name = model_input.any_name - input_partial_shape = model_input.get_partial_shape() - if input_name in deferred_batch_names and input_partial_shape.rank.is_static: - # update input shape with the specified batch for input that originally has dynamic rank - batch_dim, is_default_index = get_dimension_index_by_label(input_partial_shape, - model_input.get_names(), - layout_values, 'N', 0) - if batch_dim is None: - continue - - if is_default_index: - # if the batch index is chosen by default, we need to ensure that its size equals -1, 0 or 1 - validate_batch_in_shape(shape_to_array(input_partial_shape), input_name) - - assert batch_dim < input_partial_shape.rank.get_length(), \ - "Incorrect layout is specified for {}: " \ - "index of the batch dimension is out of range.".format(input_name) - 
input_partial_shape[batch_dim] = argv.batch - is_batch_clarified = True - - reshape_dict.update({input_name: input_partial_shape}) - - if is_batch_clarified: - # call reshape only if batch dimension for one of the input is clarified - ov_model.reshape(reshape_dict) - return ov_model diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/preprocessing.py b/tools/ovc/openvino/tools/ovc/moc_frontend/preprocessing.py index c42c5725d8607e..cf7c1ef61557dc 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/preprocessing.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/preprocessing.py @@ -14,52 +14,6 @@ from openvino.tools.ovc.utils import refer_to_faq_msg -def update_mean_scale_to_dict(input_nodes: list, mean_scale_val, scale): - """ - Internal function. Updates mean/scale values from array to dictionary - :param: input_nodes Inputs of model - :param: mean_scale_val Parsed 'mean_scale_val' object from command line arguments - :param: scale Global scale factor for all inputs from scale command line arguments - """ - if not isinstance(mean_scale_val, dict): - if len(mean_scale_val) != len(input_nodes): - raise Error('Numbers of inputs and mean/scale values do not match. ' + refer_to_faq_msg(61)) - data = copy(mean_scale_val) - mean_scale_val = {} - for idx, node in enumerate(input_nodes): - names_list = list(node.get_tensor().get_names()) - names_list.sort() - if not names_list: - continue - node_name = names_list[0] - mean_scale_val.update( - { - node_name: { - 'mean': data[idx][0], - 'scale': data[idx][1] - } - } - ) - - if scale: - for node in input_nodes: - names_list = list(node.get_tensor().get_names()) - names_list.sort() - if not names_list: - continue - node_name = names_list[0] - old_val = mean_scale_val[node_name] if node_name in mean_scale_val else None - mean_scale_val.update( - { - node_name: { - 'mean': old_val['mean'] if old_val and 'mean' in old_val else None, - 'scale': scale - } - } - ) - return mean_scale_val - - def check_keys_valid(ov_function: Model, dict_to_validate: dict, search_outputs: bool): """ Internal function: checks if keys from cmd line arguments correspond to ov_function's inputs/outputs @@ -183,147 +137,6 @@ def find_channels_dimension(shape: PartialShape, num_channels: int, name: str, l } return layout_values - -def guess_source_layouts_by_mean_scale(ov_function: Model, layout_values, mean_scale_values: dict): - """ - Internal function. 
Try to guess source layout for input by its shape and/or framework - :param: ov_function Original model - :param: layout_values Existing source/target layout items specified by user - :param: mean_scale_values Dictionary with mean/scale values defined for each argument - :return: updated layout items with guessed layouts - """ - for ms_name, mean_scale in mean_scale_values.items(): - num_channels_mean = len(mean_scale['mean']) if mean_scale['mean'] is not None else 0 - num_channels_scale = len(mean_scale['scale']) if hasattr(mean_scale['scale'], '__len__') else 0 - - if num_channels_mean > 1 and \ - num_channels_scale > 1 and \ - num_channels_mean is not num_channels_scale: - raise Error('Mean/Scale values for {} have different sizes: {} {}' - .format(ms_name, num_channels_mean, num_channels_scale)) - - need_guess_channels = num_channels_mean > 1 or num_channels_scale > 1 - if not need_guess_channels: # Mean/scale is complex and needs 'channels' specified in layout - continue - - num_channels = num_channels_mean if num_channels_mean > 1 else num_channels_scale - - for i in range(0, len(ov_function.inputs)): - ov_input = ov_function.input(i) - - if not ov_function.get_parameters()[i].layout.empty: - continue - - if ms_name not in ov_input.get_tensor().get_names(): - continue - - layout_item = None - for name in ov_input.get_tensor().get_names(): - if name in layout_values: - layout_item = layout_values[name] - break - - if layout_item is not None: - # User specified some layout, skip guessing - continue - - # Guess layout is applicable only when number of channels is '3' - if num_channels != 3: - raise Error('Can\'t determine channels dimension for {}. ' - 'When number of mean/scale values is {} (not 3), ' - 'please specify layout for input manually'.format(ms_name, num_channels)) - - layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(), - num_channels=num_channels, - name=ms_name, - layout_values=layout_values) - return layout_values - - -def check_suitable_for_reverse(layout: Layout, ov_input): - """ - Internal function. Checks if input with layout is suitable for reversing channels - :param: layout Existing source/target layout items specified by user - :param: ov_input Model's input - :return: True if reverse channels can be applied to input - """ - if not layout_helpers.has_channels(layout): - return False - if ov_input.get_partial_shape().rank.is_dynamic: - return False - - c_idx = layout_helpers.channels_idx(layout) - rank = ov_input.get_partial_shape().rank.get_length() - if c_idx < 0: - c_idx += rank - if c_idx >= rank: - raise Error('Layout {} for input {} is inconsistent with shape {}'.format( - layout, ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape())) - c_num = ov_input.get_partial_shape()[c_idx] - return c_num.is_dynamic or c_num.get_length() == 3 - - -def guess_source_layouts_for_reverse_channels(ov_function: Model, layout_values): - """ - Internal function. 
Try to guess source layout for input by finding dimension with size=3 (RGB/BGR) - Additionally checks existing layouts and detects suitable inputs for reversing of input channels - :param: ov_function Original model - :param: layout_values Existing source/target layout items specified by user - :return: array with suitable parameters for reversing of input channels - """ - all_params = [] - suitable_params = [] - for i in range(0, len(ov_function.inputs)): - ov_input = ov_function.input(i) - param_info = [ov_input.get_tensor().get_any_name(), ov_input.get_partial_shape()] - all_params.append(param_info) - - if not ov_function.get_parameters()[i].layout.empty: - if check_suitable_for_reverse(ov_function.get_parameters()[i].layout, ov_input): - suitable_params.append(param_info) - continue - - layout_item = None - first_name = ov_input.get_tensor().get_any_name() - for name in ov_input.get_tensor().get_names(): - if name in layout_values: - layout_item = layout_values[name] - break - - if layout_item is not None: - # RIC transformation is applied before changing layout so only source_layout - # should be checked (even is target_layout is also provided) - if layout_item.get('source_layout'): - if check_suitable_for_reverse(Layout(layout_item['source_layout']), ov_input): - suitable_params.append(param_info) - continue - - try: - layout_values = find_channels_dimension(shape=ov_input.get_partial_shape(), - num_channels=3, - name=first_name, - layout_values=layout_values) - except Error as e: - log.debug('Reverse input channels guess did not succeed {}'.format(e)) - else: - layout = layout_values[first_name].get('source_layout') - if layout and check_suitable_for_reverse(Layout(layout), ov_input): - suitable_params.append(param_info) - - if not len(suitable_params): - raise Error('Network has {} inputs overall, but none of them are suitable for input channels reversing.\n' - 'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic ' - 'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}'.format( - len(all_params), all_params)) - elif len(suitable_params) < len(all_params): - log.error('Network has {} inputs overall, but only {} of them are suitable for input channels reversing.\n' - 'Suitable for input channel reversing inputs are 4-dimensional with 3 channels (in case of dynamic ' - 'dimensions C channel must be provided in a layout for this input)\nAll inputs: {}\n' - 'Suitable inputs {}'.format(len(all_params), len(suitable_params), all_params, suitable_params), - extra={'is_warning': True}) - return suitable_params - - def update_tensor_names_to_first_in_sorted_list(values_dict: dict, ov_function: Model): if not isinstance(values_dict, dict): return values_dict @@ -372,38 +185,14 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace): """ prep = PrePostProcessor(ov_function) - if 'mean_scale_values' in argv and argv.mean_scale_values: - mean_scale_values = argv.mean_scale_values - else: - mean_scale_values = {} - - # mean_scale_values stores mean/scale values from command line with names which were set by user. - # For models with single input scale or mean may be unnamed, so name is set by first tensor name from - # names list. This may lead to different naming of preprocessing params for a single node and lead to error. - # To make naming for mean/scale values unified, names provided by user are renamed here - # by the first tensor name from sorted names list. 
- mean_scale_values = update_tensor_names_to_first_in_sorted_list(mean_scale_values, ov_function) - mean_scale_values = update_mean_scale_to_dict(input_nodes=ov_function.inputs, - mean_scale_val=mean_scale_values, - scale=argv.scale) - # On return, mean_scale_values is a dictionary with input names as key and mean/scale pair as value - # {'inputName': {'mean': [1., 2., 3.], 'scale': [2.]}} - layout_values = {} if 'layout_values' in argv and argv.layout_values: layout_values = update_layout_to_dict(ov_function.inputs, argv.layout_values, lambda ov_input: ov_input.get_tensor().get_names()) - check_keys_valid(ov_function=ov_function, dict_to_validate=mean_scale_values, search_outputs=False) check_keys_valid(ov_function=ov_function, dict_to_validate=layout_values, search_outputs=True) layout_values = update_layout_is_input_flag(ov_function, layout_values) - layout_values = guess_source_layouts_by_mean_scale(ov_function, layout_values, mean_scale_values) - need_reverse = 'reverse_input_channels' in argv and argv.reverse_input_channels - suitable_params_ric = [] - if need_reverse: - suitable_params_ric = guess_source_layouts_for_reverse_channels(ov_function=ov_function, - layout_values=layout_values) for node_name, layout_value in layout_values.items(): if layout_value.get('source_layout'): @@ -417,20 +206,6 @@ def apply_preprocessing(ov_function: Model, argv: argparse.Namespace): else: prep.output(node_name).tensor().set_layout(Layout(layout_value['target_layout'])) - # Apply reverse_input_channels - if need_reverse: - for name, _ in suitable_params_ric: - prep.input(name).preprocess().reverse_channels() - log.debug('reverse_input_channels pre-processing applied to {}'.format(name)) - - for node_name, node_mean_scale_values in mean_scale_values.items(): - # Apply mean first, then scale - if node_mean_scale_values['mean'] is not None: - prep.input(node_name).preprocess().mean(node_mean_scale_values['mean']) - if node_mean_scale_values['scale'] is not None: - prep.input(node_name).preprocess().scale(node_mean_scale_values['scale']) - log.debug('Mean/Scale pre-processing applied to {}'.format(node_name)) - # Apply pre-processing builder to a function ov_function = prep.build() diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py b/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py index 68a4c30e83be8e..9e57edf39369cd 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/pytorch_frontend_utils.py @@ -8,21 +8,20 @@ from openvino.runtime import Tensor, Type, PartialShape from openvino.runtime.utils.types import get_element_type_str -from openvino.tools.ovc.cli_parser import input_to_input_cut_info, input_shape_to_input_cut_info +from openvino.tools.ovc.cli_parser import input_to_input_cut_info from openvino.tools.ovc.error import Error from openvino.tools.ovc.moc_frontend.shape_utils import get_static_shape -def get_pytorch_decoder(model, input_shape, example_inputs, args): +def get_pytorch_decoder(model, example_inputs, args): try: from openvino.frontend.pytorch.decoder import TorchScriptPythonDecoder except Exception as e: log.error("PyTorch frontend loading failed") raise e - inputs = prepare_torch_inputs(example_inputs, input_shape, args.get("input"), allow_none=True) + inputs = prepare_torch_inputs(example_inputs, args.get("input"), allow_none=True) decoder = TorchScriptPythonDecoder(model, example_input=inputs) args['input_model'] = decoder - args["framework"] = "pytorch" 
args["example_input"] = inputs return args @@ -169,7 +168,7 @@ def get_torch_dtype(dtype): raise Error(f"Unexpected data type for input. Supported torch.dtype, numpy.dtype, ov.Type and str. Got {type(dtype)}") -def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_none=False): +def prepare_torch_inputs(example_inputs, input_info=None, allow_none=False): import torch inputs = None if example_inputs is not None: @@ -190,16 +189,15 @@ def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_non inputs[name] = to_torch_tensor(tensor) else: inputs = to_torch_tensor(inputs) - elif input_info is not None or input_shape is not None: + elif input_info is not None: input_info = input_to_input_cut_info(input_info) or [] - input_shape_to_input_cut_info(input_shape, input_info) inputs = [] inputs_with_names = {} for inp in input_info: shape = inp.shape if shape is None: if not allow_none: - raise Error("Please provide input_shape or example_input for all inputs converting PyTorch model.") + raise Error("Please provide shape in `input` or `example_input` for all inputs converting PyTorch model.") inputs = None break dtype = get_torch_dtype(inp.type) @@ -214,5 +212,5 @@ def prepare_torch_inputs(example_inputs, input_shape, input_info=None, allow_non inputs = inputs_with_names else: if not allow_none: - raise Error("Please provide input_shape or example_input for converting PyTorch model.") + raise Error("Please provide shapes `input` or `example_input` for converting PyTorch model.") return inputs diff --git a/tools/ovc/openvino/tools/ovc/moc_frontend/shape_utils.py b/tools/ovc/openvino/tools/ovc/moc_frontend/shape_utils.py index 6fe29135ba308c..defbe552b1d759 100644 --- a/tools/ovc/openvino/tools/ovc/moc_frontend/shape_utils.py +++ b/tools/ovc/openvino/tools/ovc/moc_frontend/shape_utils.py @@ -4,7 +4,6 @@ import numpy as np from openvino.runtime import PartialShape, Dimension # pylint: disable=no-name-in-module,import-error from openvino.tools.ovc.error import Error -from openvino.tools.ovc.cli_parser import get_placeholder_shapes, split_shapes def get_static_shape(shape: [PartialShape, list, tuple], dynamic_value=None): @@ -63,40 +62,3 @@ def get_dynamic_dims(shape: [PartialShape, list, tuple]): dynamic_dims.append(idx) return dynamic_dims - - -def parse_input_shapes(argv): - input_shapes = None - if 'input_shape' in argv and argv['input_shape'] is not None: - shapes = argv['input_shape'] - if isinstance(shapes, str): - shapes = ["[{}]".format(x) for x in split_shapes(shapes)] - if isinstance(shapes, list) or isinstance(shapes, tuple): - input_shapes = [] - is_single_shape = False - for shape in shapes: - if isinstance(shape, str): - _, shape_tuple, _ = get_placeholder_shapes(argv_input=None, argv_input_shape=shape) - input_shapes.append(shape_tuple) - if is_single_shape: - raise Error("Incorrect format of shape.") - elif isinstance(shape, int) or isinstance(shape, np.int64) or isinstance(shape, Dimension): - is_single_shape = True - input_shapes.append(shape) - else: - input_shapes.append(shape) - if is_single_shape: - return [input_shapes] - else: - return input_shapes - elif isinstance(shapes, PartialShape): - return [shapes] - else: - try: - import torch - if isinstance(shapes, torch.Size): # pylint: disable=no-member - return [shapes] - except ImportError: - raise Error("Unknown type of input shape {}.".format(type(shapes))) - - return input_shapes \ No newline at end of file diff --git a/tools/ovc/openvino/tools/ovc/utils.py 
b/tools/ovc/openvino/tools/ovc/utils.py index 19551e157a386e..8ed6ed2c7502b4 100644 --- a/tools/ovc/openvino/tools/ovc/utils.py +++ b/tools/ovc/openvino/tools/ovc/utils.py @@ -101,39 +101,6 @@ def validate_batch_in_shape(shape, layer_name: str): 'You can also specify batch dimension by setting "layout". \n\n') .format(layer_name, shape)) - -def deduce_legacy_frontend_by_namespace(argv): - if not hasattr(argv, 'framework') or not argv.framework: - if getattr(argv, 'saved_model_dir', None) or getattr(argv, 'input_meta_graph', None): - argv.framework = 'tf' - elif getattr(argv, 'input_symbol', None) or getattr(argv, 'pretrained_model_name', None): - argv.framework = 'mxnet' - elif getattr(argv, 'input_proto', None): - argv.framework = 'caffe' - elif argv.input_model is None: - raise Error('Path to input model is required: use "input_model".') - else: - argv.framework = guess_framework_by_ext(argv.input_model) - - return map(lambda x: argv.framework == x, ['tf', 'caffe', 'mxnet', 'kaldi', 'onnx']) - - -def guess_framework_by_ext(input_model_path: str) -> int: - if re.match(r'^.*\.caffemodel$', input_model_path): - return 'caffe' - elif re.match(r'^.*\.pb$', input_model_path): - return 'tf' - elif re.match(r'^.*\.pbtxt$', input_model_path): - return 'tf' - elif re.match(r'^.*\.params$', input_model_path): - return 'mxnet' - elif re.match(r'^.*\.nnet$', input_model_path): - return 'kaldi' - elif re.match(r'^.*\.mdl', input_model_path): - return 'kaldi' - elif re.match(r'^.*\.onnx$', input_model_path): - return 'onnx' - def get_ir_version(): """ Default IR version. diff --git a/tools/ovc/unit_tests/moc_tf_fe/check_info_messages_test.py b/tools/ovc/unit_tests/moc_tf_fe/check_info_messages_test.py index 8926d22f1342d1..9508a3a7fd2d6e 100644 --- a/tools/ovc/unit_tests/moc_tf_fe/check_info_messages_test.py +++ b/tools/ovc/unit_tests/moc_tf_fe/check_info_messages_test.py @@ -9,8 +9,7 @@ from unittest.mock import patch from openvino.tools.ovc.main import main -from openvino.tools.ovc.get_ov_update_message import get_tf_fe_message, get_compression_message -from openvino.tools.ovc.get_ov_update_message import get_try_legacy_fe_message +from openvino.tools.ovc.get_ov_update_message import get_compression_message def arg_parse_helper(input_model, @@ -18,41 +17,19 @@ def arg_parse_helper(input_model, use_new_frontend, input_model_is_text, framework, - compress_to_fp16=False, - freeze_placeholder_with_value=None, - tensorflow_object_detection_api_pipeline_config=None): + compress_to_fp16=False): path = os.path.dirname(__file__) input_model = os.path.join(path, "test_models", input_model) return argparse.Namespace( input_model=input_model, - use_legacy_frontend=use_legacy_frontend, - use_new_frontend=use_new_frontend, - framework=framework, - input_model_is_text=input_model_is_text, log_level='INFO', - silent=True, - model_name=None, + verbose=False, + output_model=None, transform=[], - scale=None, output=None, input=None, - input_shape=None, - batch=None, - input_checkpoint=None, - saved_model_dir=None, - input_meta_graph=None, - saved_model_tags=None, output_dir='.', - mean_values=(), - scale_values=(), - layout={}, - source_layout={}, - target_layout={}, - freeze_placeholder_with_value=freeze_placeholder_with_value, - data_type=None, - tensorflow_custom_operations_config_update=None, - tensorflow_object_detection_api_pipeline_config=tensorflow_object_detection_api_pipeline_config, compress_to_fp16=compress_to_fp16, extensions=None ) @@ -63,60 +40,11 @@ class TestInfoMessagesTFFE(unittest.TestCase): 
return_value=arg_parse_helper(input_model="model_int32.pbtxt", use_legacy_frontend=False, use_new_frontend=True, framework=None, input_model_is_text=True)) - def test_api20_only(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main() - std_out = f.getvalue() - tf_fe_message_found = get_tf_fe_message() in std_out - assert tf_fe_message_found - @patch('openvino.tools.ovc.convert_impl.driver', side_effect=Exception('MESSAGE')) def run_fail_tf_fe(self, mock_driver): from openvino.tools.ovc import convert_model path = os.path.dirname(__file__) - convert_model(os.path.join(path, "test_models", "model_int32.pbtxt"), silent=False) - - def test_suggest_legacy_fe(self): - f = io.StringIO() - with redirect_stdout(f): - try: - self.run_fail_tf_fe() - except: - pass - std_out = f.getvalue() - assert get_try_legacy_fe_message() in std_out - - -class TestInfoMessagesTFFEWithFallback(unittest.TestCase): - @patch('argparse.ArgumentParser.parse_args', - return_value=arg_parse_helper(input_model="model_switch_merge.pbtxt", - use_legacy_frontend=False, use_new_frontend=False, - framework=None, input_model_is_text=True, - freeze_placeholder_with_value="is_training->False")) - def test_tf_fe_message_fallback(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main() - std_out = f.getvalue() - tf_fe_message_found = get_try_legacy_fe_message() in std_out - assert tf_fe_message_found, 'TF FE Info message is found for the fallback case' - - @patch('argparse.ArgumentParser.parse_args', - return_value=arg_parse_helper(input_model="model_int32.pbtxt", - use_legacy_frontend=False, use_new_frontend=True, - compress_to_fp16=True, - framework=None, input_model_is_text=True, - tensorflow_object_detection_api_pipeline_config="config.yml")) - def test_tf_fe_message_fallback(self, mock_argparse): - f = io.StringIO() - with redirect_stdout(f): - main() - std_out = f.getvalue() - tf_fe_message_found = "The provided option \"tensorflow_object_detection_api_pipeline_config\" " \ - "refers to legacy functionality. Please try to install openvino-dev and " \ - "use convert_model() from openvino.tools.mo." 
in std_out - assert not tf_fe_message_found, 'TF FE Info message is found for the fallback case' + convert_model(os.path.join(path, "test_models", "model_int32.pbtxt"), verbose=True) class TestInfoMessagesCompressFP16(unittest.TestCase): diff --git a/tools/ovc/unit_tests/moc_tf_fe/conversion_basic_models_test.py b/tools/ovc/unit_tests/moc_tf_fe/conversion_basic_models_test.py index 047b05d842f317..f8a2afa8415967 100644 --- a/tools/ovc/unit_tests/moc_tf_fe/conversion_basic_models_test.py +++ b/tools/ovc/unit_tests/moc_tf_fe/conversion_basic_models_test.py @@ -13,18 +13,12 @@ @generator class TestMoFreezePlaceholderTFFE(unittest.TestCase): - def basic(self, input_model, argv_input, inputs, dtype, expected, freeze_placeholder_with_value=None, - input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True, - use_legacy_frontend=False): + def basic(self, input_model, argv_input, inputs, dtype, expected, only_conversion=False): path = os.path.dirname(__file__) input_model = os.path.join(path, "test_models", input_model) try: - model = convert_model(input_model, input=argv_input, - freeze_placeholder_with_value=freeze_placeholder_with_value, - input_shape=input_shape, input_model_is_text=input_model_is_text, - use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend, - framework="tf") + model = convert_model(input_model, input=argv_input) except Exception as ex: self.fail("Model conversion failed due to error: {}".format(ex)) @@ -128,31 +122,27 @@ def test_bool(self, input_freezing_value, inputs, expected, None ), ( - None, + "in1,in2,cond->False", {"in1": np.array([2.0, 4.0, 6.0], dtype=np.float32), "in2": np.array([1.0, 3.0, 5.0], dtype=np.float32)}, np.array([2, 4, 6], dtype=np.float32), np.float32, - "cond->False", - None, True # fill a bug to investigate why compilation of this model is hang on ), # case: input_shape + freeze_placeholder_with_value ( - None, + "in2,in1->[2.0 4.0 6.0],cond->True", {"in2": np.array([1.0, 3.0, 5.0], dtype=np.float32)}, np.array([2, 4, 6], dtype=np.float32), np.float32, - "in1->[2.0 4.0 6.0],cond->True", - "[3]", False ), ], ) def test_bool2(self, input_freezing_value, inputs, expected, - dtype=None, freeze_placeholder_with_value=None, input_shape=None, only_conversion=False): - self.basic("model_bool2.pbtxt", input_freezing_value, inputs, dtype, expected, freeze_placeholder_with_value, - input_shape, only_conversion) + dtype=None, only_conversion=False): + self.basic("model_bool2.pbtxt", input_freezing_value, inputs, dtype, expected, + only_conversion) @generate( *[ @@ -173,10 +163,9 @@ def test_bool2(self, input_freezing_value, inputs, expected, ], ) def test_cutting_fp32(self, input_freezing_value, inputs, expected, - dtype=None, freeze_placeholder_with_value=None, input_shape=None, only_conversion=False): + dtype=None, only_conversion=False): self.basic("model_three_inputs.pbtxt", input_freezing_value, inputs, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) + only_conversion) @generate( *[ @@ -204,11 +193,9 @@ def test_cutting_fp32(self, input_freezing_value, inputs, expected, ], ) def test_placeholder_with_default(self, inputs, inputs_data, expected, - dtype=None, freeze_placeholder_with_value=None, input_shape=None, - only_conversion=False): + dtype=None, only_conversion=False): self.basic("placeholder_with_default.pbtxt", inputs, inputs_data, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) + only_conversion) @generate( *[ @@ 
-229,29 +216,12 @@ def test_placeholder_with_default(self, inputs, inputs_data, expected, ], ) def test_freeze_placeholder_with_unknown_rank(self, inputs, inputs_data, expected, - dtype=None, freeze_placeholder_with_value=None, input_shape=None, - only_conversion=False): + dtype=None, only_conversion=False): self.basic("mul_with_unknown_rank_y.pbtxt", inputs, inputs_data, dtype, expected, - freeze_placeholder_with_value, - input_shape, only_conversion, True) - - - def test_conversion_failure_fallback_use_new_frontend(self): - with self.assertRaisesRegex(Exception, - "\[TensorFlow Frontend\] Internal error, no translator found for operation\(s\)\: " - "Enter\, Exit\, LoopCond\, Merge\, NextIteration\, Switch\, TensorArrayGatherV3\, " - "TensorArraySizeV3\, TensorArrayV3"): - self.basic("ctc_model_based.pbtxt", None, None, None, None, - None, None, True, True, True, False) - - @unittest.skip("88349: Fix auto-pruning in legacy FE") - def test_conversion_model_oneshot_iterator_use_legacy_frontend(self): - self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, - None, None, True, True, False, True) + only_conversion) def test_conversion_model_oneshot_iterator_default(self): - self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, - None, None, True, True, False, False) + self.basic("model_oneshot_iterator.pbtxt", None, None, None, None, True) @generate( *[ @@ -272,9 +242,7 @@ def test_conversion_model_oneshot_iterator_default(self): @unittest.skip("109220: Use generating script for this test model instead of Git LFS") def test_conversion_model_with_non_standard_extension(self, input_freezing_value, inputs, expected, dtype): - self.basic("model_fp32.frozen", input_freezing_value, inputs, dtype, expected, only_conversion=False, - input_model_is_text=False, use_new_frontend=True, - use_legacy_frontend=False) + self.basic("model_fp32.frozen", input_freezing_value, inputs, dtype, expected, only_conversion=False) @unittest.skip("109220: Make TF FE to return the error") def test_conversion_dir_model(self): @@ -282,8 +250,7 @@ def test_conversion_dir_model(self): "Internal error or inconsistent input model: the frontend supports " "only frozen binary protobuf format."): self.basic(".", None, None, None, None, - only_conversion=True, input_model_is_text=False, use_new_frontend=True, - use_legacy_frontend=False) + only_conversion=True) @generate( *[ @@ -300,8 +267,7 @@ def test_conversion_dir_model(self): ], ) def test_conversion_pbtxt_model_with_inference(self, inputs, expected, dtype): - self.basic("model_with_if.pbtxt", None, inputs, dtype, expected, only_conversion=False, - input_model_is_text=False, use_new_frontend=True, use_legacy_frontend=False) + self.basic("model_with_if.pbtxt", None, inputs, dtype, expected, only_conversion=False) @generate( *[ @@ -311,18 +277,16 @@ def test_conversion_pbtxt_model_with_inference(self, inputs, expected, dtype): "x[2,3]", {"x": np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32)}, np.array([[12, 13, 10], [11, 14, 16]], dtype=np.float32), - np.float32, True, False, + np.float32 ), ( "model_mul_with_undefined_constant.pbtxt", "x[2]", {"x": np.array([11, -12], dtype=np.int32)}, np.array([0, 0], dtype=np.int32), - np.int32, True, False, + np.int32 ), ], ) - def test_conversion_model_with_undefined_constant(self, model_name, argv_input, inputs, expected, dtype, - use_new_frontend, use_legacy_frontend): - self.basic(model_name, argv_input, inputs, dtype, expected, only_conversion=False, - input_model_is_text=True, 
use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend)
+    def test_conversion_model_with_undefined_constant(self, model_name, argv_input, inputs, expected, dtype):
+        self.basic(model_name, argv_input, inputs, dtype, expected, only_conversion=False)
diff --git a/tools/ovc/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py b/tools/ovc/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py
index b702c7153b68c5..85f26f83215afd 100644
--- a/tools/ovc/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py
+++ b/tools/ovc/unit_tests/moc_tf_fe/conversion_incorrect_models_test.py
@@ -12,31 +12,9 @@
 @generator
 class TestMoFreezePlaceholderTFFE(unittest.TestCase):
-    @generate(
-        *[
-            # the default frontend
-            (
-                False, False, None
-            ),
-            (
-                False, False, "tf"
-            ),
-            # new frontend
-            (
-                True, False, None
-            ),
-            (
-                True, False, "tf"
-            ),
-        ],
-    )
-    def test_conversion_fake_pb_model(self, use_new_frontend, use_legacy_frontend, framework):
-        with self.assertRaisesRegex(Exception,
-                                    "Internal error or inconsistent input model: the frontend supports frozen formats"
-                                    " \(.pb and .pbtxt\), SavedModel and MetaGraph \(.meta\), and v1 checkpoints."):
+    def test_conversion_fake_pb_model(self):
+        # TODO: Should FEs give a detailed report on why a model is rejected, and should we print out that report?
+        with self.assertRaisesRegex(Exception, "Cannot recognize input model."):
             path = os.path.dirname(__file__)
             input_model = os.path.join(path, "test_models", "fake.pb")
-
-            convert_model(input_model,
-                          use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend,
-                          framework=framework)
\ No newline at end of file
+            convert_model(input_model)
\ No newline at end of file
diff --git a/tools/ovc/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py b/tools/ovc/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py
index 4b7a37495cbad3..db0b7b86715de6 100644
--- a/tools/ovc/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py
+++ b/tools/ovc/unit_tests/moc_tf_fe/conversion_with_checkpoint_v1_test.py
@@ -36,7 +36,6 @@ def test_basic_checkpoint_v1(self):
             checkpoint_byte_stream = self.prepare_checkpoint_v1()
             ckpt_file.write(bytes(checkpoint_byte_stream))
             ckpt_file.close()
-            basic_check(input_model="model_with_variable_v1.pbtxt", argv_input=None,
+            basic_check(input_model=["model_with_variable_v1.pbtxt", ckpt_file.name], argv_input=None,
                         input_data={'input1': np.array([[1]], dtype=np.int64)},
-                        expected_dtype=np.int64, expected_value=np.array([[14108583]], dtype=np.int64),
-                        use_new_frontend=True, use_legacy_frontend=False, input_checkpoint=ckpt_file.name)
+                        expected_dtype=np.int64, expected_value=np.array([[14108583]], dtype=np.int64))
diff --git a/tools/ovc/unit_tests/moc_tf_fe/conversion_with_layout_test.py b/tools/ovc/unit_tests/moc_tf_fe/conversion_with_layout_test.py
index 07e3ec1717e2bf..e9d302e7a575da 100644
--- a/tools/ovc/unit_tests/moc_tf_fe/conversion_with_layout_test.py
+++ b/tools/ovc/unit_tests/moc_tf_fe/conversion_with_layout_test.py
@@ -12,86 +12,3 @@
 from openvino.runtime import PartialShape, Dimension
 from openvino.tools.ovc.convert import convert_model
 from openvino.tools.ovc.error import Error
-
-
-@generator
-class TestConversionWithBatchAndLayout(unittest.TestCase):
-    def basic_check(self, model_name: str, batch: int, layout: str, refs_shapes: dict):
-        path = os.path.dirname(__file__)
-        input_model = os.path.join(path, "test_models", model_name)
-        ov_model = convert_model(input_model, batch=batch, layout=layout)
-
-        for ov_input in ov_model.inputs:
- input_name = ov_input.any_name - assert input_name in refs_shapes, "No reference input shape is found for {}".format(input_name) - input_shape = ov_input.get_partial_shape() - ref_shape = refs_shapes[input_name] - assert input_shape == ref_shape, "Incorrect shape for {} input:" \ - " expected shape - {}, actual shape - {}".format(input_name, ref_shape, - input_shape) - - @unittest.skip("Fix importing of openvino.test_utils in Jenkins") - def test_basic_model_no_layout(self): - from openvino.test_utils import compare_functions - path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", "model_fp32.pbtxt") - ov_model = convert_model(input_model) - - # compare with the reference graph - param1 = opset11.parameter([2, 2], name="in1", dtype=np.float32) - param2 = opset11.parameter([2, 2], name="in2", dtype=np.float32) - add = opset11.add(param1, param2, name="add") - ref_model = Model(add, [param1, param2]) - flag, msg = compare_functions(ov_model, ref_model, compare_tensor_names=False) - assert flag, msg - - @generate( - *[ - ( - "model_fp32.pbtxt", 5, "in1(cn),in2(cn)", - {"in1": PartialShape([2, 5]), "in2": PartialShape([2, 5])}, - ), - ( - "model_fp32.pbtxt", 9, "in1(nc),in2(nc)", - {"in1": PartialShape([9, 2]), "in2": PartialShape([9, 2])}, - ), - ( - "model_fp32.pbtxt", 7, "in1(?c),in2(?c)", - {"in1": PartialShape([2, 2]), "in2": PartialShape([2, 2])}, - ), - ], - ) - def test_basic_model_with_layout(self, model_name: str, batch: int, layout: str, refs_shapes: dict): - self.basic_check(model_name, batch, layout, refs_shapes) - - @generate( - *[ - ( - "model_with_convolution_dynamic_rank.pbtxt", 7, "x(n???),kernel(????)", - {"x": PartialShape([7, Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic()]), - "kernel": PartialShape([2, 2, 3, 1])}, - ), - ( - "model_with_convolution_dynamic_rank.pbtxt", 3, "x(???n),kernel(??n?)", - {"x": PartialShape([Dimension.dynamic(), Dimension.dynamic(), Dimension.dynamic(), 3]), - "kernel": PartialShape([2, 2, 3, 1])}, - ), - ], - ) - def test_model_with_convolution_dynamic_rank(self, model_name: str, batch: int, layout: str, refs_shapes: dict): - self.basic_check(model_name, batch, layout, refs_shapes) - - @generate( - *[ - ( - "model_fp32.pbtxt", 17, "", - {}, - ), - ], - ) - def test_model_expected_failure(self, model_name: str, batch: int, layout: str, refs_shapes: dict): - # try to override batch size by default index (without specifying layout) - with self.assertRaisesRegex(Error, - "When you use \"batch\" option, Model Conversion API applies its value to the first " - "element of the shape if it is equal to -1, 0 or 1\."): - self.basic_check(model_name, batch, layout, refs_shapes) diff --git a/tools/ovc/unit_tests/moc_tf_fe/utils.py b/tools/ovc/unit_tests/moc_tf_fe/utils.py index 9c79f4676266a9..90ce7a3e5d2c8b 100644 --- a/tools/ovc/unit_tests/moc_tf_fe/utils.py +++ b/tools/ovc/unit_tests/moc_tf_fe/utils.py @@ -9,17 +9,15 @@ from openvino.tools.ovc.convert import convert_model -def basic_check(input_model, argv_input, input_data, expected_dtype, expected_value, freeze_placeholder_with_value=None, - input_shape=None, only_conversion=False, input_model_is_text=True, use_new_frontend=True, - use_legacy_frontend=False, extensions=None, input_checkpoint=None): +def basic_check(input_model, argv_input, input_data, expected_dtype, expected_value, \ + only_conversion=False, input_model_is_text=True, use_new_frontend=True, extensions=None): path = os.path.dirname(__file__) - input_model = os.path.join(path, "test_models", 
input_model)
+    if isinstance(input_model, (tuple, list)):
+        input_model = tuple(os.path.join(path, "test_models", part) for part in input_model)
+    else:
+        input_model = os.path.join(path, "test_models", input_model)
-    ov_model = convert_model(input_model, input=argv_input,
-                             freeze_placeholder_with_value=freeze_placeholder_with_value,
-                             input_shape=input_shape, input_model_is_text=input_model_is_text,
-                             use_new_frontend=use_new_frontend, use_legacy_frontend=use_legacy_frontend,
-                             framework="tf", extensions=extensions, input_checkpoint=input_checkpoint)
+    ov_model = convert_model(input_model, input=argv_input, extensions=extensions)
     if only_conversion:
         return ov_model
diff --git a/tools/ovc/unit_tests/ovc/convert/import_from_mo_test.py b/tools/ovc/unit_tests/ovc/convert/import_from_mo_test.py
index 362d02dca487bb..e2aa1a504f660a 100644
--- a/tools/ovc/unit_tests/ovc/convert/import_from_mo_test.py
+++ b/tools/ovc/unit_tests/ovc/convert/import_from_mo_test.py
@@ -6,10 +6,11 @@
 from pathlib import Path
 from generator import generator, generate
-from openvino.runtime import serialize, InputCutInfo, LayoutMap
+from openvino.runtime import serialize
+from openvino.tools.ovc import InputCutInfo
 from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
-from utils import create_onnx_model, save_to_onnx
+from unit_tests.ovc.convert.utils import create_onnx_model, save_to_onnx
 @generator
@@ -57,11 +58,10 @@ def create_onnx_model():
     @generate(*[
         ({}),
         ({'input': InputCutInfo(name='LeakyRelu_out', shape=None, type=None, value=None)}),
-        ({'layout': {'input': LayoutMap(source_layout='NCHW', target_layout='NHWC')}}),
     ])
     # Checks convert import from openvino.tools.mo
     def test_import(self, params):
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
         with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
             model = create_onnx_model()
@@ -73,7 +73,7 @@ def test_import(self, params):
             assert os.path.exists(out_xml)
     def test_input_model_path(self):
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
         with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
             model = self.create_onnx_model()
@@ -87,14 +87,14 @@ def test_input_model_path(self):
     def test_unnamed_input_model(self):
-        from openvino.runtime import convert_model
+        from openvino.tools.ovc import convert_model
         with tempfile.TemporaryDirectory(dir=self.test_directory) as tmpdir:
             model = self.create_onnx_model()
             model_path = save_to_onnx(model, tmpdir)
             out_xml = os.path.join(tmpdir, "model.xml")
             ov_model = convert_model(model_path)
-            serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8'))
+            # serialize(ov_model, out_xml.encode('utf-8'), out_xml.replace('.xml', '.bin').encode('utf-8'))  # TODO: check that the model is correct
diff --git a/tools/ovc/unit_tests/ovc/convert/meta_data_test.py b/tools/ovc/unit_tests/ovc/convert/meta_data_test.py
index 755116cca0fa60..e000edcaf8f8a6 100644
--- a/tools/ovc/unit_tests/ovc/convert/meta_data_test.py
+++ b/tools/ovc/unit_tests/ovc/convert/meta_data_test.py
@@ -7,7 +7,8 @@
 from generator import generator
 from openvino.runtime import get_version as get_rt_version
-from openvino.runtime import serialize, convert_model
+from openvino.runtime import serialize
+from openvino.tools.ovc import convert_model
 from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry
 from utils import save_to_onnx
@@ -58,7 +59,6 @@
def ref_meta_data(): return { 'Runtime_version': get_rt_version(), - 'legacy_frontend': "False", 'conversion_parameters': { 'input_model': Path.joinpath(Path("DIR"), Path("model.onnx")), } diff --git a/tools/ovc/unit_tests/ovc/utils/args_to_string_test.py b/tools/ovc/unit_tests/ovc/utils/args_to_string_test.py index 755019d910dfa4..4b265647c7b0cc 100644 --- a/tools/ovc/unit_tests/ovc/utils/args_to_string_test.py +++ b/tools/ovc/unit_tests/ovc/utils/args_to_string_test.py @@ -2,60 +2,13 @@ # SPDX-License-Identifier: Apache-2.0 import numpy as np -from openvino.runtime import Layout, PartialShape, Dimension, Shape, Type +from openvino.runtime import Layout, Dimension -from openvino.runtime import InputCutInfo, LayoutMap -from openvino.tools.ovc.cli_parser import mean_scale_value_to_str, \ - transform_param_to_str, str_list_to_str, source_target_layout_to_str, layout_param_to_str +from openvino.tools.ovc.cli_parser import str_list_to_str from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry class TestConvertingConvertArgumentsToString(UnitTestWithMockedTelemetry): - def test_mean_scale_value_to_str(self): - values = [0.5, 1.3, 0.67] - self.assertTrue(mean_scale_value_to_str(values) == "[0.5,1.3,0.67]") - - values = {"input": [0.5, 1.3, 0.67]} - self.assertTrue(mean_scale_value_to_str(values) == "input[0.5,1.3,0.67]") - - values = {"input1": [0.5, 1.3, 0.67], "input2": [4.2, 6.7, 3.15], "input3": [0.757, 4.6, 7.3]} - self.assertTrue(mean_scale_value_to_str(values) == - "input1[0.5,1.3,0.67],input2[4.2,6.7,3.15],input3[0.757,4.6,7.3]") - - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {("a", "b"): [0.5, 1.3, 0.67]}}) - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": {"name": Dimension(1)}}) - self.assertRaises(Exception, mean_scale_value_to_str, **{"value": Dimension(1)}) - - def test_transform_param_to_str(self): - transform = 'MakeStateful' - self.assertTrue(transform_param_to_str(transform) == "MakeStateful") - - transform1 = ('LowLatency2', {'use_const_initializer': False}) - self.assertTrue(transform_param_to_str(transform1) == - "LowLatency2[use_const_initializer=False]") - - transform2 = ('MakeStateful', {'param_res_names': { - 'input_name_1': 'output_name_1', 'input_name_2': 'output_name_2'}}) - self.assertTrue(transform_param_to_str(transform2) == - "MakeStateful[param_res_names={\'input_name_1\':\'output_name_1\'," - "\'input_name_2\':\'output_name_2\'}]") - - transform = [transform1, transform2] - - self.assertTrue(transform_param_to_str(transform) == "LowLatency2[use_const_initializer=False]," - "MakeStateful[param_res_names={" - "\'input_name_1\':\'output_name_1\'," - "\'input_name_2\':\'output_name_2\'}]") - - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', - {'use_const_initializer': False}, - "param")}) - self.assertRaises(Exception, transform_param_to_str, **{"value": (("a", "b"), {})}) - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', Dimension(1))}) - self.assertRaises(Exception, transform_param_to_str, **{"value": ('LowLatency2', - {('a', 'b'): False})}) - self.assertRaises(Exception, transform_param_to_str, **{"value": Dimension(1)}) - def test_str_list_to_str(self): list_str = ["data1", "data2", "data3"] self.assertTrue(str_list_to_str(list_str) == "data1,data2,data3") @@ -65,44 +18,3 @@ def test_str_list_to_str(self): self.assertRaises(Exception, str_list_to_str, **{"values": [int, 1]}) self.assertRaises(Exception, str_list_to_str, 
**{"values": Dimension(1)}) - - def test_source_target_layout_to_str(self): - layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"} - self.assertTrue(source_target_layout_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)") - - self.assertRaises(Exception, source_target_layout_to_str, **{"value": {"op": Dimension(1)}}) - self.assertRaises(Exception, source_target_layout_to_str, **{"value": {("a", "b"): Layout("nhwc")}}) - self.assertRaises(Exception, source_target_layout_to_str, **{"value": Dimension(1)}) - - def test_layout_param_to_str_to_str(self): - layout = {"input1": Layout("nhwc"), "input2": Layout("n??"), "input3": "nchw"} - self.assertTrue(layout_param_to_str(layout) == "input1([N,H,W,C]),input2([N,?,?]),input3(nchw)") - - layout_map1 = LayoutMap(source_layout=Layout("n??"), target_layout=None) - layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw")) - layout_map3 = LayoutMap(source_layout="abc", target_layout="cab") - - layout = {"input1": layout_map1, "input2": layout_map2, "input3": layout_map3, "input4": Layout("nhwc"), - "input5": "n?"} - - self.assertTrue(layout_param_to_str(layout) == "input1([N,?,?]),input2([N,H,W,C]->nchw)," - "input3(abc->cab),input4([N,H,W,C]),input5(n?)") - - self.assertRaises(Exception, layout_param_to_str, **{"value": {"op": Dimension(1)}}) - self.assertRaises(Exception, layout_param_to_str, **{"value": {("a", "b"): Layout("nhwc")}}) - self.assertRaises(Exception, layout_param_to_str, **{"value": Dimension(1)}) - - layout = ["nhwc", "[n,c]"] - self.assertTrue(layout_param_to_str(layout) == "nhwc,[n,c]") - - layout = ["abc->cab", "..nc"] - self.assertTrue(layout_param_to_str(layout) == "abc->cab,..nc") - - layout_map1 = LayoutMap(source_layout=Layout("n??"), target_layout=None) - layout = [layout_map1, "..nc"] - self.assertTrue(layout_param_to_str(layout) == "[N,?,?],..nc") - - layout_map2 = LayoutMap(source_layout=Layout("nhwc"), target_layout=("nchw")) - layout_map3 = LayoutMap(source_layout="abc", target_layout="cab") - layout = [layout_map2, layout_map3] - self.assertTrue(layout_param_to_str(layout) == "[N,H,W,C]->nchw,abc->cab") diff --git a/tools/ovc/unit_tests/ovc/utils/cli_parser_test.py b/tools/ovc/unit_tests/ovc/utils/cli_parser_test.py index 37fb7eabc47fb2..a805649c67f1e1 100644 --- a/tools/ovc/unit_tests/ovc/utils/cli_parser_test.py +++ b/tools/ovc/unit_tests/ovc/utils/cli_parser_test.py @@ -12,480 +12,40 @@ import numpy as np -from openvino.tools.ovc.cli_parser import get_placeholder_shapes, get_tuple_values, get_mean_scale_dictionary, \ - get_model_name, \ - parse_tuple_pairs, check_positive, writable_dir, readable_dirs, \ - readable_file, get_freeze_placeholder_values, parse_transform, check_available_transforms, get_layout_values, get_all_cli_parser, \ +from openvino.tools.ovc.cli_parser import input_to_input_cut_info, \ + check_positive, writable_dir, readable_dirs, \ + readable_file, get_freeze_placeholder_values, get_all_cli_parser, \ get_mo_convert_params from openvino.tools.ovc.convert_impl import pack_params_to_args_namespace from openvino.tools.ovc.error import Error from unit_tests.ovc.unit_test_with_mocked_telemetry import UnitTestWithMockedTelemetry -from openvino.runtime import PartialShape, Dimension, Layout, InputCutInfo, LayoutMap - - -class TestingMeanScaleGetter(UnitTestWithMockedTelemetry): - def test_tuple_parser(self): - tuple_values = "data(1.1,22.22,333.333),info[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - 'data': 
np.array([1.1, 22.22, 333.333]), - 'info': np.array([2.2, 33.33, 444.444]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_name_digits_only(self): - tuple_values = "0448(1.1,22.22,333.333),0449[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - '0448': np.array([1.1, 22.22, 333.333]), - '0449': np.array([2.2, 33.33, 444.444]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_same_values(self): - tuple_values = "data(1.1,22.22,333.333),info[1.1,22.22,333.333]" - result = parse_tuple_pairs(tuple_values) - exp_res = { - 'data': np.array([1.1, 22.22, 333.333]), - 'info': np.array([1.1, 22.22, 333.333]) - } - for el in exp_res.keys(): - assert np.array_equal(result[el], exp_res[el]) - - def test_tuple_parser_no_inputs(self): - tuple_values = "(1.1,22.22,333.333),[2.2,33.33,444.444]" - result = parse_tuple_pairs(tuple_values) - exp_res = [np.array([1.1, 22.22, 333.333]), - np.array([2.2, 33.33, 444.444])] - for i in range(0, len(exp_res)): - assert np.array_equal(result[i], exp_res[i]) - - def test_tuple_parser_error_mixed_with_and_without_name(self): - tuple_values = "(1.1,22.22,333.333),data[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_1(self): - tuple_values = "data(1.1,22.22,333.333),[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_digits(self): - tuple_values = "(0.1,22.22,333.333),0448[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_tuple_parser_error_mixed_with_and_without_name_digits_1(self): - tuple_values = "447(1.1,22.22,333.333),[2.2,33.33,444.444]" - self.assertRaises(Error, parse_tuple_pairs, tuple_values) - - def test_mean_scale_no_input(self): - mean_values = "data(1.1,22.22,333.333)" - scale_values = "info[1.1,22.22,333.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - }, - 'info': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_scale_no_input_diff_len(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,333.333)" - scale_values = "info[1.1,22.22,333.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - }, - 'info': { - 'mean': np.array([2.1, 33.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_only_input(self): - mean_values = "data(1.1,22.22,333.333)" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(''), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - } - } - for input 
in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_scale_only_input(self): - scale_values = "data(1.1,22.22,333.333)" - result = get_mean_scale_dictionary(parse_tuple_pairs(''), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_scale_only_no_input(self): - scale_values = "(1.1,22.22,333.333)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, None) - exp_res = [ - [ - None, - np.array([1.1, 22.22, 333.333]) - ] - ] - for i in range(len(exp_res)): - for j in range(len(exp_res[i])): - if type(exp_res[i][j]) is np.ndarray: - assert np.array_equal(exp_res[i][j], result[i][j]) - else: - self.assertEqual(exp_res[i][j], result[i][j]) - - def test_scale_only_with_input(self): - scale_values = "(1.1,22.22,333.333)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data') - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_2_scale_only_with_input(self): - scale_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)" - mean_values = "" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data,info') - exp_res = { - 'data': { - 'mean': None, - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': None, - 'scale': np.array([1.2, 22.33, 333.444]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_2_mean_only_with_input(self): - scale_values = "" - mean_values = "(1.1,22.22,333.333),(1.2,22.33,333.444)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data,info') - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None, - }, - 'info': { - 'mean': np.array([1.2, 22.33, 333.444]), - 'scale': None, - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_only_with_input(self): - scale_values = "" - mean_values = "(1.1,22.22,333.333)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, 'data') - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': None - } - } - for 
input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_mean_scale_diff_no_input(self): - scale_values = "(1.1,22.22,333.333),(1.1,22.22,333.333)" - mean_values = "(2.1,11.22,444.333)" - mean = parse_tuple_pairs(mean_values) - scale = parse_tuple_pairs(scale_values) - result = get_mean_scale_dictionary(mean, scale, None) - exp_res = [ - [ - np.array([2.1, 11.22, 444.333]), # mean - np.array([1.1, 22.22, 333.333]) # scale - ], - [ - None, # mean - np.array([1.1, 22.22, 333.333]) # scale - ] - ] - for i in range(len(exp_res)): - for j in range(len(exp_res[i])): - if type(exp_res[i][j]) is np.ndarray: - assert np.array_equal(exp_res[i][j], result[i][j]) - else: - self.assertEqual(exp_res[i][j], result[i][j]) - - def test_multi_mean_scale_no_input(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)" - scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_input(self): - mean_values = "data(1.1,22.22,333.333),info(2.1,33.22,444.333)" - scale_values = "data[1.1,22.22,333.333],info[2.1,33.22,444.333]" - input_names = 'data,info' - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_input_arrays(self): - mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)" - scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]" - input_names = 'data,info' - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), input_names) - exp_res = { - 'data': { - 'mean': np.array([1.1, 22.22, 333.333]), - 'scale': np.array([1.1, 22.22, 333.333]) - }, - 'info': { - 'mean': np.array([2.1, 33.22, 444.333]), - 'scale': np.array([2.1, 33.22, 444.333]) - } - } - for input in exp_res.keys(): - for key in exp_res[input].keys(): - if type(exp_res[input][key]) is np.ndarray: - assert np.array_equal(exp_res[input][key], result[input][key]) - else: - self.assertEqual(exp_res[input][key], result[input][key]) - - def test_multi_mean_scale_arrays_no_input(self): - mean_values = "(1.1,22.22,333.333),(2.1,33.22,444.333)" - scale_values = "[1.1,22.22,333.333],[2.1,33.22,444.333]" - result = get_mean_scale_dictionary(parse_tuple_pairs(mean_values), parse_tuple_pairs(scale_values), None) - exp_res = [ - [ - np.array([1.1, 22.22, 
333.333]), # mean - np.array([1.1, 22.22, 333.333]) # scale - ], - [ - np.array([2.1, 33.22, 444.333]), # mean - np.array([2.1, 33.22, 444.333]) # scale - ] - ] - for i in range(0, len(exp_res)): - for j in range(0, len(exp_res[i])): - assert np.array_equal(exp_res[i][j], result[i][j]) - - def test_scale_do_not_match_input(self): - scale_values = parse_tuple_pairs("input_not_present(255),input2(255)") - mean_values = parse_tuple_pairs("input1(255),input2(255)") - self.assertRaises(Error, get_mean_scale_dictionary, mean_values, scale_values, "input1,input2") - - def test_mean_do_not_match_input(self): - scale_values = parse_tuple_pairs("input1(255),input2(255)") - mean_values = parse_tuple_pairs("input_not_present(255),input2(255)") - self.assertRaises(Error, get_mean_scale_dictionary, mean_values, scale_values, "input1,input2") - - def test_values_match_input_name(self): - # to be sure that we correctly processes complex names - res_values = parse_tuple_pairs("input255(255),input255.0(255.0),multi-dotted.input.3.(255,128,64)") - exp_res = {'input255': np.array([255.0]), - 'input255.0': np.array([255.0]), - 'multi-dotted.input.3.': np.array([255., 128., 64.])} - self.assertEqual(len(exp_res), len(res_values)) - for i, j in zip(exp_res, res_values): - self.assertEqual(i, j) - assert np.array_equal(exp_res[i], res_values[j]) - - def test_input_without_values(self): - self.assertRaises(Error, parse_tuple_pairs, "input1,input2") - - -class TestSingleTupleParsing(UnitTestWithMockedTelemetry): - def test_get_values_ideal(self): - values = "(1.11, 22.22, 333.333)" - result = get_tuple_values(values) - exp_res = ['1.11, 22.22, 333.333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_spaces(self): - values = "(1 , 22 ,333)" - result = get_tuple_values(values) - exp_res = ['1 , 22 ,333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_square(self): - values = "[1,22,333]" - result = get_tuple_values(values) - exp_res = ['1,22,333'] - self.assertEqual(exp_res, result) - - def test_get_values_ideal_square_spaces(self): - values = "[1 , 22 ,333]" - result = get_tuple_values(values) - exp_res = ['1 , 22 ,333'] - self.assertEqual(exp_res, result) - - def test_get_neg_values_ideal(self): - values = "(-1,-22,-333)" - result = get_tuple_values(values) - exp_res = ['-1,-22,-333'] - self.assertEqual(exp_res, result) - - def test_get_neg_values_minus(self): - values = "(-1,--22,-3-33)" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_unbalanced(self): - values = "(1,22,333]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_unbalanced2(self): - values = "[1,22,333)" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_exactly_3(self): - values = "[1,22,333,22]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_exactly_3_1(self): - values = "[1,22]" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_empty(self): - values = "" - self.assertRaises(Error, get_tuple_values, values) - - def test_get_values_empty_tuple(self): - values = () - result = get_tuple_values(values) - exp_res = () - self.assertEqual(exp_res, result) +from openvino.runtime import PartialShape, Dimension, Layout +from openvino.tools.ovc import InputCutInfo class TestShapesParsing(UnitTestWithMockedTelemetry): - def test_get_shapes_several_inputs_several_shapes(self): - argv_input = "inp1,inp2" - input_shapes = "(1,22,333,123), (-1,45,7,1)" - inputs_list, result, _ = 
get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([1, 22, 333, 123]), 'inp2': np.array([-1, 45, 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - def test_get_shapes_several_inputs_several_shapes2(self): # shapes specified using --input command line parameter and no values - argv_input = "inp1[1 22 333 123],inp2[-1 45 7 1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([1, 22, 333, 123]), 'inp2': np.array([-1, 45, 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + argv_input = "inp1[1,22,333,123],inp2[-1,45,7,1]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([1,22,333,123])), + InputCutInfo(name='inp2', shape=PartialShape([-1,45,7,1]))] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {} - input_node_names_ref = "inp1,inp2" self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_and_freezing_with_scalar_and_without_shapes_in_input(self): # shapes and value for freezing specified using --input command line parameter argv_input = "inp1,inp2->157" - input_list, result_shapes, _ = get_placeholder_shapes(argv_input, None) - ref_shapes = {'inp1': None, 'inp2': None} - self.assertEqual(list(ref_shapes.keys()), list(result_shapes.keys())) - self.assertEqual(input_list, ["inp1","inp2"]) - for i in ref_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_shapes[i]) - - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1'), + InputCutInfo(name='inp2', value=157)] + self.assertEqual(inputs, inputs_ref) + + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp2': 157} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) @@ -495,106 +55,51 @@ def test_get_shapes_and_freezing_with_scalar_and_without_shapes_in_input(self): def test_get_shapes_and_freezing_with_scalar(self): # shapes and value for freezing specified using --input command line parameter argv_input = "inp1,inp2[]->157" - input_list, result_shapes, _ = get_placeholder_shapes(argv_input, None) - ref_shapes = {'inp1': None, 'inp2': ()} - self.assertEqual(list(ref_shapes.keys()), list(result_shapes.keys())) - for i in ref_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_shapes[i]) - self.assertEqual(input_list, ["inp1","inp2"]) - - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp2': 157} - - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - for i in placeholder_values_ref.keys(): - self.assertEqual(placeholder_values_res[i], placeholder_values_ref[i]) + inputs = 
input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1'), + InputCutInfo(name='inp2', shape=PartialShape([]), value=157)] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_several_inputs_several_shapes3(self): # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - input_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3,2,3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2', shape=PartialShape([3,2,3])), + InputCutInfo(name='inp3', shape=PartialShape([5]), value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(input_list, ["inp1","inp2","inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_shapes3_comma_sep(self): # shapes and value for freezing specified using --input command line parameter argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0, 1.0, 2.0, 3.0,5.0]" - input_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), - 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(input_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_shapes4(self): - # shapes specified using --input_shape and values for freezing using --input command line parameter - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3,2,3), (5)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = 
[InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']),
+                      InputCutInfo(name='inp2', shape=PartialShape([3,2,3])),
+                      InputCutInfo(name='inp3', shape=PartialShape([5]), value=['1.0', '1.0', '2.0', '3.0', '5.0'])]
+        self.assertEqual(inputs, inputs_ref)
+        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input)
         placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])}
-        input_node_names_ref = "inp1,inp2,inp3"
         self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys()))
-        self.assertEqual(inputs_list, ["inp1","inp2","inp3"])
-        for i in placeholder_values_ref.keys():
-            assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i])
-        self.assertEqual(input_node_names_ref, input_node_names_res)
-
-    def test_get_shapes_several_inputs_several_shapes5(self):
-        # some values for freezing specified using --freeze_placeholder_with_value
-        argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]"
-        input_shapes = "(3,1), (3,2,3), (5)"
-        argv_freeze_placeholder_with_value = "inp2->[5.0 7.0 3.0],inp4->[100.0 200.0]"
-
-        inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes)
-        exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        self.assertEqual(inputs_list, ["inp1","inp2","inp3"])
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input,
-                                                                                     argv_freeze_placeholder_with_value)
-        placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']),
-                                  'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'], ),
-                                  'inp2': np.array(['5.0', '7.0', '3.0']), 'inp4': np.array(['100.0', '200.0'])}
-        input_node_names_ref = "inp1,inp2,inp3"
-        self.assertEqual(sorted(list(placeholder_values_res.keys())), sorted(list(placeholder_values_ref.keys())))
         for i in placeholder_values_ref.keys():
             assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i])
-        self.assertEqual(input_node_names_ref, input_node_names_res)

     def test_get_shapes_several_inputs_several_shapes6(self):
         # 0D value for freezing specified using --input command line parameter without shape
-        argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3->False"
-        inputs_list, result, _ = get_placeholder_shapes(argv_input, None)
-        exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([3, 2, 3]), 'inp3': None}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        self.assertEqual(inputs_list, ["inp1","inp2","inp3"])
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None)
+        argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3,2,3],inp3->False"
+        # parse the --input string into a list of InputCutInfo entries
+        inputs = input_to_input_cut_info(argv_input)
+        inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']),
+                      InputCutInfo(name='inp2', shape=PartialShape([3,2,3])),
+                      InputCutInfo(name='inp3', value=False)]
+        self.assertEqual(inputs, inputs_ref)
+        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input)
         placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False}
         self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys()))
         for i in placeholder_values_ref.keys():
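For reviewers: a minimal standalone sketch of the parsing pattern the converted tests above all share, assuming input_to_input_cut_info and get_freeze_placeholder_values are importable from openvino.tools.ovc.cli_parser (the module under test) and InputCutInfo from openvino.tools.ovc, as the patched imports suggest:

    from openvino.runtime import PartialShape
    from openvino.tools.ovc import InputCutInfo
    from openvino.tools.ovc.cli_parser import (get_freeze_placeholder_values,
                                               input_to_input_cut_info)

    # Each comma-separated entry of --input becomes one InputCutInfo with an
    # optional PartialShape, an optional type and an optional value to freeze.
    argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3,2,3],inp3->False"
    inputs = input_to_input_cut_info(argv_input)
    assert inputs == [InputCutInfo(name='inp1', shape=PartialShape([3, 1]),
                                   value=['1.0', '2.0', '3.0']),
                      InputCutInfo(name='inp2', shape=PartialShape([3, 2, 3])),
                      InputCutInfo(name='inp3', value=False)]

    # get_freeze_placeholder_values now takes only the --input string and
    # returns the values to freeze, keyed by input name.
    values, _ = get_freeze_placeholder_values(argv_input)
    assert list(values.keys()) == ['inp1', 'inp3']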
@@ -602,14 +107,14 @@ def test_get_shapes_several_inputs_several_shapes6(self):

     def test_get_shapes_several_inputs_several_shapes7(self):
         # 0D shape and value for freezing specified using --input command line parameter
-        argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[]->True"
-        inputs_list, result, _ = get_placeholder_shapes(argv_input, None)
-        exp_res = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array(False).shape}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        self.assertEqual(inputs_list, ["inp1","inp2","inp3"])
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None)
+        argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3,2,3],inp3[]->True"
+        # parse the --input string into a list of InputCutInfo entries
+        inputs = input_to_input_cut_info(argv_input)
+        inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']),
+                      InputCutInfo(name='inp2', shape=PartialShape([3,2,3])),
+                      InputCutInfo(name='inp3', shape=PartialShape([]), value=True)]
+        self.assertEqual(inputs, inputs_ref)
+        placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input)
         placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True}
         self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys()))
         for i in placeholder_values_ref.keys():
@@ -617,590 +122,284 @@ def test_get_shapes_several_inputs_several_shapes7(self):
     def test_get_shapes_and_data_types1(self):
         argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]"
-        input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "")
-        ref_result_shapes = {'inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3': np.array([5])}
-        ref_result_data_types = {'inp2': np.int32, 'inp3': np.float32}
-        self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys()))
-        for i in ref_result_shapes.keys():
-            assert np.array_equal(result_shapes[i], ref_result_shapes[i])
-        self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys()))
-        self.assertEqual(input_list, ["inp1","inp2","inp3"])
-        for i in ref_result_data_types.keys():
-            np.testing.assert_equal(result_data_types[i], ref_result_data_types[i])
+        inputs = input_to_input_cut_info(argv_input)
+        inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']),
+                      InputCutInfo(name='inp2', shape=PartialShape([3,2,3]), type=np.int32),
+                      InputCutInfo(name='inp3', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])]
+        self.assertEqual(inputs, inputs_ref)

     def test_get_shapes_and_data_types_with_input_ports(self):
         argv_input = "1:inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},0:inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]"
-        input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "")
-        ref_result_shapes = {'1:inp1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), '0:inp3': np.array([5])}
-        ref_result_data_types = {'inp2': np.int32, '0:inp3': np.float32}
-        self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys()))
-        for i in ref_result_shapes.keys():
-            assert np.array_equal(result_shapes[i], ref_result_shapes[i])
-        self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys()))
-        self.assertEqual(input_list, ["1:inp1","inp2","0:inp3"])
-        for i
in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='1:inp1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2', shape=PartialShape([3,2,3]), type=np.int32), + InputCutInfo(name='0:inp3', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_with_output_ports(self): argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3:4': np.array([5])} - ref_result_data_types = {'inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2', shape=PartialShape([3,2,3]), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_with_output_ports_comma_sep(self): argv_input = "inp1:1[3,1]->[1.0,2.0 ,3.0],inp2[3,2, 3]{i32},inp3:4[5]{f32}->[1.0, 1.0,2.0, 3.0,5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': np.array([3, 1]), 'inp2': np.array([3, 2, 3]), 'inp3:4': np.array([5])} - ref_result_data_types = {'inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2', shape=PartialShape([3,2,3]), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_shape_only(self): argv_input = "placeholder1[3 1],placeholder2,placeholder3" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1': np.array([3, 1]), 'placeholder2': None, - 'placeholder3': None} - ref_result_data_types = {} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), 
list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1","placeholder2","placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='placeholder1', shape=PartialShape([3,1])), + InputCutInfo(name='placeholder2'), + InputCutInfo(name='placeholder3')] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_shape_with_ports_only(self): argv_input = "placeholder1:4[3 1],placeholder2,2:placeholder3" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1:4': np.array([3, 1]), 'placeholder2': None, - '2:placeholder3': None} - ref_result_data_types = {} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1:4","placeholder2","2:placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='placeholder1:4', shape=PartialShape([3,1])), + InputCutInfo(name='placeholder2'), + InputCutInfo(name='2:placeholder3')] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_when_no_freeze_value(self): argv_input = "placeholder1{i32}[3 1],placeholder2,placeholder3{i32}" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'placeholder1': np.array([3, 1]), 'placeholder2': None, - 'placeholder3': None} - ref_result_data_types = {'placeholder1': np.int32, 'placeholder3': np.int32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["placeholder1","placeholder2","placeholder3"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='placeholder1', shape=PartialShape([3,1]), type=np.int32), + InputCutInfo(name='placeholder2'), + InputCutInfo(name='placeholder3', type=np.int32)] + self.assertEqual(inputs, inputs_ref) def test_wrong_data_types(self): argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3]{abracadabra},inp3[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_shapes_specified_using_both_params(self): - # shapes specified using both command line parameter --input and --input_shape - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3,2,3), (5)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) + self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_shape_and_value_shape_mismatch(self): # size of value tensor does not correspond to specified shape for the third node argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3 2 3],inp3[5 3]->[2.0 3.0 5.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, None) + 
self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_wrong_data_for_input_cmd_param(self): # test that wrongly formatted data specified in --input is handled properly argv_input = "abc->[1.0" - self.assertRaises(Error, get_freeze_placeholder_values, argv_input, None) + self.assertRaises(Error, get_freeze_placeholder_values, argv_input) argv_input = "def[2 2]->[1.0 2.0 3.0 4.0],abc->1.0 34]" - self.assertRaises(Error, get_freeze_placeholder_values, argv_input, None) - - def test_get_shapes_several_inputs_several_shapes_not_equal(self): - argv_input = "inp1,inp2,inp3" - input_shapes = "(1,22,333,123), (-1,45,7,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_several_shapes_one_input(self): - argv_input = "inp1" - input_shapes = "(1,22,333,123), (-1,45,7,1), (-1,456,7,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_one_shape(self): - argv_input = "inp1" - input_shapes = "(1,22,333,123)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([1, 22, 333, 123])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) + self.assertRaises(Error, get_freeze_placeholder_values, argv_input) def test_get_shapes_no_input_no_shape(self): argv_input = "" - input_shapes = "" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = None - assert np.array_equal(result, exp_res) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [] + self.assertEqual(inputs, inputs_ref) - def test_get_shapes_no_input_one_shape(self): - argv_input = "" - input_shapes = "(12,4,1)" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = np.array([12, 4, 1]) - assert np.array_equal(result, exp_res) def test_get_shapes_no_input_one_shape2(self): - argv_input = "" - input_shapes = "[12,4,1]" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = np.array([12, 4, 1]) - assert np.array_equal(result, exp_res) + argv_input = "[12,4,1]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(shape=PartialShape([12,4,1]))] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_for_scalar_inputs(self): - argv_input = "" - input_shapes = "[]" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - ref_result = np.array([]) - assert np.array_equal(result, ref_result) + argv_input = "[]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(shape=PartialShape([]))] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_two_input_shapes_with_scalar(self): - argv_input = "" - input_shapes = "[12,4,1],[]" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - ref_result = [np.array([12, 4, 1]), np.array([])] - for shape, ref_shape in zip(result, ref_result): - assert np.array_equal(shape, ref_shape) + argv_input = "[12,4,1],[]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(shape=PartialShape([12,4,1])), + InputCutInfo(shape=PartialShape([]))] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_two_input_shapes(self): - argv_input = "" - input_shapes = "[12,4,1],[10]" - _, result, _ = get_placeholder_shapes(argv_input, input_shapes) - ref_result = [np.array([12, 4, 1]), np.array([10])] - for shape, ref_shape in zip(result, ref_result): - assert 
np.array_equal(shape, ref_shape) - - def test_get_shapes_two_named_input_shapes_with_scalar(self): - argv_input = "inp1,inp2" - input_shapes = "[12,4,1],[]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - - exp_res = {'inp1': np.array([12, 4, 1]), 'inp2': np.array([])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) + argv_input = "[12,4,1],[10]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(shape=PartialShape([12,4,1])), + InputCutInfo(shape=PartialShape([10])),] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_one_input_no_shape(self): argv_input = "inp1" - input_shapes = "" - input_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(input_list, ["inp1"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_one_input_wrong_shape8(self): - argv_input = "inp1" - input_shapes = "[2,4,1)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape9(self): - argv_input = "inp1" - input_shapes = "(2,4,1]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape10(self): - argv_input = "inp1" - input_shapes = "(2,,,4,1]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape2(self): - argv_input = "inp1" - input_shapes = "(2,4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape3(self): - argv_input = "inp1" - input_shapes = "2,4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape4(self): - argv_input = "inp1" - input_shapes = "2;4;1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape5(self): - argv_input = "inp1" - input_shapes = "2, 4,1" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape6(self): - argv_input = "inp1" - input_shapes = "(2, 4,1),[4,6,8]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_wrong_shape7(self): - argv_input = "inp1" - input_shapes = "[2,4,1],(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_several_shapes(self): - argv_input = "inp1" - input_shapes = "(2,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_first_neg_shape1(self): - argv_input = "inp1,inp2" - input_shapes = "(-1,4,1),(4,6,8)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': np.array([-1, 4, 1]), 'inp2': np.array([4, 6, 8])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_shapes_one_input_first_neg_shape_not_one(self): - argv_input = "inp1" - input_shapes = "(-12,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - 
def test_get_shapes_long_dimension_with_invalid_character(self): - # test for regular expression denial of service - argv_input = "inp1,inp2" - input_shapes = "(222222222222222222222222222222222222222222!,4,1),(4,6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_one_input_any_neg_shape(self): - argv_input = "inp1, inp2" - input_shapes = "(12,4,1),(4,-6,8)" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) - - def test_get_shapes_several_inputs_several_partial_shapes(self): - argv_input = "inp1,inp2" - input_shapes = "(1,..22,1..100,?), (-1,45..,7,1)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': PartialShape([1, Dimension(0, 22), Dimension(1, 100), -1]), 'inp2': PartialShape([-1, Dimension(45, -1), 7, 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1')] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_several_inputs_several_partial_shapes2(self): # shapes specified using --input command line parameter and no values - argv_input = "inp1[1 ? 50..100 123],inp2[-1 45.. ..7 1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([1, -1, (50, 100), 123]), 'inp2': PartialShape([-1, Dimension(45,-1), Dimension(0, 7), 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + argv_input = "inp1[1,?,50..100,123],inp2[-1,45..,..7,1]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape("[1,?,50..100,123]")), + InputCutInfo(name='inp2', shape=PartialShape("[-1,45..,..7,1]"))] + self.assertEqual(inputs, inputs_ref) + + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {} - input_node_names_ref = "inp1,inp2" self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_partial_shapes3(self): # shapes and value for freezing specified using --input command line parameter - argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? 
-1],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': (3, 1), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3': (5,)} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) - placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - - def test_get_shapes_several_inputs_several_partial_shapes4(self): - # shapes specified using --input_shape and values for freezing using --input command line parameter - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3..,..2,5..10,?,-1), (5)" - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': (3, 1), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3': (5,)} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3..,..2,5..10,?,-1],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', shape=PartialShape([5]), value=["1.0", "1.0", "2.0", "3.0", "5.0"])] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} input_node_names_ref = "inp1,inp2,inp3" self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) - for i in placeholder_values_ref.keys(): - assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, input_node_names_res) - - def test_get_shapes_several_inputs_several_partial_shapes5(self): - # some values for freezing specified using --freeze_placeholder_with_value - argv_input = "inp1->[1.0 2.0 3.0],inp2,inp3->[1.0 1.0 2.0 3.0 5.0]" - input_shapes = "(3,1), (3..,..2,5..10,?,-1), (5)" - argv_freeze_placeholder_with_value = "inp2->[5.0 7.0 3.0],inp4->[100.0 200.0]" - - inputs_list, result, _ = get_placeholder_shapes(argv_input, input_shapes) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': PartialShape([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, argv_freeze_placeholder_with_value) - placeholder_values_ref = {'inp1': 
np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'],), - 'inp2': np.array(['5.0', '7.0', '3.0']), 'inp4': np.array(['100.0', '200.0'])} - input_node_names_ref = "inp1,inp2,inp3" - self.assertEqual(sorted(list(placeholder_values_res.keys())), sorted(list(placeholder_values_ref.keys()))) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) - self.assertEqual(input_node_names_ref, input_node_names_res) def test_get_shapes_several_inputs_several_partial_shapes6(self): # 0D value for freezing specified using --input command line parameter without shape argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? -1],inp3->False" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', value=False)] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_partial_shapes7(self): # 0D shape and value for freezing specified using --input command line parameter argv_input = "inp1[3 1]->[1.0 2.0 3.0],inp2[3.. ..2 5..10 ? -1],inp3[]->True" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': np.array(False).shape} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', shape=PartialShape([]), value=True)] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1","inp2","inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_and_data_types_partial_shape_with_input_port(self): argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],0:inp2[3.. ..2 5..10 ? 
-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), '0:inp2': PartialShape([Dimension(3, -1), Dimension(-1, 2), Dimension(5, 10), -1, -1]), 'inp3:4': np.array([5])} - ref_result_data_types = {'0:inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","0:inp2","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='0:inp2', shape=PartialShape("[3..,..2,5..10,?,-1]"), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_partial_shape_with_output_port(self): argv_input = "inp1:1[3 1]->[1.0 2.0 3.0],inp2:3[3.. ..2 5..10 ? -1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), 'inp2:3': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), 'inp3:4': PartialShape([5])} - ref_result_data_types = {'inp2:3': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1","inp2:3","inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) - - def test_partial_shapes_negative_case(self): - argv_input = "inp1" - input_shapes = "[6754fg..23ed]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2:3', shape=PartialShape("[3..,..2,5..10,?,-1]"), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_partial_shapes_freeze_dynamic_negative_case1(self): argv_input = "inp1:1[3 1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") + self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_partial_shapes_freeze_dynamic_negative_case2(self): argv_input = "inp1:1[1 2 -1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - - def test_partial_shapes_freeze_dynamic_negative_case3(self): - # some values for freezing specified using --freeze_placeholder_with_value - argv_input = "inp1->[1.0 2.0 3.0]" - input_shapes = "[3,1..10]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, input_shapes) + self.assertRaises(Error, input_to_input_cut_info, argv_input) def 
test_get_shapes_several_inputs_several_partial_shapes2_comma_separator(self): # shapes specified using --input command line parameter and no values argv_input = "inp1[1,?,50..100,123],inp2[-1,45..,..7,1]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([1, -1, (50, 100), 123]), - 'inp2': PartialShape([-1, Dimension(45, -1), Dimension(0, 7), 1])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape("[1,?,50..100,123]")), + InputCutInfo(name='inp2', shape=PartialShape("[-1,45..,..7,1]"))] + self.assertEqual(inputs, inputs_ref) + + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_partial_shapes3_comma_separator(self): # shapes and value for freezing specified using --input command line parameter argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3..,..2,5..10,?,-1],inp3[5]->[1.0 1.0 2.0 3.0 5.0]" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), 'inp2': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), - 'inp3': PartialShape([5])} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', shape=PartialShape([5]), value=["1.0", "1.0", "2.0", "3.0", "5.0"])] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': np.array(['1.0', '1.0', '2.0', '3.0', '5.0'])} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_partial_shapes6_comma_separator(self): # 0D value for freezing specified using --input command line parameter without shape argv_input = "inp1[3, 1]->[1.0 2.0 3.0],inp2[3.., ..2, 5..10, ?,-1],inp3->False" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), - 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), 'inp3': None} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = 
[InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', value=False)] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': False} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_several_inputs_several_partial_shapes7_comma_separator(self): # 0D shape and value for freezing specified using --input command line parameter argv_input = "inp1[3,1]->[1.0 2.0 3.0],inp2[3.., ..2,5..10, ?,-1],inp3[]->True" - inputs_list, result, _ = get_placeholder_shapes(argv_input, None) - exp_res = {'inp1': PartialShape([3, 1]), - 'inp2': PartialShape([(3, np.iinfo(np.int64).max), (0, 2), (5, 10), -1, -1]), - 'inp3': np.array(False).shape} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input, None) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1', shape=PartialShape([3,1]), value=["1.0", "2.0", "3.0"]), + InputCutInfo(name='inp2', shape=PartialShape("[3..,..2,5..10,?,-1]")), + InputCutInfo(name='inp3', shape=PartialShape([]), value=True)] + self.assertEqual(inputs, inputs_ref) + placeholder_values_res, input_node_names_res = get_freeze_placeholder_values(argv_input) placeholder_values_ref = {'inp1': np.array(['1.0', '2.0', '3.0']), 'inp3': True} self.assertEqual(list(placeholder_values_res.keys()), list(placeholder_values_ref.keys())) - self.assertEqual(inputs_list, ["inp1", "inp2", "inp3"]) for i in placeholder_values_ref.keys(): assert np.array_equal(placeholder_values_res[i], placeholder_values_ref[i]) def test_get_shapes_and_data_types_partial_shape_with_input_port_comma_separator(self): argv_input = "inp1:1[3,1]->[1.0 2.0 3.0],0:inp2[ 3.. 
,..2, 5..10, ?,-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), - '0:inp2': PartialShape([Dimension(3, -1), Dimension(-1, 2), Dimension(5, 10), -1, -1]), - 'inp3:4': np.array([5])} - ref_result_data_types = {'0:inp2': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1", "0:inp2", "inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='0:inp2', shape=PartialShape("[3..,..2,5..10,?,-1]"), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_get_shapes_and_data_types_partial_shape_with_output_port_comma_separator(self): argv_input = "inp1:1[3,1]->[1.0 2.0 3.0],inp2:3[3..,..2,5..10,?,-1]{i32},inp3:4[5]{f32}->[1.0 1.0 2.0 3.0 5.0]" - input_list, result_shapes, result_data_types = get_placeholder_shapes(argv_input, "") - ref_result_shapes = {'inp1:1': PartialShape([3, 1]), - 'inp2:3': PartialShape([Dimension(3, -1), Dimension(0, 2), Dimension(5, 10), -1, -1]), - 'inp3:4': PartialShape([5])} - ref_result_data_types = {'inp2:3': np.int32, 'inp3:4': np.float32} - self.assertEqual(list(ref_result_shapes.keys()), list(result_shapes.keys())) - for i in ref_result_shapes.keys(): - assert np.array_equal(result_shapes[i], ref_result_shapes[i]) - self.assertEqual(list(ref_result_data_types.keys()), list(result_data_types.keys())) - self.assertEqual(input_list, ["inp1:1", "inp2:3", "inp3:4"]) - for i in ref_result_data_types.keys(): - np.testing.assert_equal(result_data_types[i], ref_result_data_types[i]) + inputs = input_to_input_cut_info(argv_input) + inputs_ref = [InputCutInfo(name='inp1:1', shape=PartialShape([3,1]), value=['1.0', '2.0', '3.0']), + InputCutInfo(name='inp2:3', shape=PartialShape("[3..,..2,5..10,?,-1]"), type=np.int32), + InputCutInfo(name='inp3:4', shape=PartialShape([5]), type=np.float32, value=['1.0', '1.0', '2.0', '3.0', '5.0'])] + self.assertEqual(inputs, inputs_ref) def test_partial_shapes_freeze_dynamic_negative_case1_comma_separator(self): argv_input = "inp1:1[3,1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") + self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_partial_shapes_freeze_dynamic_negative_case2_comma_separator(self): argv_input = "inp1:1[1,2,-1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") + self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_partial_shapes_freeze_dynamic_negative_case3_comma_separator(self): argv_input = "inp1:1[3,1..10]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") + self.assertRaises(Error, input_to_input_cut_info, argv_input) def test_partial_shapes_freeze_dynamic_negative_case4_comma_separator(self): argv_input = "inp1:1[1, 2, -1]->[1.0 2.0 3.0]" - self.assertRaises(Error, get_placeholder_shapes, argv_input, "") - 
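The freeze_dynamic negative cases above encode one invariant: a value can only be frozen into an input whose shape is fully static, so any range ("1..10") or dynamic dimension ("?", -1) must make the parser raise Error. A hypothetical helper sketching that rule with openvino.runtime.PartialShape (illustrative only, not the parser's actual code; assumes PartialShape's string constructor and is_static property behave as used in the tests above):

    from openvino.runtime import PartialShape

    def can_freeze_value(shape_str: str) -> bool:
        # PartialShape accepts the same bracketed syntax used by --input
        return PartialShape(shape_str).is_static

    assert can_freeze_value("[3,1]")          # static shape: freezing allowed
    assert not can_freeze_value("[3,1..10]")  # bounded range: must be rejected
    assert not can_freeze_value("[1,2,-1]")   # dynamic dimension: must be rejected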
- -class TestModelNameParsing(unittest.TestCase): - def test_model_name_ideal(self): - model_name = '/home/models/mymodel.caffemodel' - res = get_model_name(model_name) - exp_res = 'mymodel' - self.assertEqual(exp_res, res) - - def test_model_name_no_name(self): - model_name = '/home/models/.caffemodel' - res = get_model_name(model_name) - exp_res = 'model' - self.assertEqual(exp_res, res) - - def test_model_name_no_ext(self): - model_name = '/home/models/caffemodel' - res = get_model_name(model_name) - exp_res = 'caffemodel' - self.assertEqual(exp_res, res) - - def test_model_name_no_name_no_path(self): - model_name = '.caffemodel' - res = get_model_name(model_name) - exp_res = 'model' - self.assertEqual(exp_res, res) - - @patch("openvino.tools.ovc.cli_parser.os") - def test_model_name_win(self, old_os): - old_os.path.basename.return_value = "caffemodel" - old_os.path.splitext.return_value = ("caffemodel", "") - model_name = r'\home\models\caffemodel' - res = get_model_name(model_name) - - exp_res = 'caffemodel' - self.assertEqual(exp_res, res) - - def test_model_name_dots(self): - model_name = r'/home/models/squeezenet_v1.1.caffemodel' - res = get_model_name(model_name) - exp_res = 'squeezenet_v1.1' - self.assertEqual(exp_res, res) + self.assertRaises(Error, input_to_input_cut_info, argv_input) class PositiveChecker(unittest.TestCase): @@ -1289,761 +488,21 @@ def test_non_readable_file(self): readable_file(__class__.NOT_EXISTING_FILE) -class TransformChecker(unittest.TestCase): - def test_empty(self): - self.assertEqual(parse_transform(""), []) - - def test_single_pass(self): - self.assertEqual(parse_transform("LowLatency2"), [("LowLatency2", {})]) - - def test_single_pass_with_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True]"), - [("LowLatency2", {"use_const_initializer": True})]) - - def test_single_pass_with_multiple_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True;dummy_attr=3.14]"), - [("LowLatency2", {"use_const_initializer": True, "dummy_attr": 3.14})]) - - def test_multiple_passes_with_args(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True],DummyPass[type=ReLU]"), - [("LowLatency2", {"use_const_initializer": True}), - ("DummyPass", {"type": "ReLU"})]) - - def test_multiple_passes_with_args2(self): - self.assertEqual(parse_transform("LowLatency2[use_const_initializer=True,False],DummyPass1," - "DummyPass2[types=ReLU,PReLU;values=1,2,3]"), - [("LowLatency2", {"use_const_initializer": [True, False]}), - ("DummyPass1", {}), - ("DummyPass2", {"types": ["ReLU", "PReLU"], "values": [1, 2, 3]})]) - - def test_multiple_passes_no_args(self): - self.assertEqual(parse_transform("DummyPass,LowLatency22"), - [("DummyPass", {}), ("LowLatency22", {})]) - - def test_single_pass_neg(self): - self.assertRaises(Error, parse_transform, "LowLatency2!") - - def test_multiple_passes_neg(self): - self.assertRaises(Error, parse_transform, "LowLatency2;DummyPass") - - def test_single_pass_with_args_neg1(self): - self.assertRaises(Error, parse_transform, "LowLatency2[=2]") - - def test_single_pass_with_args_neg2(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=]") - - def test_single_pass_with_args_neg3(self): - self.assertRaises(Error, parse_transform, "LowLatency2[]") - - def test_single_pass_with_args_neg4(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=value;]") - - def test_single_pass_with_args_neg5(self): - self.assertRaises(Error, parse_transform, 
"LowLatency2[value]") - - def test_single_pass_with_args_neg6(self): - self.assertRaises(Error, parse_transform, "LowLatency2[key=value") - - @patch("openvino.tools.ovc.moc_frontend.offline_transformations.get_available_transformations") - def test_check_low_latency_is_available(self, available_transformations): - available_transformations.return_value = {"LowLatency2": None} - try: - check_available_transforms([("LowLatency2", "")]) - except Error as e: - self.assertTrue(False, "Exception \"{}\" is unexpected".format(e)) - - @patch("openvino.tools.ovc.moc_frontend.offline_transformations.get_available_transformations") - def test_check_dummy_pass_is_available(self, available_transformations): - available_transformations.return_value = {"LowLatency2": None} - self.assertRaises(Error, check_available_transforms, [("DummyPass", "")]) - - -class TestLayoutParsing(unittest.TestCase): - def test_get_layout_1(self): - argv_layout = "name1([n,h,w,c]),name2([n,h,w,c]->[n,c,h,w])" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': None}, - 'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_2(self): - argv_layout = "name1(nhwc),name2(nhwc->nchw)" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None}, - 'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_3(self): - argv_layout = "name1(n...c),name2(n...c->nc...)" - result = get_layout_values(argv_layout) - exp_res = {'name1': {'source_layout': 'n...c', 'target_layout': None}, - 'name2': {'source_layout': 'n...c', 'target_layout': 'nc...'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_4(self): - argv_layout = "nhwc" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_5(self): - argv_layout = "[n,h,w,c]" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': None}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_6(self): - argv_layout = "nhwc->nchw" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': 'nhwc', 'target_layout': 'nchw'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_7(self): - argv_layout = "[n,h,w,c]->[n,c,h,w]" - result = get_layout_values(argv_layout) - exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}} - self.assertEqual(list(exp_res.keys()), list(result.keys())) - for i in exp_res.keys(): - assert np.array_equal(result[i], exp_res[i]) - - def test_get_layout_8(self): - argv_layout = "name1-0(n...c),name2-0(n...c->nc...)" - result = get_layout_values(argv_layout) - exp_res = {'name1-0': {'source_layout': 'n...c', 
-class TestLayoutParsing(unittest.TestCase):
-    def test_get_layout_1(self):
-        argv_layout = "name1([n,h,w,c]),name2([n,h,w,c]->[n,c,h,w])"
-        result = get_layout_values(argv_layout)
-        exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_2(self):
-        argv_layout = "name1(nhwc),name2(nhwc->nchw)"
-        result = get_layout_values(argv_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_3(self):
-        argv_layout = "name1(n...c),name2(n...c->nc...)"
-        result = get_layout_values(argv_layout)
-        exp_res = {'name1': {'source_layout': 'n...c', 'target_layout': None},
-                   'name2': {'source_layout': 'n...c', 'target_layout': 'nc...'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_4(self):
-        argv_layout = "nhwc"
-        result = get_layout_values(argv_layout)
-        exp_res = {'': {'source_layout': 'nhwc', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_5(self):
-        argv_layout = "[n,h,w,c]"
-        result = get_layout_values(argv_layout)
-        exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_6(self):
-        argv_layout = "nhwc->nchw"
-        result = get_layout_values(argv_layout)
-        exp_res = {'': {'source_layout': 'nhwc', 'target_layout': 'nchw'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_7(self):
-        argv_layout = "[n,h,w,c]->[n,c,h,w]"
-        result = get_layout_values(argv_layout)
-        exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_8(self):
-        argv_layout = "name1-0(n...c),name2-0(n...c->nc...)"
-        result = get_layout_values(argv_layout)
-        exp_res = {'name1-0': {'source_layout': 'n...c', 'target_layout': None},
-                   'name2-0': {'source_layout': 'n...c', 'target_layout': 'nc...'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_scalar(self):
-        argv_layout = "name1(nhwc),name2([])"
-        result = get_layout_values(argv_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': '[]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_1(self):
-        argv_source_layout = "[n,h,w,c]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_2(self):
-        argv_source_layout = "nhwc"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'': {'source_layout': 'nhwc', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_3(self):
-        argv_source_layout = "name1(nhwc),name2(nchw)"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': 'nchw', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_4(self):
-        argv_source_layout = "name1([n,h,w,c]),name2([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_5(self):
-        argv_source_layout = "name1(nhwc),name2([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_6(self):
-        argv_source_layout = "name1(nhwc),name2[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': '[n,c,h,w]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_scalar(self):
-        argv_source_layout = "name1(nhwc),name2([])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': None},
-                   'name2': {'source_layout': '[]', 'target_layout': None}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_1(self):
-        argv_target_layout = "[n,h,w,c]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'': {'source_layout': None, 'target_layout': '[n,h,w,c]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_2(self):
-        argv_target_layout = "nhwc"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'': {'source_layout': None, 'target_layout': 'nhwc'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_3(self):
-        argv_target_layout = "name1(nhwc),name2(nchw)"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'},
-                   'name2': {'source_layout': None, 'target_layout': 'nchw'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_4(self):
-        argv_target_layout = "name1([n,h,w,c]),name2([n,c,h,w])"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': None, 'target_layout': '[n,h,w,c]'},
-                   'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_5(self):
-        argv_target_layout = "name1(nhwc),name2([n,c,h,w])"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'},
-                   'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_6(self):
-        argv_target_layout = "name1(nhwc),name2[n,c,h,w]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'},
-                   'name2': {'source_layout': None, 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_scalar(self):
-        argv_target_layout = "name1(nhwc),name2[]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': None, 'target_layout': 'nhwc'},
-                   'name2': {'source_layout': None, 'target_layout': '[]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_1(self):
-        argv_source_layout = "[n,h,w,c]"
-        argv_target_layout = "[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_2(self):
-        argv_source_layout = "nhwc"
-        argv_target_layout = "nchw"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'': {'source_layout': 'nhwc', 'target_layout': 'nchw'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_3(self):
-        argv_source_layout = "name1(nhwc),name2(nhwc)"
-        argv_target_layout = "name1(nchw),name2(nchw)"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_4(self):
-        argv_source_layout = "name1([n,h,w,c]),name2([n,h,w,c])"
-        argv_target_layout = "name1([n,c,h,w]),name2([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'},
-                   'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_5(self):
-        argv_source_layout = "name1(nhwc),name2[n,h,w,c]"
-        argv_target_layout = "name1(nchw),name2[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   'name2': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_6(self):
-        argv_source_layout = "name1.0:a/b(nhwc),name2\\d\\[n,h,w,c]"
-        argv_target_layout = "name1.0:a/b(nchw),name2\\d\\[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1.0:a/b': {'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   'name2\\d\\': {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_7(self):
-        argv_source_layout = "name1-0[n,h,w,c],name2-1(?c??)"
-        argv_target_layout = "name1-0(nchw),name2-1[?,?,?,c]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1-0': {'source_layout': '[n,h,w,c]', 'target_layout': 'nchw'},
-                   'name2-1': {'source_layout': '?c??', 'target_layout': '[?,?,?,c]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_scalar(self):
-        argv_source_layout = "name1(nhwc),name2[]"
-        argv_target_layout = "name1(nchw),name2[]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = {'name1': {'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   'name2': {'source_layout': '[]', 'target_layout': '[]'}}
-        self.assertEqual(list(exp_res.keys()), list(result.keys()))
-        for i in exp_res.keys():
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_raises_if_layout_and_source_layout_provided(self):
-        argv_layout = "nhwc"
-        argv_source_layout = "nhwc"
-        with self.assertRaises(Error):
-            get_layout_values(argv_layout=argv_layout, argv_source_layout=argv_source_layout)
-
-    def test_get_layout_raises_if_layout_and_target_layout_provided(self):
-        argv_layout = "nhwc->nchw"
-        argv_target_layout = "nchw"
-        with self.assertRaises(Error):
-            get_layout_values(argv_layout=argv_layout, argv_target_layout=argv_target_layout)
-
-    def test_get_layout_raises_if_layout_with_source_and_target_layout_provided(self):
-        argv_layout = "nhwc->nchw"
-        argv_source_layout = "nhwc"
-        argv_target_layout = "nchw"
-        with self.assertRaises(Error):
-            get_layout_values(argv_layout=argv_layout, argv_source_layout=argv_source_layout,
-                              argv_target_layout=argv_target_layout)
-
-    def test_get_layout_raises_incorrect_format(self):
-        argv_layout = "name[n,h,w,c]->nchw"
-        with self.assertRaises(Error):
-            res = get_layout_values(argv_layout=argv_layout)
-            print(res)
-
-
-class TestLayoutParsingEmptyNames(unittest.TestCase):
-    def test_get_layout_1(self):
-        argv_layout = "([n,h,w,c]),([n,h,w,c]->[n,c,h,w])"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_2(self):
-        argv_layout = "(nhwc),(nhwc->nchw)"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': 'nhwc', 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_3(self):
-        argv_layout = "(n...c),(n...c->nc...)"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'n...c', 'target_layout': None},
-                   {'source_layout': 'n...c', 'target_layout': 'nc...'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_scalar(self):
-        argv_layout = "(nhwc),([])"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_3(self):
-        argv_source_layout = "(nhwc),(nchw)"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': 'nchw', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_4(self):
-        argv_source_layout = "([n,h,w,c]),([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_5(self):
-        argv_source_layout = "(nhwc),([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_6(self):
-        argv_source_layout = "(nhwc),[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_scalar(self):
-        argv_source_layout = "(nhwc),([])"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_3(self):
-        argv_target_layout = "(nhwc),(nchw)"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_4(self):
-        argv_target_layout = "([n,h,w,c]),([n,c,h,w])"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': '[n,h,w,c]'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_5(self):
-        argv_target_layout = "(nhwc),([n,c,h,w])"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_6(self):
-        argv_target_layout = "(nhwc),[n,c,h,w]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_scalar(self):
-        argv_target_layout = "(nhwc),[]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_3(self):
-        argv_source_layout = "(nhwc),(nhwc)"
-        argv_target_layout = "(nchw),(nchw)"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': 'nhwc', 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_4(self):
-        argv_source_layout = "([n,h,w,c]),([n,h,w,c])"
-        argv_target_layout = "([n,c,h,w]),([n,c,h,w])"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_5(self):
-        argv_source_layout = "(nhwc),[n,h,w,c]"
-        argv_target_layout = "(nchw),[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_scalar(self):
-        argv_source_layout = "(nhwc),[]"
-        argv_target_layout = "(nchw),[]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': '[]', 'target_layout': '[]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-
-class TestLayoutParsingEmptyNamesNoBrackets(unittest.TestCase):
-    def test_get_layout_1(self):
-        argv_layout = "[n,h,w,c],[n,h,w,c]->[n,c,h,w]"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_2(self):
-        argv_layout = "nhwc,nhwc->nchw"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': 'nhwc', 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_3(self):
-        argv_layout = "n...c,n...c->nc..."
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'n...c', 'target_layout': None},
-                   {'source_layout': 'n...c', 'target_layout': 'nc...'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_scalar(self):
-        argv_layout = "nhwc,[]"
-        result = get_layout_values(argv_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_3(self):
-        argv_source_layout = "nhwc,nchw"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': 'nchw', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_4(self):
-        argv_source_layout = "[n,h,w,c],[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_5(self):
-        argv_source_layout = "nhwc,[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_6(self):
-        argv_source_layout = "nhwc,[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[n,c,h,w]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_layout_scalar(self):
-        argv_source_layout = "nhwc,[]"
-        result = get_layout_values(argv_source_layout=argv_source_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': None},
-                   {'source_layout': '[]', 'target_layout': None}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_3(self):
-        argv_target_layout = "nhwc,nchw"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_4(self):
-        argv_target_layout = "[n,h,w,c],[n,c,h,w]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': '[n,h,w,c]'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_5(self):
-        argv_target_layout = "nhwc,[n,c,h,w]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_6(self):
-        argv_target_layout = "nhwc,[n,c,h,w]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_target_layout_scalar(self):
-        argv_target_layout = "nhwc,[]"
-        result = get_layout_values(argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': None, 'target_layout': 'nhwc'},
-                   {'source_layout': None, 'target_layout': '[]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_3(self):
-        argv_source_layout = "nhwc,nhwc"
-        argv_target_layout = "nchw,nchw"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': 'nhwc', 'target_layout': 'nchw'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_4(self):
-        argv_source_layout = "[n,h,w,c],[n,h,w,c]"
-        argv_target_layout = "[n,c,h,w],[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_5(self):
-        argv_source_layout = "nhwc,[n,h,w,c]"
-        argv_target_layout = "nchw,[n,c,h,w]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': '[n,h,w,c]', 'target_layout': '[n,c,h,w]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def test_get_layout_source_target_layout_scalar(self):
-        argv_source_layout = "nhwc,[]"
-        argv_target_layout = "nchw,[]"
-        result = get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout)
-        exp_res = [{'source_layout': 'nhwc', 'target_layout': 'nchw'},
-                   {'source_layout': '[]', 'target_layout': '[]'}]
-        self.assertEqual(exp_res, result)
-        for i in range(len(exp_res)):
-            assert np.array_equal(result[i], exp_res[i])
-
-    def wrong_case_1(self):
-        argv_source_layout = "[n,h,w,c]),[n,h,w,c]"
-        argv_target_layout = "[n,c,h,w],[n,c,h,w]"
-        self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout, argv_target_layout=argv_target_layout))
-
-    def wrong_case_2(self):
-        argv_source_layout = "[nchv"
-        self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout))
-
-    def wrong_case_3(self):
-        argv_source_layout = "nchv->"
-        self.assertRaises(get_layout_values(argv_source_layout=argv_source_layout))
-
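The three layout-parsing classes removed above documented the legacy layout-string grammar of get_layout_values(). For reference, distilled only from the deleted expectations (illustrative, not a supported API):

    # Named entries produce a dict keyed by input name; unnamed entries produce a list.
    get_layout_values("name1(nhwc),name2(nhwc->nchw)")
    # -> {'name1': {'source_layout': 'nhwc', 'target_layout': None},
    #     'name2': {'source_layout': 'nhwc', 'target_layout': 'nchw'}}
    get_layout_values(argv_source_layout="nhwc,[n,c,h,w]")
    # -> [{'source_layout': 'nhwc', 'target_layout': None},
    #     {'source_layout': '[n,c,h,w]', 'target_layout': None}]
    # Combining the layout argument with source/target layout arguments raises Error.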

 class TestPackParamsToArgsNamespace(unittest.TestCase):
     def test_mo_convert_params(self):
         from openvino.frontend import ConversionExtension
         args = {'input_model': os.path.dirname(__file__),
-                'input_shape': [PartialShape([1,100,100,3]), [2,3]],
                 'extensions': ConversionExtension("Ext", lambda x: x),
-                'reverse_input_channels': True,
-                'scale': 0.5,
                 'input': ['name', InputCutInfo("a", [1,2,3], numpy.float32, [5, 6, 7])],
-                'batch': 1,
-                'output': ["a", "b", "c"],
-                'mean_values': [0.5, 0.3],
-                'scale_values': {"a": np.array([0.4]), "b": [0.5, 0.6]},
-                'source_layout': Layout("nchw"),
-                'layout': {"a": LayoutMap("nchw","nhwc"), "b": "nc"},
-                'transform': ('LowLatency2', {'use_const_initializer': False})}
+                'output': ["a", "b", "c"]}

         cli_parser = get_all_cli_parser()
         argv = pack_params_to_args_namespace(args, cli_parser)

         assert argv.input_model == args['input_model']
         assert argv.extensions == [args['extensions']]
-        assert argv.reverse_input_channels == args['reverse_input_channels']
-        assert argv.scale == 0.5
-        assert argv.batch == 1
-        assert argv.input_shape == [PartialShape([1,100,100,3]), [2,3]]
         assert argv.input == ['name', InputCutInfo("a", [1,2,3], numpy.float32, [5, 6, 7])]
         assert argv.output == "a,b,c"
-        assert argv.mean_values == "[0.5,0.3]"
-        assert argv.scale_values == "a[0.4],b[0.5,0.6]"
-        assert argv.source_layout == "[N,C,H,W]"
-        assert argv.layout == "a(nchw->nhwc),b(nc)"
-        assert argv.transform == "LowLatency2[use_const_initializer=False]"

         for arg, value in vars(argv).items():
             if arg not in args and arg != 'is_python_api_used':
@@ -2068,22 +527,9 @@ def test_unknown_params(self):

 class TestConvertModelParamsParsing(unittest.TestCase):
     def test_mo_convert_params_parsing(self):
         ref_params = {
-            'Optional parameters:': {'help', 'framework'},
-            'Framework-agnostic parameters:': {'input_model', 'input_shape', 'scale', 'reverse_input_channels',
-                                               'log_level', 'input', 'output', 'mean_values', 'scale_values', 'source_layout',
-                                               'target_layout', 'layout', 'compress_to_fp16', 'transform', 'extensions',
-                                               'batch', 'silent', 'version', 'progress', 'stream_output',
-                                               'transformations_config', 'example_input', 'share_weights'},
-            'Caffe*-specific parameters:': {'input_proto', 'caffe_parser_path', 'k', 'disable_omitting_optional',
-                                            'enable_flattening_nested_params'},
-            'TensorFlow*-specific parameters:': {'input_model_is_text', 'input_checkpoint', 'input_meta_graph',
-                                                 'saved_model_dir', 'saved_model_tags',
-                                                 'tensorflow_custom_operations_config_update',
-                                                 'tensorflow_object_detection_api_pipeline_config',
-                                                 'tensorboard_logdir', 'tensorflow_custom_layer_libraries'},
-            'MXNet-specific parameters:': {'input_symbol', 'nd_prefix_name', 'pretrained_model_name', 'save_params_from_nd',
-                                           'legacy_mxnet_model', 'enable_ssd_gluoncv'},
-            'Kaldi-specific parameters:': {'counts', 'remove_output_softmax', 'remove_memory'},
+            'Framework-agnostic parameters:': {'input_model', 'input', 'output', 'example_input',
+                                               'extensions', 'verbose', 'share_weights'},
+            'TensorFlow*-specific parameters:': {'saved_model_tags'},
             'PaddlePaddle-specific parameters:': {'example_output'},
         }
@@ -2096,7 +542,8 @@ def test_mo_convert_params_parsing(self):
         for group_name, params in ref_params.items():
             for param_name in params:
                 param_name = '--' + param_name
-                if group_name == 'PaddlePaddle-specific parameters:':
+                if group_name == 'PaddlePaddle-specific parameters:' or \
+                        param_name in ['--input_model', '--share_weights', '--example_input']:
                     assert param_name not in cli_parser._option_string_actions
                 else:
                     assert param_name in cli_parser._option_string_actions
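A minimal usage sketch of the surviving parameter-packing path, mirroring the trimmed test above (assuming get_all_cli_parser and pack_params_to_args_namespace are importable from openvino.tools.ovc.cli_parser, as this test module's patch targets suggest):

    import os
    # Assumed import location; mirrors the helpers used by the test above.
    from openvino.tools.ovc.cli_parser import get_all_cli_parser, pack_params_to_args_namespace

    args = {'input_model': os.path.dirname(__file__), 'output': ['a', 'b', 'c']}
    argv = pack_params_to_args_namespace(args, get_all_cli_parser())
    assert argv.input_model == args['input_model']
    assert argv.output == 'a,b,c'  # a list-valued 'output' is re-joined into a comma-separated string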