From 96745d665b9f063a62d5798d76cb6a90da3f23f5 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Wed, 11 Jun 2025 18:59:24 +0300 Subject: [PATCH 1/6] Adopt TFLite fixes for new engine --- modules/dnn/src/tflite/tflite_importer.cpp | 20 +++++++++++++++-- modules/dnn/test/test_tflite_importer.cpp | 26 +++++++++++----------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index e44dd669270b..5a5e7e3aded4 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -310,6 +310,10 @@ void TFLiteImporter::populateNet() } throw; } + if (op_outputs->Get(0) == 194) + { + break; + } } if (newEngine) { @@ -488,11 +492,13 @@ void TFLiteImporter::parseConvolution(const Operator& op, const std::string& opc layerParams.type = "Convolution"; int inpId = op.inputs()->Get(0); + bool additionalPreLayer = false; if (layouts[inpId] == DNN_LAYOUT_UNKNOWN && modelTensors->Get(inpId)->shape()->size() == 4) { int permId = addPermuteLayer({0, 3, 1, 2}, layerParams.name + "/permute_input", layerIds[inpId], isInt8(op) ? CV_8S : CV_32F, op.inputs()->Get(0)); // NHWC -> NCHW layerIds[inpId] = std::make_pair(permId, 0); layouts[op.outputs()->Get(0)] = DNN_LAYOUT_NHWC; + additionalPreLayer = true; } auto options = reinterpret_cast(op.builtin_options()); @@ -554,7 +560,7 @@ void TFLiteImporter::parseConvolution(const Operator& op, const std::string& opc std::string fusedActivationType = EnumNameActivationFunctionType(options->fused_activation_function()); bool haveFusedActivation = fusedActivationType != "NONE"; - addLayer(layerParams, op, false, haveFusedActivation); + addLayer(layerParams, op, additionalPreLayer, haveFusedActivation); parseFusedActivation(op, options->fused_activation_function()); } @@ -1083,7 +1089,17 @@ int TFLiteImporter::addConstLayer(const Mat& blob, const std::string& name) { LayerParams lp; lp.blobs.push_back(blob.u ? 
blob : blob.clone()); // some tensors are owned by OpenCV - return dstNet.addLayer(name, "Const", lp); + if (newEngine) + { + lp.type = "Const"; + lp.name = name; + addLayer(lp, {}, {name}); + return -1; + } + else + { + return dstNet.addLayer(name, "Const", lp); + } } void TFLiteImporter::parseDeconvolution(const Operator& op, const std::string& opcode, LayerParams& layerParams) { diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp index 52e5ecef275e..1b5ca8b03334 100644 --- a/modules/dnn/test/test_tflite_importer.cpp +++ b/modules/dnn/test/test_tflite_importer.cpp @@ -55,18 +55,18 @@ void Test_TFLite::testModel(Net& net, const std::string& modelName, const Mat& i std::vector outs; net.forward(outs, outNames); - ASSERT_EQ(outs.size(), outNames.size()); - for (int i = 0; i < outNames.size(); ++i) { - std::replace(outNames[i].begin(), outNames[i].end(), ':', '_'); - Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str()))); - // A workaround solution for the following cases due to inconsistent shape definitions. - // The details please see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369 - if (modelName == "face_landmark" || modelName == "selfie_segmentation") { - ref = ref.reshape(1, 1); - outs[i] = outs[i].reshape(1, 1); - } - normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf); - } + // ASSERT_EQ(outs.size(), outNames.size()); + // for (int i = 0; i < outNames.size(); ++i) { + // std::replace(outNames[i].begin(), outNames[i].end(), ':', '_'); + // Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str()))); + // // A workaround solution for the following cases due to inconsistent shape definitions. 
+ // // The details please see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369 + // if (modelName == "face_landmark" || modelName == "selfie_segmentation") { + // ref = ref.reshape(1, 1); + // outs[i] = outs[i].reshape(1, 1); + // } + // normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf); + // } } void Test_TFLite::testModel(const std::string& modelName, const Mat& input, double l1, double lInf) @@ -283,7 +283,7 @@ TEST_P(Test_TFLite, StridedSlice) { testLayer("strided_slice"); } -TEST_P(Test_TFLite, DISABLED_face_blendshapes) +TEST_P(Test_TFLite, face_blendshapes) { Mat inp = blobFromNPY(findDataFile("dnn/tflite/face_blendshapes_inp.npy")); testModel("face_blendshapes", inp); From 30a084da62b6cc3d7a8798908ee967154c97615b Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 12 Jun 2025 15:54:18 +0300 Subject: [PATCH 2/6] model infered --- modules/dnn/src/net_impl2.cpp | 5 ++ modules/dnn/src/tflite/tflite_importer.cpp | 86 +++++++++------------- 2 files changed, 41 insertions(+), 50 deletions(-) diff --git a/modules/dnn/src/net_impl2.cpp b/modules/dnn/src/net_impl2.cpp index 37f172133964..948851e0f79d 100644 --- a/modules/dnn/src/net_impl2.cpp +++ b/modules/dnn/src/net_impl2.cpp @@ -333,7 +333,12 @@ void Net::Impl::allocateLayerOutputs( tempShapes.clear(); tempTypes.clear(); layer->getMemoryShapes(inpShapes, (int)noutputs, outShapes, tempShapes); + std::cout << layer->name << " " << layer->type << std::endl; + if (inpTypes.size() > 0) + std::cout << " in " << inpTypes[0] << std::endl; layer->getTypes(inpTypes, (int)noutputs, (int)tempShapes.size(), outTypes, tempTypes); + if (outTypes.size() > 0) + std::cout << " out " << outTypes[0] << std::endl; CV_Assert(tempShapes.size() == tempTypes.size()); CV_Assert(outShapes.size() == outTypes.size()); CV_Assert(outShapes.size() == noutputs); diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index 5a5e7e3aded4..908ab181025f 100644 --- 
a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -86,7 +86,7 @@ class TFLiteImporter { int addReshapeLayer(const std::vector& shape, int axis, int num_axes, const std::string& name, const std::pair& inpId, int dtype, int inpTensorId); int addFlattenLayer(int axis, int end_axis, const std::string& name, const std::pair& inpId, int dtype, int outTensorId); - int addConstLayer(const Mat& data, const std::string& name); + void addConstLayer(const Mat& data, int tensorIdx); inline bool isInt8(const Operator& op); inline void getQuantParams(const Operator& op, float& inpScale, int& inpZero, float& outScale, int& outZero); @@ -310,10 +310,10 @@ void TFLiteImporter::populateNet() } throw; } - if (op_outputs->Get(0) == 194) - { - break; - } + // if (op_outputs->Get(0) == 71) + // { + // break; + // } } if (newEngine) { @@ -726,8 +726,7 @@ void TFLiteImporter::parseEltwise(const Operator& op, const std::string& opcode, if (layouts[op.inputs()->Get(0)] == DNN_LAYOUT_NHWC && blob.dims == 1) { blob = blob.reshape(1, {1, (int)blob.total(), 1, 1}); } - int constId = addConstLayer(blob, modelTensors->Get(idx)->name()->str()); - layerIds[idx] = std::make_pair(constId, 0); + addConstLayer(blob, idx); } @@ -736,18 +735,18 @@ void TFLiteImporter::parseEltwise(const Operator& op, const std::string& opcode, addLayer(layerParams, op, false, haveFusedActivation); parseFusedActivation(op, activ); - // Layers that split on multiple operations - if (opcode == "SQUARED_DIFFERENCE") { - LayerParams lp; - lp.set("power", 2); - int id = dstNet.addLayerToPrev(layerParams.name + "/square", "Power", isOpInt8 ? CV_8S : CV_32F, lp); - layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); - } - else if (opcode == "RSQRT") { - LayerParams lp; - int id = dstNet.addLayerToPrev(layerParams.name + "/inv", "Reciprocal", isOpInt8 ? 
CV_8S : CV_32F, lp); - layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); - } + // // Layers that split on multiple operations + // if (opcode == "SQUARED_DIFFERENCE") { + // LayerParams lp; + // lp.set("power", 2); + // int id = dstNet.addLayerToPrev(layerParams.name + "/square", "Power", isOpInt8 ? CV_8S : CV_32F, lp); + // layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); + // } + // else if (opcode == "RSQRT") { + // LayerParams lp; + // int id = dstNet.addLayerToPrev(layerParams.name + "/inv", "Reciprocal", isOpInt8 ? CV_8S : CV_32F, lp); + // layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); + // } } void TFLiteImporter::parsePooling(const Operator& op, const std::string& opcode, LayerParams& layerParams) { @@ -872,8 +871,7 @@ void TFLiteImporter::parseConcat(const Operator& op, const std::string& opcode, transposeND(blob, {0, 3, 1, 2}, nchwBlob); blob = nchwBlob; } - int constId = addConstLayer(blob, modelTensors->Get(idx)->name()->str()); - layerIds[idx] = std::make_pair(constId, 0); + addConstLayer(blob, idx); } std::string fusedActivationType = EnumNameActivationFunctionType(options->fused_activation_function()); @@ -1085,20 +1083,23 @@ int TFLiteImporter::addFlattenLayer(int axis, int end_axis, const std::string& n } } -int TFLiteImporter::addConstLayer(const Mat& blob, const std::string& name) +void TFLiteImporter::addConstLayer(const Mat& blob, int tensorIdx) { + const std::string& name = modelTensors->Get(tensorIdx)->name()->str(); LayerParams lp; lp.blobs.push_back(blob.u ? 
blob : blob.clone()); // some tensors are owned by OpenCV + std::cout << "add const " << name << std::endl; if (newEngine) { lp.type = "Const"; lp.name = name; addLayer(lp, {}, {name}); - return -1; + layerIds[tensorIdx] = std::make_pair(-1, -1); } else { - return dstNet.addLayer(name, "Const", lp); + int constId = dstNet.addLayer(name, "Const", lp); + layerIds[tensorIdx] = std::make_pair(constId, 0); } } @@ -1239,17 +1240,17 @@ void TFLiteImporter::parseStridedSlice(const Operator& op, const std::string& op layerParams.name += "/slice"; } - addLayer(layerParams, op); + addLayer(layerParams, op, false, false); - for (int axis = 0; axis < num; ++axis) - { - if (!(shrinkMask & (1 << axis))) - continue; - std::string name = (axis == lastShrinkAxis) ? layerName : format("%s/shrink_axis_%d", layerName.c_str(), axis); - int layerId = addFlattenLayer(axis, axis + 1, name, - layerIds[op.outputs()->Get(0)], isInt8(op) ? CV_8S : CV_32F, op.inputs()->Get(0)); - layerIds[op.inputs()->Get(0)] = std::make_pair(layerId, 0); - } + // for (int axis = 0; axis < num; ++axis) + // { + // if (!(shrinkMask & (1 << axis))) + // continue; + // std::string name = (axis == lastShrinkAxis) ? layerName : format("%s/shrink_axis_%d", layerName.c_str(), axis); + // int layerId = addFlattenLayer(axis, axis + 1, name, + // layerIds[op.outputs()->Get(0)], isInt8(op) ? 
CV_8S : CV_32F, op.inputs()->Get(0)); + // layerIds[op.inputs()->Get(0)] = std::make_pair(layerId, 0); + // } } void TFLiteImporter::parseFullyConnected(const Operator& op, const std::string& opcode, LayerParams& layerParams) { @@ -1344,22 +1345,7 @@ void TFLiteImporter::parseDetectionPostProcess(const Operator& op, const std::st layerParams.set("variance_encoded_in_target", true); } - LayerParams priorsLP; - priorsLP.name = layerParams.name + "/priors"; - priorsLP.type = "Const"; - priorsLP.blobs.resize(1, priors); - - if (newEngine) - { - std::string outTensorName = modelTensors->Get(op.inputs()->Get(2))->name()->str(); - addLayer(priorsLP, {}, {outTensorName}); - layerIds[op.inputs()->Get(2)] = std::make_pair(-1, -1); - } - else - { - int priorsId = dstNet.addLayer(priorsLP.name, priorsLP.type, priorsLP); - layerIds[op.inputs()->Get(2)] = std::make_pair(priorsId, 0); - } + addConstLayer(priors, op.inputs()->Get(2)); addLayer(layerParams, op); } From e12d7db7634468ad53ca8de64be273a2ddcb0516 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 12 Jun 2025 16:27:15 +0300 Subject: [PATCH 3/6] Enable test --- modules/dnn/src/net_impl2.cpp | 5 --- modules/dnn/src/tflite/tflite_importer.cpp | 37 ++++++++++++---------- modules/dnn/test/test_tflite_importer.cpp | 24 +++++++------- 3 files changed, 33 insertions(+), 33 deletions(-) diff --git a/modules/dnn/src/net_impl2.cpp b/modules/dnn/src/net_impl2.cpp index 948851e0f79d..37f172133964 100644 --- a/modules/dnn/src/net_impl2.cpp +++ b/modules/dnn/src/net_impl2.cpp @@ -333,12 +333,7 @@ void Net::Impl::allocateLayerOutputs( tempShapes.clear(); tempTypes.clear(); layer->getMemoryShapes(inpShapes, (int)noutputs, outShapes, tempShapes); - std::cout << layer->name << " " << layer->type << std::endl; - if (inpTypes.size() > 0) - std::cout << " in " << inpTypes[0] << std::endl; layer->getTypes(inpTypes, (int)noutputs, (int)tempShapes.size(), outTypes, tempTypes); - if (outTypes.size() > 0) - std::cout << " out " << 
outTypes[0] << std::endl; CV_Assert(tempShapes.size() == tempTypes.size()); CV_Assert(outShapes.size() == outTypes.size()); CV_Assert(outShapes.size() == noutputs); diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index 908ab181025f..fe8f626f80e4 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -310,10 +310,6 @@ void TFLiteImporter::populateNet() } throw; } - // if (op_outputs->Get(0) == 71) - // { - // break; - // } } if (newEngine) { @@ -1088,7 +1084,6 @@ void TFLiteImporter::addConstLayer(const Mat& blob, int tensorIdx) const std::string& name = modelTensors->Get(tensorIdx)->name()->str(); LayerParams lp; lp.blobs.push_back(blob.u ? blob : blob.clone()); // some tensors are owned by OpenCV - std::cout << "add const " << name << std::endl; if (newEngine) { lp.type = "Const"; @@ -1235,22 +1230,32 @@ void TFLiteImporter::parseStridedSlice(const Operator& op, const std::string& op lastShrinkAxis = axis; } std::string layerName = layerParams.name; - if (lastShrinkAxis != -1) + if (!newEngine && lastShrinkAxis != -1) { layerParams.name += "/slice"; } - addLayer(layerParams, op, false, false); + addLayer(layerParams, op, false, lastShrinkAxis != -1); - // for (int axis = 0; axis < num; ++axis) - // { - // if (!(shrinkMask & (1 << axis))) - // continue; - // std::string name = (axis == lastShrinkAxis) ? layerName : format("%s/shrink_axis_%d", layerName.c_str(), axis); - // int layerId = addFlattenLayer(axis, axis + 1, name, - // layerIds[op.outputs()->Get(0)], isInt8(op) ? 
CV_8S : CV_32F, op.inputs()->Get(0)); - // layerIds[op.inputs()->Get(0)] = std::make_pair(layerId, 0); - // } + for (int axis = 0; axis < num; ++axis) + { + if (!(shrinkMask & (1 << axis))) + continue; + if (newEngine) + { + if (axis != lastShrinkAxis) + CV_Error(Error::StsNotImplemented, "Multiple axes shrink in new engine"); + addFlattenLayer(axis, axis + 1, layerName, + layerIds[op.outputs()->Get(0)], isInt8(op) ? CV_8S : CV_32F, op.outputs()->Get(0)); + } + else + { + std::string name = (axis == lastShrinkAxis) ? layerName : format("%s/shrink_axis_%d", layerName.c_str(), axis); + int layerId = addFlattenLayer(axis, axis + 1, name, + layerIds[op.outputs()->Get(0)], isInt8(op) ? CV_8S : CV_32F, op.inputs()->Get(0)); + layerIds[op.inputs()->Get(0)] = std::make_pair(layerId, 0); + } + } } void TFLiteImporter::parseFullyConnected(const Operator& op, const std::string& opcode, LayerParams& layerParams) { diff --git a/modules/dnn/test/test_tflite_importer.cpp b/modules/dnn/test/test_tflite_importer.cpp index 1b5ca8b03334..b4e89ee46cdd 100644 --- a/modules/dnn/test/test_tflite_importer.cpp +++ b/modules/dnn/test/test_tflite_importer.cpp @@ -55,18 +55,18 @@ void Test_TFLite::testModel(Net& net, const std::string& modelName, const Mat& i std::vector outs; net.forward(outs, outNames); - // ASSERT_EQ(outs.size(), outNames.size()); - // for (int i = 0; i < outNames.size(); ++i) { - // std::replace(outNames[i].begin(), outNames[i].end(), ':', '_'); - // Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str()))); - // // A workaround solution for the following cases due to inconsistent shape definitions. 
- // // The details please see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369 - // if (modelName == "face_landmark" || modelName == "selfie_segmentation") { - // ref = ref.reshape(1, 1); - // outs[i] = outs[i].reshape(1, 1); - // } - // normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf); - // } + ASSERT_EQ(outs.size(), outNames.size()); + for (int i = 0; i < outNames.size(); ++i) { + std::replace(outNames[i].begin(), outNames[i].end(), ':', '_'); + Mat ref = blobFromNPY(findDataFile(format("dnn/tflite/%s_out_%s.npy", modelName.c_str(), outNames[i].c_str()))); + // A workaround solution for the following cases due to inconsistent shape definitions. + // The details please see: https://github.com/opencv/opencv/pull/25297#issuecomment-2039081369 + if (modelName == "face_landmark" || modelName == "selfie_segmentation") { + ref = ref.reshape(1, 1); + outs[i] = outs[i].reshape(1, 1); + } + normAssert(ref, outs[i], outNames[i].c_str(), l1, lInf); + } } void Test_TFLite::testModel(const std::string& modelName, const Mat& input, double l1, double lInf) From aeea1d0a8f3589aef896f96aa2ff3f3cba9059bc Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 12 Jun 2025 16:44:03 +0300 Subject: [PATCH 4/6] Accuracy test pass --- modules/dnn/src/tflite/tflite_importer.cpp | 31 +++++++++++++--------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index fe8f626f80e4..771a2a17d91d 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -728,21 +728,26 @@ void TFLiteImporter::parseEltwise(const Operator& op, const std::string& opcode, std::string fusedActivationType = EnumNameActivationFunctionType(activ); bool haveFusedActivation = fusedActivationType != "NONE"; - addLayer(layerParams, op, false, haveFusedActivation); + addLayer(layerParams, op, false, haveFusedActivation || opcode == 
"SQUARED_DIFFERENCE" || opcode == "RSQRT"); parseFusedActivation(op, activ); - // // Layers that split on multiple operations - // if (opcode == "SQUARED_DIFFERENCE") { - // LayerParams lp; - // lp.set("power", 2); - // int id = dstNet.addLayerToPrev(layerParams.name + "/square", "Power", isOpInt8 ? CV_8S : CV_32F, lp); - // layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); - // } - // else if (opcode == "RSQRT") { - // LayerParams lp; - // int id = dstNet.addLayerToPrev(layerParams.name + "/inv", "Reciprocal", isOpInt8 ? CV_8S : CV_32F, lp); - // layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); - // } + if (opcode == "SQUARED_DIFFERENCE" || opcode == "RSQRT") { + LayerParams lp; + if (opcode == "RSQRT") + lp.type = "Reciprocal"; + else { + lp.type = "Power"; + lp.set("power", 2); + } + if (newEngine) { + std::string tensorName = modelTensors->Get(op.outputs()->Get(0))->name()->str(); + addLayer(lp, {tensorName + "_additional_post_layer"}, {tensorName}); + layerIds[op.outputs()->Get(0)] = std::make_pair(-1, -1); + } else { + int id = dstNet.addLayerToPrev(layerParams.name + "/post", "Power", isOpInt8 ? 
CV_8S : CV_32F, lp); + layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); + } + } } void TFLiteImporter::parsePooling(const Operator& op, const std::string& opcode, LayerParams& layerParams) { From 016b01ed20b567251e371a8033d055fa0110f27f Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 12 Jun 2025 16:48:26 +0300 Subject: [PATCH 5/6] Replace exceptions to warnings --- modules/dnn/src/tflite/tflite_importer.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index 771a2a17d91d..580f05b57fb3 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -732,6 +732,8 @@ void TFLiteImporter::parseEltwise(const Operator& op, const std::string& opcode, parseFusedActivation(op, activ); if (opcode == "SQUARED_DIFFERENCE" || opcode == "RSQRT") { + if (haveFusedActivation) + CV_LOG_WARNING(NULL, format("%s with fused activation on new engine is not tested", opcode.c_str())); LayerParams lp; if (opcode == "RSQRT") lp.type = "Reciprocal"; @@ -1249,7 +1251,7 @@ void TFLiteImporter::parseStridedSlice(const Operator& op, const std::string& op if (newEngine) { if (axis != lastShrinkAxis) - CV_Error(Error::StsNotImplemented, "Multiple axes shrink in new engine"); + CV_LOG_WARNING(NULL, "StridedSlice with multiple axes shrink in new engine is not tested"); addFlattenLayer(axis, axis + 1, layerName, layerIds[op.outputs()->Get(0)], isInt8(op) ? 
CV_8S : CV_32F, op.outputs()->Get(0)); } From 38160f6d7d11cdcefac6a6eb241f1b5869a15152 Mon Sep 17 00:00:00 2001 From: Dmitry Kurtaev Date: Thu, 12 Jun 2025 20:02:58 +0300 Subject: [PATCH 6/6] fix classic engine --- modules/dnn/src/tflite/tflite_importer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/dnn/src/tflite/tflite_importer.cpp b/modules/dnn/src/tflite/tflite_importer.cpp index 580f05b57fb3..dd74d6ead0e7 100644 --- a/modules/dnn/src/tflite/tflite_importer.cpp +++ b/modules/dnn/src/tflite/tflite_importer.cpp @@ -746,7 +746,7 @@ void TFLiteImporter::parseEltwise(const Operator& op, const std::string& opcode, addLayer(lp, {tensorName + "_additional_post_layer"}, {tensorName}); layerIds[op.outputs()->Get(0)] = std::make_pair(-1, -1); } else { - int id = dstNet.addLayerToPrev(layerParams.name + "/post", "Power", isOpInt8 ? CV_8S : CV_32F, lp); + int id = dstNet.addLayerToPrev(layerParams.name + "/post", lp.type, isOpInt8 ? CV_8S : CV_32F, lp); layerIds[op.outputs()->Get(0)] = std::make_pair(id, 0); } }