Skip to content

How to load OM file directly in OpenCV DNN CANN backend and achieve the same inference speed as after in-memory conversion? #27402

Open
@IvanKreed

Description

@IvanKreed

Describe the feature and motivation

Hello
I'm working with OpenCV DNN and the CANN backend on Ascend hardware. I noticed a big difference in inference speed depending on how the OM file is loaded.
If I convert my ONNX model to OM in memory (directly in the code, without saving to or loading from disk) and then run inference, I get very fast results — about 3-4 ms per image after the first run.
But if I save the OM file to disk and then load it (even though it's the exact same file), the first inference is slow (which is expected), but all the following inferences are much slower than with the in-memory approach — around 300 ms per image, and it never gets faster, even after many warmup runs.
Is there any way to make inference with a loaded OM file as fast as with the in-memory converted model?
Or is this just a limitation of how CANN loads OM files?
Maybe there are some flags or tricks for OM file compilation or loading that could help?
Here is the relevant `initBackend` code (with my modifications):

void NetImplCann::initBackend(const std::vector& blobsToKeep_)
{
CV_TRACE_FUNCTION();
CV_CheckEQ(preferableBackend, DNN_BACKEND_CANN, "");

// netWasAllocated turns to false if requested output is changed or input shape changes
if (netWasConverted && netWasAllocated)
    return;

if (!netWasConverted)
{
    newWasSupported = true;
    for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); ++it)
    {
        auto& ld = it->second;
        auto layer = ld.layerInstance;
        if (ld.id != 0 && !layer->supportBackend(preferableBackend))
        {
            newWasSupported = false;
            CV_LOG_ONCE_WARNING(NULL, "DNN/CANN: layer (name=" << ld.name << ", type=" << ld.type << ") is not supported by CANN backend. Going back to default backend on CPU target");
        }
    }
}
if (!newWasSupported)
    return ;

// initialize each blob wrappers' names
for (MapIdToLayerData::const_iterator it = layers.begin(); it != layers.end(); ++it)
{
    const LayerData& ld = it->second;
    if (ld.id == 0)
    {
        for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
        {
            auto cannWrapper = ld.outputBlobsWrappers[i].dynamicCast<CannBackendWrapper>();
            // cannWrapper->name = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
            cannWrapper->name = std::string("y");
        }
    }
    else
    {
        for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
        {
            auto cannWrapper = ld.outputBlobsWrappers[i].dynamicCast<CannBackendWrapper>();
            // cannWrapper->name = ld.outputBlobsWrappers.size() > 1 ? (ld.name + ":" + std::to_string(i)) : ld.name;
            cannWrapper->name = ld.outputBlobsWrappers.size() > 1 ? (std::string("y") + std::to_string(i)) : std::string("y");
        }
    }
}

// convert layers to CANN operators,
// collect graph input and output operators,
// collect and input and output wrappers
int firstOutputLayerId = -1;
std::vector<Ptr<BackendNode> > netInputNodes;
std::vector<ge::Operator> graphInputOps, graphOutputOps;
std::vector<Ptr<BackendWrapper>> graphInputWrappers, graphOutputWrappers;
CV_LOG_INFO(NULL, "DNN/CANN: converting layers to CANN operators");
for (MapIdToLayerData::iterator it = layers.begin(); it != layers.end(); ++it)
{
    LayerData& ld = it->second;
    auto layer = ld.layerInstance;

    if (ld.id == 0)
    {
        for (int i = 0; i < ld.outputBlobsWrappers.size(); i++)
        {
            // retrieve tensor description
            auto wrapper = ld.outputBlobsWrappers[i];
            graphInputWrappers.push_back(wrapper);
            auto cannWrapper = wrapper.dynamicCast<CannBackendWrapper>();
            CV_Assert(!cannWrapper.empty());

            // create graph input op
            std::string inputOpName = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
            auto inputOp = std::make_shared<ge::op::Data>(inputOpName);

            inputOp->update_input_desc_x(*(cannWrapper->desc_));
            inputOp->update_output_desc_y(*(cannWrapper->desc_));

            graphInputOps.push_back(*inputOp);
            netInputNodes.push_back(Ptr<BackendNode>(new CannBackendNode(inputOp)));
        }
    }
    else
    {
        ld.skip = true; // skip all cann operators

        std::vector<Ptr<BackendNode> > layerInputNodes;
        for (int i = 0; i < ld.inputBlobsId.size(); i++)
        {
            int layerInputLid = ld.inputBlobsId[i].lid;
            int layerInputOid = ld.inputBlobsId[i].oid;
            if (layerInputLid == 0)
            {
                layerInputNodes.push_back(netInputNodes[layerInputOid]);
            }
            else
            {
                layerInputNodes.push_back(layers[layerInputLid].backendNodes[preferableBackend]);
            }
        }

        CV_LOG_INFO(NULL, "DNN/CANN: converting layer " << ld.name << "@" << ld.type << "@" << ld.id << " to CANN operator");
        auto backendNode = layer->initCann(ld.inputBlobsWrappers, ld.outputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty

        // collect outputs
        bool isOutputNode = ld.consumers.size() == 0 ? true : false;
        if (isOutputNode)
        {
            if (firstOutputLayerId < 0)
                firstOutputLayerId = ld.id;
            auto cannNode = backendNode.dynamicCast<CannBackendNode>();
            graphOutputOps.push_back(*(cannNode->getOp()));
            // assume cann graph outputs and dnn net outputs have the same order
            for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
            {
                graphOutputWrappers.push_back(ld.outputBlobsWrappers[i]);
            }
        }

        ld.backendNodes[preferableBackend] = backendNode;
    }
}
CV_LOG_INFO(NULL, "DNN/CANN: done converting layers to CANN operators");

// build graph from collected graph inputs and outputs
CV_LOG_INFO(NULL, "DNN/CANN: building ge::Graph");
std::string graphName = cv::format("graph_%d", networkId);
std::shared_ptr<ge::Graph> graph = std::make_shared<ge::Graph>(graphName.c_str());
(void)graph->SetInputs(graphInputOps);
(void)graph->SetOutputs(graphOutputOps);
CV_LOG_INFO(NULL, "DNN/CANN: done building ge::Graph");

// convert ge::Graph to OM buffer
CV_LOG_INFO(NULL, "DNN/CANN: converting ge::Graph to OM buffer");
std::shared_ptr<ge::ModelBufferData> modelBuffer = compileCannGraph(graph);
CV_LOG_INFO(NULL, "DNN/CANN: OM buffer size = " << modelBuffer->length);
CV_LOG_INFO(NULL, "DNN/CANN: done building ge::Graph to OM buffer");

// keep net in the first output node and mark the node runnable
auto& ld = layers[firstOutputLayerId];
auto cannNode = ld.backendNodes[preferableBackend].dynamicCast<CannBackendNode>();
std::shared_ptr<CannNet> net = std::shared_ptr<CannNet>(new CannNet());
net->loadModelBuffer(modelBuffer);
net->bindInputWrappers(graphInputWrappers);
net->bindOutputWrappers(graphOutputWrappers);
cannNode->net = net;
ld.skip = false;

netWasConverted = true;

}

Additional context

No response

Metadata

Metadata

Assignees

No one assigned

    Labels

    Type

    No type

    Projects

    No projects

    Milestone

    No milestone

    Relationships

    None yet

    Development

    No branches or pull requests

    Issue actions

      pFad - Phonifier reborn

      Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

      Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


      Alternative Proxies:

      Alternative Proxy

      pFad Proxy

      pFad v3 Proxy

      pFad v4 Proxy