Input Image Batch
Batched ITensor Input
The following example shows how to load multiple inputs and batch them into one ITensor.
/// Loads up to `batchSize` input files (one file path per line in `fileLines`)
/// and concatenates their contents into a single batched ITensor sized for the
/// network's (single) input.
///
/// @param snpe      Initialized SNPE network handle used to query input names/dims.
/// @param fileLines File paths, one per batch element; must contain at least `batchSize` entries.
/// @param batchSize Number of files to combine into the batched tensor.
/// @return The populated input tensor.
/// @throws std::runtime_error if tensor names/dimensions cannot be obtained,
///         if `fileLines` is too short, or if the loaded data does not fill the tensor.
std::unique_ptr<zdl::DlSystem::ITensor> loadInputTensorBatched (std::unique_ptr<zdl::SNPE::SNPE> & snpe , std::vector<std::string>& fileLines, size_t batchSize)
{
    std::unique_ptr<zdl::DlSystem::ITensor> input;
    const auto &strList_opt = snpe->getInputTensorNames();
    if (!strList_opt) throw std::runtime_error("Error obtaining Input tensor names");
    const auto &strList = *strList_opt;
    // Make sure the network requires only a single input
    assert (strList.size() == 1);
    // Guard against reading past the end of the supplied file list.
    if (fileLines.size() < batchSize)
        throw std::runtime_error("Not enough input files for the requested batch size");
    // Load batch of inputs from file into vector
    std::vector<float> inputVec;
    for (size_t i = 0; i < batchSize; i++) {
        // If the network has a single input, each line represents the input file to be loaded and combined into one batched ITensor
        std::string filePath(fileLines[i]);
        std::cout << "Batch " << i <<": Processing DNN Input: " << filePath << "\n";
        std::vector<float> loadedFile = loadFloatDataFile(filePath);
        inputVec.insert(inputVec.end(), loadedFile.begin(), loadedFile.end());
    }
    /* Create an input tensor that is correctly sized to hold the batched input of the network. Dimensions that have no fixed size will be represented with a value of 0. */
    const auto &inputDims_opt = snpe->getInputDimensions(strList.at(0));
    // Check validity before dereferencing, consistent with strList_opt above.
    if (!inputDims_opt) throw std::runtime_error("Error obtaining input dimensions");
    const auto &inputShape = *inputDims_opt;
    /* With the input dimensions computed, create a tensor to convey the input into the network. */
    input = zdl::SNPE::SNPEFactory::getTensorFactory().createTensor(inputShape);
    /* Verify that the loaded data exactly fills the tensor; a mismatch would
       otherwise silently truncate or under-fill the network input. */
    if (input->getSize() != inputVec.size())
        throw std::runtime_error("Loaded input size does not match network input tensor size");
    /* Copy the loaded input file contents into the networks input tensor.|Qualcomm(R)| Neural Processing SDK's ITensor supports C++ STL functions like std::copy() */
    std::copy(inputVec.begin(), inputVec.end(), input->begin());
    return input;
}
Batched ITensor Output
The following example shows how to separate a batched output ITensor into individual tensors and write them to disk.
void executeNetwork(std::unique_ptr<zdl::SNPE::SNPE>& snpe,
std::unique_ptr<zdl::DlSystem::ITensor>& input,
std::string OutputDir,
int num)
{
//Execute the network and store the outputs that were specified when creating the network in a TensorMap
static zdl::DlSystem::TensorMap outputTensorMap;
snpe->execute(input.get(), outputTensorMap);
zdl::DlSystem::StringList tensorNames = outputTensorMap.getTensorNames();
//Iterate through the output Tensor map, and print each output layer name
std::for_each( tensorNames.begin(), tensorNames.end(), [&](const char* name)
{
// Split the batched tensor
for (size_t i = 0; i < batchSize; i++) {
std::ostringstream path;
path << OutputDir << "/"
<< "Result_" << num << "/"
<< name << ".raw";
auto tensorPtr = outputTensorMap.getTensor(name);
size_t batchChunk = tensorPtr->getSize() / batchSize;
SaveITensor(path.str(), tensorPtr, i, batchChunk);
}
});
}
// The following is a partial snippet of the function
// Writes one batch element's chunk of `tensor` to `path` as raw little-endian
// float32 data. `batchIndex` selects which chunk; `batchChunk` is the number
// of elements per batch (caller computes it as getSize() / batchSize).
// Exits the process on any I/O failure.
// NOTE(review): the body shown here is elided ("...") in the original snippet;
// directory creation for `path` presumably happens in the omitted portion — confirm.
void SaveITensor(const std::string& path, const zdl::DlSystem::ITensor* tensor, size_t batchIndex, size_t batchChunk)
{
...
std::ofstream os(path, std::ofstream::binary);
if (!os)
{
std::cerr << "Failed to open output file for writing: " << path << "\n";
std::exit(EXIT_FAILURE);
}
// Iterate only over this batch element's half-open element range
// [batchIndex*batchChunk, (batchIndex+1)*batchChunk).
for ( auto it = tensor->cbegin() + batchIndex * batchChunk; it != tensor->cbegin() + (batchIndex + 1) * batchChunk; ++it )
{
// Copy into a local float so we have a contiguous object to write.
float f = *it;
if (!os.write(reinterpret_cast<char*>(&f), sizeof(float)))
{
std::cerr << "Failed to write data to: " << path << "\n";
std::exit(EXIT_FAILURE);
}
}
}