diff --git a/samples/cpp/speech_sample/main.cpp b/samples/cpp/speech_sample/main.cpp
index f62d9f009f0e2d..431bddee3e6e53 100644
--- a/samples/cpp/speech_sample/main.cpp
+++ b/samples/cpp/speech_sample/main.cpp
@@ -342,11 +342,17 @@ int main(int argc, char* argv[]) {
         }
         count_file = reference_name_files.empty() ? 1 : reference_name_files.size();
     }
+    if (count_file > executableNet.outputs().size()) {
+        throw std::logic_error(
+            "The number of output/reference files is not equal to the number of network outputs.");
+    }
     // -----------------------------------------------------------------------------------------------------
     // --------------------------- Step 5. Do inference --------------------------------------------------------
     std::vector<std::vector<uint8_t>> ptrUtterances;
-    std::vector<std::vector<uint8_t>> vectorPtrScores((outputs.size() == 0) ? 1 : outputs.size());
-    std::vector<uint16_t> numScoresPerOutput((outputs.size() == 0) ? 1 : outputs.size());
+    std::vector<std::vector<uint8_t>> vectorPtrScores((outputs.size() == 0) ? executableNet.outputs().size()
+                                                                            : outputs.size());
+    std::vector<uint16_t> numScoresPerOutput((outputs.size() == 0) ? executableNet.outputs().size()
+                                                                   : outputs.size());
     std::vector<std::vector<uint8_t>> vectorPtrReferenceScores(reference_name_files.size());
     std::vector<ScoreErrorT> vectorFrameError(reference_name_files.size()),
         vectorTotalError(reference_name_files.size());
@@ -474,8 +480,9 @@ int main(int argc, char* argv[]) {
                     inferRequest.inferRequest.wait();
                     if (inferRequest.frameIndex >= 0)
                         for (size_t next_output = 0; next_output < count_file; next_output++) {
-                            std::string outputName = (outputs.size() == 0) ? executableNet.output(0).get_any_name()
-                                                                           : output_names[next_output];
+                            std::string outputName = (outputs.size() == 0)
+                                                         ? executableNet.output(next_output).get_any_name()
+                                                         : output_names[next_output];
                             auto dims = executableNet.output(outputName).get_shape();
                             numScoresPerOutput[next_output] = std::accumulate(std::begin(dims),
                                                                               std::end(dims),
@@ -493,10 +500,6 @@ int main(int argc, char* argv[]) {
 
                                 ov::Tensor outputBlob =
                                     inferRequest.inferRequest.get_tensor(executableNet.output(outputName));
-                                if (!outputs.empty()) {
-                                    outputBlob =
-                                        inferRequest.inferRequest.get_tensor(executableNet.output(outputName));
-                                }
                                 // locked memory holder should be alive all time while access to its buffer happens
                                 auto byteSize = numScoresPerOutput[next_output] * sizeof(float);
                                 std::memcpy(outputFrame, outputBlob.data(), byteSize);
@@ -654,8 +657,8 @@ int main(int argc, char* argv[]) {
             }
             if (!FLAGS_r.empty()) {
                 // print statistical score error
-                std::string outputName =
-                    (outputs.size() == 0) ? executableNet.output(0).get_any_name() : output_names[next_output];
+                std::string outputName = (outputs.size() == 0) ? executableNet.output(next_output).get_any_name()
+                                                               : output_names[next_output];
                 std::cout << "Output name: " << outputName << std::endl;
                 std::cout << "Number scores per frame: " << numScoresPerOutput[next_output] / batchSize << std::endl
                           << std::endl;