diff --git a/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/README.md b/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/README.md index e5b1b19..b544ead 100644 --- a/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/README.md +++ b/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/README.md @@ -52,23 +52,33 @@ $ cp ${MODEL_DIR}/model_files/labels.txt ${MODEL_DIR}/example_pipeline/build/ $ cp ${MODEL_DIR}/model_files/anchors.csv ${MODEL_DIR}/example_pipeline/build/ ``` -run the detection pipeline inside a docker container. X draw calls are forwarded -to the host so the detection results can be displayed in a X11 window. - -```bash -$ docker run \ - -it --rm \ - -v /tmp/.X11-unix:/tmp/.X11-unix:rw \ - -v ${HOME}/.Xauthority:${HOME}/.Xauthority:rw \ - -e XAUTHORITY=${HOME}/.Xauthority \ - -e DISPLAY=$DISPLAY \ - --net=host \ - -v ${MODEL_DIR}:${MODEL_DIR} \ - -w ${MODEL_DIR}/example_pipeline/build \ - --entrypoint "" \ - autoware/model-zoo-tvm-cli:latest \ - ./example_pipeline -``` +run the detection pipeline inside a docker container. The detection result can be obtained in two ways: +- **Save as an image**: saves the result of the pipeline as an image file in the build directory; the filename `output.jpg` can be changed in the command if needed: + ```bash + $ docker run \ + -it --rm \ + --net=host \ + -v ${MODEL_DIR}:${MODEL_DIR} \ + -w ${MODEL_DIR}/example_pipeline/build \ + --entrypoint "" \ + autoware/model-zoo-tvm-cli:latest \ + ./example_pipeline output.jpg + ``` +- **Display in an X11 window**: X draw calls are forwarded to the host so the detection results can be displayed in an X11 window.
+ ```bash + $ docker run \ + -it --rm \ + -v /tmp/.X11-unix:/tmp/.X11-unix:rw \ + -v ${HOME}/.Xauthority:${HOME}/.Xauthority:rw \ + -e XAUTHORITY=${HOME}/.Xauthority \ + -e DISPLAY=$DISPLAY \ + --net=host \ + -v ${MODEL_DIR}:${MODEL_DIR} \ + -w ${MODEL_DIR}/example_pipeline/build \ + --entrypoint "" \ + autoware/model-zoo-tvm-cli:latest \ + ./example_pipeline + ``` For more information about getting the TVM docker image, see the TVM CLI [documentation](../../../../scripts/tvm_cli/README.md). \ No newline at end of file diff --git a/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/example_pipeline/main.cpp b/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/example_pipeline/main.cpp index 27a0277..4779935 100644 --- a/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/example_pipeline/main.cpp +++ b/perception/camera_obstacle_detection/yolo_v2_tiny/tensorflow_fp32_coco/example_pipeline/main.cpp @@ -1,5 +1,5 @@ // -// Copyright (c) 2020-2021, Arm Limited and Contributors. All rights reserved. +// Copyright (c) 2020-2022, Arm Limited and Contributors. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 // @@ -60,6 +60,17 @@ #define DISPLAY_WINDOW_NAME "YOLO Output" int main(int argc, char const *argv[]) { + bool save_as_image = false; + + if (argc == 2) { + save_as_image = true; + } else if (argc > 2) { + std::cerr << "Too many arguments have been provided. Please use " + << "only one argument to save the result of the pipeline as " + << "an image, or none to display it to an X11 window." 
<< std::endl; + return 1; + } + // load compiled functions tvm::runtime::Module mod = tvm::runtime::Module::LoadFromFile(network_module_path); @@ -289,12 +300,21 @@ int main(int argc, char const *argv[]) { } } - // show in a pop up window the detection results - cv::namedWindow(DISPLAY_WINDOW_NAME, cv::WINDOW_AUTOSIZE); - cv::imshow(DISPLAY_WINDOW_NAME, image); + if (save_as_image) { + // save the detection results as an image + try { + cv::imwrite(argv[1], image); + } catch(cv::Exception& e) { + std::cerr << "An error has occurred while saving to file: " << e.err << std::endl; + } + } else { + // show in a pop up window the detection results + cv::namedWindow(DISPLAY_WINDOW_NAME, cv::WINDOW_AUTOSIZE); + cv::imshow(DISPLAY_WINDOW_NAME, image); - // wait for user to close the window - cv::waitKey(0); + // wait for user to close the window + cv::waitKey(0); + } // usually the detection results would be filtered again by a non-maximum // supression algorithm. It is omitted here for simplicity.