diff --git a/ros/src/computing/perception/detection/packages/cv_tracker/launch/yolo2.launch b/ros/src/computing/perception/detection/packages/cv_tracker/launch/yolo2.launch
index 40ae85e5471..1480bf5c15f 100644
--- a/ros/src/computing/perception/detection/packages/cv_tracker/launch/yolo2.launch
+++ b/ros/src/computing/perception/detection/packages/cv_tracker/launch/yolo2.launch
@@ -1,26 +1,21 @@
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/ros/src/computing/perception/detection/packages/cv_tracker/nodes/yolo2/src/yolo2_node.cpp b/ros/src/computing/perception/detection/packages/cv_tracker/nodes/yolo2/src/yolo2_node.cpp
index 11f919c34ce..c37edfe026f 100644
--- a/ros/src/computing/perception/detection/packages/cv_tracker/nodes/yolo2/src/yolo2_node.cpp
+++ b/ros/src/computing/perception/detection/packages/cv_tracker/nodes/yolo2/src/yolo2_node.cpp
@@ -191,7 +191,7 @@ class Yolo2DetectorNode
void image_callback(const sensor_msgs::ImageConstPtr& in_image_message)
{
      std::vector< RectClassScore<float> > detections;
- //darknet_image = yolo_detector_.convert_image(in_image_message);
+ //darknet_image_ = yolo_detector_.convert_image(in_image_message);
darknet_image = convert_ipl_to_image(in_image_message);
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/CMakeLists.txt b/ros/src/computing/perception/detection/packages/yolo3_detector/CMakeLists.txt
new file mode 100644
index 00000000000..58e8594b18a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/CMakeLists.txt
@@ -0,0 +1,154 @@
+cmake_minimum_required(VERSION 2.8.12)
+project(yolo3_detector)
+
+find_package(catkin REQUIRED COMPONENTS
+ cv_bridge
+ image_transport
+ roscpp
+ sensor_msgs
+ std_msgs
+ autoware_msgs
+ )
+
+find_package(CUDA)
+find_package(OpenCV REQUIRED)
+find_package(OpenMP)
+
+catkin_package(CATKIN_DEPENDS
+ cv_bridge
+ image_transport
+ roscpp
+ sensor_msgs
+ std_msgs
+ autoware_msgs
+ )
+
+set(CMAKE_CXX_FLAGS "-std=c++11 -O3 -g -Wall ${CMAKE_CXX_FLAGS}")
+
+IF (CUDA_FOUND)
+ list(APPEND CUDA_NVCC_FLAGS "--std=c++11 -I${PROJECT_SOURCE_DIR}/darknet/src -I${PROJECT_SOURCE_DIR}/src -DGPU")
+ SET(CUDA_PROPAGATE_HOST_FLAGS OFF)
+
+ #darknet
+ cuda_add_library(yolo3_detector_lib SHARED
+ darknet/src/activation_kernels.cu
+ darknet/src/avgpool_layer_kernels.cu
+ darknet/src/convolutional_kernels.cu
+ darknet/src/crop_layer_kernels.cu
+ darknet/src/col2im_kernels.cu
+ darknet/src/blas_kernels.cu
+ darknet/src/deconvolutional_kernels.cu
+ darknet/src/dropout_layer_kernels.cu
+ darknet/src/im2col_kernels.cu
+ darknet/src/maxpool_layer_kernels.cu
+
+ darknet/src/gemm.c
+ darknet/src/utils.c
+ darknet/src/cuda.c
+ darknet/src/deconvolutional_layer.c
+ darknet/src/convolutional_layer.c
+ darknet/src/list.c
+ darknet/src/image.c
+ darknet/src/activations.c
+ darknet/src/im2col.c
+ darknet/src/col2im.c
+ darknet/src/blas.c
+ darknet/src/crop_layer.c
+ darknet/src/dropout_layer.c
+ darknet/src/maxpool_layer.c
+ darknet/src/softmax_layer.c
+ darknet/src/data.c
+ darknet/src/matrix.c
+ darknet/src/network.c
+ darknet/src/connected_layer.c
+ darknet/src/cost_layer.c
+ darknet/src/parser.c
+ darknet/src/option_list.c
+ darknet/src/detection_layer.c
+ darknet/src/route_layer.c
+ darknet/src/upsample_layer.c
+ darknet/src/box.c
+ darknet/src/normalization_layer.c
+ darknet/src/avgpool_layer.c
+ darknet/src/layer.c
+ darknet/src/local_layer.c
+ darknet/src/shortcut_layer.c
+ darknet/src/logistic_layer.c
+ darknet/src/activation_layer.c
+ darknet/src/rnn_layer.c
+ darknet/src/gru_layer.c
+ darknet/src/crnn_layer.c
+ darknet/src/batchnorm_layer.c
+ darknet/src/region_layer.c
+ darknet/src/reorg_layer.c
+ darknet/src/tree.c
+ darknet/src/lstm_layer.c
+ darknet/src/l2norm_layer.c
+ darknet/src/yolo_layer.c
+ )
+
+ target_compile_definitions(yolo3_detector_lib PUBLIC -DGPU)
+ cuda_add_cublas_to_target(yolo3_detector_lib)
+
+ if (OPENMP_FOUND)
+ set_target_properties(yolo3_detector_lib PROPERTIES
+ COMPILE_FLAGS ${OpenMP_CXX_FLAGS}
+ LINK_FLAGS ${OpenMP_CXX_FLAGS}
+ )
+ endif ()
+
+ target_include_directories(yolo3_detector_lib PRIVATE
+ ${OpenCV_INCLUDE_DIR}
+ ${catkin_INCLUDE_DIRS}
+ ${Boost_INCLUDE_DIRS}
+ ${CUDA_INCLUDE_DIRS}
+ ${PROJECT_SOURCE_DIR}/darknet
+ ${PROJECT_SOURCE_DIR}/darknet/src
+ ${PROJECT_SOURCE_DIR}/src
+ )
+
+ target_link_libraries(yolo3_detector_lib
+ ${OpenCV_LIBRARIES}
+ ${catkin_LIBRARIES}
+ ${PCL_LIBRARIES}
+ ${Qt5Core_LIBRARIES}
+ ${CUDA_LIBRARIES}
+ ${CUDA_CUBLAS_LIBRARIES}
+ ${CUDA_curand_LIBRARY}
+ )
+
+ add_dependencies(yolo3_detector_lib
+ ${catkin_EXPORTED_TARGETS}
+ )
+
+ #ros node
+ cuda_add_executable(yolo3_detector
+ src/yolo3_node.cpp
+ src/yolo3_detector.cpp
+ src/yolo3_detector.h
+ )
+
+ target_compile_definitions(yolo3_detector PUBLIC -DGPU)
+
+ target_include_directories(yolo3_detector PRIVATE
+ ${CUDA_INCLUDE_DIRS}
+ ${catkin_INCLUDE_DIRS}
+ ${PROJECT_SOURCE_DIR}/darknet
+ ${PROJECT_SOURCE_DIR}/darknet/src
+ ${PROJECT_SOURCE_DIR}/src
+ )
+
+ target_link_libraries(yolo3_detector
+ ${catkin_LIBRARIES}
+ ${OpenCV_LIBS}
+ ${CUDA_LIBRARIES}
+ ${CUDA_CUBLAS_LIBRARIES}
+ ${CUDA_curand_LIBRARY}
+ yolo3_detector_lib
+ )
+ add_dependencies(yolo3_detector
+ ${catkin_EXPORTED_TARGETS}
+ )
+ELSE()
+ message("YOLO3 won't be built, CUDA was not found.")
+ENDIF ()
\ No newline at end of file
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/README.md b/ros/src/computing/perception/detection/packages/yolo3_detector/README.md
new file mode 100644
index 00000000000..8b1e794c1f3
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/README.md
@@ -0,0 +1,49 @@
+# Yolo3 Detector
+
+Autoware package for image object detection based on the YOLOv3 detector.
+
+### Requirements
+
+* NVIDIA GPU with CUDA installed
+* A YOLOv3 model pretrained on the COCO dataset (see the download example below).
+
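+For reference, the COCO-pretrained weights are commonly fetched from the official YOLO site; the URL below is the usual location but is not maintained by this package, so verify it before use:
+
+```sh
+# Hypothetical example: download the COCO-pretrained YOLOv3 weights
+wget https://pjreddie.com/media/files/yolov3.weights
+```
+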
+### How to launch
+
+* From a sourced terminal:
+
+`roslaunch yolo3_detector yolo3.launch`
+
+* From Runtime Manager:
+
+Computing Tab -> Detection/cv_detector -> `yolo3_wa`
+
+### Parameters
+
+Launch file available parameters (see the usage example after the table):
+
+|Parameter| Type| Description|
+----------|-----|--------
+|`score_threshold`|*Double*|Minimum confidence score; only detections scoring above this value are reported. Default `0.5`.|
+|`nms_threshold`|*Double*|Non-Maximum Suppression overlap threshold used to merge overlapping proposals. Default `0.45`.|
+|`network_definition_file`|*String*|Network architecture definition configuration file. Default `yolov3.cfg`.|
+|`pretrained_model_file`|*String*|Path to pretrained model. Default `yolov3.weights`.|
+|`camera_id`|*String*|Namespace of the source camera topics. Default `/`.|
+|`image_src`|*String*|Image source topic. Default `/image_raw`.|
+
+### Subscribed topics
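+Assuming the launch file exposes these parameters as roslaunch arguments (not verified here), they could be overridden from the command line, e.g.:
+
+```sh
+# Hypothetical invocation: raise the score threshold and select a different image topic
+roslaunch yolo3_detector yolo3.launch score_threshold:=0.6 image_src:=/image_raw
+```
+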
+
+|Topic|Type|Objective|
+------|----|---------
+|`/image_raw`|`sensor_msgs/Image`|Source image stream to perform detection.|
+|`/config/Yolo3`|`autoware_msgs/ConfigSsd`|Runtime adjustment of the detection threshold.|
+
+### Published topics
+
+|Topic|Type|Objective|
+------|----|---------
+|`/obj_car/image_obj`|`autoware_msgs/image_obj`|Bounding boxes, in image coordinates, of objects detected as vehicles.|
+|`/obj_person/image_obj`|`autoware_msgs/image_obj`|Bounding boxes, in image coordinates, of objects detected as pedestrians.|
+
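+Once the node is running, the output can be checked with standard ROS tooling, for example:
+
+```sh
+# Print vehicle detections as they are published
+rostopic echo /obj_car/image_obj
+```
+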
+### Video
+
+[![Yolo v3 Autoware](https://img.youtube.com/vi/pO4vM4ehI98/0.jpg)](https://www.youtube.com/watch?v=pO4vM4ehI98)
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3-voc.cfg b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3-voc.cfg
new file mode 100644
index 00000000000..3f3e8dfb31b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3-voc.cfg
@@ -0,0 +1,785 @@
+[net]
+# Testing
+ batch=1
+ subdivisions=1
+# Training
+# batch=64
+# subdivisions=16
+width=416
+height=416
+channels=3
+momentum=0.9
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.001
+burn_in=1000
+max_batches = 50200
+policy=steps
+steps=40000,45000
+scales=.1,.1
+
+
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=leaky
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+######################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=75
+activation=linear
+
+[yolo]
+mask = 6,7,8
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=20
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 61
+
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=75
+activation=linear
+
+[yolo]
+mask = 3,4,5
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=20
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 36
+
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=75
+activation=linear
+
+[yolo]
+mask = 0,1,2
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=20
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3.cfg b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3.cfg
new file mode 100644
index 00000000000..5f3ab621302
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/cfg/yolov3.cfg
@@ -0,0 +1,789 @@
+[net]
+# Testing
+batch=1
+subdivisions=1
+# Training
+# batch=64
+# subdivisions=16
+width=416
+height=416
+channels=3
+momentum=0.9
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.001
+burn_in=1000
+max_batches = 500200
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=leaky
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=1
+pad=1
+activation=leaky
+
+[shortcut]
+from=-3
+activation=linear
+
+######################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 61
+
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = -1, 36
+
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .5
+truth_thresh = 1
+random=1
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_kernels.cu
new file mode 100644
index 00000000000..5852eb58e0e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_kernels.cu
@@ -0,0 +1,200 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "activations.h"
+#include "cuda.h"
+}
+
+
+__device__ float lhtan_activate_kernel(float x)
+{
+ if(x < 0) return .001f*x;
+ if(x > 1) return .001f*(x-1.f) + 1.f;
+ return x;
+}
+__device__ float lhtan_gradient_kernel(float x)
+{
+ if(x > 0 && x < 1) return 1;
+ return .001;
+}
+
+__device__ float hardtan_activate_kernel(float x)
+{
+ if (x < -1) return -1;
+ if (x > 1) return 1;
+ return x;
+}
+__device__ float linear_activate_kernel(float x){return x;}
+__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
+__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
+__device__ float relu_activate_kernel(float x){return x*(x>0);}
+__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
+__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
+__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
+__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
+__device__ float tanh_activate_kernel(float x){return (2.f/(1 + expf(-2*x)) - 1);}
+__device__ float plse_activate_kernel(float x)
+{
+ if(x < -4) return .01f * (x + 4);
+ if(x > 4) return .01f * (x - 4) + 1;
+ return .125f*x + .5f;
+}
+__device__ float stair_activate_kernel(float x)
+{
+ int n = floorf(x);
+ if (n%2 == 0) return floorf(x/2);
+ else return (x - n) + floorf(x/2);
+}
+
+
+__device__ float hardtan_gradient_kernel(float x)
+{
+ if (x > -1 && x < 1) return 1;
+ return 0;
+}
+__device__ float linear_gradient_kernel(float x){return 1;}
+__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
+__device__ float loggy_gradient_kernel(float x)
+{
+ float y = (x+1)/2;
+ return 2*(1-y)*y;
+}
+__device__ float relu_gradient_kernel(float x){return (x>0);}
+__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
+__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
+__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
+__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
+__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
+__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
+__device__ float stair_gradient_kernel(float x)
+{
+ if (floorf(x) == x) return 0;
+ return 1;
+}
+
+__device__ float activate_kernel(float x, ACTIVATION a)
+{
+ switch(a){
+ case LINEAR:
+ return linear_activate_kernel(x);
+ case LOGISTIC:
+ return logistic_activate_kernel(x);
+ case LOGGY:
+ return loggy_activate_kernel(x);
+ case RELU:
+ return relu_activate_kernel(x);
+ case ELU:
+ return elu_activate_kernel(x);
+ case RELIE:
+ return relie_activate_kernel(x);
+ case RAMP:
+ return ramp_activate_kernel(x);
+ case LEAKY:
+ return leaky_activate_kernel(x);
+ case TANH:
+ return tanh_activate_kernel(x);
+ case PLSE:
+ return plse_activate_kernel(x);
+ case STAIR:
+ return stair_activate_kernel(x);
+ case HARDTAN:
+ return hardtan_activate_kernel(x);
+ case LHTAN:
+ return lhtan_activate_kernel(x);
+ }
+ return 0;
+}
+
+__device__ float gradient_kernel(float x, ACTIVATION a)
+{
+ switch(a){
+ case LINEAR:
+ return linear_gradient_kernel(x);
+ case LOGISTIC:
+ return logistic_gradient_kernel(x);
+ case LOGGY:
+ return loggy_gradient_kernel(x);
+ case RELU:
+ return relu_gradient_kernel(x);
+ case ELU:
+ return elu_gradient_kernel(x);
+ case RELIE:
+ return relie_gradient_kernel(x);
+ case RAMP:
+ return ramp_gradient_kernel(x);
+ case LEAKY:
+ return leaky_gradient_kernel(x);
+ case TANH:
+ return tanh_gradient_kernel(x);
+ case PLSE:
+ return plse_gradient_kernel(x);
+ case STAIR:
+ return stair_gradient_kernel(x);
+ case HARDTAN:
+ return hardtan_gradient_kernel(x);
+ case LHTAN:
+ return lhtan_gradient_kernel(x);
+ }
+ return 0;
+}
+
+__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ int i = id % s;
+ int b = id / s;
+ float x1 = x[b*s + i];
+ float x2 = x[b*s + s/2 + i];
+ if(id < n) {
+ float de = dy[id];
+ dx[b*s + i] = x2*de;
+ dx[b*s + s/2 + i] = x1*de;
+ }
+}
+
+extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
+{
+ binary_gradient_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, dx, n/2, size, a, y);
+ check_error(cudaPeekAtLastError());
+}
+__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ int i = id % s;
+ int b = id / s;
+ float x1 = x[b*s + i];
+ float x2 = x[b*s + s/2 + i];
+ if(id < n) y[id] = x1*x2;
+}
+
+extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
+{
+ binary_activate_array_kernel<<<cuda_gridsize(n/2), BLOCK>>>(x, n/2, size, a, y);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n) x[i] = activate_kernel(x[i], a);
+}
+
+__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n) delta[i] *= gradient_kernel(x[i], a);
+}
+
+extern "C" void activate_array_gpu(float *x, int n, ACTIVATION a)
+{
+ activate_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta)
+{
+ gradient_array_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, a, delta);
+ check_error(cudaPeekAtLastError());
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.c
new file mode 100644
index 00000000000..b4ba953967b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.c
@@ -0,0 +1,63 @@
+#include "activation_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+layer make_activation_layer(int batch, int inputs, ACTIVATION activation)
+{
+ layer l = {0};
+ l.type = ACTIVE;
+
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.batch=batch;
+
+ l.output = calloc(batch*inputs, sizeof(float*));
+ l.delta = calloc(batch*inputs, sizeof(float*));
+
+ l.forward = forward_activation_layer;
+ l.backward = backward_activation_layer;
+#ifdef GPU
+ l.forward_gpu = forward_activation_layer_gpu;
+ l.backward_gpu = backward_activation_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, inputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+#endif
+ l.activation = activation;
+ fprintf(stderr, "Activation Layer: %d inputs\n", inputs);
+ return l;
+}
+
+void forward_activation_layer(layer l, network net)
+{
+ copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+}
+
+void backward_activation_layer(layer l, network net)
+{
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+ copy_cpu(l.outputs*l.batch, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void forward_activation_layer_gpu(layer l, network net)
+{
+ copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
+}
+
+void backward_activation_layer_gpu(layer l, network net)
+{
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+ copy_gpu(l.outputs*l.batch, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.h
new file mode 100644
index 00000000000..42118a84e83
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activation_layer.h
@@ -0,0 +1,19 @@
+#ifndef ACTIVATION_LAYER_H
+#define ACTIVATION_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_activation_layer(int batch, int inputs, ACTIVATION activation);
+
+void forward_activation_layer(layer l, network net);
+void backward_activation_layer(layer l, network net);
+
+#ifdef GPU
+void forward_activation_layer_gpu(layer l, network net);
+void backward_activation_layer_gpu(layer l, network net);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.c
new file mode 100644
index 00000000000..0cbb2f5546e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.c
@@ -0,0 +1,143 @@
+#include "activations.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+char *get_activation_string(ACTIVATION a)
+{
+ switch(a){
+ case LOGISTIC:
+ return "logistic";
+ case LOGGY:
+ return "loggy";
+ case RELU:
+ return "relu";
+ case ELU:
+ return "elu";
+ case RELIE:
+ return "relie";
+ case RAMP:
+ return "ramp";
+ case LINEAR:
+ return "linear";
+ case TANH:
+ return "tanh";
+ case PLSE:
+ return "plse";
+ case LEAKY:
+ return "leaky";
+ case STAIR:
+ return "stair";
+ case HARDTAN:
+ return "hardtan";
+ case LHTAN:
+ return "lhtan";
+ default:
+ break;
+ }
+ return "relu";
+}
+
+ACTIVATION get_activation(char *s)
+{
+ if (strcmp(s, "logistic")==0) return LOGISTIC;
+ if (strcmp(s, "loggy")==0) return LOGGY;
+ if (strcmp(s, "relu")==0) return RELU;
+ if (strcmp(s, "elu")==0) return ELU;
+ if (strcmp(s, "relie")==0) return RELIE;
+ if (strcmp(s, "plse")==0) return PLSE;
+ if (strcmp(s, "hardtan")==0) return HARDTAN;
+ if (strcmp(s, "lhtan")==0) return LHTAN;
+ if (strcmp(s, "linear")==0) return LINEAR;
+ if (strcmp(s, "ramp")==0) return RAMP;
+ if (strcmp(s, "leaky")==0) return LEAKY;
+ if (strcmp(s, "tanh")==0) return TANH;
+ if (strcmp(s, "stair")==0) return STAIR;
+ fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
+ return RELU;
+}
+
+float activate(float x, ACTIVATION a)
+{
+ switch(a){
+ case LINEAR:
+ return linear_activate(x);
+ case LOGISTIC:
+ return logistic_activate(x);
+ case LOGGY:
+ return loggy_activate(x);
+ case RELU:
+ return relu_activate(x);
+ case ELU:
+ return elu_activate(x);
+ case RELIE:
+ return relie_activate(x);
+ case RAMP:
+ return ramp_activate(x);
+ case LEAKY:
+ return leaky_activate(x);
+ case TANH:
+ return tanh_activate(x);
+ case PLSE:
+ return plse_activate(x);
+ case STAIR:
+ return stair_activate(x);
+ case HARDTAN:
+ return hardtan_activate(x);
+ case LHTAN:
+ return lhtan_activate(x);
+ }
+ return 0;
+}
+
+void activate_array(float *x, const int n, const ACTIVATION a)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ x[i] = activate(x[i], a);
+ }
+}
+
+float gradient(float x, ACTIVATION a)
+{
+ switch(a){
+ case LINEAR:
+ return linear_gradient(x);
+ case LOGISTIC:
+ return logistic_gradient(x);
+ case LOGGY:
+ return loggy_gradient(x);
+ case RELU:
+ return relu_gradient(x);
+ case ELU:
+ return elu_gradient(x);
+ case RELIE:
+ return relie_gradient(x);
+ case RAMP:
+ return ramp_gradient(x);
+ case LEAKY:
+ return leaky_gradient(x);
+ case TANH:
+ return tanh_gradient(x);
+ case PLSE:
+ return plse_gradient(x);
+ case STAIR:
+ return stair_gradient(x);
+ case HARDTAN:
+ return hardtan_gradient(x);
+ case LHTAN:
+ return lhtan_gradient(x);
+ }
+ return 0;
+}
+
+void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ delta[i] *= gradient(x[i], a);
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.h
new file mode 100644
index 00000000000..d456dbe3c2b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/activations.h
@@ -0,0 +1,85 @@
+#ifndef ACTIVATIONS_H
+#define ACTIVATIONS_H
+#include "darknet.h"
+#include "cuda.h"
+#include "math.h"
+
+ACTIVATION get_activation(char *s);
+
+char *get_activation_string(ACTIVATION a);
+float activate(float x, ACTIVATION a);
+float gradient(float x, ACTIVATION a);
+void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta);
+void activate_array(float *x, const int n, const ACTIVATION a);
+#ifdef GPU
+void activate_array_gpu(float *x, int n, ACTIVATION a);
+void gradient_array_gpu(float *x, int n, ACTIVATION a, float *delta);
+#endif
+
+static inline float stair_activate(float x)
+{
+ int n = floor(x);
+ if (n%2 == 0) return floor(x/2.);
+ else return (x - n) + floor(x/2.);
+}
+static inline float hardtan_activate(float x)
+{
+ if (x < -1) return -1;
+ if (x > 1) return 1;
+ return x;
+}
+static inline float linear_activate(float x){return x;}
+static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
+static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
+static inline float relu_activate(float x){return x*(x>0);}
+static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
+static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
+static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
+static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
+static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
+static inline float plse_activate(float x)
+{
+ if(x < -4) return .01 * (x + 4);
+ if(x > 4) return .01 * (x - 4) + 1;
+ return .125*x + .5;
+}
+
+static inline float lhtan_activate(float x)
+{
+ if(x < 0) return .001*x;
+ if(x > 1) return .001*(x-1) + 1;
+ return x;
+}
+static inline float lhtan_gradient(float x)
+{
+ if(x > 0 && x < 1) return 1;
+ return .001;
+}
+
+static inline float hardtan_gradient(float x)
+{
+ if (x > -1 && x < 1) return 1;
+ return 0;
+}
+static inline float linear_gradient(float x){return 1;}
+static inline float logistic_gradient(float x){return (1-x)*x;}
+static inline float loggy_gradient(float x)
+{
+ float y = (x+1.)/2.;
+ return 2*(1-y)*y;
+}
+static inline float stair_gradient(float x)
+{
+ if (floor(x) == x) return 0;
+ return 1;
+}
+static inline float relu_gradient(float x){return (x>0);}
+static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
+static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
+static inline float ramp_gradient(float x){return (x>0)+.1;}
+static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
+static inline float tanh_gradient(float x){return 1-x*x;}
+static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.c
new file mode 100644
index 00000000000..83034dbecf4
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.c
@@ -0,0 +1,71 @@
+#include "avgpool_layer.h"
+#include "cuda.h"
+#include <stdio.h>
+
+avgpool_layer make_avgpool_layer(int batch, int w, int h, int c)
+{
+ fprintf(stderr, "avg %4d x%4d x%4d -> %4d\n", w, h, c, c);
+ avgpool_layer l = {0};
+ l.type = AVGPOOL;
+ l.batch = batch;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.out_w = 1;
+ l.out_h = 1;
+ l.out_c = c;
+ l.outputs = l.out_c;
+ l.inputs = h*w*c;
+ int output_size = l.outputs * batch;
+ l.output = calloc(output_size, sizeof(float));
+ l.delta = calloc(output_size, sizeof(float));
+ l.forward = forward_avgpool_layer;
+ l.backward = backward_avgpool_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_avgpool_layer_gpu;
+ l.backward_gpu = backward_avgpool_layer_gpu;
+ l.output_gpu = cuda_make_array(l.output, output_size);
+ l.delta_gpu = cuda_make_array(l.delta, output_size);
+ #endif
+ return l;
+}
+
+void resize_avgpool_layer(avgpool_layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+ l->inputs = h*w*l->c;
+}
+
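+/* Global average pooling: each input channel is reduced to a single value averaged over its h*w spatial positions. */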
+void forward_avgpool_layer(const avgpool_layer l, network net)
+{
+ int b,i,k;
+
+ for(b = 0; b < l.batch; ++b){
+ for(k = 0; k < l.c; ++k){
+ int out_index = k + b*l.c;
+ l.output[out_index] = 0;
+ for(i = 0; i < l.h*l.w; ++i){
+ int in_index = i + l.h*l.w*(k + b*l.c);
+ l.output[out_index] += net.input[in_index];
+ }
+ l.output[out_index] /= l.h*l.w;
+ }
+ }
+}
+
+void backward_avgpool_layer(const avgpool_layer l, network net)
+{
+ int b,i,k;
+
+ for(b = 0; b < l.batch; ++b){
+ for(k = 0; k < l.c; ++k){
+ int out_index = k + b*l.c;
+ for(i = 0; i < l.h*l.w; ++i){
+ int in_index = i + l.h*l.w*(k + b*l.c);
+ net.delta[in_index] += l.delta[out_index] / (l.h*l.w);
+ }
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.h
new file mode 100644
index 00000000000..3bd356c4e39
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer.h
@@ -0,0 +1,23 @@
+#ifndef AVGPOOL_LAYER_H
+#define AVGPOOL_LAYER_H
+
+#include "image.h"
+#include "cuda.h"
+#include "layer.h"
+#include "network.h"
+
+typedef layer avgpool_layer;
+
+image get_avgpool_image(avgpool_layer l);
+avgpool_layer make_avgpool_layer(int batch, int w, int h, int c);
+void resize_avgpool_layer(avgpool_layer *l, int w, int h);
+void forward_avgpool_layer(const avgpool_layer l, network net);
+void backward_avgpool_layer(const avgpool_layer l, network net);
+
+#ifdef GPU
+void forward_avgpool_layer_gpu(avgpool_layer l, network net);
+void backward_avgpool_layer_gpu(avgpool_layer l, network net);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer_kernels.cu
new file mode 100644
index 00000000000..a7eca3aeae9
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/avgpool_layer_kernels.cu
@@ -0,0 +1,61 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "avgpool_layer.h"
+#include "cuda.h"
+}
+
+__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= n) return;
+
+ int k = id % c;
+ id /= c;
+ int b = id;
+
+ int i;
+ int out_index = (k + c*b);
+ output[out_index] = 0;
+ for(i = 0; i < w*h; ++i){
+ int in_index = i + h*w*(k + b*c);
+ output[out_index] += input[in_index];
+ }
+ output[out_index] /= w*h;
+}
+
+__global__ void backward_avgpool_layer_kernel(int n, int w, int h, int c, float *in_delta, float *out_delta)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= n) return;
+
+ int k = id % c;
+ id /= c;
+ int b = id;
+
+ int i;
+ int out_index = (k + c*b);
+ for(i = 0; i < w*h; ++i){
+ int in_index = i + h*w*(k + b*c);
+ in_delta[in_index] += out_delta[out_index] / (w*h);
+ }
+}
+
+extern "C" void forward_avgpool_layer_gpu(avgpool_layer layer, network net)
+{
+ size_t n = layer.c*layer.batch;
+
+ forward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, net.input_gpu, layer.output_gpu);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void backward_avgpool_layer_gpu(avgpool_layer layer, network net)
+{
+ size_t n = layer.c*layer.batch;
+
+ backward_avgpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.w, layer.h, layer.c, net.delta_gpu, layer.delta_gpu);
+ check_error(cudaPeekAtLastError());
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.c
new file mode 100644
index 00000000000..ebff387cc4b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.c
@@ -0,0 +1,279 @@
+#include "convolutional_layer.h"
+#include "batchnorm_layer.h"
+#include "blas.h"
+#include <stdio.h>
+
+layer make_batchnorm_layer(int batch, int w, int h, int c)
+{
+ fprintf(stderr, "Batch Normalization Layer: %d x %d x %d image\n", w,h,c);
+ layer l = {0};
+ l.type = BATCHNORM;
+ l.batch = batch;
+ l.h = l.out_h = h;
+ l.w = l.out_w = w;
+ l.c = l.out_c = c;
+ l.output = calloc(h * w * c * batch, sizeof(float));
+ l.delta = calloc(h * w * c * batch, sizeof(float));
+ l.inputs = w*h*c;
+ l.outputs = l.inputs;
+
+ l.scales = calloc(c, sizeof(float));
+ l.scale_updates = calloc(c, sizeof(float));
+ l.biases = calloc(c, sizeof(float));
+ l.bias_updates = calloc(c, sizeof(float));
+ int i;
+ for(i = 0; i < c; ++i){
+ l.scales[i] = 1;
+ }
+
+ l.mean = calloc(c, sizeof(float));
+ l.variance = calloc(c, sizeof(float));
+
+ l.rolling_mean = calloc(c, sizeof(float));
+ l.rolling_variance = calloc(c, sizeof(float));
+
+ l.forward = forward_batchnorm_layer;
+ l.backward = backward_batchnorm_layer;
+#ifdef GPU
+ l.forward_gpu = forward_batchnorm_layer_gpu;
+ l.backward_gpu = backward_batchnorm_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, h * w * c * batch);
+ l.delta_gpu = cuda_make_array(l.delta, h * w * c * batch);
+
+ l.biases_gpu = cuda_make_array(l.biases, c);
+ l.bias_updates_gpu = cuda_make_array(l.bias_updates, c);
+
+ l.scales_gpu = cuda_make_array(l.scales, c);
+ l.scale_updates_gpu = cuda_make_array(l.scale_updates, c);
+
+ l.mean_gpu = cuda_make_array(l.mean, c);
+ l.variance_gpu = cuda_make_array(l.variance, c);
+
+ l.rolling_mean_gpu = cuda_make_array(l.mean, c);
+ l.rolling_variance_gpu = cuda_make_array(l.variance, c);
+
+ l.mean_delta_gpu = cuda_make_array(l.mean, c);
+ l.variance_delta_gpu = cuda_make_array(l.variance, c);
+
+ l.x_gpu = cuda_make_array(l.output, l.batch*l.outputs);
+ l.x_norm_gpu = cuda_make_array(l.output, l.batch*l.outputs);
+ #ifdef CUDNN
+ cudnnCreateTensorDescriptor(&l.normTensorDesc);
+ cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+ cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
+ cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
+
+ #endif
+#endif
+ return l;
+}
+
+void backward_scale_cpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ int i,b,f;
+ for(f = 0; f < n; ++f){
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int index = i + size*(f + n*b);
+ sum += delta[index] * x_norm[index];
+ }
+ }
+ scale_updates[f] += sum;
+ }
+}
+
+void mean_delta_cpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+
+ int i,j,k;
+ for(i = 0; i < filters; ++i){
+ mean_delta[i] = 0;
+ for (j = 0; j < batch; ++j) {
+ for (k = 0; k < spatial; ++k) {
+ int index = j*filters*spatial + i*spatial + k;
+ mean_delta[i] += delta[index];
+ }
+ }
+ mean_delta[i] *= (-1./sqrt(variance[i] + .00001f));
+ }
+}
+void variance_delta_cpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
+{
+
+ int i,j,k;
+ for(i = 0; i < filters; ++i){
+ variance_delta[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ variance_delta[i] += delta[index]*(x[index] - mean[i]);
+ }
+ }
+ variance_delta[i] *= -.5 * pow(variance[i] + .00001f, (float)(-3./2.));
+ }
+}
+void normalize_delta_cpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
+{
+ int f, j, k;
+ for(j = 0; j < batch; ++j){
+ for(f = 0; f < filters; ++f){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + f*spatial + k;
+ delta[index] = delta[index] * 1./(sqrt(variance[f] + .00001f)) + variance_delta[f] * 2. * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
+ }
+ }
+ }
+}
+
+void resize_batchnorm_layer(layer *layer, int w, int h)
+{
+ fprintf(stderr, "Not implemented\n");
+}
+
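+/* Training mode computes per-batch mean/variance and blends them into the rolling statistics (0.99/0.01); inference mode normalizes with the rolling statistics instead. */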
+void forward_batchnorm_layer(layer l, network net)
+{
+ if(l.type == BATCHNORM) copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
+ copy_cpu(l.outputs*l.batch, l.output, 1, l.x, 1);
+ if(net.train){
+ mean_cpu(l.output, l.batch, l.out_c, l.out_h*l.out_w, l.mean);
+ variance_cpu(l.output, l.mean, l.batch, l.out_c, l.out_h*l.out_w, l.variance);
+
+ scal_cpu(l.out_c, .99, l.rolling_mean, 1);
+ axpy_cpu(l.out_c, .01, l.mean, 1, l.rolling_mean, 1);
+ scal_cpu(l.out_c, .99, l.rolling_variance, 1);
+ axpy_cpu(l.out_c, .01, l.variance, 1, l.rolling_variance, 1);
+
+ normalize_cpu(l.output, l.mean, l.variance, l.batch, l.out_c, l.out_h*l.out_w);
+ copy_cpu(l.outputs*l.batch, l.output, 1, l.x_norm, 1);
+ } else {
+ normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
+ }
+ scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
+ add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
+}
+
+void backward_batchnorm_layer(layer l, network net)
+{
+ if(!net.train){
+ l.mean = l.rolling_mean;
+ l.variance = l.rolling_variance;
+ }
+ backward_bias(l.bias_updates, l.delta, l.batch, l.out_c, l.out_w*l.out_h);
+ backward_scale_cpu(l.x_norm, l.delta, l.batch, l.out_c, l.out_w*l.out_h, l.scale_updates);
+
+ scale_bias(l.delta, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
+
+ mean_delta_cpu(l.delta, l.variance, l.batch, l.out_c, l.out_w*l.out_h, l.mean_delta);
+ variance_delta_cpu(l.x, l.delta, l.mean, l.variance, l.batch, l.out_c, l.out_w*l.out_h, l.variance_delta);
+ normalize_delta_cpu(l.x, l.mean, l.variance, l.mean_delta, l.variance_delta, l.batch, l.out_c, l.out_w*l.out_h, l.delta);
+ if(l.type == BATCHNORM) copy_cpu(l.outputs*l.batch, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void pull_batchnorm_layer(layer l)
+{
+ cuda_pull_array(l.scales_gpu, l.scales, l.c);
+ cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+ cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+void push_batchnorm_layer(layer l)
+{
+ cuda_push_array(l.scales_gpu, l.scales, l.c);
+ cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.c);
+ cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.c);
+}
+
+void forward_batchnorm_layer_gpu(layer l, network net)
+{
+ if(l.type == BATCHNORM) copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
+ if (net.train) {
+#ifdef CUDNN
+ float one = 1;
+ float zero = 0;
+ cudnnBatchNormalizationForwardTraining(cudnn_handle(),
+ CUDNN_BATCHNORM_SPATIAL,
+ &one,
+ &zero,
+ l.dstTensorDesc,
+ l.x_gpu,
+ l.dstTensorDesc,
+ l.output_gpu,
+ l.normTensorDesc,
+ l.scales_gpu,
+ l.biases_gpu,
+ .01,
+ l.rolling_mean_gpu,
+ l.rolling_variance_gpu,
+ .00001,
+ l.mean_gpu,
+ l.variance_gpu);
+#else
+ fast_mean_gpu(l.output_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.mean_gpu);
+ fast_variance_gpu(l.output_gpu, l.mean_gpu, l.batch, l.out_c, l.out_h*l.out_w, l.variance_gpu);
+
+ scal_gpu(l.out_c, .99, l.rolling_mean_gpu, 1);
+ axpy_gpu(l.out_c, .01, l.mean_gpu, 1, l.rolling_mean_gpu, 1);
+ scal_gpu(l.out_c, .99, l.rolling_variance_gpu, 1);
+ axpy_gpu(l.out_c, .01, l.variance_gpu, 1, l.rolling_variance_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.output_gpu, 1, l.x_gpu, 1);
+ normalize_gpu(l.output_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+ copy_gpu(l.outputs*l.batch, l.output_gpu, 1, l.x_norm_gpu, 1);
+
+ scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+ add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+#endif
+ } else {
+ normalize_gpu(l.output_gpu, l.rolling_mean_gpu, l.rolling_variance_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+ scale_bias_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+ add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+ }
+
+}
+
+void backward_batchnorm_layer_gpu(layer l, network net)
+{
+ if(!net.train){
+ l.mean_gpu = l.rolling_mean_gpu;
+ l.variance_gpu = l.rolling_variance_gpu;
+ }
+#ifdef CUDNN
+ float one = 1;
+ float zero = 0;
+ cudnnBatchNormalizationBackward(cudnn_handle(),
+ CUDNN_BATCHNORM_SPATIAL,
+ &one,
+ &zero,
+ &one,
+ &one,
+ l.dstTensorDesc,
+ l.x_gpu,
+ l.dstTensorDesc,
+ l.delta_gpu,
+ l.dstTensorDesc,
+ l.x_norm_gpu,
+ l.normTensorDesc,
+ l.scales_gpu,
+ l.scale_updates_gpu,
+ l.bias_updates_gpu,
+ .00001,
+ l.mean_gpu,
+ l.variance_gpu);
+ copy_gpu(l.outputs*l.batch, l.x_norm_gpu, 1, l.delta_gpu, 1);
+#else
+ backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+ backward_scale_gpu(l.x_norm_gpu, l.delta_gpu, l.batch, l.out_c, l.out_w*l.out_h, l.scale_updates_gpu);
+
+ scale_bias_gpu(l.delta_gpu, l.scales_gpu, l.batch, l.out_c, l.out_h*l.out_w);
+
+ fast_mean_delta_gpu(l.delta_gpu, l.variance_gpu, l.batch, l.out_c, l.out_w*l.out_h, l.mean_delta_gpu);
+ fast_variance_delta_gpu(l.x_gpu, l.delta_gpu, l.mean_gpu, l.variance_gpu, l.batch, l.out_c, l.out_w*l.out_h, l.variance_delta_gpu);
+ normalize_delta_gpu(l.x_gpu, l.mean_gpu, l.variance_gpu, l.mean_delta_gpu, l.variance_delta_gpu, l.batch, l.out_c, l.out_w*l.out_h, l.delta_gpu);
+#endif
+ if(l.type == BATCHNORM) copy_gpu(l.outputs*l.batch, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.h
new file mode 100644
index 00000000000..25a18a3c8f2
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/batchnorm_layer.h
@@ -0,0 +1,19 @@
+#ifndef BATCHNORM_LAYER_H
+#define BATCHNORM_LAYER_H
+
+#include "image.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_batchnorm_layer(int batch, int w, int h, int c);
+void forward_batchnorm_layer(layer l, network net);
+void backward_batchnorm_layer(layer l, network net);
+
+#ifdef GPU
+void forward_batchnorm_layer_gpu(layer l, network net);
+void backward_batchnorm_layer_gpu(layer l, network net);
+void pull_batchnorm_layer(layer l);
+void push_batchnorm_layer(layer l);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.c
new file mode 100644
index 00000000000..9e1604449ba
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.c
@@ -0,0 +1,351 @@
+#include "blas.h"
+
+#include <math.h>
+#include <assert.h>
+#include <float.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
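+// reorg_cpu: space-to-depth style rearrangement between a (c, h, w) layout and a (c/stride^2, h*stride, w*stride) layout; 'forward' selects the direction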
+void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
+{
+ int b,i,j,k;
+ int out_c = c/(stride*stride);
+
+ for(b = 0; b < batch; ++b){
+ for(k = 0; k < c; ++k){
+ for(j = 0; j < h; ++j){
+ for(i = 0; i < w; ++i){
+ int in_index = i + w*(j + h*(k + c*b));
+ int c2 = k % out_c;
+ int offset = k / out_c;
+ int w2 = i*stride + offset % stride;
+ int h2 = j*stride + offset / stride;
+ int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
+ if(forward) out[out_index] = x[in_index];
+ else out[in_index] = x[out_index];
+ }
+ }
+ }
+ }
+}
+
+void flatten(float *x, int size, int layers, int batch, int forward)
+{
+ float *swap = calloc(size*layers*batch, sizeof(float));
+ int i,c,b;
+ for(b = 0; b < batch; ++b){
+ for(c = 0; c < layers; ++c){
+ for(i = 0; i < size; ++i){
+ int i1 = b*layers*size + c*size + i;
+ int i2 = b*layers*size + i*layers + c;
+ if (forward) swap[i2] = x[i1];
+ else swap[i1] = x[i2];
+ }
+ }
+ }
+ memcpy(x, swap, size*layers*batch*sizeof(float));
+ free(swap);
+}
+
+void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
+ }
+}
+
+void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ if(da) da[i] += dc[i] * s[i];
+ if(db) db[i] += dc[i] * (1-s[i]);
+ ds[i] += dc[i] * (a[i] - b[i]);
+ }
+}
+
+void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
+{
+ int stride = w1/w2;
+ int sample = w2/w1;
+ assert(stride == h1/h2);
+ assert(sample == h2/h1);
+ if(stride < 1) stride = 1;
+ if(sample < 1) sample = 1;
+ int minw = (w1 < w2) ? w1 : w2;
+ int minh = (h1 < h2) ? h1 : h2;
+ int minc = (c1 < c2) ? c1 : c2;
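+ // stride > 1 when the incoming feature map is larger than the output, sample > 1 when it is smaller; only the overlapping (minw, minh, minc) region is blended with weights s1 and s2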
+
+ int i,j,k,b;
+ for(b = 0; b < batch; ++b){
+ for(k = 0; k < minc; ++k){
+ for(j = 0; j < minh; ++j){
+ for(i = 0; i < minw; ++i){
+ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
+ int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
+ out[out_index] = s1*out[out_index] + s2*add[add_index];
+ }
+ }
+ }
+ }
+}
+
+void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
+{
+ float scale = 1./(batch * spatial);
+ int i,j,k;
+ for(i = 0; i < filters; ++i){
+ mean[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ mean[i] += x[index];
+ }
+ }
+ mean[i] *= scale;
+ }
+}
+
+void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ float scale = 1./(batch * spatial - 1);
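+ // unbiased sample variance: divides by N - 1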
+ int i,j,k;
+ for(i = 0; i < filters; ++i){
+ variance[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ variance[i] += pow((x[index] - mean[i]), 2);
+ }
+ }
+ variance[i] *= scale;
+ }
+}
+
+void l2normalize_cpu(float *x, float *dx, int batch, int filters, int spatial)
+{
+ int b,f,i;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < spatial; ++i){
+ float sum = 0;
+ for(f = 0; f < filters; ++f){
+ int index = b*filters*spatial + f*spatial + i;
+ sum += powf(x[index], 2);
+ }
+ sum = sqrtf(sum);
+ for(f = 0; f < filters; ++f){
+ int index = b*filters*spatial + f*spatial + i;
+ x[index] /= sum;
+ dx[index] = (1 - x[index]) / sum;
+ }
+ }
+ }
+}
+
+
+void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
+{
+ int b, f, i;
+ for(b = 0; b < batch; ++b){
+ for(f = 0; f < filters; ++f){
+ for(i = 0; i < spatial; ++i){
+ int index = b*filters*spatial + f*spatial + i;
+ x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
+ }
+ }
+ }
+}
+
+void const_cpu(int N, float ALPHA, float *X, int INCX)
+{
+ int i;
+ for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
+}
+
+void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
+{
+ int i;
+ for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
+}
+
+void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
+{
+ int i;
+ for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
+}
+
+void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
+{
+ int i;
+ for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
+}
+
+void scal_cpu(int N, float ALPHA, float *X, int INCX)
+{
+ int i;
+ for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
+}
+
+void fill_cpu(int N, float ALPHA, float *X, int INCX)
+{
+ int i;
+ for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
+}
+
+void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ int i, j;
+ int index = 0;
+ for(j = 0; j < B; ++j) {
+ for(i = 0; i < NX; ++i){
+ if(X) X[j*NX + i] += OUT[index];
+ ++index;
+ }
+ for(i = 0; i < NY; ++i){
+ if(Y) Y[j*NY + i] += OUT[index];
+ ++index;
+ }
+ }
+}
+
+void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ int i, j;
+ int index = 0;
+ for(j = 0; j < B; ++j) {
+ for(i = 0; i < NX; ++i){
+ OUT[index++] = X[j*NX + i];
+ }
+ for(i = 0; i < NY; ++i){
+ OUT[index++] = Y[j*NY + i];
+ }
+ }
+}
+
+void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
+{
+ int i;
+ for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
+}
+
+void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
+{
+ int i;
+ for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
+}
+
+void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float diff = truth[i] - pred[i];
+ float abs_val = fabs(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff > 0) ? 1 : -1; // sign of (truth - pred), consistent with the GPU smooth_l1_kernel
+ }
+ }
+}
+
+void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float diff = truth[i] - pred[i];
+ error[i] = fabs(diff);
+ delta[i] = diff > 0 ? 1 : -1;
+ }
+}
+
+void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float t = truth[i];
+ float p = pred[i];
+ error[i] = (t) ? -log(p) : 0;
+ delta[i] = t-p;
+ }
+}
+
+void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float t = truth[i];
+ float p = pred[i];
+ error[i] = -t*log(p) - (1-t)*log(1-p);
+ delta[i] = t-p;
+ }
+}
+
+void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ float diff = truth[i] - pred[i];
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+}
+
+float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
+{
+ int i;
+ float dot = 0;
+ for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
+ return dot;
+}
+
+void softmax(float *input, int n, float temp, int stride, float *output)
+{
+ int i;
+ float sum = 0;
+ float largest = -FLT_MAX;
+ for(i = 0; i < n; ++i){
+ if(input[i*stride] > largest) largest = input[i*stride];
+ }
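+ // subtract the maximum before exponentiating to avoid overflow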
+ for(i = 0; i < n; ++i){
+ float e = exp(input[i*stride]/temp - largest/temp);
+ sum += e;
+ output[i*stride] = e;
+ }
+ for(i = 0; i < n; ++i){
+ output[i*stride] /= sum;
+ }
+}
+
+
+void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
+{
+ int g, b;
+ for(b = 0; b < batch; ++b){
+ for(g = 0; g < groups; ++g){
+ softmax(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
+ }
+ }
+}
+
+void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
+{
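+ // nearest-neighbour upsampling by the integer factor 'stride'; when forward == 0 the gradient is accumulated back into 'in'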
+ int i, j, k, b;
+ for(b = 0; b < batch; ++b){
+ for(k = 0; k < c; ++k){
+ for(j = 0; j < h*stride; ++j){
+ for(i = 0; i < w*stride; ++i){
+ int in_index = b*w*h*c + k*w*h + (j/stride)*w + i/stride;
+ int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
+ if(forward) out[out_index] = scale*in[in_index];
+ else in[in_index] += scale*out[out_index];
+ }
+ }
+ }
+ }
+}
+
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.h
new file mode 100644
index 00000000000..707291dead0
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas.h
@@ -0,0 +1,105 @@
+#ifndef BLAS_H
+#define BLAS_H
+#include "darknet.h"
+
+void flatten(float *x, int size, int layers, int batch, int forward);
+void pm(int M, int N, float *A);
+float *random_matrix(int rows, int cols);
+void time_random_matrix(int TA, int TB, int m, int k, int n);
+void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out);
+
+void test_blas();
+
+void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
+void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
+void mult_add_into_cpu(int N, float *X, float *Y, float *Z);
+
+void const_cpu(int N, float ALPHA, float *X, int INCX);
+void constrain_gpu(int N, float ALPHA, float * X, int INCX);
+void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
+void mul_cpu(int N, float *X, int INCX, float *Y, int INCY);
+
+int test_gpu_blas();
+void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out);
+
+void mean_cpu(float *x, int batch, int filters, int spatial, float *mean);
+void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
+
+void scale_bias(float *output, float *scales, int batch, int n, int size);
+void backward_scale_cpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates);
+void mean_delta_cpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta);
+void variance_delta_cpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta);
+void normalize_delta_cpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta);
+void l2normalize_cpu(float *x, float *dx, int batch, int filters, int spatial);
+
+void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void l2_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void l1_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error);
+void weighted_sum_cpu(float *a, float *b, float *s, int num, float *c);
+void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc);
+
+void softmax(float *input, int n, float temp, int stride, float *output);
+void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output);
+void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out);
+
+#ifdef GPU
+#include "cuda.h"
+#include "tree.h"
+
+void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
+void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY);
+void copy_gpu(int N, float * X, int INCX, float * Y, int INCY);
+void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY);
+void add_gpu(int N, float ALPHA, float * X, int INCX);
+void supp_gpu(int N, float ALPHA, float * X, int INCX);
+void mask_gpu(int N, float * X, float mask_num, float * mask, float val);
+void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale);
+void const_gpu(int N, float ALPHA, float *X, int INCX);
+void pow_gpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
+void mul_gpu(int N, float *X, int INCX, float *Y, int INCY);
+
+void mean_gpu(float *x, int batch, int filters, int spatial, float *mean);
+void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
+void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
+void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial);
+
+void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta);
+
+void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta);
+void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta);
+
+void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance);
+void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean);
+void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out);
+void scale_bias_gpu(float *output, float *biases, int batch, int n, int size);
+void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates);
+void scale_bias_gpu(float *output, float *biases, int batch, int n, int size);
+void add_bias_gpu(float *output, float *biases, int batch, int n, int size);
+void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size);
+
+void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void l2_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void l1_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error);
+void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc);
+void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c);
+void mult_add_into_gpu(int num, float *a, float *b, float *c);
+void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
+void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT);
+
+void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out);
+
+void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output);
+void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t);
+void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t);
+
+void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out);
+void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier);
+void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out);
+
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas_kernels.cu
new file mode 100644
index 00000000000..47e82179170
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/blas_kernels.cu
@@ -0,0 +1,1035 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+#include <assert.h>
+
+extern "C" {
+#include "blas.h"
+#include "cuda.h"
+#include "utils.h"
+}
+
+__global__ void scale_bias_kernel(float *output, float *biases, int n, int size)
+{
+ int offset = blockIdx.x * blockDim.x + threadIdx.x;
+ int filter = blockIdx.y;
+ int batch = blockIdx.z;
+
+ if(offset < size) output[(batch*n+filter)*size + offset] *= biases[filter];
+}
+
+void scale_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ dim3 dimGrid((size-1)/BLOCK + 1, n, batch);
+ dim3 dimBlock(BLOCK, 1, 1);
+
+ scale_bias_kernel<<<dimGrid, dimBlock>>>(output, biases, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void backward_scale_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ __shared__ float part[BLOCK];
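+ // one block per filter: each thread accumulates a strided partial sum, then thread 0 reduces the shared buffer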
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index]*x_norm[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) scale_updates[filter] += part[i];
+ }
+}
+
+void backward_scale_gpu(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates)
+{
+ backward_scale_kernel<<<n, BLOCK>>>(x_norm, delta, batch, n, size, scale_updates);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void add_bias_kernel(float *output, float *biases, int batch, int n, int size)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= n*size*batch) return;
+ int i = index % size;
+ index /= size;
+ int j = index % n;
+ index /= n;
+ int k = index;
+
+ output[(k*n+j)*size + i] += biases[j];
+}
+
+void add_bias_gpu(float *output, float *biases, int batch, int n, int size)
+{
+ int num = n*size*batch;
+
+ add_bias_kernel<<<cuda_gridsize(num), BLOCK>>>(output, biases, batch, n, size);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void backward_bias_conn_kernel(float *bias_updates, float *delta, int batch, int n)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= n) return;
+ int b;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ int i = b*n + index;
+ sum += delta[i];
+ }
+ bias_updates[index] += sum;
+}
+
+__global__ void backward_bias_kernel(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ __shared__ float part[BLOCK];
+ int i,b;
+ int filter = blockIdx.x;
+ int p = threadIdx.x;
+ float sum = 0;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; i += BLOCK){
+ int index = p + i + size*(filter + n*b);
+ sum += (p+i < size) ? delta[index] : 0;
+ }
+ }
+ part[p] = sum;
+ __syncthreads();
+ if (p == 0) {
+ for(i = 0; i < BLOCK; ++i) bias_updates[filter] += part[i];
+ }
+}
+
+void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ if(size == 1){
+ backward_bias_conn_kernel<<<cuda_gridsize(n), BLOCK>>>(bias_updates, delta, batch, n);
+ }else{
+ backward_bias_kernel<<<n, BLOCK>>>(bias_updates, delta, batch, n, size);
+ }
+ check_error(cudaPeekAtLastError());
+}
+
+/*
+__global__ void dot_kernel(float *output, float scale, int batch, int n, int size, float *delta)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ int f1 = index / n;
+ int f2 = index % n;
+ if (f2 <= f1) return;
+
+ float sum = 0;
+ float norm1 = 0;
+ float norm2 = 0;
+ int b, i;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ sum += output[i1] * output[i2];
+ norm1 += output[i1] * output[i1];
+ norm2 += output[i2] * output[i2];
+ }
+ }
+ norm1 = sqrt(norm1);
+ norm2 = sqrt(norm2);
+ float norm = norm1 * norm2;
+ sum = sum / norm;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < size; ++i){
+ int i1 = b * size * n + f1 * size + i;
+ int i2 = b * size * n + f2 * size + i;
+ delta[i1] += - scale * sum * output[i2] / norm;
+ delta[i2] += - scale * sum * output[i1] / norm;
+ }
+ }
+}
+
+void dot_error_gpu(layer l)
+{
+ dot_kernel<<<cuda_gridsize(l.n*l.n), BLOCK>>>(l.output_gpu, l.dot, l.batch, l.n, l.out_w * l.out_h, l.delta_gpu);
+ check_error(cudaPeekAtLastError());
+}
+*/
+
+
+__global__ void adam_kernel(int N, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= N) return;
+
+ float mhat = m[index] / (1.f - powf(B1, t));
+ float vhat = v[index] / (1.f - powf(B2, t));
+
+ x[index] = x[index] + rate * mhat / (sqrtf(vhat) + eps);
+}
+
+extern "C" void adam_gpu(int n, float *x, float *m, float *v, float B1, float B2, float rate, float eps, int t)
+{
+ adam_kernel<<<cuda_gridsize(n), BLOCK>>>(n, x, m, v, B1, B2, rate, eps, t);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t)
+{
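+ // Adam step with L2 weight decay folded into the gradient d before the moment updates; d is cleared afterwards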
+ scal_gpu(n, B1, m, 1);
+ scal_gpu(n, B2, v, 1);
+ axpy_gpu(n, -decay*batch, w, 1, d, 1);
+
+ axpy_gpu(n, (1-B1), d, 1, m, 1);
+ mul_gpu(n, d, 1, d, 1);
+ axpy_gpu(n, (1-B2), d, 1, v, 1);
+
+ adam_gpu(n, w, m, v, B1, B2, rate, eps, t);
+ fill_gpu(n, 0, d, 1);
+}
+
+__global__ void normalize_kernel(int N, float *x, float *mean, float *variance, int batch, int filters, int spatial)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= N) return;
+ int f = (index/spatial)%filters;
+
+ x[index] = (x[index] - mean[f])/(sqrtf(variance[f] + .00001f));
+}
+
+__global__ void normalize_delta_kernel(int N, float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= N) return;
+ int f = (index/spatial)%filters;
+
+ delta[index] = delta[index] * 1.f/(sqrtf(variance[f] + .00001f)) + variance_delta[f] * 2.f * (x[index] - mean[f]) / (spatial * batch) + mean_delta[f]/(spatial*batch);
+}
+
+extern "C" void normalize_delta_gpu(float *x, float *mean, float *variance, float *mean_delta, float *variance_delta, int batch, int filters, int spatial, float *delta)
+{
+ size_t N = batch*filters*spatial;
+ normalize_delta_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, mean_delta, variance_delta, batch, filters, spatial, delta);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= filters) return;
+ int j,k;
+ variance_delta[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ variance_delta[i] += delta[index]*(x[index] - mean[i]);
+ }
+ }
+ variance_delta[i] *= -.5f * powf(variance[i] + .00001f, (float)(-3.f/2.f));
+}
+
+__global__ void accumulate_kernel(float *x, int n, int groups, float *sum)
+{
+ int k;
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= groups) return;
+ sum[i] = 0;
+ for(k = 0; k < n; ++k){
+ sum[i] += x[k*groups + i];
+ }
+}
+
+__global__ void fast_mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? delta[index] : 0;
+ }
+ }
+
+ __syncthreads();
+
+ if(id == 0){
+ mean_delta[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ mean_delta[filter] += local[i];
+ }
+ mean_delta[filter] *= (-1.f/sqrtf(variance[filter] + .00001f));
+ }
+}
+
+__global__ void fast_variance_delta_kernel(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+
+ local[id] += (i+id < spatial) ? delta[index]*(x[index] - mean[filter]) : 0;
+ }
+ }
+
+ __syncthreads();
+
+ if(id == 0){
+ variance_delta[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ variance_delta[filter] += local[i];
+ }
+ variance_delta[filter] *= -.5f * powf(variance[filter] + .00001f, (float)(-3.f/2.f));
+ }
+}
+
+
+__global__ void mean_delta_kernel(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= filters) return;
+ int j,k;
+ mean_delta[i] = 0;
+ for (j = 0; j < batch; ++j) {
+ for (k = 0; k < spatial; ++k) {
+ int index = j*filters*spatial + i*spatial + k;
+ mean_delta[i] += delta[index];
+ }
+ }
+ mean_delta[i] *= (-1.f/sqrtf(variance[i] + .00001f));
+}
+
+extern "C" void mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+ mean_delta_kernel<<<cuda_gridsize(filters), BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fast_mean_delta_gpu(float *delta, float *variance, int batch, int filters, int spatial, float *mean_delta)
+{
+ fast_mean_delta_kernel<<<filters, BLOCK>>>(delta, variance, batch, filters, spatial, mean_delta);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fast_variance_delta_gpu(float *x, float *delta, float *mean, float *variance, int batch, int filters, int spatial, float *variance_delta)
+{
+ fast_variance_delta_kernel<<<filters, BLOCK>>>(x, delta, mean, variance, batch, filters, spatial, variance_delta);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
+{
+ float scale = 1.f/(batch * spatial);
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= filters) return;
+ int j,k;
+ mean[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ mean[i] += x[index];
+ }
+ }
+ mean[i] *= scale;
+}
+
+__global__ void variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ float scale = 1.f/(batch * spatial - 1);
+ int j,k;
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= filters) return;
+ variance[i] = 0;
+ for(j = 0; j < batch; ++j){
+ for(k = 0; k < spatial; ++k){
+ int index = j*filters*spatial + i*spatial + k;
+ variance[i] += powf((x[index] - mean[i]), 2);
+ }
+ }
+ variance[i] *= scale;
+}
+
+__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i >= N) return;
+ int in_index = i;
+ int in_w = i%w;
+ i = i/w;
+ int in_h = i%h;
+ i = i/h;
+ int in_c = i%c;
+ i = i/c;
+ int b = i%batch;
+
+ int out_c = c/(stride*stride);
+
+ int c2 = in_c % out_c;
+ int offset = in_c / out_c;
+ int w2 = in_w*stride + offset % stride;
+ int h2 = in_h*stride + offset / stride;
+ //printf("%d\n", offset);
+ int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
+
+ // printf("%d %d %d\n", w2, h2, c2);
+ //printf("%d %d\n", in_index, out_index);
+ //if(out_index >= N || out_index < 0) printf("bad bad bad \n");
+
+ if(forward) out[out_index] = x[in_index];
+ else out[in_index] = x[out_index];
+ //if(forward) out[1] = x[1];
+ //else out[0] = x[0];
+}
+
+__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
+}
+
+__global__ void pow_kernel(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) Y[i*INCY] = pow(X[i*INCX], ALPHA);
+}
+
+__global__ void const_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] = ALPHA;
+}
+
+__global__ void constrain_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] = fminf(ALPHA, fmaxf(-ALPHA, X[i*INCX]));
+}
+
+__global__ void supp_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) {
+ if((X[i*INCX] * X[i*INCX]) < (ALPHA * ALPHA)) X[i*INCX] = 0;
+ }
+}
+
+__global__ void add_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] += ALPHA;
+}
+
+__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] *= ALPHA;
+}
+
+__global__ void fill_kernel(int N, float ALPHA, float *X, int INCX)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) X[i*INCX] = ALPHA;
+}
+
+__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
+}
+
+__global__ void mul_kernel(int N, float *X, int INCX, float *Y, int INCY)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < N) Y[i*INCY] *= X[i*INCX];
+}
+
+
+extern "C" void normalize_gpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
+{
+ size_t N = batch*filters*spatial;
+ normalize_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, mean, variance, batch, filters, spatial);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void l2norm_kernel(int N, float *x, float *dx, int batch, int filters, int spatial)
+{
+ int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (index >= N) return;
+ int b = index / spatial;
+ int i = index % spatial;
+ int f;
+ float sum = 0;
+ for(f = 0; f < filters; ++f){
+ int index = b*filters*spatial + f*spatial + i;
+ sum += powf(x[index], 2);
+ }
+ sum = sqrtf(sum);
+ if(sum == 0) sum = 1;
+ //printf("%f\n", sum);
+ for(f = 0; f < filters; ++f){
+ int index = b*filters*spatial + f*spatial + i;
+ x[index] /= sum;
+ dx[index] = (1 - x[index]) / sum;
+ }
+}
+
+extern "C" void l2normalize_gpu(float *x, float *dx, int batch, int filters, int spatial)
+{
+ size_t N = batch*spatial;
+ l2norm_kernel<<<cuda_gridsize(N), BLOCK>>>(N, x, dx, batch, filters, spatial);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void fast_mean_kernel(float *x, int batch, int filters, int spatial, float *mean)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
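+ // block-per-filter reduction: threads stride across the spatial extent and thread 0 sums the partial results into mean[filter]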
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+ local[id] += (i+id < spatial) ? x[index] : 0;
+ }
+ }
+
+ __syncthreads();
+
+ if(id == 0){
+ mean[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ mean[filter] += local[i];
+ }
+ mean[filter] /= spatial * batch;
+ }
+}
+
+__global__ void fast_variance_kernel(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ const int threads = BLOCK;
+ __shared__ float local[threads];
+
+ int id = threadIdx.x;
+ local[id] = 0;
+
+ int filter = blockIdx.x;
+
+ int i, j;
+ for(j = 0; j < batch; ++j){
+ for(i = 0; i < spatial; i += threads){
+ int index = j*spatial*filters + filter*spatial + i + id;
+
+ local[id] += (i+id < spatial) ? powf((x[index] - mean[filter]), 2) : 0;
+ }
+ }
+
+ __syncthreads();
+
+ if(id == 0){
+ variance[filter] = 0;
+ for(i = 0; i < threads; ++i){
+ variance[filter] += local[i];
+ }
+ variance[filter] /= (spatial * batch - 1);
+ }
+}
+
+extern "C" void fast_mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
+{
+ fast_mean_kernel<<<filters, BLOCK>>>(x, batch, filters, spatial, mean);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fast_variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ fast_variance_kernel<<<filters, BLOCK>>>(x, mean, batch, filters, spatial, variance);
+ check_error(cudaPeekAtLastError());
+}
+
+
+extern "C" void mean_gpu(float *x, int batch, int filters, int spatial, float *mean)
+{
+ mean_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, batch, filters, spatial, mean);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void variance_gpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
+{
+ variance_kernel<<<cuda_gridsize(filters), BLOCK>>>(x, mean, batch, filters, spatial, variance);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
+{
+ axpy_gpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
+}
+
+extern "C" void pow_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
+{
+ pow_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX, Y, INCY);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void axpy_gpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
+{
+ axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void copy_gpu(int N, float * X, int INCX, float * Y, int INCY)
+{
+ copy_gpu_offset(N, X, 0, INCX, Y, 0, INCY);
+}
+
+extern "C" void mul_gpu(int N, float * X, int INCX, float * Y, int INCY)
+{
+ mul_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, INCX, Y, INCY);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void copy_gpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
+{
+ copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void flatten_kernel(int N, float *x, int spatial, int layers, int batch, int forward, float *out)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i >= N) return;
+ int in_s = i%spatial;
+ i = i/spatial;
+ int in_c = i%layers;
+ i = i/layers;
+ int b = i;
+
+ int i1 = b*layers*spatial + in_c*spatial + in_s;
+ int i2 = b*layers*spatial + in_s*layers + in_c;
+
+ if (forward) out[i2] = x[i1];
+ else out[i1] = x[i2];
+}
+
+extern "C" void flatten_gpu(float *x, int spatial, int layers, int batch, int forward, float *out)
+{
+ int size = spatial*batch*layers;
+ flatten_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, spatial, layers, batch, forward, out);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void reorg_gpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
+{
+ int size = w*h*c*batch;
+ reorg_kernel<<<cuda_gridsize(size), BLOCK>>>(size, x, w, h, c, batch, stride, forward, out);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void mask_kernel(int n, float *x, float mask_num, float *mask, float val)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n && mask[i] == mask_num) x[i] = val;
+}
+
+extern "C" void mask_gpu(int N, float * X, float mask_num, float * mask, float val)
+{
+ mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, val);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void scale_mask_kernel(int n, float *x, float mask_num, float *mask, float scale)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n && mask[i] == mask_num) x[i] *= scale;
+}
+
+extern "C" void scale_mask_gpu(int N, float * X, float mask_num, float * mask, float scale)
+{
+ scale_mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask_num, mask, scale);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void const_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ const_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void constrain_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ constrain_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+
+extern "C" void add_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ add_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void scal_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void supp_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ supp_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void fill_gpu(int N, float ALPHA, float * X, int INCX)
+{
+ fill_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void shortcut_kernel(int size, int minw, int minh, int minc, int stride, int sample, int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (id >= size) return;
+ int i = id % minw;
+ id /= minw;
+ int j = id % minh;
+ id /= minh;
+ int k = id % minc;
+ id /= minc;
+ int b = id % batch;
+
+ int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
+ int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
+ out[out_index] = s1*out[out_index] + s2*add[add_index];
+ //out[out_index] += add[add_index];
+}
+
+extern "C" void shortcut_gpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
+{
+ int minw = (w1 < w2) ? w1 : w2;
+ int minh = (h1 < h2) ? h1 : h2;
+ int minc = (c1 < c2) ? c1 : c2;
+
+ int stride = w1/w2;
+ int sample = w2/w1;
+ assert(stride == h1/h2);
+ assert(sample == h2/h1);
+ if(stride < 1) stride = 1;
+ if(sample < 1) sample = 1;
+
+ int size = batch * minw * minh * minc;
+ shortcut_kernel<<<cuda_gridsize(size), BLOCK>>>(size, minw, minh, minc, stride, sample, batch, w1, h1, c1, add, w2, h2, c2, s1, s2, out);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void smooth_l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ float abs_val = fabsf(diff);
+ if(abs_val < 1) {
+ error[i] = diff * diff;
+ delta[i] = diff;
+ }
+ else {
+ error[i] = 2*abs_val - 1;
+ delta[i] = (diff > 0) ? 1 : -1;
+ }
+ }
+}
+
+extern "C" void smooth_l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ smooth_l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void softmax_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float t = truth[i];
+ float p = pred[i];
+ error[i] = (t) ? -log(p) : 0;
+ delta[i] = t-p;
+ }
+}
+
+extern "C" void softmax_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ softmax_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void logistic_x_ent_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float t = truth[i];
+ float p = pred[i];
+ error[i] = -t*log(p+.0000001) - (1-t)*log(1-p+.0000001);
+ delta[i] = t-p;
+ }
+}
+
+extern "C" void logistic_x_ent_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ logistic_x_ent_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void l2_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ error[i] = diff * diff; // squared error; delta below stays as diff rather than the full derivative 2*diff
+ delta[i] = diff;
+ }
+}
+
+extern "C" void l2_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ l2_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void l1_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ float diff = truth[i] - pred[i];
+ error[i] = fabsf(diff); // use the float overload; abs() would truncate to int
+ delta[i] = (diff > 0) ? 1 : -1;
+ }
+}
+
+extern "C" void l1_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ l1_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void wgan_kernel(int n, float *pred, float *truth, float *delta, float *error)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ error[i] = truth[i] ? -pred[i] : pred[i];
+ delta[i] = (truth[i] > 0) ? 1 : -1;
+ }
+}
+
+extern "C" void wgan_gpu(int n, float *pred, float *truth, float *delta, float *error)
+{
+ wgan_kernel<<<cuda_gridsize(n), BLOCK>>>(n, pred, truth, delta, error);
+ check_error(cudaPeekAtLastError());
+}
+
+
+
+
+__global__ void weighted_sum_kernel(int n, float *a, float *b, float *s, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
+ }
+}
+
+__global__ void deinter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < (NX+NY)*B){
+ int b = i / (NX+NY);
+ int j = i % (NX+NY);
+ if (j < NX){
+ if(X) X[b*NX + j] += OUT[i];
+ } else {
+ if(Y) Y[b*NY + j - NX] += OUT[i];
+ }
+ }
+}
+
+extern "C" void deinter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ deinter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void inter_kernel(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < (NX+NY)*B){
+ int b = i / (NX+NY);
+ int j = i % (NX+NY);
+ if (j < NX){
+ OUT[i] = X[b*NX + j];
+ } else {
+ OUT[i] = Y[b*NY + j - NX];
+ }
+ }
+}
+
+extern "C" void inter_gpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
+{
+ inter_kernel<<<cuda_gridsize((NX+NY)*B), BLOCK>>>(NX, X, NY, Y, B, OUT);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void weighted_sum_gpu(float *a, float *b, float *s, int num, float *c)
+{
+ weighted_sum_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, c);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void weighted_delta_kernel(int n, float *a, float *b, float *s, float *da, float *db, float *ds, float *dc)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ if(da) da[i] += dc[i] * s[i];
+ if(db) db[i] += dc[i] * (1-s[i]);
+ ds[i] += dc[i] * (a[i] - b[i]);
+ }
+}
+
+extern "C" void weighted_delta_gpu(float *a, float *b, float *s, float *da, float *db, float *ds, int num, float *dc)
+{
+ weighted_delta_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, s, da, db, ds, dc);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void mult_add_into_kernel(int n, float *a, float *b, float *c)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i < n){
+ c[i] += a[i]*b[i];
+ }
+}
+
+extern "C" void mult_add_into_gpu(int num, float *a, float *b, float *c)
+{
+ mult_add_into_kernel<<<cuda_gridsize(num), BLOCK>>>(num, a, b, c);
+ check_error(cudaPeekAtLastError());
+}
+
+
+__device__ void softmax_device(float *input, int n, float temp, int stride, float *output)
+{
+ int i;
+ float sum = 0;
+ float largest = -INFINITY;
+ for(i = 0; i < n; ++i){
+ float val = input[i*stride]; // keep as float; an int here would truncate before the max comparison
+ largest = (val>largest) ? val : largest;
+ }
+ for(i = 0; i < n; ++i){
+ float e = expf(input[i*stride]/temp - largest/temp);
+ sum += e;
+ output[i*stride] = e;
+ }
+ for(i = 0; i < n; ++i){
+ output[i*stride] /= sum;
+ }
+}
+
+
+__global__ void softmax_tree_kernel(float *input, int spatial, int batch, int stride, float temp, float *output, int groups, int *group_size, int *group_offset)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (id >= spatial*batch*groups) return;
+ int s = id % spatial;
+ id = id / spatial;
+ int g = id % groups;
+ int b = id / groups;
+ int goff = group_offset[g]*spatial;
+ int boff = b*stride;
+ softmax_device(input + goff + boff + s, group_size[g], temp, spatial, output + goff + boff + s);
+}
+
+extern "C" void softmax_tree(float *input, int spatial, int batch, int stride, float temp, float *output, tree hier)
+{
+ int *tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
+ int *tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
+ /*
+ static int *tree_groups_size = 0;
+ static int *tree_groups_offset = 0;
+ if(!tree_groups_size){
+ tree_groups_size = cuda_make_int_array(hier.group_size, hier.groups);
+ tree_groups_offset = cuda_make_int_array(hier.group_offset, hier.groups);
+ }
+ */
+ int num = spatial*batch*hier.groups;
+ softmax_tree_kernel<<<cuda_gridsize(num), BLOCK>>>(input, spatial, batch, stride, temp, output, hier.groups, tree_groups_size, tree_groups_offset);
+ check_error(cudaPeekAtLastError());
+ cuda_free((float *)tree_groups_size);
+ cuda_free((float *)tree_groups_offset);
+}
+
+__global__ void softmax_kernel(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (id >= batch*groups) return;
+ int b = id / groups;
+ int g = id % groups;
+ softmax_device(input + b*batch_offset + g*group_offset, n, temp, stride, output + b*batch_offset + g*group_offset);
+}
+
+extern "C" void softmax_gpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
+{
+ softmax_kernel<<<cuda_gridsize(batch*groups), BLOCK>>>(input, n, batch, batch_offset, groups, group_offset, stride, temp, output);
+ check_error(cudaPeekAtLastError());
+}
+
+
+__global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
+{
+ size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(i >= N) return;
+ int out_index = i;
+ int out_w = i%(w*stride);
+ i = i/(w*stride);
+ int out_h = i%(h*stride);
+ i = i/(h*stride);
+ int out_c = i%c;
+ i = i/c;
+ int b = i%batch;
+
+ int in_w = out_w / stride;
+ int in_h = out_h / stride;
+ int in_c = out_c;
+
+ int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w;
+
+
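+ // backward pass uses atomicAdd because several upsampled outputs map to the same input element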
+ if(forward) out[out_index] += scale * x[in_index];
+ else atomicAdd(x+in_index, scale * out[out_index]);
+}
+extern "C" void upsample_gpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
+{
+ size_t size = w*h*c*batch*stride*stride;
+ upsample_kernel<<<cuda_gridsize(size), BLOCK>>>(size, in, w, h, c, batch, stride, forward, scale, out);
+ check_error(cudaPeekAtLastError());
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.c
new file mode 100644
index 00000000000..8a1772c9ae0
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.c
@@ -0,0 +1,357 @@
+#include "box.h"
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+
+int nms_comparator(const void *pa, const void *pb)
+{
+ detection a = *(detection *)pa;
+ detection b = *(detection *)pb;
+ float diff = 0;
+ if(b.sort_class >= 0){
+ diff = a.prob[b.sort_class] - b.prob[b.sort_class];
+ } else {
+ diff = a.objectness - b.objectness;
+ }
+ if(diff < 0) return 1;
+ else if(diff > 0) return -1;
+ return 0;
+}
+
+void do_nms_obj(detection *dets, int total, int classes, float thresh)
+{
+ int i, j, k;
+ k = total-1;
+ for(i = 0; i <= k; ++i){
+ if(dets[i].objectness == 0){
+ detection swap = dets[i];
+ dets[i] = dets[k];
+ dets[k] = swap;
+ --k;
+ --i;
+ }
+ }
+ total = k+1;
+
+ for(i = 0; i < total; ++i){
+ dets[i].sort_class = -1;
+ }
+
+ qsort(dets, total, sizeof(detection), nms_comparator);
+ for(i = 0; i < total; ++i){
+ if(dets[i].objectness == 0) continue;
+ box a = dets[i].bbox;
+ for(j = i+1; j < total; ++j){
+ if(dets[j].objectness == 0) continue;
+ box b = dets[j].bbox;
+ if (box_iou(a, b) > thresh){
+ dets[j].objectness = 0;
+ for(k = 0; k < classes; ++k){
+ dets[j].prob[k] = 0;
+ }
+ }
+ }
+ }
+}
+
+
+void do_nms_sort(detection *dets, int total, int classes, float thresh)
+{
+ int i, j, k;
+ k = total-1;
+ for(i = 0; i <= k; ++i){
+ if(dets[i].objectness == 0){
+ detection swap = dets[i];
+ dets[i] = dets[k];
+ dets[k] = swap;
+ --k;
+ --i;
+ }
+ }
+ total = k+1;
+
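+ // per-class NMS: re-sort by each class score and zero the score of any box whose IoU with a higher-scoring box exceeds thresh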
+ for(k = 0; k < classes; ++k){
+ for(i = 0; i < total; ++i){
+ dets[i].sort_class = k;
+ }
+ qsort(dets, total, sizeof(detection), nms_comparator);
+ for(i = 0; i < total; ++i){
+ if(dets[i].prob[k] == 0) continue;
+ box a = dets[i].bbox;
+ for(j = i+1; j < total; ++j){
+ box b = dets[j].bbox;
+ if (box_iou(a, b) > thresh){
+ dets[j].prob[k] = 0;
+ }
+ }
+ }
+ }
+}
+
+box float_to_box(float *f, int stride)
+{
+ box b = {0};
+ b.x = f[0];
+ b.y = f[1*stride];
+ b.w = f[2*stride];
+ b.h = f[3*stride];
+ return b;
+}
+
+dbox derivative(box a, box b)
+{
+ dbox d;
+ d.dx = 0;
+ d.dw = 0;
+ float l1 = a.x - a.w/2;
+ float l2 = b.x - b.w/2;
+ if (l1 > l2){
+ d.dx -= 1;
+ d.dw += .5;
+ }
+ float r1 = a.x + a.w/2;
+ float r2 = b.x + b.w/2;
+ if(r1 < r2){
+ d.dx += 1;
+ d.dw += .5;
+ }
+ if (l1 > r2) {
+ d.dx = -1;
+ d.dw = 0;
+ }
+ if (r1 < l2){
+ d.dx = 1;
+ d.dw = 0;
+ }
+
+ d.dy = 0;
+ d.dh = 0;
+ float t1 = a.y - a.h/2;
+ float t2 = b.y - b.h/2;
+ if (t1 > t2){
+ d.dy -= 1;
+ d.dh += .5;
+ }
+ float b1 = a.y + a.h/2;
+ float b2 = b.y + b.h/2;
+ if(b1 < b2){
+ d.dy += 1;
+ d.dh += .5;
+ }
+ if (t1 > b2) {
+ d.dy = -1;
+ d.dh = 0;
+ }
+ if (b1 < t2){
+ d.dy = 1;
+ d.dh = 0;
+ }
+ return d;
+}
+
+float overlap(float x1, float w1, float x2, float w2)
+{
+ float l1 = x1 - w1/2;
+ float l2 = x2 - w2/2;
+ float left = l1 > l2 ? l1 : l2;
+ float r1 = x1 + w1/2;
+ float r2 = x2 + w2/2;
+ float right = r1 < r2 ? r1 : r2;
+ return right - left;
+}
+
+float box_intersection(box a, box b)
+{
+ float w = overlap(a.x, a.w, b.x, b.w);
+ float h = overlap(a.y, a.h, b.y, b.h);
+ if(w < 0 || h < 0) return 0;
+ float area = w*h;
+ return area;
+}
+
+float box_union(box a, box b)
+{
+ float i = box_intersection(a, b);
+ float u = a.w*a.h + b.w*b.h - i;
+ return u;
+}
+
+float box_iou(box a, box b)
+{
+ return box_intersection(a, b)/box_union(a, b);
+}
+
+float box_rmse(box a, box b)
+{
+ return sqrt(pow(a.x-b.x, 2) +
+ pow(a.y-b.y, 2) +
+ pow(a.w-b.w, 2) +
+ pow(a.h-b.h, 2));
+}
+
+dbox dintersect(box a, box b)
+{
+ float w = overlap(a.x, a.w, b.x, b.w);
+ float h = overlap(a.y, a.h, b.y, b.h);
+ dbox dover = derivative(a, b);
+ dbox di;
+
+ di.dw = dover.dw*h;
+ di.dx = dover.dx*h;
+ di.dh = dover.dh*w;
+ di.dy = dover.dy*w;
+
+ return di;
+}
+
+dbox dunion(box a, box b)
+{
+ dbox du;
+
+ dbox di = dintersect(a, b);
+ du.dw = a.h - di.dw;
+ du.dh = a.w - di.dh;
+ du.dx = -di.dx;
+ du.dy = -di.dy;
+
+ return du;
+}
+
+
+void test_dunion()
+{
+ box a = {0, 0, 1, 1};
+ box dxa= {0+.0001, 0, 1, 1};
+ box dya= {0, 0+.0001, 1, 1};
+ box dwa= {0, 0, 1+.0001, 1};
+ box dha= {0, 0, 1, 1+.0001};
+
+ box b = {.5, .5, .2, .2};
+ dbox di = dunion(a,b);
+ printf("Union: %f %f %f %f\n", di.dx, di.dy, di.dw, di.dh);
+ float inter = box_union(a, b);
+ float xinter = box_union(dxa, b);
+ float yinter = box_union(dya, b);
+ float winter = box_union(dwa, b);
+ float hinter = box_union(dha, b);
+ xinter = (xinter - inter)/(.0001);
+ yinter = (yinter - inter)/(.0001);
+ winter = (winter - inter)/(.0001);
+ hinter = (hinter - inter)/(.0001);
+ printf("Union Manual %f %f %f %f\n", xinter, yinter, winter, hinter);
+}
+void test_dintersect()
+{
+ box a = {0, 0, 1, 1};
+ box dxa= {0+.0001, 0, 1, 1};
+ box dya= {0, 0+.0001, 1, 1};
+ box dwa= {0, 0, 1+.0001, 1};
+ box dha= {0, 0, 1, 1+.0001};
+
+ box b = {.5, .5, .2, .2};
+ dbox di = dintersect(a,b);
+ printf("Inter: %f %f %f %f\n", di.dx, di.dy, di.dw, di.dh);
+ float inter = box_intersection(a, b);
+ float xinter = box_intersection(dxa, b);
+ float yinter = box_intersection(dya, b);
+ float winter = box_intersection(dwa, b);
+ float hinter = box_intersection(dha, b);
+ xinter = (xinter - inter)/(.0001);
+ yinter = (yinter - inter)/(.0001);
+ winter = (winter - inter)/(.0001);
+ hinter = (hinter - inter)/(.0001);
+ printf("Inter Manual %f %f %f %f\n", xinter, yinter, winter, hinter);
+}
+
+void test_box()
+{
+ test_dintersect();
+ test_dunion();
+ box a = {0, 0, 1, 1};
+ box dxa= {0+.00001, 0, 1, 1};
+ box dya= {0, 0+.00001, 1, 1};
+ box dwa= {0, 0, 1+.00001, 1};
+ box dha= {0, 0, 1, 1+.00001};
+
+ box b = {.5, 0, .2, .2};
+
+ float iou = box_iou(a,b);
+ iou = (1-iou)*(1-iou);
+ printf("%f\n", iou);
+ dbox d = diou(a, b);
+ printf("%f %f %f %f\n", d.dx, d.dy, d.dw, d.dh);
+
+ float xiou = box_iou(dxa, b);
+ float yiou = box_iou(dya, b);
+ float wiou = box_iou(dwa, b);
+ float hiou = box_iou(dha, b);
+ xiou = ((1-xiou)*(1-xiou) - iou)/(.00001);
+ yiou = ((1-yiou)*(1-yiou) - iou)/(.00001);
+ wiou = ((1-wiou)*(1-wiou) - iou)/(.00001);
+ hiou = ((1-hiou)*(1-hiou) - iou)/(.00001);
+ printf("manual %f %f %f %f\n", xiou, yiou, wiou, hiou);
+}
+
+dbox diou(box a, box b)
+{
+ float u = box_union(a,b);
+ float i = box_intersection(a,b);
+ dbox di = dintersect(a,b);
+ dbox du = dunion(a,b);
+ dbox dd = {0,0,0,0};
+
+ if(i <= 0 || 1) {
+ dd.dx = b.x - a.x;
+ dd.dy = b.y - a.y;
+ dd.dw = b.w - a.w;
+ dd.dh = b.h - a.h;
+ return dd;
+ }
+
+ dd.dx = 2*pow((1-(i/u)),1)*(di.dx*u - du.dx*i)/(u*u);
+ dd.dy = 2*pow((1-(i/u)),1)*(di.dy*u - du.dy*i)/(u*u);
+ dd.dw = 2*pow((1-(i/u)),1)*(di.dw*u - du.dw*i)/(u*u);
+ dd.dh = 2*pow((1-(i/u)),1)*(di.dh*u - du.dh*i)/(u*u);
+ return dd;
+}
+
+
+void do_nms(box *boxes, float **probs, int total, int classes, float thresh)
+{
+ int i, j, k;
+ for(i = 0; i < total; ++i){
+ int any = 0;
+ for(k = 0; k < classes; ++k) any = any || (probs[i][k] > 0);
+ if(!any) {
+ continue;
+ }
+ for(j = i+1; j < total; ++j){
+ if (box_iou(boxes[i], boxes[j]) > thresh){
+ for(k = 0; k < classes; ++k){
+ if (probs[i][k] < probs[j][k]) probs[i][k] = 0;
+ else probs[j][k] = 0;
+ }
+ }
+ }
+ }
+}
+
+box encode_box(box b, box anchor)
+{
+ box encode;
+ encode.x = (b.x - anchor.x) / anchor.w;
+ encode.y = (b.y - anchor.y) / anchor.h;
+ encode.w = log2(b.w / anchor.w);
+ encode.h = log2(b.h / anchor.h);
+ return encode;
+}
+
+box decode_box(box b, box anchor)
+{
+ box decode;
+ decode.x = b.x * anchor.w + anchor.x;
+ decode.y = b.y * anchor.h + anchor.y;
+ decode.w = pow(2., b.w) * anchor.w;
+ decode.h = pow(2., b.h) * anchor.h;
+ return decode;
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.h
new file mode 100644
index 00000000000..dda3e59100c
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/box.h
@@ -0,0 +1,14 @@
+#ifndef BOX_H
+#define BOX_H
+#include "darknet.h"
+
+typedef struct{
+ float dx, dy, dw, dh;
+} dbox;
+
+float box_rmse(box a, box b);
+dbox diou(box a, box b);
+box decode_box(box b, box anchor);
+box encode_box(box b, box anchor);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/classifier.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/classifier.h
new file mode 100644
index 00000000000..8b137891791
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/classifier.h
@@ -0,0 +1 @@
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.c
new file mode 100644
index 00000000000..5c4605e1974
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+#include <math.h>
+void col2im_add_pixel(float *im, int height, int width, int channels,
+ int row, int col, int channel, int pad, float val)
+{
+ row -= pad;
+ col -= pad;
+
+ if (row < 0 || col < 0 ||
+ row >= height || col >= width) return;
+ im[col + width*(row + height*channel)] += val;
+}
+// Like im2col, this routine may also be adapted from Caffe's implementation (see the reference in col2im_kernels.cu).
+void col2im_cpu(float* data_col,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float* data_im)
+{
+ int c,h,w;
+ int height_col = (height + 2*pad - ksize) / stride + 1;
+ int width_col = (width + 2*pad - ksize) / stride + 1;
+
+ int channels_col = channels * ksize * ksize;
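+ // scatter every column entry back to its source pixel, accumulating where receptive fields overlap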
+ for (c = 0; c < channels_col; ++c) {
+ int w_offset = c % ksize;
+ int h_offset = (c / ksize) % ksize;
+ int c_im = c / ksize / ksize;
+ for (h = 0; h < height_col; ++h) {
+ for (w = 0; w < width_col; ++w) {
+ int im_row = h_offset + h * stride;
+ int im_col = w_offset + w * stride;
+ int col_index = (c * height_col + h) * width_col + w;
+ double val = data_col[col_index];
+ col2im_add_pixel(data_im, height, width, channels,
+ im_row, im_col, c_im, pad, val);
+ }
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.h
new file mode 100644
index 00000000000..3fbe05307db
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im.h
@@ -0,0 +1,13 @@
+#ifndef COL2IM_H
+#define COL2IM_H
+
+void col2im_cpu(float* data_col,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float* data_im);
+
+#ifdef GPU
+void col2im_gpu(float *data_col,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float *data_im);
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im_kernels.cu
new file mode 100644
index 00000000000..ba45e0fdec9
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/col2im_kernels.cu
@@ -0,0 +1,58 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "col2im.h"
+#include "cuda.h"
+}
+
+// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
+// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
+
+__global__ void col2im_gpu_kernel(const int n, const float* data_col,
+ const int height, const int width, const int ksize,
+ const int pad,
+ const int stride,
+ const int height_col, const int width_col,
+ float *data_im) {
+ int index = blockIdx.x*blockDim.x+threadIdx.x;
+ for(; index < n; index += blockDim.x*gridDim.x){
+ float val = 0;
+ int w = index % width + pad;
+ int h = (index / width) % height + pad;
+ int c = index / (width * height);
+ // compute the start and end of the output
+ int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
+ int w_col_end = min(w / stride + 1, width_col);
+ int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
+ int h_col_end = min(h / stride + 1, height_col);
+ // equivalent implementation
+ int offset =
+ (c * ksize * ksize + h * ksize + w) * height_col * width_col;
+ int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
+ int coeff_w_col = (1 - stride * height_col * width_col);
+ for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
+ for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
+ val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
+ }
+ }
+ data_im[index] += val;
+ }
+}
+
+void col2im_gpu(float *data_col,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float *data_im){
+ // We are going to launch channels * height_col * width_col kernels, each
+ // kernel responsible for copying a single-channel grid.
+ int height_col = (height + 2 * pad - ksize) / stride + 1;
+ int width_col = (width + 2 * pad - ksize) / stride + 1;
+ int num_kernels = channels * height * width;
+ col2im_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
+ BLOCK>>>(
+ num_kernels, data_col, height, width, ksize, pad,
+ stride, height_col,
+ width_col, data_im);
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.c
new file mode 100644
index 00000000000..353f4e5677b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.c
@@ -0,0 +1,336 @@
+#include "connected_layer.h"
+#include "convolutional_layer.h"
+#include "batchnorm_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam)
+{
+ int i;
+ layer l = {0};
+ l.learning_rate_scale = 1;
+ l.type = CONNECTED;
+
+ l.inputs = inputs;
+ l.outputs = outputs;
+ l.batch=batch;
+ l.batch_normalize = batch_normalize;
+ l.h = 1;
+ l.w = 1;
+ l.c = inputs;
+ l.out_h = 1;
+ l.out_w = 1;
+ l.out_c = outputs;
+
+ l.output = calloc(batch*outputs, sizeof(float));
+ l.delta = calloc(batch*outputs, sizeof(float));
+
+ l.weight_updates = calloc(inputs*outputs, sizeof(float));
+ l.bias_updates = calloc(outputs, sizeof(float));
+
+ l.weights = calloc(outputs*inputs, sizeof(float));
+ l.biases = calloc(outputs, sizeof(float));
+
+ l.forward = forward_connected_layer;
+ l.backward = backward_connected_layer;
+ l.update = update_connected_layer;
+
+ //float scale = 1./sqrt(inputs);
+ float scale = sqrt(2./inputs);
+ for(i = 0; i < outputs*inputs; ++i){
+ l.weights[i] = scale*rand_uniform(-1, 1);
+ }
+
+ for(i = 0; i < outputs; ++i){
+ l.biases[i] = 0;
+ }
+
+ if(adam){
+ l.m = calloc(l.inputs*l.outputs, sizeof(float));
+ l.v = calloc(l.inputs*l.outputs, sizeof(float));
+ l.bias_m = calloc(l.outputs, sizeof(float));
+ l.scale_m = calloc(l.outputs, sizeof(float));
+ l.bias_v = calloc(l.outputs, sizeof(float));
+ l.scale_v = calloc(l.outputs, sizeof(float));
+ }
+ if(batch_normalize){
+ l.scales = calloc(outputs, sizeof(float));
+ l.scale_updates = calloc(outputs, sizeof(float));
+ for(i = 0; i < outputs; ++i){
+ l.scales[i] = 1;
+ }
+
+ l.mean = calloc(outputs, sizeof(float));
+ l.mean_delta = calloc(outputs, sizeof(float));
+ l.variance = calloc(outputs, sizeof(float));
+ l.variance_delta = calloc(outputs, sizeof(float));
+
+ l.rolling_mean = calloc(outputs, sizeof(float));
+ l.rolling_variance = calloc(outputs, sizeof(float));
+
+ l.x = calloc(batch*outputs, sizeof(float));
+ l.x_norm = calloc(batch*outputs, sizeof(float));
+ }
+
+#ifdef GPU
+ l.forward_gpu = forward_connected_layer_gpu;
+ l.backward_gpu = backward_connected_layer_gpu;
+ l.update_gpu = update_connected_layer_gpu;
+
+ l.weights_gpu = cuda_make_array(l.weights, outputs*inputs);
+ l.biases_gpu = cuda_make_array(l.biases, outputs);
+
+ l.weight_updates_gpu = cuda_make_array(l.weight_updates, outputs*inputs);
+ l.bias_updates_gpu = cuda_make_array(l.bias_updates, outputs);
+
+ l.output_gpu = cuda_make_array(l.output, outputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, outputs*batch);
+ if (adam) {
+ l.m_gpu = cuda_make_array(0, inputs*outputs);
+ l.v_gpu = cuda_make_array(0, inputs*outputs);
+ l.bias_m_gpu = cuda_make_array(0, outputs);
+ l.bias_v_gpu = cuda_make_array(0, outputs);
+ l.scale_m_gpu = cuda_make_array(0, outputs);
+ l.scale_v_gpu = cuda_make_array(0, outputs);
+ }
+
+ if(batch_normalize){
+ l.mean_gpu = cuda_make_array(l.mean, outputs);
+ l.variance_gpu = cuda_make_array(l.variance, outputs);
+
+ l.rolling_mean_gpu = cuda_make_array(l.mean, outputs);
+ l.rolling_variance_gpu = cuda_make_array(l.variance, outputs);
+
+ l.mean_delta_gpu = cuda_make_array(l.mean, outputs);
+ l.variance_delta_gpu = cuda_make_array(l.variance, outputs);
+
+ l.scales_gpu = cuda_make_array(l.scales, outputs);
+ l.scale_updates_gpu = cuda_make_array(l.scale_updates, outputs);
+
+ l.x_gpu = cuda_make_array(l.output, l.batch*outputs);
+ l.x_norm_gpu = cuda_make_array(l.output, l.batch*outputs);
+#ifdef CUDNN
+ cudnnCreateTensorDescriptor(&l.normTensorDesc);
+ cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+ cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
+ cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
+#endif
+ }
+#endif
+ l.activation = activation;
+ fprintf(stderr, "connected %4d -> %4d\n", inputs, outputs);
+ return l;
+}
+
+void update_connected_layer(layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+ axpy_cpu(l.outputs, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+ scal_cpu(l.outputs, momentum, l.bias_updates, 1);
+
+ if(l.batch_normalize){
+ axpy_cpu(l.outputs, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+ scal_cpu(l.outputs, momentum, l.scale_updates, 1);
+ }
+
+ axpy_cpu(l.inputs*l.outputs, -decay*batch, l.weights, 1, l.weight_updates, 1);
+ axpy_cpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+ scal_cpu(l.inputs*l.outputs, momentum, l.weight_updates, 1);
+}
+
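+// Fully-connected forward pass: output = input * weights^T, then bias or
+// batch-norm, then the layer activation.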
+void forward_connected_layer(layer l, network net)
+{
+ fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+ int m = l.batch;
+ int k = l.inputs;
+ int n = l.outputs;
+ float *a = net.input;
+ float *b = l.weights;
+ float *c = l.output;
+ gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+ if(l.batch_normalize){
+ forward_batchnorm_layer(l, net);
+ } else {
+ add_bias(l.output, l.biases, l.batch, l.outputs, 1);
+ }
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+}
+
+void backward_connected_layer(layer l, network net)
+{
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer(l, net);
+ } else {
+ backward_bias(l.bias_updates, l.delta, l.batch, l.outputs, 1);
+ }
+
+ int m = l.outputs;
+ int k = l.batch;
+ int n = l.inputs;
+ float *a = l.delta;
+ float *b = net.input;
+ float *c = l.weight_updates;
+ gemm(1,0,m,n,k,1,a,m,b,n,1,c,n);
+
+ m = l.batch;
+ k = l.outputs;
+ n = l.inputs;
+
+ a = l.delta;
+ b = l.weights;
+ c = net.delta;
+
+ if(c) gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
+}
+
+
+void denormalize_connected_layer(layer l)
+{
+ int i, j;
+ for(i = 0; i < l.outputs; ++i){
+ float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .000001);
+ for(j = 0; j < l.inputs; ++j){
+ l.weights[i*l.inputs + j] *= scale;
+ }
+ l.biases[i] -= l.rolling_mean[i] * scale;
+ l.scales[i] = 1;
+ l.rolling_mean[i] = 0;
+ l.rolling_variance[i] = 1;
+ }
+}
+
+
+void statistics_connected_layer(layer l)
+{
+ if(l.batch_normalize){
+ printf("Scales ");
+ print_statistics(l.scales, l.outputs);
+ /*
+ printf("Rolling Mean ");
+ print_statistics(l.rolling_mean, l.outputs);
+ printf("Rolling Variance ");
+ print_statistics(l.rolling_variance, l.outputs);
+ */
+ }
+ printf("Biases ");
+ print_statistics(l.biases, l.outputs);
+ printf("Weights ");
+ print_statistics(l.weights, l.outputs);
+}
+
+#ifdef GPU
+
+void pull_connected_layer(layer l)
+{
+ cuda_pull_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
+ cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
+ cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
+ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+ if (l.batch_normalize){
+ cuda_pull_array(l.scales_gpu, l.scales, l.outputs);
+ cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
+ cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
+ }
+}
+
+void push_connected_layer(layer l)
+{
+ cuda_push_array(l.weights_gpu, l.weights, l.inputs*l.outputs);
+ cuda_push_array(l.biases_gpu, l.biases, l.outputs);
+ cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.inputs*l.outputs);
+ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+ if (l.batch_normalize){
+ cuda_push_array(l.scales_gpu, l.scales, l.outputs);
+ cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.outputs);
+ cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.outputs);
+ }
+}
+
+void update_connected_layer_gpu(layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+ if(a.adam){
+ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.inputs*l.outputs, batch, a.t);
+ adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.outputs, batch, a.t);
+ if(l.scales_gpu){
+ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.outputs, batch, a.t);
+ }
+ }else{
+ axpy_gpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
+ scal_gpu(l.outputs, momentum, l.bias_updates_gpu, 1);
+
+ if(l.batch_normalize){
+ axpy_gpu(l.outputs, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
+ scal_gpu(l.outputs, momentum, l.scale_updates_gpu, 1);
+ }
+
+ axpy_gpu(l.inputs*l.outputs, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
+ axpy_gpu(l.inputs*l.outputs, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
+ scal_gpu(l.inputs*l.outputs, momentum, l.weight_updates_gpu, 1);
+ }
+}
+
+void forward_connected_layer_gpu(layer l, network net)
+{
+ fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
+
+ int m = l.batch;
+ int k = l.inputs;
+ int n = l.outputs;
+ float * a = net.input_gpu;
+ float * b = l.weights_gpu;
+ float * c = l.output_gpu;
+ gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if (l.batch_normalize) {
+ forward_batchnorm_layer_gpu(l, net);
+ } else {
+ add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.outputs, 1);
+ }
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
+}
+
+void backward_connected_layer_gpu(layer l, network net)
+{
+ constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+ if(l.batch_normalize){
+ backward_batchnorm_layer_gpu(l, net);
+ } else {
+ backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.outputs, 1);
+ }
+
+ int m = l.outputs;
+ int k = l.batch;
+ int n = l.inputs;
+ float * a = l.delta_gpu;
+ float * b = net.input_gpu;
+ float * c = l.weight_updates_gpu;
+ gemm_gpu(1,0,m,n,k,1,a,m,b,n,1,c,n);
+
+ m = l.batch;
+ k = l.outputs;
+ n = l.inputs;
+
+ a = l.delta_gpu;
+ b = l.weights_gpu;
+ c = net.delta_gpu;
+
+ if(c) gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.h
new file mode 100644
index 00000000000..6727a964eaa
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/connected_layer.h
@@ -0,0 +1,23 @@
+#ifndef CONNECTED_LAYER_H
+#define CONNECTED_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_connected_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam);
+
+void forward_connected_layer(layer l, network net);
+void backward_connected_layer(layer l, network net);
+void update_connected_layer(layer l, update_args a);
+
+#ifdef GPU
+void forward_connected_layer_gpu(layer l, network net);
+void backward_connected_layer_gpu(layer l, network net);
+void update_connected_layer_gpu(layer l, update_args a);
+void push_connected_layer(layer l);
+void pull_connected_layer(layer l);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_kernels.cu
new file mode 100644
index 00000000000..8fa2ab2ef5d
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_kernels.cu
@@ -0,0 +1,322 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "convolutional_layer.h"
+#include "batchnorm_layer.h"
+#include "gemm.h"
+#include "blas.h"
+#include "im2col.h"
+#include "col2im.h"
+#include "utils.h"
+#include "cuda.h"
+}
+
+__global__ void binarize_kernel(float *x, int n, float *binary)
+{
+ int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (i >= n) return;
+ binary[i] = (x[i] >= 0) ? 1 : -1;
+}
+
+void binarize_gpu(float *x, int n, float *binary)
+{
+ binarize_kernel<<<cuda_gridsize(n), BLOCK>>>(x, n, binary);
+ check_error(cudaPeekAtLastError());
+}
+
+__global__ void binarize_input_kernel(float *input, int n, int size, float *binary)
+{
+ int s = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (s >= size) return;
+ int i = 0;
+ float mean = 0;
+ for(i = 0; i < n; ++i){
+ mean += fabsf(input[i*size + s]);
+ }
+ mean = mean / n;
+ for(i = 0; i < n; ++i){
+ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
+ }
+}
+
+void binarize_input_gpu(float *input, int n, int size, float *binary)
+{
+ binarize_input_kernel<<<cuda_gridsize(size), BLOCK>>>(input, n, size, binary);
+ check_error(cudaPeekAtLastError());
+}
+
+
+__global__ void binarize_weights_kernel(float *weights, int n, int size, float *binary)
+{
+ int f = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if (f >= n) return;
+ int i = 0;
+ float mean = 0;
+ for(i = 0; i < size; ++i){
+ mean += fabsf(weights[f*size + i]);
+ }
+ mean = mean / size;
+ for(i = 0; i < size; ++i){
+ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
+ //binary[f*size + i] = weights[f*size + i];
+ }
+}
+
+void binarize_weights_gpu(float *weights, int n, int size, float *binary)
+{
+ binarize_weights_kernel<<<cuda_gridsize(n), BLOCK>>>(weights, n, size, binary);
+ check_error(cudaPeekAtLastError());
+}
+
+void forward_convolutional_layer_gpu(convolutional_layer l, network net)
+{
+ fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
+ if(l.binary){
+ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
+ swap_binary(&l);
+ }
+
+ if(l.xnor){
+ binarize_weights_gpu(l.weights_gpu, l.n, l.c/l.groups*l.size*l.size, l.binary_weights_gpu);
+ swap_binary(&l);
+ binarize_gpu(net.input_gpu, l.c*l.h*l.w*l.batch, l.binary_input_gpu);
+ net.input_gpu = l.binary_input_gpu;
+ }
+
+#ifdef CUDNN
+ float one = 1;
+ cudnnConvolutionForward(cudnn_handle(),
+ &one,
+ l.srcTensorDesc,
+ net.input_gpu,
+ l.weightDesc,
+ l.weights_gpu,
+ l.convDesc,
+ l.fw_algo,
+ net.workspace,
+ l.workspace_size,
+ &one,
+ l.dstTensorDesc,
+ l.output_gpu);
+
+#else
+ int i, j;
+ int m = l.n/l.groups;
+ int k = l.size*l.size*l.c/l.groups;
+ int n = l.out_w*l.out_h;
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.weights_gpu + j*l.nweights/l.groups;
+ float *b = net.workspace;
+ float *c = l.output_gpu + (i*l.groups + j)*n*m;
+
+ im2col_gpu(net.input_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
+ l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
+ gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
+ }
+ }
+#endif
+
+ if (l.batch_normalize) {
+ forward_batchnorm_layer_gpu(l, net);
+ } else {
+ add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+ }
+
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
+ //if(l.dot > 0) dot_error_gpu(l);
+ if(l.binary || l.xnor) swap_binary(&l);
+}
+
+__global__ void smooth_kernel(float *x, int n, int w, int h, int c, int size, float rate, float *delta)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= n) return;
+
+ int j = id % w;
+ id /= w;
+ int i = id % h;
+ id /= h;
+ int k = id % c;
+ id /= c;
+ int b = id;
+
+ int w_offset = -(size/2.f);
+ int h_offset = -(size/2.f);
+
+ int out_index = j + w*(i + h*(k + c*b));
+ int l, m;
+ for(l = 0; l < size; ++l){
+ for(m = 0; m < size; ++m){
+ int cur_h = h_offset + i + l;
+ int cur_w = w_offset + j + m;
+ int index = cur_w + w*(cur_h + h*(k + b*c));
+ int valid = (cur_h >= 0 && cur_h < h &&
+ cur_w >= 0 && cur_w < w);
+ delta[out_index] += valid ? rate*(x[index] - x[out_index]) : 0;
+ }
+ }
+}
+
+extern "C" void smooth_layer(layer l, int size, float rate)
+{
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.out_c;
+
+ size_t n = h*w*c*l.batch;
+
+ smooth_kernel<<<cuda_gridsize(n), BLOCK>>>(l.output_gpu, n, l.w, l.h, l.c, size, rate, l.delta_gpu);
+ check_error(cudaPeekAtLastError());
+}
+
+void backward_convolutional_layer_gpu(convolutional_layer l, network net)
+{
+ if(l.smooth){
+ smooth_layer(l, 5, l.smooth);
+ }
+ //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer_gpu(l, net);
+ } else {
+ backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
+ }
+ float *original_input = net.input_gpu;
+
+ if(l.xnor) net.input_gpu = l.binary_input_gpu;
+#ifdef CUDNN
+ float one = 1;
+ cudnnConvolutionBackwardFilter(cudnn_handle(),
+ &one,
+ l.srcTensorDesc,
+ net.input_gpu,
+ l.ddstTensorDesc,
+ l.delta_gpu,
+ l.convDesc,
+ l.bf_algo,
+ net.workspace,
+ l.workspace_size,
+ &one,
+ l.dweightDesc,
+ l.weight_updates_gpu);
+
+ if(net.delta_gpu){
+ if(l.binary || l.xnor) swap_binary(&l);
+ cudnnConvolutionBackwardData(cudnn_handle(),
+ &one,
+ l.weightDesc,
+ l.weights_gpu,
+ l.ddstTensorDesc,
+ l.delta_gpu,
+ l.convDesc,
+ l.bd_algo,
+ net.workspace,
+ l.workspace_size,
+ &one,
+ l.dsrcTensorDesc,
+ net.delta_gpu);
+ if(l.binary || l.xnor) swap_binary(&l);
+ if(l.xnor) gradient_array_gpu(original_input, l.batch*l.c*l.h*l.w, HARDTAN, net.delta_gpu);
+ }
+
+#else
+ int m = l.n/l.groups;
+ int n = l.size*l.size*l.c/l.groups;
+ int k = l.out_w*l.out_h;
+
+ int i, j;
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.delta_gpu + (i*l.groups + j)*m*k;
+ float *b = net.workspace;
+ float *c = l.weight_updates_gpu + j*l.nweights/l.groups;
+
+ float *im = net.input_gpu+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
+
+ im2col_gpu(im, l.c/l.groups, l.h, l.w,
+ l.size, l.stride, l.pad, b);
+ gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if(net.delta_gpu){
+ if(l.binary || l.xnor) swap_binary(&l);
+ a = l.weights_gpu + j*l.nweights/l.groups;
+ b = l.delta_gpu + (i*l.groups + j)*m*k;
+ c = net.workspace;
+
+ gemm_gpu(1,0,n,k,m,1,a,n,b,k,0,c,k);
+
+ col2im_gpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride,
+ l.pad, net.delta_gpu + (i*l.groups + j)*l.c/l.groups*l.h*l.w);
+ if(l.binary || l.xnor) {
+ swap_binary(&l);
+ }
+ }
+ if(l.xnor) gradient_array_gpu(original_input + i*l.c*l.h*l.w, l.c*l.h*l.w, HARDTAN, net.delta_gpu + i*l.c*l.h*l.w);
+ }
+ }
+#endif
+}
+
+void pull_convolutional_layer(layer l)
+{
+ cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
+ cuda_pull_array(l.biases_gpu, l.biases, l.n);
+ cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
+ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ if (l.batch_normalize){
+ cuda_pull_array(l.scales_gpu, l.scales, l.n);
+ cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
+ cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
+ }
+}
+
+void push_convolutional_layer(layer l)
+{
+ cuda_push_array(l.weights_gpu, l.weights, l.nweights);
+ cuda_push_array(l.biases_gpu, l.biases, l.n);
+ cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
+ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ if (l.batch_normalize){
+ cuda_push_array(l.scales_gpu, l.scales, l.n);
+ cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
+ cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
+ }
+}
+
+void update_convolutional_layer_gpu(layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ if(a.adam){
+ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
+ adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
+ if(l.scales_gpu){
+ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
+ }
+ }else{
+ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
+ axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
+ scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1);
+
+ axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
+ scal_gpu(l.n, momentum, l.bias_updates_gpu, 1);
+
+ if(l.scales_gpu){
+ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
+ scal_gpu(l.n, momentum, l.scale_updates_gpu, 1);
+ }
+ }
+ if(l.clip){
+ constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
+ }
+}
+
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.c
new file mode 100644
index 00000000000..e4fb9bde047
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.c
@@ -0,0 +1,608 @@
+#include "convolutional_layer.h"
+#include "utils.h"
+#include "batchnorm_layer.h"
+#include "im2col.h"
+#include "col2im.h"
+#include "blas.h"
+#include "gemm.h"
+#include <stdio.h>
+#include <time.h>
+
+#ifdef AI2
+#include "xnor_layer.h"
+#endif
+
+void swap_binary(convolutional_layer *l)
+{
+ float *swap = l->weights;
+ l->weights = l->binary_weights;
+ l->binary_weights = swap;
+
+#ifdef GPU
+ swap = l->weights_gpu;
+ l->weights_gpu = l->binary_weights_gpu;
+ l->binary_weights_gpu = swap;
+#endif
+}
+
+void binarize_weights(float *weights, int n, int size, float *binary)
+{
+ int i, f;
+ for(f = 0; f < n; ++f){
+ float mean = 0;
+ for(i = 0; i < size; ++i){
+ mean += fabs(weights[f*size + i]);
+ }
+ mean = mean / size;
+ for(i = 0; i < size; ++i){
+ binary[f*size + i] = (weights[f*size + i] > 0) ? mean : -mean;
+ }
+ }
+}
+
+void binarize_cpu(float *input, int n, float *binary)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ binary[i] = (input[i] > 0) ? 1 : -1;
+ }
+}
+
+void binarize_input(float *input, int n, int size, float *binary)
+{
+ int i, s;
+ for(s = 0; s < size; ++s){
+ float mean = 0;
+ for(i = 0; i < n; ++i){
+ mean += fabs(input[i*size + s]);
+ }
+ mean = mean / n;
+ for(i = 0; i < n; ++i){
+ binary[i*size + s] = (input[i*size + s] > 0) ? mean : -mean;
+ }
+ }
+}
+
+int convolutional_out_height(convolutional_layer l)
+{
+ return (l.h + 2*l.pad - l.size) / l.stride + 1;
+}
+
+int convolutional_out_width(convolutional_layer l)
+{
+ return (l.w + 2*l.pad - l.size) / l.stride + 1;
+}
+
+image get_convolutional_image(convolutional_layer l)
+{
+ return float_to_image(l.out_w,l.out_h,l.out_c,l.output);
+}
+
+image get_convolutional_delta(convolutional_layer l)
+{
+ return float_to_image(l.out_w,l.out_h,l.out_c,l.delta);
+}
+
+static size_t get_workspace_size(layer l){
+#ifdef CUDNN
+ if(gpu_index >= 0){
+ size_t most = 0;
+ size_t s = 0;
+ cudnnGetConvolutionForwardWorkspaceSize(cudnn_handle(),
+ l.srcTensorDesc,
+ l.weightDesc,
+ l.convDesc,
+ l.dstTensorDesc,
+ l.fw_algo,
+ &s);
+ if (s > most) most = s;
+ cudnnGetConvolutionBackwardFilterWorkspaceSize(cudnn_handle(),
+ l.srcTensorDesc,
+ l.ddstTensorDesc,
+ l.convDesc,
+ l.dweightDesc,
+ l.bf_algo,
+ &s);
+ if (s > most) most = s;
+ cudnnGetConvolutionBackwardDataWorkspaceSize(cudnn_handle(),
+ l.weightDesc,
+ l.ddstTensorDesc,
+ l.convDesc,
+ l.dsrcTensorDesc,
+ l.bd_algo,
+ &s);
+ if (s > most) most = s;
+ return most;
+ }
+#endif
+ return (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups*sizeof(float);
+}
+
+#ifdef GPU
+#ifdef CUDNN
+void cudnn_convolutional_setup(layer *l)
+{
+ cudnnSetTensor4dDescriptor(l->dsrcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
+ cudnnSetTensor4dDescriptor(l->ddstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
+
+ cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
+ cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
+ cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
+
+ cudnnSetFilter4dDescriptor(l->dweightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c/l->groups, l->size, l->size);
+ cudnnSetFilter4dDescriptor(l->weightDesc, CUDNN_DATA_FLOAT, CUDNN_TENSOR_NCHW, l->n, l->c/l->groups, l->size, l->size);
+ #if CUDNN_MAJOR >= 6
+ cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT);
+ #else
+ cudnnSetConvolution2dDescriptor(l->convDesc, l->pad, l->pad, l->stride, l->stride, 1, 1, CUDNN_CROSS_CORRELATION);
+ #endif
+
+ #if CUDNN_MAJOR >= 7
+ cudnnSetConvolutionGroupCount(l->convDesc, l->groups);
+ #else
+ if(l->groups > 1){
+ error("CUDNN < 7 doesn't support groups, please upgrade!");
+ }
+ #endif
+
+ cudnnGetConvolutionForwardAlgorithm(cudnn_handle(),
+ l->srcTensorDesc,
+ l->weightDesc,
+ l->convDesc,
+ l->dstTensorDesc,
+ CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
+ 4000000000,
+ &l->fw_algo);
+ cudnnGetConvolutionBackwardDataAlgorithm(cudnn_handle(),
+ l->weightDesc,
+ l->ddstTensorDesc,
+ l->convDesc,
+ l->dsrcTensorDesc,
+ CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
+ 4000000000,
+ &l->bd_algo);
+ cudnnGetConvolutionBackwardFilterAlgorithm(cudnn_handle(),
+ l->srcTensorDesc,
+ l->ddstTensorDesc,
+ l->convDesc,
+ l->dweightDesc,
+ CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
+ 4000000000,
+ &l->bf_algo);
+}
+#endif
+#endif
+
+convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
+{
+ int i;
+ convolutional_layer l = {0};
+ l.type = CONVOLUTIONAL;
+
+ l.groups = groups;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.n = n;
+ l.binary = binary;
+ l.xnor = xnor;
+ l.batch = batch;
+ l.stride = stride;
+ l.size = size;
+ l.pad = padding;
+ l.batch_normalize = batch_normalize;
+
+ l.weights = calloc(c/groups*n*size*size, sizeof(float));
+ l.weight_updates = calloc(c/groups*n*size*size, sizeof(float));
+
+ l.biases = calloc(n, sizeof(float));
+ l.bias_updates = calloc(n, sizeof(float));
+
+ l.nweights = c/groups*n*size*size;
+ l.nbiases = n;
+
+ // float scale = 1./sqrt(size*size*c);
+ float scale = sqrt(2./(size*size*c/l.groups));
+ //printf("convscale %f\n", scale);
+ //scale = .02;
+ //for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1, 1);
+ for(i = 0; i < l.nweights; ++i) l.weights[i] = scale*rand_normal();
+ int out_w = convolutional_out_width(l);
+ int out_h = convolutional_out_height(l);
+ l.out_h = out_h;
+ l.out_w = out_w;
+ l.out_c = n;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+ l.inputs = l.w * l.h * l.c;
+
+ l.output = calloc(l.batch*l.outputs, sizeof(float));
+ l.delta = calloc(l.batch*l.outputs, sizeof(float));
+
+ l.forward = forward_convolutional_layer;
+ l.backward = backward_convolutional_layer;
+ l.update = update_convolutional_layer;
+ if(binary){
+ l.binary_weights = calloc(l.nweights, sizeof(float));
+ l.cweights = calloc(l.nweights, sizeof(char));
+ l.scales = calloc(n, sizeof(float));
+ }
+ if(xnor){
+ l.binary_weights = calloc(l.nweights, sizeof(float));
+ l.binary_input = calloc(l.inputs*l.batch, sizeof(float));
+ }
+
+ if(batch_normalize){
+ l.scales = calloc(n, sizeof(float));
+ l.scale_updates = calloc(n, sizeof(float));
+ for(i = 0; i < n; ++i){
+ l.scales[i] = 1;
+ }
+
+ l.mean = calloc(n, sizeof(float));
+ l.variance = calloc(n, sizeof(float));
+
+ l.mean_delta = calloc(n, sizeof(float));
+ l.variance_delta = calloc(n, sizeof(float));
+
+ l.rolling_mean = calloc(n, sizeof(float));
+ l.rolling_variance = calloc(n, sizeof(float));
+ l.x = calloc(l.batch*l.outputs, sizeof(float));
+ l.x_norm = calloc(l.batch*l.outputs, sizeof(float));
+ }
+ if(adam){
+ l.m = calloc(l.nweights, sizeof(float));
+ l.v = calloc(l.nweights, sizeof(float));
+ l.bias_m = calloc(n, sizeof(float));
+ l.scale_m = calloc(n, sizeof(float));
+ l.bias_v = calloc(n, sizeof(float));
+ l.scale_v = calloc(n, sizeof(float));
+ }
+
+#ifdef GPU
+ l.forward_gpu = forward_convolutional_layer_gpu;
+ l.backward_gpu = backward_convolutional_layer_gpu;
+ l.update_gpu = update_convolutional_layer_gpu;
+
+ if(gpu_index >= 0){
+ if (adam) {
+ l.m_gpu = cuda_make_array(l.m, l.nweights);
+ l.v_gpu = cuda_make_array(l.v, l.nweights);
+ l.bias_m_gpu = cuda_make_array(l.bias_m, n);
+ l.bias_v_gpu = cuda_make_array(l.bias_v, n);
+ l.scale_m_gpu = cuda_make_array(l.scale_m, n);
+ l.scale_v_gpu = cuda_make_array(l.scale_v, n);
+ }
+
+ l.weights_gpu = cuda_make_array(l.weights, l.nweights);
+ l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
+
+ l.biases_gpu = cuda_make_array(l.biases, n);
+ l.bias_updates_gpu = cuda_make_array(l.bias_updates, n);
+
+ l.delta_gpu = cuda_make_array(l.delta, l.batch*out_h*out_w*n);
+ l.output_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
+
+ if(binary){
+ l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);
+ }
+ if(xnor){
+ l.binary_weights_gpu = cuda_make_array(l.weights, l.nweights);
+ l.binary_input_gpu = cuda_make_array(0, l.inputs*l.batch);
+ }
+
+ if(batch_normalize){
+ l.mean_gpu = cuda_make_array(l.mean, n);
+ l.variance_gpu = cuda_make_array(l.variance, n);
+
+ l.rolling_mean_gpu = cuda_make_array(l.mean, n);
+ l.rolling_variance_gpu = cuda_make_array(l.variance, n);
+
+ l.mean_delta_gpu = cuda_make_array(l.mean, n);
+ l.variance_delta_gpu = cuda_make_array(l.variance, n);
+
+ l.scales_gpu = cuda_make_array(l.scales, n);
+ l.scale_updates_gpu = cuda_make_array(l.scale_updates, n);
+
+ l.x_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
+ l.x_norm_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
+ }
+#ifdef CUDNN
+ cudnnCreateTensorDescriptor(&l.normTensorDesc);
+ cudnnCreateTensorDescriptor(&l.srcTensorDesc);
+ cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+ cudnnCreateFilterDescriptor(&l.weightDesc);
+ cudnnCreateTensorDescriptor(&l.dsrcTensorDesc);
+ cudnnCreateTensorDescriptor(&l.ddstTensorDesc);
+ cudnnCreateFilterDescriptor(&l.dweightDesc);
+ cudnnCreateConvolutionDescriptor(&l.convDesc);
+ cudnn_convolutional_setup(&l);
+#endif
+ }
+#endif
+ l.workspace_size = get_workspace_size(l);
+ l.activation = activation;
+
+ fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
+
+ return l;
+}
+
+void denormalize_convolutional_layer(convolutional_layer l)
+{
+ int i, j;
+ for(i = 0; i < l.n; ++i){
+ float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
+ for(j = 0; j < l.c/l.groups*l.size*l.size; ++j){
+ l.weights[i*l.c/l.groups*l.size*l.size + j] *= scale;
+ }
+ l.biases[i] -= l.rolling_mean[i] * scale;
+ l.scales[i] = 1;
+ l.rolling_mean[i] = 0;
+ l.rolling_variance[i] = 1;
+ }
+}
+
+/*
+void test_convolutional_layer()
+{
+ convolutional_layer l = make_convolutional_layer(1, 5, 5, 3, 2, 5, 2, 1, LEAKY, 1, 0, 0, 0);
+ l.batch_normalize = 1;
+ float data[] = {1,1,1,1,1,
+ 1,1,1,1,1,
+ 1,1,1,1,1,
+ 1,1,1,1,1,
+ 1,1,1,1,1,
+ 2,2,2,2,2,
+ 2,2,2,2,2,
+ 2,2,2,2,2,
+ 2,2,2,2,2,
+ 2,2,2,2,2,
+ 3,3,3,3,3,
+ 3,3,3,3,3,
+ 3,3,3,3,3,
+ 3,3,3,3,3,
+ 3,3,3,3,3};
+ //net.input = data;
+ //forward_convolutional_layer(l);
+}
+*/
+
+void resize_convolutional_layer(convolutional_layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+ int out_w = convolutional_out_width(*l);
+ int out_h = convolutional_out_height(*l);
+
+ l->out_w = out_w;
+ l->out_h = out_h;
+
+ l->outputs = l->out_h * l->out_w * l->out_c;
+ l->inputs = l->w * l->h * l->c;
+
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+ if(l->batch_normalize){
+ l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));
+ l->x_norm = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
+ }
+
+#ifdef GPU
+ cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
+
+ l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+ l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+
+ if(l->batch_normalize){
+ cuda_free(l->x_gpu);
+ cuda_free(l->x_norm_gpu);
+
+ l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+ l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+ }
+#ifdef CUDNN
+ cudnn_convolutional_setup(l);
+#endif
+#endif
+ l->workspace_size = get_workspace_size(*l);
+}
+
+void add_bias(float *output, float *biases, int batch, int n, int size)
+{
+ int i,j,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ for(j = 0; j < size; ++j){
+ output[(b*n + i)*size + j] += biases[i];
+ }
+ }
+ }
+}
+
+void scale_bias(float *output, float *scales, int batch, int n, int size)
+{
+ int i,j,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ for(j = 0; j < size; ++j){
+ output[(b*n + i)*size + j] *= scales[i];
+ }
+ }
+ }
+}
+
+void backward_bias(float *bias_updates, float *delta, int batch, int n, int size)
+{
+ int i,b;
+ for(b = 0; b < batch; ++b){
+ for(i = 0; i < n; ++i){
+ bias_updates[i] += sum_array(delta+size*(i+b*n), size);
+ }
+ }
+}
+
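+// Convolution forward pass: im2col unrolls each input patch into a column buffer,
+// then one GEMM per batch item and group produces the output feature maps.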
+void forward_convolutional_layer(convolutional_layer l, network net)
+{
+ int i, j;
+
+ fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+
+ if(l.xnor){
+ binarize_weights(l.weights, l.n, l.c/l.groups*l.size*l.size, l.binary_weights);
+ swap_binary(&l);
+ binarize_cpu(net.input, l.c*l.h*l.w*l.batch, l.binary_input);
+ net.input = l.binary_input;
+ }
+
+ int m = l.n/l.groups;
+ int k = l.size*l.size*l.c/l.groups;
+ int n = l.out_w*l.out_h;
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.weights + j*l.nweights/l.groups;
+ float *b = net.workspace;
+ float *c = l.output + (i*l.groups + j)*n*m;
+
+ im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
+ l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
+ gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
+ }
+ }
+
+ if(l.batch_normalize){
+ forward_batchnorm_layer(l, net);
+ } else {
+ add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
+ }
+
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+ if(l.binary || l.xnor) swap_binary(&l);
+}
+
+void backward_convolutional_layer(convolutional_layer l, network net)
+{
+ int i, j;
+ int m = l.n/l.groups;
+ int n = l.size*l.size*l.c/l.groups;
+ int k = l.out_w*l.out_h;
+
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer(l, net);
+ } else {
+ backward_bias(l.bias_updates, l.delta, l.batch, l.n, k);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ for(j = 0; j < l.groups; ++j){
+ float *a = l.delta + (i*l.groups + j)*m*k;
+ float *b = net.workspace;
+ float *c = l.weight_updates + j*l.nweights/l.groups;
+
+ float *im = net.input+(i*l.groups + j)*l.c/l.groups*l.h*l.w;
+
+ im2col_cpu(im, l.c/l.groups, l.h, l.w,
+ l.size, l.stride, l.pad, b);
+ gemm(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if(net.delta){
+ a = l.weights + j*l.nweights/l.groups;
+ b = l.delta + (i*l.groups + j)*m*k;
+ c = net.workspace;
+
+ gemm(1,0,n,k,m,1,a,n,b,k,0,c,k);
+
+ col2im_cpu(net.workspace, l.c/l.groups, l.h, l.w, l.size, l.stride,
+ l.pad, net.delta + (i*l.groups + j)*l.c/l.groups*l.h*l.w);
+ }
+ }
+ }
+}
+
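+// SGD update with momentum and L2 weight decay, applied to biases,
+// batch-norm scales, and convolutional weights.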
+void update_convolutional_layer(convolutional_layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+ scal_cpu(l.n, momentum, l.bias_updates, 1);
+
+ if(l.scales){
+ axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+ scal_cpu(l.n, momentum, l.scale_updates, 1);
+ }
+
+ axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
+ axpy_cpu(l.nweights, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+ scal_cpu(l.nweights, momentum, l.weight_updates, 1);
+}
+
+
+image get_convolutional_weight(convolutional_layer l, int i)
+{
+ int h = l.size;
+ int w = l.size;
+ int c = l.c/l.groups;
+ return float_to_image(w,h,c,l.weights+i*h*w*c);
+}
+
+void rgbgr_weights(convolutional_layer l)
+{
+ int i;
+ for(i = 0; i < l.n; ++i){
+ image im = get_convolutional_weight(l, i);
+ if (im.c == 3) {
+ rgbgr_image(im);
+ }
+ }
+}
+
+void rescale_weights(convolutional_layer l, float scale, float trans)
+{
+ int i;
+ for(i = 0; i < l.n; ++i){
+ image im = get_convolutional_weight(l, i);
+ if (im.c == 3) {
+ scale_image(im, scale);
+ float sum = sum_array(im.data, im.w*im.h*im.c);
+ l.biases[i] += sum*trans;
+ }
+ }
+}
+
+image *get_weights(convolutional_layer l)
+{
+ image *weights = calloc(l.n, sizeof(image));
+ int i;
+ for(i = 0; i < l.n; ++i){
+ weights[i] = copy_image(get_convolutional_weight(l, i));
+ normalize_image(weights[i]);
+ /*
+ char buff[256];
+ sprintf(buff, "filter%d", i);
+ save_image(weights[i], buff);
+ */
+ }
+ //error("hey");
+ return weights;
+}
+
+image *visualize_convolutional_layer(convolutional_layer l, char *window, image *prev_weights)
+{
+ image *single_weights = get_weights(l);
+ show_images(single_weights, l.n, window);
+
+ image delta = get_convolutional_image(l);
+ image dc = collapse_image_layers(delta, 1);
+ char buff[256];
+ sprintf(buff, "%s: Output", window);
+ //show_image(dc, buff);
+ //save_image(dc, buff);
+ free_image(dc);
+ return single_weights;
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.h
new file mode 100644
index 00000000000..6c261f5fc23
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/convolutional_layer.h
@@ -0,0 +1,50 @@
+#ifndef CONVOLUTIONAL_LAYER_H
+#define CONVOLUTIONAL_LAYER_H
+
+#include "cuda.h"
+#include "image.h"
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+typedef layer convolutional_layer;
+
+#ifdef GPU
+void forward_convolutional_layer_gpu(convolutional_layer layer, network net);
+void backward_convolutional_layer_gpu(convolutional_layer layer, network net);
+void update_convolutional_layer_gpu(convolutional_layer layer, update_args a);
+
+void push_convolutional_layer(convolutional_layer layer);
+void pull_convolutional_layer(convolutional_layer layer);
+
+void add_bias_gpu(float *output, float *biases, int batch, int n, int size);
+void backward_bias_gpu(float *bias_updates, float *delta, int batch, int n, int size);
+void adam_update_gpu(float *w, float *d, float *m, float *v, float B1, float B2, float eps, float decay, float rate, int n, int batch, int t);
+#ifdef CUDNN
+void cudnn_convolutional_setup(layer *l);
+#endif
+#endif
+
+convolutional_layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam);
+void resize_convolutional_layer(convolutional_layer *layer, int w, int h);
+void forward_convolutional_layer(const convolutional_layer layer, network net);
+void update_convolutional_layer(convolutional_layer layer, update_args a);
+image *visualize_convolutional_layer(convolutional_layer layer, char *window, image *prev_weights);
+void binarize_weights(float *weights, int n, int size, float *binary);
+void swap_binary(convolutional_layer *l);
+void binarize_weights2(float *weights, int n, int size, char *binary, float *scales);
+
+void backward_convolutional_layer(convolutional_layer layer, network net);
+
+void add_bias(float *output, float *biases, int batch, int n, int size);
+void backward_bias(float *bias_updates, float *delta, int batch, int n, int size);
+
+image get_convolutional_image(convolutional_layer layer);
+image get_convolutional_delta(convolutional_layer layer);
+image get_convolutional_weight(convolutional_layer layer, int i);
+
+int convolutional_out_height(convolutional_layer layer);
+int convolutional_out_width(convolutional_layer layer);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.c
new file mode 100644
index 00000000000..2138ff2617a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.c
@@ -0,0 +1,176 @@
+#include "cost_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include <math.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+COST_TYPE get_cost_type(char *s)
+{
+ if (strcmp(s, "seg")==0) return SEG;
+ if (strcmp(s, "sse")==0) return SSE;
+ if (strcmp(s, "masked")==0) return MASKED;
+ if (strcmp(s, "smooth")==0) return SMOOTH;
+ if (strcmp(s, "L1")==0) return L1;
+ if (strcmp(s, "wgan")==0) return WGAN;
+ fprintf(stderr, "Couldn't find cost type %s, going with SSE\n", s);
+ return SSE;
+}
+
+char *get_cost_string(COST_TYPE a)
+{
+ switch(a){
+ case SEG:
+ return "seg";
+ case SSE:
+ return "sse";
+ case MASKED:
+ return "masked";
+ case SMOOTH:
+ return "smooth";
+ case L1:
+ return "L1";
+ case WGAN:
+ return "wgan";
+ }
+ return "sse";
+}
+
+cost_layer make_cost_layer(int batch, int inputs, COST_TYPE cost_type, float scale)
+{
+ fprintf(stderr, "cost %4d\n", inputs);
+ cost_layer l = {0};
+ l.type = COST;
+
+ l.scale = scale;
+ l.batch = batch;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.cost_type = cost_type;
+ l.delta = calloc(inputs*batch, sizeof(float));
+ l.output = calloc(inputs*batch, sizeof(float));
+ l.cost = calloc(1, sizeof(float));
+
+ l.forward = forward_cost_layer;
+ l.backward = backward_cost_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_cost_layer_gpu;
+ l.backward_gpu = backward_cost_layer_gpu;
+
+ l.delta_gpu = cuda_make_array(l.output, inputs*batch);
+ l.output_gpu = cuda_make_array(l.delta, inputs*batch);
+ #endif
+ return l;
+}
+
+void resize_cost_layer(cost_layer *l, int inputs)
+{
+ l->inputs = inputs;
+ l->outputs = inputs;
+ l->delta = realloc(l->delta, inputs*l->batch*sizeof(float));
+ l->output = realloc(l->output, inputs*l->batch*sizeof(float));
+#ifdef GPU
+ cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
+ l->delta_gpu = cuda_make_array(l->delta, inputs*l->batch);
+ l->output_gpu = cuda_make_array(l->output, inputs*l->batch);
+#endif
+}
+
+void forward_cost_layer(cost_layer l, network net)
+{
+ if (!net.truth) return;
+ if(l.cost_type == MASKED){
+ int i;
+ for(i = 0; i < l.batch*l.inputs; ++i){
+ if(net.truth[i] == SECRET_NUM) net.input[i] = SECRET_NUM;
+ }
+ }
+ if(l.cost_type == SMOOTH){
+ smooth_l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
+ }else if(l.cost_type == L1){
+ l1_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
+ } else {
+ l2_cpu(l.batch*l.inputs, net.input, net.truth, l.delta, l.output);
+ }
+ l.cost[0] = sum_array(l.output, l.batch*l.inputs);
+}
+
+void backward_cost_layer(const cost_layer l, network net)
+{
+ axpy_cpu(l.batch*l.inputs, l.scale, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void pull_cost_layer(cost_layer l)
+{
+ cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+}
+
+void push_cost_layer(cost_layer l)
+{
+ cuda_push_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+}
+
+int float_abs_compare (const void * a, const void * b)
+{
+ float fa = *(const float*) a;
+ if(fa < 0) fa = -fa;
+ float fb = *(const float*) b;
+ if(fb < 0) fb = -fb;
+ return (fa > fb) - (fa < fb);
+}
+
+void forward_cost_layer_gpu(cost_layer l, network net)
+{
+ if (!net.truth) return;
+ if(l.smooth){
+ scal_gpu(l.batch*l.inputs, (1-l.smooth), net.truth_gpu, 1);
+ add_gpu(l.batch*l.inputs, l.smooth * 1./l.inputs, net.truth_gpu, 1);
+ }
+
+ if(l.cost_type == SMOOTH){
+ smooth_l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
+ } else if (l.cost_type == L1){
+ l1_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
+ } else if (l.cost_type == WGAN){
+ wgan_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
+ } else {
+ l2_gpu(l.batch*l.inputs, net.input_gpu, net.truth_gpu, l.delta_gpu, l.output_gpu);
+ }
+
+ if (l.cost_type == SEG && l.noobject_scale != 1) {
+ scale_mask_gpu(l.batch*l.inputs, l.delta_gpu, 0, net.truth_gpu, l.noobject_scale);
+ scale_mask_gpu(l.batch*l.inputs, l.output_gpu, 0, net.truth_gpu, l.noobject_scale);
+ }
+ if (l.cost_type == MASKED) {
+ mask_gpu(l.batch*l.inputs, net.delta_gpu, SECRET_NUM, net.truth_gpu, 0);
+ }
+
+ if(l.ratio){
+ cuda_pull_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+ qsort(l.delta, l.batch*l.inputs, sizeof(float), float_abs_compare);
+ int n = (1-l.ratio) * l.batch*l.inputs;
+ float thresh = l.delta[n];
+ thresh = 0;
+ printf("%f\n", thresh);
+ supp_gpu(l.batch*l.inputs, thresh, l.delta_gpu, 1);
+ }
+
+ if(l.thresh){
+ supp_gpu(l.batch*l.inputs, l.thresh*1./l.inputs, l.delta_gpu, 1);
+ }
+
+ cuda_pull_array(l.output_gpu, l.output, l.batch*l.inputs);
+ l.cost[0] = sum_array(l.output, l.batch*l.inputs);
+}
+
+void backward_cost_layer_gpu(const cost_layer l, network net)
+{
+ axpy_gpu(l.batch*l.inputs, l.scale, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.h
new file mode 100644
index 00000000000..ceb64de00bf
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cost_layer.h
@@ -0,0 +1,20 @@
+#ifndef COST_LAYER_H
+#define COST_LAYER_H
+#include "layer.h"
+#include "network.h"
+
+typedef layer cost_layer;
+
+COST_TYPE get_cost_type(char *s);
+char *get_cost_string(COST_TYPE a);
+cost_layer make_cost_layer(int batch, int inputs, COST_TYPE type, float scale);
+void forward_cost_layer(const cost_layer l, network net);
+void backward_cost_layer(const cost_layer l, network net);
+void resize_cost_layer(cost_layer *l, int inputs);
+
+#ifdef GPU
+void forward_cost_layer_gpu(cost_layer l, network net);
+void backward_cost_layer_gpu(const cost_layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.c
new file mode 100644
index 00000000000..7dd29f62b7a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.c
@@ -0,0 +1,283 @@
+#include "crnn_layer.h"
+#include "convolutional_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+static void increment_layer(layer *l, int steps)
+{
+ int num = l->outputs*l->batch*steps;
+ l->output += num;
+ l->delta += num;
+ l->x += num;
+ l->x_norm += num;
+
+#ifdef GPU
+ l->output_gpu += num;
+ l->delta_gpu += num;
+ l->x_gpu += num;
+ l->x_norm_gpu += num;
+#endif
+}
+
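+// Convolutional RNN layer: an input conv, a recurrent (self) conv, and an output
+// conv, all 3x3 stride 1, sharing a hidden state that is carried across time steps.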
+layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize)
+{
+ fprintf(stderr, "CRNN Layer: %d x %d x %d image, %d filters\n", h,w,c,output_filters);
+ batch = batch / steps;
+ layer l = {0};
+ l.batch = batch;
+ l.type = CRNN;
+ l.steps = steps;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.out_h = h;
+ l.out_w = w;
+ l.out_c = output_filters;
+ l.inputs = h*w*c;
+ l.hidden = h * w * hidden_filters;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+
+ l.state = calloc(l.hidden*batch*(steps+1), sizeof(float));
+
+ l.input_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.input_layer) = make_convolutional_layer(batch*steps, h, w, c, hidden_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
+ l.input_layer->batch = batch;
+
+ l.self_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.self_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, hidden_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
+ l.self_layer->batch = batch;
+
+ l.output_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.output_layer) = make_convolutional_layer(batch*steps, h, w, hidden_filters, output_filters, 1, 3, 1, 1, activation, batch_normalize, 0, 0, 0);
+ l.output_layer->batch = batch;
+
+ l.output = l.output_layer->output;
+ l.delta = l.output_layer->delta;
+
+ l.forward = forward_crnn_layer;
+ l.backward = backward_crnn_layer;
+ l.update = update_crnn_layer;
+
+#ifdef GPU
+ l.forward_gpu = forward_crnn_layer_gpu;
+ l.backward_gpu = backward_crnn_layer_gpu;
+ l.update_gpu = update_crnn_layer_gpu;
+
+ l.state_gpu = cuda_make_array(l.state, l.hidden*batch*(steps+1));
+ l.output_gpu = l.output_layer->output_gpu;
+ l.delta_gpu = l.output_layer->delta_gpu;
+#endif
+
+ return l;
+}
+
+void update_crnn_layer(layer l, update_args a)
+{
+ update_convolutional_layer(*(l.input_layer), a);
+ update_convolutional_layer(*(l.self_layer), a);
+ update_convolutional_layer(*(l.output_layer), a);
+}
+
+void forward_crnn_layer(layer l, network net)
+{
+ network s = net;
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, output_layer.delta, 1);
+ fill_cpu(l.hidden * l.batch * l.steps, 0, self_layer.delta, 1);
+ fill_cpu(l.hidden * l.batch * l.steps, 0, input_layer.delta, 1);
+ if(net.train) fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input = net.input;
+ forward_convolutional_layer(input_layer, s);
+
+ s.input = l.state;
+ forward_convolutional_layer(self_layer, s);
+
+ float *old_state = l.state;
+ if(net.train) l.state += l.hidden*l.batch;
+ if(l.shortcut){
+ copy_cpu(l.hidden * l.batch, old_state, 1, l.state, 1);
+ }else{
+ fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+ }
+ axpy_cpu(l.hidden * l.batch, 1, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ forward_convolutional_layer(output_layer, s);
+
+ net.input += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_crnn_layer(layer l, network net)
+{
+ network s = net;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ increment_layer(&input_layer, l.steps-1);
+ increment_layer(&self_layer, l.steps-1);
+ increment_layer(&output_layer, l.steps-1);
+
+ l.state += l.hidden*l.batch*l.steps;
+ for (i = l.steps-1; i >= 0; --i) {
+ copy_cpu(l.hidden * l.batch, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ s.delta = self_layer.delta;
+ backward_convolutional_layer(output_layer, s);
+
+ l.state -= l.hidden*l.batch;
+ /*
+ if(i > 0){
+ copy_cpu(l.hidden * l.batch, input_layer.output - l.hidden*l.batch, 1, l.state, 1);
+ axpy_cpu(l.hidden * l.batch, 1, self_layer.output - l.hidden*l.batch, 1, l.state, 1);
+ }else{
+ fill_cpu(l.hidden * l.batch, 0, l.state, 1);
+ }
+ */
+
+ s.input = l.state;
+ s.delta = self_layer.delta - l.hidden*l.batch;
+ if (i == 0) s.delta = 0;
+ backward_convolutional_layer(self_layer, s);
+
+ copy_cpu(l.hidden*l.batch, self_layer.delta, 1, input_layer.delta, 1);
+ if (i > 0 && l.shortcut) axpy_cpu(l.hidden*l.batch, 1, self_layer.delta, 1, self_layer.delta - l.hidden*l.batch, 1);
+ s.input = net.input + i*l.inputs*l.batch;
+ if(net.delta) s.delta = net.delta + i*l.inputs*l.batch;
+ else s.delta = 0;
+ backward_convolutional_layer(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+}
+
+#ifdef GPU
+
+void pull_crnn_layer(layer l)
+{
+ pull_convolutional_layer(*(l.input_layer));
+ pull_convolutional_layer(*(l.self_layer));
+ pull_convolutional_layer(*(l.output_layer));
+}
+
+void push_crnn_layer(layer l)
+{
+ push_convolutional_layer(*(l.input_layer));
+ push_convolutional_layer(*(l.self_layer));
+ push_convolutional_layer(*(l.output_layer));
+}
+
+void update_crnn_layer_gpu(layer l, update_args a)
+{
+ update_convolutional_layer_gpu(*(l.input_layer), a);
+ update_convolutional_layer_gpu(*(l.self_layer), a);
+ update_convolutional_layer_gpu(*(l.output_layer), a);
+}
+
+void forward_crnn_layer_gpu(layer l, network net)
+{
+ network s = net;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, output_layer.delta_gpu, 1);
+ fill_gpu(l.hidden * l.batch * l.steps, 0, self_layer.delta_gpu, 1);
+ fill_gpu(l.hidden * l.batch * l.steps, 0, input_layer.delta_gpu, 1);
+ if(net.train) fill_gpu(l.hidden * l.batch, 0, l.state_gpu, 1);
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input_gpu = net.input_gpu;
+ forward_convolutional_layer_gpu(input_layer, s);
+
+ s.input_gpu = l.state_gpu;
+ forward_convolutional_layer_gpu(self_layer, s);
+
+ float *old_state = l.state_gpu;
+ if(net.train) l.state_gpu += l.hidden*l.batch;
+ if(l.shortcut){
+ copy_gpu(l.hidden * l.batch, old_state, 1, l.state_gpu, 1);
+ }else{
+ fill_gpu(l.hidden * l.batch, 0, l.state_gpu, 1);
+ }
+ axpy_gpu(l.hidden * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_gpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+ forward_convolutional_layer_gpu(output_layer, s);
+
+ net.input_gpu += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_crnn_layer_gpu(layer l, network net)
+{
+ network s = net;
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+ increment_layer(&input_layer, l.steps - 1);
+ increment_layer(&self_layer, l.steps - 1);
+ increment_layer(&output_layer, l.steps - 1);
+ l.state_gpu += l.hidden*l.batch*l.steps;
+ for (i = l.steps-1; i >= 0; --i) {
+ copy_gpu(l.hidden * l.batch, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_gpu(l.hidden * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+ s.delta_gpu = self_layer.delta_gpu;
+ backward_convolutional_layer_gpu(output_layer, s);
+
+ l.state_gpu -= l.hidden*l.batch;
+
+ s.input_gpu = l.state_gpu;
+ s.delta_gpu = self_layer.delta_gpu - l.hidden*l.batch;
+ if (i == 0) s.delta_gpu = 0;
+ backward_convolutional_layer_gpu(self_layer, s);
+
+ copy_gpu(l.hidden*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
+ if (i > 0 && l.shortcut) axpy_gpu(l.hidden*l.batch, 1, self_layer.delta_gpu, 1, self_layer.delta_gpu - l.hidden*l.batch, 1);
+ s.input_gpu = net.input_gpu + i*l.inputs*l.batch;
+ if(net.delta_gpu) s.delta_gpu = net.delta_gpu + i*l.inputs*l.batch;
+ else s.delta_gpu = 0;
+ backward_convolutional_layer_gpu(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.h
new file mode 100644
index 00000000000..515f378354e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crnn_layer.h
@@ -0,0 +1,24 @@
+
+#ifndef CRNN_LAYER_H
+#define CRNN_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_crnn_layer(int batch, int h, int w, int c, int hidden_filters, int output_filters, int steps, ACTIVATION activation, int batch_normalize);
+
+void forward_crnn_layer(layer l, network net);
+void backward_crnn_layer(layer l, network net);
+void update_crnn_layer(layer l, update_args a);
+
+#ifdef GPU
+void forward_crnn_layer_gpu(layer l, network net);
+void backward_crnn_layer_gpu(layer l, network net);
+void update_crnn_layer_gpu(layer l, update_args a);
+void push_crnn_layer(layer l);
+void pull_crnn_layer(layer l);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.c
new file mode 100644
index 00000000000..3b918529e64
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.c
@@ -0,0 +1,103 @@
+#include "crop_layer.h"
+#include "cuda.h"
+#include <stdio.h>
+
+image get_crop_image(crop_layer l)
+{
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.out_c;
+ return float_to_image(w,h,c,l.output);
+}
+
+void backward_crop_layer(const crop_layer l, network net){}
+void backward_crop_layer_gpu(const crop_layer l, network net){}
+
+crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure)
+{
+ fprintf(stderr, "Crop Layer: %d x %d -> %d x %d x %d image\n", h,w,crop_height,crop_width,c);
+ crop_layer l = {0};
+ l.type = CROP;
+ l.batch = batch;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.scale = (float)crop_height / h;
+ l.flip = flip;
+ l.angle = angle;
+ l.saturation = saturation;
+ l.exposure = exposure;
+ l.out_w = crop_width;
+ l.out_h = crop_height;
+ l.out_c = c;
+ l.inputs = l.w * l.h * l.c;
+ l.outputs = l.out_w * l.out_h * l.out_c;
+ l.output = calloc(l.outputs*batch, sizeof(float));
+ l.forward = forward_crop_layer;
+ l.backward = backward_crop_layer;
+
+ #ifdef GPU
+ l.forward_gpu = forward_crop_layer_gpu;
+ l.backward_gpu = backward_crop_layer_gpu;
+ l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
+ l.rand_gpu = cuda_make_array(0, l.batch*8);
+ #endif
+ return l;
+}
+
+void resize_crop_layer(layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+
+ l->out_w = l->scale*w;
+ l->out_h = l->scale*h;
+
+ l->inputs = l->w * l->h * l->c;
+ l->outputs = l->out_h * l->out_w * l->out_c;
+
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ #ifdef GPU
+ cuda_free(l->output_gpu);
+ l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
+ #endif
+}
+
+
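+/* CPU crop forward pass: choose a random window and flip while training
+ * (a centered window, no flip, at test time) and copy the pixels, applying
+ * the scale/translate adjustment unless noadjust is set. */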
+void forward_crop_layer(const crop_layer l, network net)
+{
+ int i,j,c,b,row,col;
+ int index;
+ int count = 0;
+ int flip = (l.flip && rand()%2);
+ int dh = rand()%(l.h - l.out_h + 1);
+ int dw = rand()%(l.w - l.out_w + 1);
+ float scale = 2;
+ float trans = -1;
+ if(l.noadjust){
+ scale = 1;
+ trans = 0;
+ }
+ if(!net.train){
+ flip = 0;
+ dh = (l.h - l.out_h)/2;
+ dw = (l.w - l.out_w)/2;
+ }
+ for(b = 0; b < l.batch; ++b){
+ for(c = 0; c < l.c; ++c){
+ for(i = 0; i < l.out_h; ++i){
+ for(j = 0; j < l.out_w; ++j){
+ if(flip){
+ col = l.w - dw - j - 1;
+ }else{
+ col = j + dw;
+ }
+ row = i + dh;
+ index = col+l.w*(row+l.h*(c + l.c*b));
+ l.output[count++] = net.input[index]*scale + trans;
+ }
+ }
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.h
new file mode 100644
index 00000000000..3b5883c47d6
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer.h
@@ -0,0 +1,20 @@
+#ifndef CROP_LAYER_H
+#define CROP_LAYER_H
+
+#include "image.h"
+#include "layer.h"
+#include "network.h"
+
+typedef layer crop_layer;
+
+image get_crop_image(crop_layer l);
+crop_layer make_crop_layer(int batch, int h, int w, int c, int crop_height, int crop_width, int flip, float angle, float saturation, float exposure);
+void forward_crop_layer(const crop_layer l, network net);
+void resize_crop_layer(layer *l, int w, int h);
+
+#ifdef GPU
+void forward_crop_layer_gpu(crop_layer l, network net);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer_kernels.cu
new file mode 100644
index 00000000000..b5b9f554627
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/crop_layer_kernels.cu
@@ -0,0 +1,225 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "crop_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "image.h"
+}
+
+__device__ float get_pixel_kernel(float *image, int w, int h, int x, int y, int c)
+{
+ if(x < 0 || x >= w || y < 0 || y >= h) return 0;
+ return image[x + w*(y + c*h)];
+}
+
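+/* Standard RGB <-> HSV conversions, used below to jitter saturation and
+ * exposure of the input image on the GPU. */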
+__device__ float3 rgb_to_hsv_kernel(float3 rgb)
+{
+ float r = rgb.x;
+ float g = rgb.y;
+ float b = rgb.z;
+
+ float h, s, v;
+ float max = (r > g) ? ( (r > b) ? r : b) : ( (g > b) ? g : b);
+ float min = (r < g) ? ( (r < b) ? r : b) : ( (g < b) ? g : b);
+ float delta = max - min;
+ v = max;
+ if(max == 0){
+ s = 0;
+ h = -1;
+ }else{
+ s = delta/max;
+ if(r == max){
+ h = (g - b) / delta;
+ } else if (g == max) {
+ h = 2 + (b - r) / delta;
+ } else {
+ h = 4 + (r - g) / delta;
+ }
+ if (h < 0) h += 6;
+ }
+ return make_float3(h, s, v);
+}
+
+__device__ float3 hsv_to_rgb_kernel(float3 hsv)
+{
+ float h = hsv.x;
+ float s = hsv.y;
+ float v = hsv.z;
+
+ float r, g, b;
+ float f, p, q, t;
+
+ if (s == 0) {
+ r = g = b = v;
+ } else {
+ int index = (int) floorf(h);
+ f = h - index;
+ p = v*(1-s);
+ q = v*(1-s*f);
+ t = v*(1-s*(1-f));
+ if(index == 0){
+ r = v; g = t; b = p;
+ } else if(index == 1){
+ r = q; g = v; b = p;
+ } else if(index == 2){
+ r = p; g = v; b = t;
+ } else if(index == 3){
+ r = p; g = q; b = v;
+ } else if(index == 4){
+ r = t; g = p; b = v;
+ } else {
+ r = v; g = p; b = q;
+ }
+ }
+ r = (r < 0) ? 0 : ((r > 1) ? 1 : r);
+ g = (g < 0) ? 0 : ((g > 1) ? 1 : g);
+ b = (b < 0) ? 0 : ((b > 1) ? 1 : b);
+ return make_float3(r, g, b);
+}
+
+__device__ float bilinear_interpolate_kernel(float *image, int w, int h, float x, float y, int c)
+{
+ int ix = (int) floorf(x);
+ int iy = (int) floorf(y);
+
+ float dx = x - ix;
+ float dy = y - iy;
+
+ float val = (1-dy) * (1-dx) * get_pixel_kernel(image, w, h, ix, iy, c) +
+ dy * (1-dx) * get_pixel_kernel(image, w, h, ix, iy+1, c) +
+ (1-dy) * dx * get_pixel_kernel(image, w, h, ix+1, iy, c) +
+ dy * dx * get_pixel_kernel(image, w, h, ix+1, iy+1, c);
+ return val;
+}
+
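+/* Per-pixel color augmentation: while training, randomly scale saturation and
+ * exposure in HSV space, then map the pixel into the network's input range
+ * with the given scale/translate and a per-channel random shift. */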
+__global__ void levels_image_kernel(float *image, float *rand, int batch, int w, int h, int train, float saturation, float exposure, float translate, float scale, float shift)
+{
+ int size = batch * w * h;
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= size) return;
+ int x = id % w;
+ id /= w;
+ int y = id % h;
+ id /= h;
+ float rshift = rand[0];
+ float gshift = rand[1];
+ float bshift = rand[2];
+ float r0 = rand[8*id + 0];
+ float r1 = rand[8*id + 1];
+ float r2 = rand[8*id + 2];
+ float r3 = rand[8*id + 3];
+
+ saturation = r0*(saturation - 1) + 1;
+ saturation = (r1 > .5f) ? 1.f/saturation : saturation;
+ exposure = r2*(exposure - 1) + 1;
+ exposure = (r3 > .5f) ? 1.f/exposure : exposure;
+
+ size_t offset = id * h * w * 3;
+ image += offset;
+ float r = image[x + w*(y + h*0)];
+ float g = image[x + w*(y + h*1)];
+ float b = image[x + w*(y + h*2)];
+ float3 rgb = make_float3(r,g,b);
+ if(train){
+ float3 hsv = rgb_to_hsv_kernel(rgb);
+ hsv.y *= saturation;
+ hsv.z *= exposure;
+ rgb = hsv_to_rgb_kernel(hsv);
+ } else {
+ shift = 0;
+ }
+ image[x + w*(y + h*0)] = rgb.x*scale + translate + (rshift - .5f)*shift;
+ image[x + w*(y + h*1)] = rgb.y*scale + translate + (gshift - .5f)*shift;
+ image[x + w*(y + h*2)] = rgb.z*scale + translate + (bshift - .5f)*shift;
+}
+
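+/* Crop kernel: each thread computes one output pixel by sampling the source
+ * image with bilinear interpolation after a random crop, flip and rotation
+ * (deterministic center crop, no flip or rotation, at test time). */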
+__global__ void forward_crop_layer_kernel(float *input, float *rand, int size, int c, int h, int w, int crop_height, int crop_width, int train, int flip, float angle, float *output)
+{
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= size) return;
+
+ float cx = w/2.f;
+ float cy = h/2.f;
+
+ int count = id;
+ int j = id % crop_width;
+ id /= crop_width;
+ int i = id % crop_height;
+ id /= crop_height;
+ int k = id % c;
+ id /= c;
+ int b = id;
+
+ float r4 = rand[8*b + 4];
+ float r5 = rand[8*b + 5];
+ float r6 = rand[8*b + 6];
+ float r7 = rand[8*b + 7];
+
+ float dw = (w - crop_width)*r4;
+ float dh = (h - crop_height)*r5;
+ flip = (flip && (r6 > .5f));
+ angle = 2*angle*r7 - angle;
+ if(!train){
+ dw = (w - crop_width)/2.f;
+ dh = (h - crop_height)/2.f;
+ flip = 0;
+ angle = 0;
+ }
+
+ input += w*h*c*b;
+
+ float x = (flip) ? w - dw - j - 1 : j + dw;
+ float y = i + dh;
+
+ float rx = cosf(angle)*(x-cx) - sinf(angle)*(y-cy) + cx;
+ float ry = sinf(angle)*(x-cx) + cosf(angle)*(y-cy) + cy;
+
+ output[count] = bilinear_interpolate_kernel(input, w, h, rx, ry, k);
+}
+
+extern "C" void forward_crop_layer_gpu(crop_layer layer, network net)
+{
+ cuda_random(layer.rand_gpu, layer.batch*8);
+
+ float radians = layer.angle*3.14159265f/180.f;
+
+ float scale = 2;
+ float translate = -1;
+ if(layer.noadjust){
+ scale = 1;
+ translate = 0;
+ }
+
+ int size = layer.batch * layer.w * layer.h;
+
+ levels_image_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, layer.batch, layer.w, layer.h, net.train, layer.saturation, layer.exposure, translate, scale, layer.shift);
+ check_error(cudaPeekAtLastError());
+
+ size = layer.batch*layer.c*layer.out_w*layer.out_h;
+
+ forward_crop_layer_kernel<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, layer.rand_gpu, size, layer.c, layer.h, layer.w, layer.out_h, layer.out_w, net.train, layer.flip, radians, layer.output_gpu);
+ check_error(cudaPeekAtLastError());
+
+/*
+ cuda_pull_array(layer.output_gpu, layer.output, size);
+ image im = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 0*(size/layer.batch));
+ image im2 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 1*(size/layer.batch));
+ image im3 = float_to_image(layer.crop_width, layer.crop_height, layer.c, layer.output + 2*(size/layer.batch));
+
+ translate_image(im, -translate);
+ scale_image(im, 1/scale);
+ translate_image(im2, -translate);
+ scale_image(im2, 1/scale);
+ translate_image(im3, -translate);
+ scale_image(im3, 1/scale);
+
+ show_image(im, "cropped");
+ show_image(im2, "cropped2");
+ show_image(im3, "cropped3");
+ cvWaitKey(0);
+ */
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.c
new file mode 100644
index 00000000000..48aba6e4012
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.c
@@ -0,0 +1,178 @@
+int gpu_index = 0;
+
+#ifdef GPU
+
+#include "cuda.h"
+#include "utils.h"
+#include "blas.h"
+#include <assert.h>
+#include <stdlib.h>
+#include <time.h>
+
+void cuda_set_device(int n)
+{
+ gpu_index = n;
+ cudaError_t status = cudaSetDevice(n);
+ check_error(status);
+}
+
+int cuda_get_device()
+{
+ int n = 0;
+ cudaError_t status = cudaGetDevice(&n);
+ check_error(status);
+ return n;
+}
+
+void check_error(cudaError_t status)
+{
+ //cudaDeviceSynchronize();
+ cudaError_t status2 = cudaGetLastError();
+ if (status != cudaSuccess)
+ {
+ const char *s = cudaGetErrorString(status);
+ char buffer[256];
+ printf("CUDA Error: %s\n", s);
+ assert(0);
+ snprintf(buffer, 256, "CUDA Error: %s", s);
+ error(buffer);
+ }
+ if (status2 != cudaSuccess)
+ {
+ const char *s = cudaGetErrorString(status2);
+ char buffer[256];
+ printf("CUDA Error Prev: %s\n", s);
+ assert(0);
+ snprintf(buffer, 256, "CUDA Error Prev: %s", s);
+ error(buffer);
+ }
+}
+
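+/* Map a flat element count onto a CUDA launch grid, spilling into a second
+ * grid dimension when the block count would exceed the 65535 limit. */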
+dim3 cuda_gridsize(size_t n){
+ size_t k = (n-1) / BLOCK + 1;
+ size_t x = k;
+ size_t y = 1;
+ if(x > 65535){
+ x = ceil(sqrt(k));
+ y = (n-1)/(x*BLOCK) + 1;
+ }
+ dim3 d = {x, y, 1};
+ //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
+ return d;
+}
+
+#ifdef CUDNN
+cudnnHandle_t cudnn_handle()
+{
+ static int init[16] = {0};
+ static cudnnHandle_t handle[16];
+ int i = cuda_get_device();
+ if(!init[i]) {
+ cudnnCreate(&handle[i]);
+ init[i] = 1;
+ }
+ return handle[i];
+}
+#endif
+
+cublasHandle_t blas_handle()
+{
+ static int init[16] = {0};
+ static cublasHandle_t handle[16];
+ int i = cuda_get_device();
+ if(!init[i]) {
+ cublasCreate(&handle[i]);
+ init[i] = 1;
+ }
+ return handle[i];
+}
+
+float *cuda_make_array(float *x, size_t n)
+{
+ float *x_gpu;
+ size_t size = sizeof(float)*n;
+ cudaError_t status = cudaMalloc((void **)&x_gpu, size);
+ check_error(status);
+ if(x){
+ status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ check_error(status);
+ } else {
+ fill_gpu(n, 0, x_gpu, 1);
+ }
+ if(!x_gpu) error("Cuda malloc failed\n");
+ return x_gpu;
+}
+
+void cuda_random(float *x_gpu, size_t n)
+{
+ static curandGenerator_t gen[16];
+ static int init[16] = {0};
+ int i = cuda_get_device();
+ if(!init[i]){
+ curandCreateGenerator(&gen[i], CURAND_RNG_PSEUDO_DEFAULT);
+ curandSetPseudoRandomGeneratorSeed(gen[i], time(0));
+ init[i] = 1;
+ }
+ curandGenerateUniform(gen[i], x_gpu, n);
+ check_error(cudaPeekAtLastError());
+}
+
+float cuda_compare(float *x_gpu, float *x, size_t n, char *s)
+{
+ float *tmp = calloc(n, sizeof(float));
+ cuda_pull_array(x_gpu, tmp, n);
+ //int i;
+ //for(i = 0; i < n; ++i) printf("%f %f\n", tmp[i], x[i]);
+ axpy_cpu(n, -1, x, 1, tmp, 1);
+ float err = dot_cpu(n, tmp, 1, tmp, 1);
+ printf("Error %s: %f\n", s, sqrt(err/n));
+ free(tmp);
+ return err;
+}
+
+int *cuda_make_int_array(int *x, size_t n)
+{
+ int *x_gpu;
+ size_t size = sizeof(int)*n;
+ cudaError_t status = cudaMalloc((void **)&x_gpu, size);
+ check_error(status);
+ if(x){
+ status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ check_error(status);
+ }
+ if(!x_gpu) error("Cuda malloc failed\n");
+ return x_gpu;
+}
+
+void cuda_free(float *x_gpu)
+{
+ cudaError_t status = cudaFree(x_gpu);
+ check_error(status);
+}
+
+void cuda_push_array(float *x_gpu, float *x, size_t n)
+{
+ size_t size = sizeof(float)*n;
+ cudaError_t status = cudaMemcpy(x_gpu, x, size, cudaMemcpyHostToDevice);
+ check_error(status);
+}
+
+void cuda_pull_array(float *x_gpu, float *x, size_t n)
+{
+ size_t size = sizeof(float)*n;
+ cudaError_t status = cudaMemcpy(x, x_gpu, size, cudaMemcpyDeviceToHost);
+ check_error(status);
+}
+
+float cuda_mag_array(float *x_gpu, size_t n)
+{
+ float *temp = calloc(n, sizeof(float));
+ cuda_pull_array(x_gpu, temp, n);
+ float m = mag_array(temp, n);
+ free(temp);
+ return m;
+}
+#else
+void cuda_set_device(int n){}
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.h
new file mode 100644
index 00000000000..a1bc2160ded
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/cuda.h
@@ -0,0 +1,20 @@
+#ifndef CUDA_H
+#define CUDA_H
+
+#include "darknet.h"
+
+#ifdef GPU
+
+void check_error(cudaError_t status);
+cublasHandle_t blas_handle();
+int *cuda_make_int_array(int *x, size_t n);
+void cuda_random(float *x_gpu, size_t n);
+float cuda_compare(float *x_gpu, float *x, size_t n, char *s);
+dim3 cuda_gridsize(size_t n);
+
+#ifdef CUDNN
+cudnnHandle_t cudnn_handle();
+#endif
+
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/darknet.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/darknet.h
new file mode 100644
index 00000000000..9327ea8ca06
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/darknet.h
@@ -0,0 +1,800 @@
+#ifndef DARKNET_API
+#define DARKNET_API
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+
+#define SECRET_NUM -1234
+extern int gpu_index;
+
+#ifdef GPU
+ #define BLOCK 512
+
+ #include "cuda_runtime.h"
+ #include "curand.h"
+ #include "cublas_v2.h"
+
+ #ifdef CUDNN
+ #include "cudnn.h"
+ #endif
+#endif
+
+#ifndef __cplusplus
+ #ifdef OPENCV
+ #include "opencv2/highgui/highgui_c.h"
+ #include "opencv2/imgproc/imgproc_c.h"
+ #include "opencv2/core/version.hpp"
+ #if CV_MAJOR_VERSION == 3
+ #include "opencv2/videoio/videoio_c.h"
+ #include "opencv2/imgcodecs/imgcodecs_c.h"
+ #endif
+ #endif
+#endif
+
+typedef struct{
+ int classes;
+ char **names;
+} metadata;
+
+metadata get_metadata(char *file);
+
+typedef struct{
+ int *leaf;
+ int n;
+ int *parent;
+ int *child;
+ int *group;
+ char **name;
+
+ int groups;
+ int *group_size;
+ int *group_offset;
+} tree;
+tree *read_tree(char *filename);
+
+typedef enum{
+ LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
+} ACTIVATION;
+
+typedef enum{
+ MULT, ADD, SUB, DIV
+} BINARY_ACTIVATION;
+
+typedef enum {
+ CONVOLUTIONAL,
+ DECONVOLUTIONAL,
+ CONNECTED,
+ MAXPOOL,
+ SOFTMAX,
+ DETECTION,
+ DROPOUT,
+ CROP,
+ ROUTE,
+ COST,
+ NORMALIZATION,
+ AVGPOOL,
+ LOCAL,
+ SHORTCUT,
+ ACTIVE,
+ RNN,
+ GRU,
+ LSTM,
+ CRNN,
+ BATCHNORM,
+ NETWORK,
+ XNOR,
+ REGION,
+ YOLO,
+ REORG,
+ UPSAMPLE,
+ LOGXENT,
+ L2NORM,
+ BLANK
+} LAYER_TYPE;
+
+typedef enum{
+ SSE, MASKED, L1, SEG, SMOOTH,WGAN
+} COST_TYPE;
+
+typedef struct{
+ int batch;
+ float learning_rate;
+ float momentum;
+ float decay;
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+ int t;
+} update_args;
+
+struct network;
+typedef struct network network;
+
+struct layer;
+typedef struct layer layer;
+
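+/* A single network layer. The struct is shared by every layer type, so most
+ * fields are only meaningful for some types. */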
+struct layer{
+ LAYER_TYPE type;
+ ACTIVATION activation;
+ COST_TYPE cost_type;
+ void (*forward) (struct layer, struct network);
+ void (*backward) (struct layer, struct network);
+ void (*update) (struct layer, update_args);
+ void (*forward_gpu) (struct layer, struct network);
+ void (*backward_gpu) (struct layer, struct network);
+ void (*update_gpu) (struct layer, update_args);
+ int batch_normalize;
+ int shortcut;
+ int batch;
+ int forced;
+ int flipped;
+ int inputs;
+ int outputs;
+ int nweights;
+ int nbiases;
+ int extra;
+ int truths;
+ int h,w,c;
+ int out_h, out_w, out_c;
+ int n;
+ int max_boxes;
+ int groups;
+ int size;
+ int side;
+ int stride;
+ int reverse;
+ int flatten;
+ int spatial;
+ int pad;
+ int sqrt;
+ int flip;
+ int index;
+ int binary;
+ int xnor;
+ int steps;
+ int hidden;
+ int truth;
+ float smooth;
+ float dot;
+ float angle;
+ float jitter;
+ float saturation;
+ float exposure;
+ float shift;
+ float ratio;
+ float learning_rate_scale;
+ float clip;
+ int softmax;
+ int classes;
+ int coords;
+ int background;
+ int rescore;
+ int objectness;
+ int joint;
+ int noadjust;
+ int reorg;
+ int log;
+ int tanh;
+ int *mask;
+ int total;
+
+ float alpha;
+ float beta;
+ float kappa;
+
+ float coord_scale;
+ float object_scale;
+ float noobject_scale;
+ float mask_scale;
+ float class_scale;
+ int bias_match;
+ int random;
+ float ignore_thresh;
+ float truth_thresh;
+ float thresh;
+ float focus;
+ int classfix;
+ int absolute;
+
+ int onlyforward;
+ int stopbackward;
+ int dontload;
+ int dontsave;
+ int dontloadscales;
+
+ float temperature;
+ float probability;
+ float scale;
+
+ char * cweights;
+ int * indexes;
+ int * input_layers;
+ int * input_sizes;
+ int * map;
+ float * rand;
+ float * cost;
+ float * state;
+ float * prev_state;
+ float * forgot_state;
+ float * forgot_delta;
+ float * state_delta;
+ float * combine_cpu;
+ float * combine_delta_cpu;
+
+ float * concat;
+ float * concat_delta;
+
+ float * binary_weights;
+
+ float * biases;
+ float * bias_updates;
+
+ float * scales;
+ float * scale_updates;
+
+ float * weights;
+ float * weight_updates;
+
+ float * delta;
+ float * output;
+ float * loss;
+ float * squared;
+ float * norms;
+
+ float * spatial_mean;
+ float * mean;
+ float * variance;
+
+ float * mean_delta;
+ float * variance_delta;
+
+ float * rolling_mean;
+ float * rolling_variance;
+
+ float * x;
+ float * x_norm;
+
+ float * m;
+ float * v;
+
+ float * bias_m;
+ float * bias_v;
+ float * scale_m;
+ float * scale_v;
+
+
+ float *z_cpu;
+ float *r_cpu;
+ float *h_cpu;
+ float * prev_state_cpu;
+
+ float *temp_cpu;
+ float *temp2_cpu;
+ float *temp3_cpu;
+
+ float *dh_cpu;
+ float *hh_cpu;
+ float *prev_cell_cpu;
+ float *cell_cpu;
+ float *f_cpu;
+ float *i_cpu;
+ float *g_cpu;
+ float *o_cpu;
+ float *c_cpu;
+ float *dc_cpu;
+
+ float * binary_input;
+
+ struct layer *input_layer;
+ struct layer *self_layer;
+ struct layer *output_layer;
+
+ struct layer *reset_layer;
+ struct layer *update_layer;
+ struct layer *state_layer;
+
+ struct layer *input_gate_layer;
+ struct layer *state_gate_layer;
+ struct layer *input_save_layer;
+ struct layer *state_save_layer;
+ struct layer *input_state_layer;
+ struct layer *state_state_layer;
+
+ struct layer *input_z_layer;
+ struct layer *state_z_layer;
+
+ struct layer *input_r_layer;
+ struct layer *state_r_layer;
+
+ struct layer *input_h_layer;
+ struct layer *state_h_layer;
+
+ struct layer *wz;
+ struct layer *uz;
+ struct layer *wr;
+ struct layer *ur;
+ struct layer *wh;
+ struct layer *uh;
+ struct layer *uo;
+ struct layer *wo;
+ struct layer *uf;
+ struct layer *wf;
+ struct layer *ui;
+ struct layer *wi;
+ struct layer *ug;
+ struct layer *wg;
+
+ tree *softmax_tree;
+
+ size_t workspace_size;
+
+#ifdef GPU
+ int *indexes_gpu;
+
+ float *z_gpu;
+ float *r_gpu;
+ float *h_gpu;
+
+ float *temp_gpu;
+ float *temp2_gpu;
+ float *temp3_gpu;
+
+ float *dh_gpu;
+ float *hh_gpu;
+ float *prev_cell_gpu;
+ float *cell_gpu;
+ float *f_gpu;
+ float *i_gpu;
+ float *g_gpu;
+ float *o_gpu;
+ float *c_gpu;
+ float *dc_gpu;
+
+ float *m_gpu;
+ float *v_gpu;
+ float *bias_m_gpu;
+ float *scale_m_gpu;
+ float *bias_v_gpu;
+ float *scale_v_gpu;
+
+ float * combine_gpu;
+ float * combine_delta_gpu;
+
+ float * prev_state_gpu;
+ float * forgot_state_gpu;
+ float * forgot_delta_gpu;
+ float * state_gpu;
+ float * state_delta_gpu;
+ float * gate_gpu;
+ float * gate_delta_gpu;
+ float * save_gpu;
+ float * save_delta_gpu;
+ float * concat_gpu;
+ float * concat_delta_gpu;
+
+ float * binary_input_gpu;
+ float * binary_weights_gpu;
+
+ float * mean_gpu;
+ float * variance_gpu;
+
+ float * rolling_mean_gpu;
+ float * rolling_variance_gpu;
+
+ float * variance_delta_gpu;
+ float * mean_delta_gpu;
+
+ float * x_gpu;
+ float * x_norm_gpu;
+ float * weights_gpu;
+ float * weight_updates_gpu;
+ float * weight_change_gpu;
+
+ float * biases_gpu;
+ float * bias_updates_gpu;
+ float * bias_change_gpu;
+
+ float * scales_gpu;
+ float * scale_updates_gpu;
+ float * scale_change_gpu;
+
+ float * output_gpu;
+ float * loss_gpu;
+ float * delta_gpu;
+ float * rand_gpu;
+ float * squared_gpu;
+ float * norms_gpu;
+#ifdef CUDNN
+ cudnnTensorDescriptor_t srcTensorDesc, dstTensorDesc;
+ cudnnTensorDescriptor_t dsrcTensorDesc, ddstTensorDesc;
+ cudnnTensorDescriptor_t normTensorDesc;
+ cudnnFilterDescriptor_t weightDesc;
+ cudnnFilterDescriptor_t dweightDesc;
+ cudnnConvolutionDescriptor_t convDesc;
+ cudnnConvolutionFwdAlgo_t fw_algo;
+ cudnnConvolutionBwdDataAlgo_t bd_algo;
+ cudnnConvolutionBwdFilterAlgo_t bf_algo;
+#endif
+#endif
+};
+
+void free_layer(layer);
+
+typedef enum {
+ CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
+} learning_rate_policy;
+
+typedef struct network{
+ int n;
+ int batch;
+ size_t *seen;
+ int *t;
+ float epoch;
+ int subdivisions;
+ layer *layers;
+ float *output;
+ learning_rate_policy policy;
+
+ float learning_rate;
+ float momentum;
+ float decay;
+ float gamma;
+ float scale;
+ float power;
+ int time_steps;
+ int step;
+ int max_batches;
+ float *scales;
+ int *steps;
+ int num_steps;
+ int burn_in;
+
+ int adam;
+ float B1;
+ float B2;
+ float eps;
+
+ int inputs;
+ int outputs;
+ int truths;
+ int notruth;
+ int h, w, c;
+ int max_crop;
+ int min_crop;
+ float max_ratio;
+ float min_ratio;
+ int center;
+ float angle;
+ float aspect;
+ float exposure;
+ float saturation;
+ float hue;
+ int random;
+
+ int gpu_index;
+ tree *hierarchy;
+
+ float *input;
+ float *truth;
+ float *delta;
+ float *workspace;
+ int train;
+ int index;
+ float *cost;
+ float clip;
+
+#ifdef GPU
+ float *input_gpu;
+ float *truth_gpu;
+ float *delta_gpu;
+ float *output_gpu;
+#endif
+
+} network;
+
+typedef struct {
+ int w;
+ int h;
+ float scale;
+ float rad;
+ float dx;
+ float dy;
+ float aspect;
+} augment_args;
+
+typedef struct {
+ int w;
+ int h;
+ int c;
+ float *data;
+} image;
+
+typedef struct{
+ float x, y, w, h;
+} box;
+
+typedef struct detection{
+ box bbox;
+ int classes;
+ float *prob;
+ float *mask;
+ float objectness;
+ int sort_class;
+} detection;
+
+typedef struct matrix{
+ int rows, cols;
+ float **vals;
+} matrix;
+
+
+typedef struct{
+ int w, h;
+ matrix X;
+ matrix y;
+ int shallow;
+ int *num_boxes;
+ box **boxes;
+} data;
+
+typedef enum {
+ CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
+} data_type;
+
+typedef struct load_args{
+ int threads;
+ char **paths;
+ char *path;
+ int n;
+ int m;
+ char **labels;
+ int h;
+ int w;
+ int out_w;
+ int out_h;
+ int nh;
+ int nw;
+ int num_boxes;
+ int min, max, size;
+ int classes;
+ int background;
+ int scale;
+ int center;
+ int coords;
+ float jitter;
+ float angle;
+ float aspect;
+ float saturation;
+ float exposure;
+ float hue;
+ data *d;
+ image *im;
+ image *resized;
+ data_type type;
+ tree *hierarchy;
+} load_args;
+
+typedef struct{
+ int id;
+ float x,y,w,h;
+ float left, right, top, bottom;
+} box_label;
+
+
+network *load_network(char *cfg, char *weights, int clear);
+load_args get_base_args(network *net);
+
+void free_data(data d);
+
+typedef struct node{
+ void *val;
+ struct node *next;
+ struct node *prev;
+} node;
+
+typedef struct list{
+ int size;
+ node *front;
+ node *back;
+} list;
+
+pthread_t load_data(load_args args);
+list *read_data_cfg(char *filename);
+list *read_cfg(char *filename);
+unsigned char *read_file(char *filename);
+data resize_data(data orig, int w, int h);
+data *tile_data(data orig, int divs, int size);
+data select_data(data *orig, int *inds);
+
+void forward_network(network *net);
+void backward_network(network *net);
+void update_network(network *net);
+
+
+float dot_cpu(int N, float *X, int INCX, float *Y, int INCY);
+void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY);
+void copy_cpu(int N, float *X, int INCX, float *Y, int INCY);
+void scal_cpu(int N, float ALPHA, float *X, int INCX);
+void fill_cpu(int N, float ALPHA, float * X, int INCX);
+void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial);
+void softmax(float *input, int n, float temp, int stride, float *output);
+
+int best_3d_shift_r(image a, image b, int min, int max);
+#ifdef GPU
+void axpy_gpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY);
+void fill_gpu(int N, float ALPHA, float * X, int INCX);
+void scal_gpu(int N, float ALPHA, float * X, int INCX);
+void copy_gpu(int N, float * X, int INCX, float * Y, int INCY);
+
+void cuda_set_device(int n);
+void cuda_free(float *x_gpu);
+float *cuda_make_array(float *x, size_t n);
+void cuda_pull_array(float *x_gpu, float *x, size_t n);
+float cuda_mag_array(float *x_gpu, size_t n);
+void cuda_push_array(float *x_gpu, float *x, size_t n);
+
+void forward_network_gpu(network *net);
+void backward_network_gpu(network *net);
+void update_network_gpu(network *net);
+
+float train_networks(network **nets, int n, data d, int interval);
+void sync_nets(network **nets, int n, int interval);
+void harmless_update_network_gpu(network *net);
+#endif
+image get_label(image **characters, char *string, int size);
+void draw_label(image a, int r, int c, image label, const float *rgb);
+void save_image_png(image im, const char *name);
+void get_next_batch(data d, int n, int offset, float *X, float *y);
+void grayscale_image_3c(image im);
+void normalize_image(image p);
+void matrix_to_csv(matrix m);
+float train_network_sgd(network *net, data d, int n);
+void rgbgr_image(image im);
+data copy_data(data d);
+data concat_data(data d1, data d2);
+data load_cifar10_data(char *filename);
+float matrix_topk_accuracy(matrix truth, matrix guess, int k);
+void matrix_add_matrix(matrix from, matrix to);
+void scale_matrix(matrix m, float scale);
+matrix csv_to_matrix(char *filename);
+float *network_accuracies(network *net, data d, int n);
+float train_network_datum(network *net);
+image make_random_image(int w, int h, int c);
+
+void denormalize_connected_layer(layer l);
+void denormalize_convolutional_layer(layer l);
+void statistics_connected_layer(layer l);
+void rescale_weights(layer l, float scale, float trans);
+void rgbgr_weights(layer l);
+image *get_weights(layer l);
+
+void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int frame_skip, char *prefix, int avg, float hier_thresh, int w, int h, int fps, int fullscreen);
+void get_detection_detections(layer l, int w, int h, float thresh, detection *dets);
+
+char *option_find_str(list *l, char *key, char *def);
+int option_find_int(list *l, char *key, int def);
+int option_find_int_quiet(list *l, char *key, int def);
+
+network *parse_network_cfg(char *filename);
+void save_weights(network *net, char *filename);
+void load_weights(network *net, char *filename);
+void save_weights_upto(network *net, char *filename, int cutoff);
+void load_weights_upto(network *net, char *filename, int start, int cutoff);
+
+void zero_objectness(layer l);
+void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets);
+int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets);
+void free_network(network *net);
+void set_batch_network(network *net, int b);
+void set_temp_network(network *net, float t);
+image load_image(char *filename, int w, int h, int c);
+image load_image_color(char *filename, int w, int h);
+image make_image(int w, int h, int c);
+image resize_image(image im, int w, int h);
+void censor_image(image im, int dx, int dy, int w, int h);
+image letterbox_image(image im, int w, int h);
+image crop_image(image im, int dx, int dy, int w, int h);
+image center_crop_image(image im, int w, int h);
+image resize_min(image im, int min);
+image resize_max(image im, int max);
+image threshold_image(image im, float thresh);
+image mask_to_rgb(image mask);
+int resize_network(network *net, int w, int h);
+void free_matrix(matrix m);
+void test_resize(char *filename);
+void save_image(image p, const char *name);
+void show_image(image p, const char *name);
+image copy_image(image p);
+void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b);
+float get_current_rate(network *net);
+void composite_3d(char *f1, char *f2, char *out, int delta);
+data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h);
+size_t get_current_batch(network *net);
+void constrain_image(image im);
+image get_network_image_layer(network *net, int i);
+layer get_network_output_layer(network *net);
+void top_predictions(network *net, int n, int *index);
+void flip_image(image a);
+image float_to_image(int w, int h, int c, float *data);
+void ghost_image(image source, image dest, int dx, int dy);
+float network_accuracy(network *net, data d);
+void random_distort_image(image im, float hue, float saturation, float exposure);
+void fill_image(image m, float s);
+image grayscale_image(image im);
+void rotate_image_cw(image im, int times);
+double what_time_is_it_now();
+image rotate_image(image m, float rad);
+void visualize_network(network *net);
+float box_iou(box a, box b);
+data load_all_cifar10();
+box_label *read_boxes(char *filename, int *n);
+box float_to_box(float *f, int stride);
+void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes);
+
+matrix network_predict_data(network *net, data test);
+image **load_alphabet();
+image get_network_image(network *net);
+float *network_predict(network *net, float *input);
+
+int network_width(network *net);
+int network_height(network *net);
+float *network_predict_image(network *net, image im);
+void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets);
+detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
+void free_detections(detection *dets, int n);
+
+void reset_network_state(network *net, int b);
+
+char **get_labels(char *filename);
+void do_nms_obj(detection *dets, int total, int classes, float thresh);
+void do_nms_sort(detection *dets, int total, int classes, float thresh);
+
+matrix make_matrix(int rows, int cols);
+
+#ifndef __cplusplus
+#ifdef OPENCV
+image get_image_from_stream(CvCapture *cap);
+#endif
+#endif
+void free_image(image m);
+float train_network(network *net, data d);
+pthread_t load_data_in_thread(load_args args);
+void load_data_blocking(load_args args);
+list *get_paths(char *filename);
+void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride);
+void change_leaves(tree *t, char *leaf_list);
+
+int find_int_arg(int argc, char **argv, char *arg, int def);
+float find_float_arg(int argc, char **argv, char *arg, float def);
+int find_arg(int argc, char* argv[], char *arg);
+char *find_char_arg(int argc, char **argv, char *arg, char *def);
+char *basecfg(char *cfgfile);
+void find_replace(char *str, char *orig, char *rep, char *output);
+void free_ptrs(void **ptrs, int n);
+char *fgetl(FILE *fp);
+void strip(char *s);
+float sec(clock_t clocks);
+void **list_to_array(list *l);
+void top_k(float *a, int n, int k, int *index);
+int *read_map(char *filename);
+void error(const char *s);
+int max_index(float *a, int n);
+int max_int_index(int *a, int n);
+int sample_array(float *a, int n);
+int *random_index_order(int min, int max);
+void free_list(list *l);
+float mse_array(float *a, int n);
+float variance_array(float *a, int n);
+float mag_array(float *a, int n);
+void scale_array(float *a, int n, float s);
+float mean_array(float *a, int n);
+float sum_array(float *a, int n);
+void normalize_array(float *a, int n);
+int *read_intlist(char *s, int *n, int d);
+size_t rand_size_t();
+float rand_normal();
+float rand_uniform(float min, float max);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.c
new file mode 100644
index 00000000000..51900f2600f
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.c
@@ -0,0 +1,1603 @@
+#include "data.h"
+#include "utils.h"
+#include "image.h"
+#include "cuda.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+
+list *get_paths(char *filename)
+{
+ char *path;
+ FILE *file = fopen(filename, "r");
+ if(!file) file_error(filename);
+ list *lines = make_list();
+ while((path=fgetl(file))){
+ list_insert(lines, path);
+ }
+ fclose(file);
+ return lines;
+}
+
+/*
+char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
+{
+ char **random_paths = calloc(n, sizeof(char*));
+ int i;
+ pthread_mutex_lock(&mutex);
+ for(i = 0; i < n; ++i){
+ int index = rand()%m;
+ indexes[i] = index;
+ random_paths[i] = paths[index];
+ if(i == 0) printf("%s\n", paths[index]);
+ }
+ pthread_mutex_unlock(&mutex);
+ return random_paths;
+}
+*/
+
+char **get_random_paths(char **paths, int n, int m)
+{
+ char **random_paths = calloc(n, sizeof(char*));
+ int i;
+ pthread_mutex_lock(&mutex);
+ for(i = 0; i < n; ++i){
+ int index = rand()%m;
+ random_paths[i] = paths[index];
+ //if(i == 0) printf("%s\n", paths[index]);
+ }
+ pthread_mutex_unlock(&mutex);
+ return random_paths;
+}
+
+char **find_replace_paths(char **paths, int n, char *find, char *replace)
+{
+ char **replace_paths = calloc(n, sizeof(char*));
+ int i;
+ for(i = 0; i < n; ++i){
+ char replaced[4096];
+ find_replace(paths[i], find, replace, replaced);
+ replace_paths[i] = copy_string(replaced);
+ }
+ return replace_paths;
+}
+
+matrix load_image_paths_gray(char **paths, int n, int w, int h)
+{
+ int i;
+ matrix X;
+ X.rows = n;
+ X.vals = calloc(X.rows, sizeof(float*));
+ X.cols = 0;
+
+ for(i = 0; i < n; ++i){
+ image im = load_image(paths[i], w, h, 3);
+
+ image gray = grayscale_image(im);
+ free_image(im);
+ im = gray;
+
+ X.vals[i] = im.data;
+ X.cols = im.h*im.w*im.c;
+ }
+ return X;
+}
+
+matrix load_image_paths(char **paths, int n, int w, int h)
+{
+ int i;
+ matrix X;
+ X.rows = n;
+ X.vals = calloc(X.rows, sizeof(float*));
+ X.cols = 0;
+
+ for(i = 0; i < n; ++i){
+ image im = load_image_color(paths[i], w, h);
+ X.vals[i] = im.data;
+ X.cols = im.h*im.w*im.c;
+ }
+ return X;
+}
+
+matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
+{
+ int i;
+ matrix X;
+ X.rows = n;
+ X.vals = calloc(X.rows, sizeof(float*));
+ X.cols = 0;
+
+ for(i = 0; i < n; ++i){
+ image im = load_image_color(paths[i], 0, 0);
+ image crop;
+ if(center){
+ crop = center_crop_image(im, size, size);
+ } else {
+ crop = random_augment_image(im, angle, aspect, min, max, size, size);
+ }
+ int flip = rand()%2;
+ if (flip) flip_image(crop);
+ random_distort_image(crop, hue, saturation, exposure);
+
+ /*
+ show_image(im, "orig");
+ show_image(crop, "crop");
+ cvWaitKey(0);
+ */
+ free_image(im);
+ X.vals[i] = crop.data;
+ X.cols = crop.h*crop.w*crop.c;
+ }
+ return X;
+}
+
+
+box_label *read_boxes(char *filename, int *n)
+{
+ FILE *file = fopen(filename, "r");
+ if(!file) file_error(filename);
+ float x, y, h, w;
+ int id;
+ int count = 0;
+ int size = 64;
+ box_label *boxes = calloc(size, sizeof(box_label));
+ while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
+ if(count == size) {
+ size = size * 2;
+ boxes = realloc(boxes, size*sizeof(box_label));
+ }
+ boxes[count].id = id;
+ boxes[count].x = x;
+ boxes[count].y = y;
+ boxes[count].h = h;
+ boxes[count].w = w;
+ boxes[count].left = x - w/2;
+ boxes[count].right = x + w/2;
+ boxes[count].top = y - h/2;
+ boxes[count].bottom = y + h/2;
+ ++count;
+ }
+ fclose(file);
+ *n = count;
+ return boxes;
+}
+
+void randomize_boxes(box_label *b, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ box_label swap = b[i];
+ int index = rand()%n;
+ b[i] = b[index];
+ b[index] = swap;
+ }
+}
+
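+/* Remap ground-truth boxes into the coordinate frame of the cropped, scaled
+ * and possibly flipped training image, clamping the results to [0,1]. */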
+void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ if(boxes[i].x == 0 && boxes[i].y == 0) {
+ boxes[i].x = 999999;
+ boxes[i].y = 999999;
+ boxes[i].w = 999999;
+ boxes[i].h = 999999;
+ continue;
+ }
+ boxes[i].left = boxes[i].left * sx - dx;
+ boxes[i].right = boxes[i].right * sx - dx;
+ boxes[i].top = boxes[i].top * sy - dy;
+ boxes[i].bottom = boxes[i].bottom* sy - dy;
+
+ if(flip){
+ float swap = boxes[i].left;
+ boxes[i].left = 1. - boxes[i].right;
+ boxes[i].right = 1. - swap;
+ }
+
+ boxes[i].left = constrain(0, 1, boxes[i].left);
+ boxes[i].right = constrain(0, 1, boxes[i].right);
+ boxes[i].top = constrain(0, 1, boxes[i].top);
+ boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
+
+ boxes[i].x = (boxes[i].left+boxes[i].right)/2;
+ boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
+ boxes[i].w = (boxes[i].right - boxes[i].left);
+ boxes[i].h = (boxes[i].bottom - boxes[i].top);
+
+ boxes[i].w = constrain(0, 1, boxes[i].w);
+ boxes[i].h = constrain(0, 1, boxes[i].h);
+ }
+}
+
+void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "labels", labelpath);
+ find_replace(labelpath, "JPEGImages", "labels", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+
+ int count = 0;
+ box_label *boxes = read_boxes(labelpath, &count);
+ randomize_boxes(boxes, count);
+ correct_boxes(boxes, count, dx, dy, sx, sy, flip);
+ float x,y,w,h;
+ int id;
+ int i;
+
+ for (i = 0; i < count && i < 90; ++i) {
+ x = boxes[i].x;
+ y = boxes[i].y;
+ w = boxes[i].w;
+ h = boxes[i].h;
+ id = boxes[i].id;
+
+ if (w < .0 || h < .0) continue;
+
+ int index = (4+classes) * i;
+
+ truth[index++] = x;
+ truth[index++] = y;
+ truth[index++] = w;
+ truth[index++] = h;
+
+ if (id < classes) truth[index+id] = 1;
+ }
+ free(boxes);
+}
+
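+/* Region-style truth encoding: each box is assigned to the grid cell that
+ * contains its center; the cell stores objectness, a one-hot class vector and
+ * the cell-relative x,y plus normalized w,h. */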
+void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "labels", labelpath);
+ find_replace(labelpath, "JPEGImages", "labels", labelpath);
+
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".png", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ int count = 0;
+ box_label *boxes = read_boxes(labelpath, &count);
+ randomize_boxes(boxes, count);
+ correct_boxes(boxes, count, dx, dy, sx, sy, flip);
+ float x,y,w,h;
+ int id;
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ x = boxes[i].x;
+ y = boxes[i].y;
+ w = boxes[i].w;
+ h = boxes[i].h;
+ id = boxes[i].id;
+
+ if (w < .005 || h < .005) continue;
+
+ int col = (int)(x*num_boxes);
+ int row = (int)(y*num_boxes);
+
+ x = x*num_boxes - col;
+ y = y*num_boxes - row;
+
+ int index = (col+row*num_boxes)*(5+classes);
+ if (truth[index]) continue;
+ truth[index++] = 1;
+
+ if (id < classes) truth[index+id] = 1;
+ index += classes;
+
+ truth[index++] = x;
+ truth[index++] = y;
+ truth[index++] = w;
+ truth[index++] = h;
+ }
+ free(boxes);
+}
+
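+/* Decode a run-length-encoded binary mask into the image buffer; runs
+ * alternate between 0 and 1, starting with 0. */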
+void load_rle(image im, int *rle, int n)
+{
+ int count = 0;
+ int curr = 0;
+ int i,j;
+ for(i = 0; i < n; ++i){
+ for(j = 0; j < rle[i]; ++j){
+ im.data[count++] = curr;
+ }
+ curr = 1 - curr;
+ }
+ for(; count < im.h*im.w*im.c; ++count){
+ im.data[count] = curr;
+ }
+}
+
+void or_image(image src, image dest, int c)
+{
+ int i;
+ for(i = 0; i < src.w*src.h; ++i){
+ if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
+ }
+}
+
+void exclusive_image(image src)
+{
+ int k, j, i;
+ int s = src.w*src.h;
+ for(k = 0; k < src.c-1; ++k){
+ for(i = 0; i < s; ++i){
+ if (src.data[k*s + i]){
+ for(j = k+1; j < src.c; ++j){
+ src.data[j*s + i] = 0;
+ }
+ }
+ }
+ }
+}
+
+box bound_image(image im)
+{
+ int x,y;
+ int minx = im.w;
+ int miny = im.h;
+ int maxx = 0;
+ int maxy = 0;
+ for(y = 0; y < im.h; ++y){
+ for(x = 0; x < im.w; ++x){
+ if(im.data[y*im.w + x]){
+ minx = (x < minx) ? x : minx;
+ miny = (y < miny) ? y : miny;
+ maxx = (x > maxx) ? x : maxx;
+ maxy = (y > maxy) ? y : maxy;
+ }
+ }
+ }
+ box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
+ //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
+ return b;
+}
+
+void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "mask", labelpath);
+ find_replace(labelpath, "JPEGImages", "mask", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ FILE *file = fopen(labelpath, "r");
+ if(!file) file_error(labelpath);
+ char buff[32788];
+ int id;
+ int i = 0;
+ image part = make_image(w, h, 1);
+ while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
+ int n = 0;
+ int *rle = read_intlist(buff, &n, 0);
+ load_rle(part, rle, n);
+ image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
+ if(flip) flip_image(sized);
+ box b = bound_image(sized);
+ if(b.w > 0){
+ image crop = crop_image(sized, b.x, b.y, b.w, b.h);
+ image mask = resize_image(crop, mw, mh);
+ truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
+ truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
+ truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
+ truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
+ int j;
+ for(j = 0; j < mw*mh; ++j){
+ truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
+ }
+ truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
+ free_image(crop);
+ free_image(mask);
+ ++i;
+ }
+ free_image(sized);
+ free(rle);
+ }
+ fclose(file);
+ free_image(part);
+}
+
+
+void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "labels", labelpath);
+ find_replace(labelpath, "JPEGImages", "labels", labelpath);
+
+ find_replace(labelpath, "raw", "labels", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".png", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ int count = 0;
+ box_label *boxes = read_boxes(labelpath, &count);
+ randomize_boxes(boxes, count);
+ correct_boxes(boxes, count, dx, dy, sx, sy, flip);
+ if(count > num_boxes) count = num_boxes;
+ float x,y,w,h;
+ int id;
+ int i;
+ int sub = 0;
+
+ for (i = 0; i < count; ++i) {
+ x = boxes[i].x;
+ y = boxes[i].y;
+ w = boxes[i].w;
+ h = boxes[i].h;
+ id = boxes[i].id;
+
+ if ((w < .001 || h < .001)) {
+ ++sub;
+ continue;
+ }
+
+ truth[(i-sub)*5+0] = x;
+ truth[(i-sub)*5+1] = y;
+ truth[(i-sub)*5+2] = w;
+ truth[(i-sub)*5+3] = h;
+ truth[(i-sub)*5+4] = id;
+ }
+ free(boxes);
+}
+
+#define NUMCHARS 37
+
+void print_letters(float *pred, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ int index = max_index(pred+i*NUMCHARS, NUMCHARS);
+ printf("%c", int_to_alphanum(index));
+ }
+ printf("\n");
+}
+
+void fill_truth_captcha(char *path, int n, float *truth)
+{
+ char *begin = strrchr(path, '/');
+ ++begin;
+ int i;
+ for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
+ int index = alphanum_to_int(begin[i]);
+ if(index > 35) printf("Bad %c\n", begin[i]);
+ truth[i*NUMCHARS+index] = 1;
+ }
+ for(;i < n; ++i){
+ truth[i*NUMCHARS + NUMCHARS-1] = 1;
+ }
+}
+
+data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+ d.X = load_image_paths(paths, n, w, h);
+ d.y = make_matrix(n, k*NUMCHARS);
+ int i;
+ for(i = 0; i < n; ++i){
+ fill_truth_captcha(paths[i], k, d.y.vals[i]);
+ }
+ if(m) free(paths);
+ return d;
+}
+
+data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+ d.X = load_image_paths(paths, n, w, h);
+ d.X.cols = 17100;
+ d.y = d.X;
+ if(m) free(paths);
+ return d;
+}
+
+void fill_truth(char *path, char **labels, int k, float *truth)
+{
+ int i;
+ memset(truth, 0, k*sizeof(float));
+ int count = 0;
+ for(i = 0; i < k; ++i){
+ if(strstr(path, labels[i])){
+ truth[i] = 1;
+ ++count;
+ //printf("%s %s %d\n", path, labels[i], i);
+ }
+ }
+ if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
+}
+
+void fill_hierarchy(float *truth, int k, tree *hierarchy)
+{
+ int j;
+ for(j = 0; j < k; ++j){
+ if(truth[j]){
+ int parent = hierarchy->parent[j];
+ while(parent >= 0){
+ truth[parent] = 1;
+ parent = hierarchy->parent[parent];
+ }
+ }
+ }
+ int i;
+ int count = 0;
+ for(j = 0; j < hierarchy->groups; ++j){
+ //printf("%d\n", count);
+ int mask = 1;
+ for(i = 0; i < hierarchy->group_size[j]; ++i){
+ if(truth[count + i]){
+ mask = 0;
+ break;
+ }
+ }
+ if (mask) {
+ for(i = 0; i < hierarchy->group_size[j]; ++i){
+ truth[count + i] = SECRET_NUM;
+ }
+ }
+ count += hierarchy->group_size[j];
+ }
+}
+
+matrix load_regression_labels_paths(char **paths, int n, int k)
+{
+ matrix y = make_matrix(n, k);
+ int i,j;
+ for(i = 0; i < n; ++i){
+ char labelpath[4096];
+ find_replace(paths[i], "images", "labels", labelpath);
+ find_replace(labelpath, "JPEGImages", "labels", labelpath);
+ find_replace(labelpath, ".BMP", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPeG", ".txt", labelpath);
+ find_replace(labelpath, ".Jpeg", ".txt", labelpath);
+ find_replace(labelpath, ".PNG", ".txt", labelpath);
+ find_replace(labelpath, ".TIF", ".txt", labelpath);
+ find_replace(labelpath, ".bmp", ".txt", labelpath);
+ find_replace(labelpath, ".jpeg", ".txt", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".png", ".txt", labelpath);
+ find_replace(labelpath, ".tif", ".txt", labelpath);
+
+ FILE *file = fopen(labelpath, "r");
+ for(j = 0; j < k; ++j){
+ fscanf(file, "%f", &(y.vals[i][j]));
+ }
+ fclose(file);
+ }
+ return y;
+}
+
+matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
+{
+ matrix y = make_matrix(n, k);
+ int i;
+ for(i = 0; i < n && labels; ++i){
+ fill_truth(paths[i], labels, k, y.vals[i]);
+ if(hierarchy){
+ fill_hierarchy(y.vals[i], k, hierarchy);
+ }
+ }
+ return y;
+}
+
+matrix load_tags_paths(char **paths, int n, int k)
+{
+ matrix y = make_matrix(n, k);
+ int i;
+ //int count = 0;
+ for(i = 0; i < n; ++i){
+ char label[4096];
+ find_replace(paths[i], "images", "labels", label);
+ find_replace(label, ".jpg", ".txt", label);
+ FILE *file = fopen(label, "r");
+ if (!file) continue;
+ //++count;
+ int tag;
+ while(fscanf(file, "%d", &tag) == 1){
+ if(tag < k){
+ y.vals[i][tag] = 1;
+ }
+ }
+ fclose(file);
+ }
+ //printf("%d/%d\n", count, n);
+ return y;
+}
+
+char **get_labels(char *filename)
+{
+ list *plist = get_paths(filename);
+ char **labels = (char **)list_to_array(plist);
+ free_list(plist);
+ return labels;
+}
+
+void free_data(data d)
+{
+ if(!d.shallow){
+ free_matrix(d.X);
+ free_matrix(d.y);
+ }else{
+ free(d.X.vals);
+ free(d.y.vals);
+ }
+}
+
+image get_segmentation_image(char *path, int w, int h, int classes)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "mask", labelpath);
+ find_replace(labelpath, "JPEGImages", "mask", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ image mask = make_image(w, h, classes);
+ FILE *file = fopen(labelpath, "r");
+ if(!file) file_error(labelpath);
+ char buff[32788];
+ int id;
+ image part = make_image(w, h, 1);
+ while(fscanf(file, "%d %s", &id, buff) == 2){
+ int n = 0;
+ int *rle = read_intlist(buff, &n, 0);
+ load_rle(part, rle, n);
+ or_image(part, mask, id);
+ free(rle);
+ }
+ //exclusive_image(mask);
+ fclose(file);
+ free_image(part);
+ return mask;
+}
+
+image get_segmentation_image2(char *path, int w, int h, int classes)
+{
+ char labelpath[4096];
+ find_replace(path, "images", "mask", labelpath);
+ find_replace(labelpath, "JPEGImages", "mask", labelpath);
+ find_replace(labelpath, ".jpg", ".txt", labelpath);
+ find_replace(labelpath, ".JPG", ".txt", labelpath);
+ find_replace(labelpath, ".JPEG", ".txt", labelpath);
+ image mask = make_image(w, h, classes+1);
+ int i;
+ for(i = 0; i < w*h; ++i){
+ mask.data[w*h*classes + i] = 1;
+ }
+ FILE *file = fopen(labelpath, "r");
+ if(!file) file_error(labelpath);
+ char buff[32788];
+ int id;
+ image part = make_image(w, h, 1);
+ while(fscanf(file, "%d %s", &id, buff) == 2){
+ int n = 0;
+ int *rle = read_intlist(buff, &n, 0);
+ load_rle(part, rle, n);
+ or_image(part, mask, id);
+ for(i = 0; i < w*h; ++i){
+ if(part.data[i]) mask.data[w*h*classes + i] = 0;
+ }
+ free(rle);
+ }
+ //exclusive_image(mask);
+ fclose(file);
+ free_image(part);
+ return mask;
+}
+
+data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
+{
+ char **random_paths = get_random_paths(paths, n, m);
+ int i;
+ data d = {0};
+ d.shallow = 0;
+
+ d.X.rows = n;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*3;
+
+
+ d.y.rows = n;
+ d.y.cols = h*w*classes/div/div;
+ d.y.vals = calloc(d.X.rows, sizeof(float*));
+
+ for(i = 0; i < n; ++i){
+ image orig = load_image_color(random_paths[i], 0, 0);
+ augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
+ image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+
+ int flip = rand()%2;
+ if(flip) flip_image(sized);
+ random_distort_image(sized, hue, saturation, exposure);
+ d.X.vals[i] = sized.data;
+
+ image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
+ //image mask = make_image(orig.w, orig.h, classes+1);
+ image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
+
+ if(flip) flip_image(sized_m);
+ d.y.vals[i] = sized_m.data;
+
+ free_image(orig);
+ free_image(mask);
+
+ /*
+ image rgb = mask_to_rgb(sized_m, classes);
+ show_image(rgb, "part");
+ show_image(sized, "orig");
+ cvWaitKey(0);
+ free_image(rgb);
+ */
+ }
+ free(random_paths);
+ return d;
+}
+
+data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
+{
+ char **random_paths = get_random_paths(paths, n, m);
+ int i;
+ data d = {0};
+ d.shallow = 0;
+
+ d.X.rows = n;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*3;
+
+ d.y = make_matrix(n, (coords+1)*boxes);
+
+ for(i = 0; i < n; ++i){
+ image orig = load_image_color(random_paths[i], 0, 0);
+ augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
+ image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+
+ int flip = rand()%2;
+ if(flip) flip_image(sized);
+ random_distort_image(sized, hue, saturation, exposure);
+ d.X.vals[i] = sized.data;
+ //show_image(sized, "image");
+
+ fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
+
+ free_image(orig);
+
+ /*
+ image rgb = mask_to_rgb(sized_m, classes);
+ show_image(rgb, "part");
+ show_image(sized, "orig");
+ cvWaitKey(0);
+ free_image(rgb);
+ */
+ }
+ free(random_paths);
+ return d;
+}
+
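+// Region loader: jittered crop + resize, with truth laid out on a size x size grid
+// holding (5 + classes) values per cell.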
+data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
+{
+ char **random_paths = get_random_paths(paths, n, m);
+ int i;
+ data d = {0};
+ d.shallow = 0;
+
+ d.X.rows = n;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*3;
+
+
+ int k = size*size*(5+classes);
+ d.y = make_matrix(n, k);
+ for(i = 0; i < n; ++i){
+ image orig = load_image_color(random_paths[i], 0, 0);
+
+ int oh = orig.h;
+ int ow = orig.w;
+
+ int dw = (ow*jitter);
+ int dh = (oh*jitter);
+
+ int pleft = rand_uniform(-dw, dw);
+ int pright = rand_uniform(-dw, dw);
+ int ptop = rand_uniform(-dh, dh);
+ int pbot = rand_uniform(-dh, dh);
+
+ int swidth = ow - pleft - pright;
+ int sheight = oh - ptop - pbot;
+
+ float sx = (float)swidth / ow;
+ float sy = (float)sheight / oh;
+
+ int flip = rand()%2;
+ image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
+
+ float dx = ((float)pleft/ow)/sx;
+ float dy = ((float)ptop /oh)/sy;
+
+ image sized = resize_image(cropped, w, h);
+ if(flip) flip_image(sized);
+ random_distort_image(sized, hue, saturation, exposure);
+ d.X.vals[i] = sized.data;
+
+ fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
+
+ free_image(orig);
+ free_image(cropped);
+ }
+ free(random_paths);
+ return d;
+}
+
+data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
+{
+ if(m) paths = get_random_paths(paths, 2*n, m);
+ int i,j;
+ data d = {0};
+ d.shallow = 0;
+
+ d.X.rows = n;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*6;
+
+ int k = 2*(classes);
+ d.y = make_matrix(n, k);
+ for(i = 0; i < n; ++i){
+ image im1 = load_image_color(paths[i*2], w, h);
+ image im2 = load_image_color(paths[i*2+1], w, h);
+
+ d.X.vals[i] = calloc(d.X.cols, sizeof(float));
+ memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float));
+ memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
+
+ int id;
+ float iou;
+
+ char imlabel1[4096];
+ char imlabel2[4096];
+ find_replace(paths[i*2], "imgs", "labels", imlabel1);
+ find_replace(imlabel1, "jpg", "txt", imlabel1);
+ FILE *fp1 = fopen(imlabel1, "r");
+
+ while(fscanf(fp1, "%d %f", &id, &iou) == 2){
+ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
+ }
+
+ find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
+ find_replace(imlabel2, "jpg", "txt", imlabel2);
+ FILE *fp2 = fopen(imlabel2, "r");
+
+ while(fscanf(fp2, "%d %f", &id, &iou) == 2){
+ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
+ }
+
+ for (j = 0; j < classes; ++j){
+ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){
+ d.y.vals[i][2*j] = 1;
+ d.y.vals[i][2*j+1] = 0;
+ } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
+ d.y.vals[i][2*j] = 0;
+ d.y.vals[i][2*j+1] = 1;
+ } else {
+ d.y.vals[i][2*j] = SECRET_NUM;
+ d.y.vals[i][2*j+1] = SECRET_NUM;
+ }
+ }
+ fclose(fp1);
+ fclose(fp2);
+
+ free_image(im1);
+ free_image(im2);
+ }
+ if(m) free(paths);
+ return d;
+}
+
+data load_data_swag(char **paths, int n, int classes, float jitter)
+{
+ int index = rand()%n;
+ char *random_path = paths[index];
+
+ image orig = load_image_color(random_path, 0, 0);
+ int h = orig.h;
+ int w = orig.w;
+
+ data d = {0};
+ d.shallow = 0;
+ d.w = w;
+ d.h = h;
+
+ d.X.rows = 1;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*3;
+
+ int k = (4+classes)*90;
+ d.y = make_matrix(1, k);
+
+ int dw = w*jitter;
+ int dh = h*jitter;
+
+ int pleft = rand_uniform(-dw, dw);
+ int pright = rand_uniform(-dw, dw);
+ int ptop = rand_uniform(-dh, dh);
+ int pbot = rand_uniform(-dh, dh);
+
+ int swidth = w - pleft - pright;
+ int sheight = h - ptop - pbot;
+
+ float sx = (float)swidth / w;
+ float sy = (float)sheight / h;
+
+ int flip = rand()%2;
+ image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
+
+ float dx = ((float)pleft/w)/sx;
+ float dy = ((float)ptop /h)/sy;
+
+ image sized = resize_image(cropped, w, h);
+ if(flip) flip_image(sized);
+ d.X.vals[0] = sized.data;
+
+ fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
+
+ free_image(orig);
+ free_image(cropped);
+
+ return d;
+}
+
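+// Detection training loader: places a randomly scaled, aspect-jittered copy of the
+// image on a 0.5-gray canvas, then fills up to `boxes` truths of 5 floats each,
+// adjusted for the placement offsets and the horizontal flip.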
+data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
+{
+ char **random_paths = get_random_paths(paths, n, m);
+ int i;
+ data d = {0};
+ d.shallow = 0;
+
+ d.X.rows = n;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+ d.X.cols = h*w*3;
+
+ d.y = make_matrix(n, 5*boxes);
+ for(i = 0; i < n; ++i){
+ image orig = load_image_color(random_paths[i], 0, 0);
+ image sized = make_image(w, h, orig.c);
+ fill_image(sized, .5);
+
+ float dw = jitter * orig.w;
+ float dh = jitter * orig.h;
+
+ float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
+ float scale = rand_uniform(.25, 2);
+
+ float nw, nh;
+
+ if(new_ar < 1){
+ nh = scale * h;
+ nw = nh * new_ar;
+ } else {
+ nw = scale * w;
+ nh = nw / new_ar;
+ }
+
+ float dx = rand_uniform(0, w - nw);
+ float dy = rand_uniform(0, h - nh);
+
+ place_image(orig, nw, nh, dx, dy, sized);
+
+ random_distort_image(sized, hue, saturation, exposure);
+
+ int flip = rand()%2;
+ if(flip) flip_image(sized);
+ d.X.vals[i] = sized.data;
+
+
+ fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
+
+ free_image(orig);
+ }
+ free(random_paths);
+ return d;
+}
+
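+// Worker entry point: dispatches on load_args.type to the matching loader and frees
+// the heap-allocated copy of the arguments when done.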
+void *load_thread(void *ptr)
+{
+ //printf("Loading data: %d\n", rand());
+ load_args a = *(struct load_args*)ptr;
+ if(a.exposure == 0) a.exposure = 1;
+ if(a.saturation == 0) a.saturation = 1;
+ if(a.aspect == 0) a.aspect = 1;
+
+ if (a.type == OLD_CLASSIFICATION_DATA){
+ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
+ } else if (a.type == REGRESSION_DATA){
+ *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
+ } else if (a.type == CLASSIFICATION_DATA){
+ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
+ } else if (a.type == SUPER_DATA){
+ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
+ } else if (a.type == WRITING_DATA){
+ *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
+ } else if (a.type == INSTANCE_DATA){
+ *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
+ } else if (a.type == SEGMENTATION_DATA){
+ *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
+ } else if (a.type == REGION_DATA){
+ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
+ } else if (a.type == DETECTION_DATA){
+ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
+ } else if (a.type == SWAG_DATA){
+ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
+ } else if (a.type == COMPARE_DATA){
+ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
+ } else if (a.type == IMAGE_DATA){
+ *(a.im) = load_image_color(a.path, 0, 0);
+ *(a.resized) = resize_image(*(a.im), a.w, a.h);
+ } else if (a.type == LETTERBOX_DATA){
+ *(a.im) = load_image_color(a.path, 0, 0);
+ *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
+ } else if (a.type == TAG_DATA){
+ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
+ }
+ free(ptr);
+ return 0;
+}
+
+pthread_t load_data_in_thread(load_args args)
+{
+ pthread_t thread;
+ struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ *ptr = args;
+ if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
+ return thread;
+}
+
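+// Splits args.n examples evenly across args.threads workers, joins them, and
+// concatenates the per-thread buffers into one result that owns the row pointers;
+// the per-thread buffers are then freed shallowly.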
+void *load_threads(void *ptr)
+{
+ int i;
+ load_args args = *(load_args *)ptr;
+ if (args.threads == 0) args.threads = 1;
+ data *out = args.d;
+ int total = args.n;
+ free(ptr);
+ data *buffers = calloc(args.threads, sizeof(data));
+ pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
+ for(i = 0; i < args.threads; ++i){
+ args.d = buffers + i;
+ args.n = (i+1) * total/args.threads - i * total/args.threads;
+ threads[i] = load_data_in_thread(args);
+ }
+ for(i = 0; i < args.threads; ++i){
+ pthread_join(threads[i], 0);
+ }
+ *out = concat_datas(buffers, args.threads);
+ out->shallow = 0;
+ for(i = 0; i < args.threads; ++i){
+ buffers[i].shallow = 1;
+ free_data(buffers[i]);
+ }
+ free(buffers);
+ free(threads);
+ return 0;
+}
+
+void load_data_blocking(load_args args)
+{
+ struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ *ptr = args;
+ load_thread(ptr);
+}
+
+pthread_t load_data(load_args args)
+{
+ pthread_t thread;
+ struct load_args *ptr = calloc(1, sizeof(struct load_args));
+ *ptr = args;
+ if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
+ return thread;
+}
+
+data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
+ data d = {0};
+ d.shallow = 0;
+ d.X = load_image_paths(paths, n, w, h);
+ d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
+ if(m) free(paths);
+ int i;
+ for(i = 0; i < n; ++i) free(replace_paths[i]);
+ free(replace_paths);
+ return d;
+}
+
+data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+ d.X = load_image_paths(paths, n, w, h);
+ d.y = load_labels_paths(paths, n, labels, k, 0);
+ if(m) free(paths);
+ return d;
+}
+
+/*
+ data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
+ {
+ data d = {0};
+ d.indexes = calloc(n, sizeof(int));
+ if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
+ d.shallow = 0;
+ d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
+ d.y = load_labels_paths(paths, n, labels, k);
+ if(m) free(paths);
+ return d;
+ }
+ */
+
+data load_data_super(char **paths, int n, int m, int w, int h, int scale)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+
+ int i;
+ d.X.rows = n;
+ d.X.vals = calloc(n, sizeof(float*));
+ d.X.cols = w*h*3;
+
+ d.y.rows = n;
+ d.y.vals = calloc(n, sizeof(float*));
+ d.y.cols = w*scale * h*scale * 3;
+
+ for(i = 0; i < n; ++i){
+ image im = load_image_color(paths[i], 0, 0);
+ image crop = random_crop_image(im, w*scale, h*scale);
+ int flip = rand()%2;
+ if (flip) flip_image(crop);
+ image resize = resize_image(crop, w, h);
+ d.X.vals[i] = resize.data;
+ d.y.vals[i] = crop.data;
+ free_image(im);
+ }
+
+ if(m) free(paths);
+ return d;
+}
+
+data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+ d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
+ d.y = load_regression_labels_paths(paths, n, k);
+ if(m) free(paths);
+ return d;
+}
+
+data select_data(data *orig, int *inds)
+{
+ data d = {0};
+ d.shallow = 1;
+ d.w = orig[0].w;
+ d.h = orig[0].h;
+
+ d.X.rows = orig[0].X.rows;
+ d.y.rows = orig[0].X.rows;
+
+ d.X.cols = orig[0].X.cols;
+ d.y.cols = orig[0].y.cols;
+
+ d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
+ d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
+ int i;
+ for(i = 0; i < d.X.rows; ++i){
+ d.X.vals[i] = orig[inds[i]].X.vals[i];
+ d.y.vals[i] = orig[inds[i]].y.vals[i];
+ }
+ return d;
+}
+
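+// Crops every image into divs x divs tiles centered on a regular grid; each tile has
+// side orig.w/divs * size, so tiles overlap when size > 1. Labels are copied per tile.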
+data *tile_data(data orig, int divs, int size)
+{
+ data *ds = calloc(divs*divs, sizeof(data));
+ int i, j;
+ #pragma omp parallel for
+ for(i = 0; i < divs*divs; ++i){
+ data d;
+ d.shallow = 0;
+ d.w = orig.w/divs * size;
+ d.h = orig.h/divs * size;
+ d.X.rows = orig.X.rows;
+ d.X.cols = d.w*d.h*3;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+
+ d.y = copy_matrix(orig.y);
+ #pragma omp parallel for
+ for(j = 0; j < orig.X.rows; ++j){
+ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
+ int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
+ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
+ d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
+ }
+ ds[i] = d;
+ }
+ return ds;
+}
+
+data resize_data(data orig, int w, int h)
+{
+ data d = {0};
+ d.shallow = 0;
+ d.w = w;
+ d.h = h;
+ int i;
+ d.X.rows = orig.X.rows;
+ d.X.cols = w*h*3;
+ d.X.vals = calloc(d.X.rows, sizeof(float*));
+
+ d.y = copy_matrix(orig.y);
+ #pragma omp parallel for
+ for(i = 0; i < orig.X.rows; ++i){
+ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
+ d.X.vals[i] = resize_image(im, w, h).data;
+ }
+ return d;
+}
+
+data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.shallow = 0;
+ d.w=size;
+ d.h=size;
+ d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
+ d.y = load_labels_paths(paths, n, labels, k, hierarchy);
+ if(m) free(paths);
+ return d;
+}
+
+data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
+{
+ if(m) paths = get_random_paths(paths, n, m);
+ data d = {0};
+ d.w = size;
+ d.h = size;
+ d.shallow = 0;
+ d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
+ d.y = load_tags_paths(paths, n, k);
+ if(m) free(paths);
+ return d;
+}
+
+matrix concat_matrix(matrix m1, matrix m2)
+{
+ int i, count = 0;
+ matrix m;
+ m.cols = m1.cols;
+ m.rows = m1.rows+m2.rows;
+ m.vals = calloc(m1.rows + m2.rows, sizeof(float*));
+ for(i = 0; i < m1.rows; ++i){
+ m.vals[count++] = m1.vals[i];
+ }
+ for(i = 0; i < m2.rows; ++i){
+ m.vals[count++] = m2.vals[i];
+ }
+ return m;
+}
+
+data concat_data(data d1, data d2)
+{
+ data d = {0};
+ d.shallow = 1;
+ d.X = concat_matrix(d1.X, d2.X);
+ d.y = concat_matrix(d1.y, d2.y);
+ d.w = d1.w;
+ d.h = d1.h;
+ return d;
+}
+
+data concat_datas(data *d, int n)
+{
+ int i;
+ data out = {0};
+ for(i = 0; i < n; ++i){
+ data new = concat_data(d[i], out);
+ free_data(out);
+ out = new;
+ }
+ return out;
+}
+
+data load_categorical_data_csv(char *filename, int target, int k)
+{
+ data d = {0};
+ d.shallow = 0;
+ matrix X = csv_to_matrix(filename);
+ float *truth_1d = pop_column(&X, target);
+ float **truth = one_hot_encode(truth_1d, X.rows, k);
+ matrix y;
+ y.rows = X.rows;
+ y.cols = k;
+ y.vals = truth;
+ d.X = X;
+ d.y = y;
+ free(truth_1d);
+ return d;
+}
+
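+// Reads one CIFAR-10 binary batch: 10000 records of 1 label byte followed by
+// 3072 pixel bytes, scaled into [0,1].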
+data load_cifar10_data(char *filename)
+{
+ data d = {0};
+ d.shallow = 0;
+ long i,j;
+ matrix X = make_matrix(10000, 3072);
+ matrix y = make_matrix(10000, 10);
+ d.X = X;
+ d.y = y;
+
+ FILE *fp = fopen(filename, "rb");
+ if(!fp) file_error(filename);
+ for(i = 0; i < 10000; ++i){
+ unsigned char bytes[3073];
+ fread(bytes, 1, 3073, fp);
+ int class = bytes[0];
+ y.vals[i][class] = 1;
+ for(j = 0; j < X.cols; ++j){
+ X.vals[i][j] = (double)bytes[j+1];
+ }
+ }
+ scale_data_rows(d, 1./255);
+ //normalize_data_rows(d);
+ fclose(fp);
+ return d;
+}
+
+void get_random_batch(data d, int n, float *X, float *y)
+{
+ int j;
+ for(j = 0; j < n; ++j){
+ int index = rand()%d.X.rows;
+ memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
+ memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
+ }
+}
+
+void get_next_batch(data d, int n, int offset, float *X, float *y)
+{
+ int j;
+ for(j = 0; j < n; ++j){
+ int index = offset + j;
+ memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float));
+ if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float));
+ }
+}
+
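+// Label smoothing: y <- eps/k + (1-eps)*y with eps = 0.1 and k = d.y.cols.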
+void smooth_data(data d)
+{
+ int i, j;
+ float scale = 1. / d.y.cols;
+ float eps = .1;
+ for(i = 0; i < d.y.rows; ++i){
+ for(j = 0; j < d.y.cols; ++j){
+ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j];
+ }
+ }
+}
+
+data load_all_cifar10()
+{
+ data d = {0};
+ d.shallow = 0;
+ int i,j,b;
+ matrix X = make_matrix(50000, 3072);
+ matrix y = make_matrix(50000, 10);
+ d.X = X;
+ d.y = y;
+
+
+ for(b = 0; b < 5; ++b){
+ char buff[256];
+ sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
+ FILE *fp = fopen(buff, "rb");
+ if(!fp) file_error(buff);
+ for(i = 0; i < 10000; ++i){
+ unsigned char bytes[3073];
+ fread(bytes, 1, 3073, fp);
+ int class = bytes[0];
+ y.vals[i+b*10000][class] = 1;
+ for(j = 0; j < X.cols; ++j){
+ X.vals[i+b*10000][j] = (double)bytes[j+1];
+ }
+ }
+ fclose(fp);
+ }
+ //normalize_data_rows(d);
+ scale_data_rows(d, 1./255);
+ smooth_data(d);
+ return d;
+}
+
+data load_go(char *filename)
+{
+ FILE *fp = fopen(filename, "rb");
+ matrix X = make_matrix(3363059, 361);
+ matrix y = make_matrix(3363059, 361);
+ int row, col;
+
+ if(!fp) file_error(filename);
+ char *label;
+ int count = 0;
+ while((label = fgetl(fp))){
+ int i;
+ if(count == X.rows){
+ X = resize_matrix(X, count*2);
+ y = resize_matrix(y, count*2);
+ }
+ sscanf(label, "%d %d", &row, &col);
+ char *board = fgetl(fp);
+
+ int index = row*19 + col;
+ y.vals[count][index] = 1;
+
+ for(i = 0; i < 19*19; ++i){
+ float val = 0;
+ if(board[i] == '1') val = 1;
+ else if(board[i] == '2') val = -1;
+ X.vals[count][i] = val;
+ }
+ ++count;
+ free(label);
+ free(board);
+ }
+ X = resize_matrix(X, count);
+ y = resize_matrix(y, count);
+
+ data d = {0};
+ d.shallow = 0;
+ d.X = X;
+ d.y = y;
+
+
+ fclose(fp);
+
+ return d;
+}
+
+
+void randomize_data(data d)
+{
+ int i;
+ for(i = d.X.rows-1; i > 0; --i){
+ int index = rand()%i;
+ float *swap = d.X.vals[index];
+ d.X.vals[index] = d.X.vals[i];
+ d.X.vals[i] = swap;
+
+ swap = d.y.vals[index];
+ d.y.vals[index] = d.y.vals[i];
+ d.y.vals[i] = swap;
+ }
+}
+
+void scale_data_rows(data d, float s)
+{
+ int i;
+ for(i = 0; i < d.X.rows; ++i){
+ scale_array(d.X.vals[i], d.X.cols, s);
+ }
+}
+
+void translate_data_rows(data d, float s)
+{
+ int i;
+ for(i = 0; i < d.X.rows; ++i){
+ translate_array(d.X.vals[i], d.X.cols, s);
+ }
+}
+
+data copy_data(data d)
+{
+ data c = {0};
+ c.w = d.w;
+ c.h = d.h;
+ c.shallow = 0;
+ c.num_boxes = d.num_boxes;
+ c.boxes = d.boxes;
+ c.X = copy_matrix(d.X);
+ c.y = copy_matrix(d.y);
+ return c;
+}
+
+void normalize_data_rows(data d)
+{
+ int i;
+ for(i = 0; i < d.X.rows; ++i){
+ normalize_array(d.X.vals[i], d.X.cols);
+ }
+}
+
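+// Returns a shallow view of the part-th of `total` contiguous row slices; no copies.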
+data get_data_part(data d, int part, int total)
+{
+ data p = {0};
+ p.shallow = 1;
+ p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
+ p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
+ p.X.cols = d.X.cols;
+ p.y.cols = d.y.cols;
+ p.X.vals = d.X.vals + d.X.rows * part / total;
+ p.y.vals = d.y.vals + d.y.rows * part / total;
+ return p;
+}
+
+data get_random_data(data d, int num)
+{
+ data r = {0};
+ r.shallow = 1;
+
+ r.X.rows = num;
+ r.y.rows = num;
+
+ r.X.cols = d.X.cols;
+ r.y.cols = d.y.cols;
+
+ r.X.vals = calloc(num, sizeof(float *));
+ r.y.vals = calloc(num, sizeof(float *));
+
+ int i;
+ for(i = 0; i < num; ++i){
+ int index = rand()%d.X.rows;
+ r.X.vals[i] = d.X.vals[index];
+ r.y.vals[i] = d.y.vals[index];
+ }
+ return r;
+}
+
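+// Splits d into {train, test}: rows [start, end) go to test, the rest to train.
+// Both results are shallow and share row pointers with d.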
+data *split_data(data d, int part, int total)
+{
+ data *split = calloc(2, sizeof(data));
+ int i;
+ int start = part*d.X.rows/total;
+ int end = (part+1)*d.X.rows/total;
+ data train;
+ data test;
+ train.shallow = test.shallow = 1;
+
+ test.X.rows = test.y.rows = end-start;
+ train.X.rows = train.y.rows = d.X.rows - (end-start);
+ train.X.cols = test.X.cols = d.X.cols;
+ train.y.cols = test.y.cols = d.y.cols;
+
+ train.X.vals = calloc(train.X.rows, sizeof(float*));
+ test.X.vals = calloc(test.X.rows, sizeof(float*));
+ train.y.vals = calloc(train.y.rows, sizeof(float*));
+ test.y.vals = calloc(test.y.rows, sizeof(float*));
+
+ for(i = 0; i < start; ++i){
+ train.X.vals[i] = d.X.vals[i];
+ train.y.vals[i] = d.y.vals[i];
+ }
+ for(i = start; i < end; ++i){
+ test.X.vals[i-start] = d.X.vals[i];
+ test.y.vals[i-start] = d.y.vals[i];
+ }
+ for(i = end; i < d.X.rows; ++i){
+ train.X.vals[i-(end-start)] = d.X.vals[i];
+ train.y.vals[i-(end-start)] = d.y.vals[i];
+ }
+ split[0] = train;
+ split[1] = test;
+ return split;
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.h
new file mode 100644
index 00000000000..781906f8743
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/data.h
@@ -0,0 +1,50 @@
+#ifndef DATA_H
+#define DATA_H
+#include <pthread.h>
+
+#include "darknet.h"
+#include "matrix.h"
+#include "list.h"
+#include "image.h"
+#include "tree.h"
+
+static inline float distance_from_edge(int x, int max)
+{
+ int dx = (max/2) - x;
+ if (dx < 0) dx = -dx;
+ dx = (max/2) + 1 - dx;
+ dx *= 2;
+ float dist = (float)dx/max;
+ if (dist > 1) dist = 1;
+ return dist;
+}
+void load_data_blocking(load_args args);
+
+
+void print_letters(float *pred, int n);
+data load_data_captcha(char **paths, int n, int m, int k, int w, int h);
+data load_data_captcha_encode(char **paths, int n, int m, int w, int h);
+data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure);
+data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
+matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
+data load_data_super(char **paths, int n, int m, int w, int h, int scale);
+data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center);
+data load_data_regression(char **paths, int n, int m, int classes, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure);
+data load_go(char *filename);
+
+
+data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h);
+
+void get_random_batch(data d, int n, float *X, float *y);
+data get_data_part(data d, int part, int total);
+data get_random_data(data d, int num);
+data load_categorical_data_csv(char *filename, int target, int k);
+void normalize_data_rows(data d);
+void scale_data_rows(data d, float s);
+void translate_data_rows(data d, float s);
+void randomize_data(data d);
+data *split_data(data d, int part, int total);
+data concat_datas(data *d, int n);
+void fill_truth(char *path, char **labels, int k, float *truth);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_kernels.cu
new file mode 100644
index 00000000000..8267dcfa25b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_kernels.cu
@@ -0,0 +1,139 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "convolutional_layer.h"
+#include "deconvolutional_layer.h"
+#include "batchnorm_layer.h"
+#include "gemm.h"
+#include "blas.h"
+#include "im2col.h"
+#include "col2im.h"
+#include "utils.h"
+#include "cuda.h"
+}
+
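+// Deconvolution forward on GPU: per batch item, a GEMM of the transposed weights with
+// the input fills a column buffer that col2im scatters back into the output map.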
+extern "C" void forward_deconvolutional_layer_gpu(layer l, network net)
+{
+ int i;
+
+ int m = l.size*l.size*l.n;
+ int n = l.h*l.w;
+ int k = l.c;
+
+ fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
+
+ for(i = 0; i < l.batch; ++i){
+ float *a = l.weights_gpu;
+ float *b = net.input_gpu + i*l.c*l.h*l.w;
+ float *c = net.workspace;
+
+ gemm_gpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
+
+ col2im_gpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output_gpu+i*l.outputs);
+ }
+ if (l.batch_normalize) {
+ forward_batchnorm_layer_gpu(l, net);
+ } else {
+ add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w*l.out_h);
+ }
+ activate_array_gpu(l.output_gpu, l.batch*l.n*l.out_w*l.out_h, l.activation);
+}
+
+extern "C" void backward_deconvolutional_layer_gpu(layer l, network net)
+{
+ int i;
+
+ //constrain_gpu(l.outputs*l.batch, 1, l.delta_gpu, 1);
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer_gpu(l, net);
+ } else {
+ backward_bias_gpu(l.bias_updates_gpu, l.delta_gpu, l.batch, l.n, l.out_w*l.out_h);
+ }
+
+ //if(net.delta_gpu) memset(net.delta_gpu, 0, l.batch*l.h*l.w*l.c*sizeof(float));
+
+ for(i = 0; i < l.batch; ++i){
+ int m = l.c;
+ int n = l.size*l.size*l.n;
+ int k = l.h*l.w;
+
+ float *a = net.input_gpu + i*m*k;
+ float *b = net.workspace;
+ float *c = l.weight_updates_gpu;
+
+ im2col_gpu(l.delta_gpu + i*l.outputs, l.out_c, l.out_h, l.out_w,
+ l.size, l.stride, l.pad, b);
+ gemm_gpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if(net.delta_gpu){
+ int m = l.c;
+ int n = l.h*l.w;
+ int k = l.size*l.size*l.n;
+
+ float *a = l.weights_gpu;
+ float *b = net.workspace;
+ float *c = net.delta_gpu + i*n*m;
+
+ gemm_gpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
+ }
+ }
+}
+
+extern "C" void pull_deconvolutional_layer(layer l)
+{
+ cuda_pull_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
+ cuda_pull_array(l.biases_gpu, l.biases, l.n);
+ cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
+ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ if (l.batch_normalize){
+ cuda_pull_array(l.scales_gpu, l.scales, l.n);
+ cuda_pull_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
+ cuda_pull_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
+ }
+}
+
+extern "C" void push_deconvolutional_layer(layer l)
+{
+ cuda_push_array(l.weights_gpu, l.weights, l.c*l.n*l.size*l.size);
+ cuda_push_array(l.biases_gpu, l.biases, l.n);
+ cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.c*l.n*l.size*l.size);
+ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ if (l.batch_normalize){
+ cuda_push_array(l.scales_gpu, l.scales, l.n);
+ cuda_push_array(l.rolling_mean_gpu, l.rolling_mean, l.n);
+ cuda_push_array(l.rolling_variance_gpu, l.rolling_variance, l.n);
+ }
+}
+
+void update_deconvolutional_layer_gpu(layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ if(a.adam){
+ adam_update_gpu(l.weights_gpu, l.weight_updates_gpu, l.m_gpu, l.v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.nweights, batch, a.t);
+ adam_update_gpu(l.biases_gpu, l.bias_updates_gpu, l.bias_m_gpu, l.bias_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
+ if(l.scales_gpu){
+ adam_update_gpu(l.scales_gpu, l.scale_updates_gpu, l.scale_m_gpu, l.scale_v_gpu, a.B1, a.B2, a.eps, decay, learning_rate, l.n, batch, a.t);
+ }
+ }else{
+ axpy_gpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
+ axpy_gpu(l.nweights, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
+ scal_gpu(l.nweights, momentum, l.weight_updates_gpu, 1);
+
+ axpy_gpu(l.n, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
+ scal_gpu(l.n, momentum, l.bias_updates_gpu, 1);
+
+ if(l.scales_gpu){
+ axpy_gpu(l.n, learning_rate/batch, l.scale_updates_gpu, 1, l.scales_gpu, 1);
+ scal_gpu(l.n, momentum, l.scale_updates_gpu, 1);
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.c
new file mode 100644
index 00000000000..00c0e85771d
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.c
@@ -0,0 +1,312 @@
+#include "deconvolutional_layer.h"
+#include "convolutional_layer.h"
+#include "batchnorm_layer.h"
+#include "utils.h"
+#include "im2col.h"
+#include "col2im.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <stdio.h>
+#include <time.h>
+
+
+static size_t get_workspace_size(layer l){
+ return (size_t)l.h*l.w*l.size*l.size*l.n*sizeof(float);
+}
+
+void bilinear_init(layer l)
+{
+ int i,j,f;
+ float center = (l.size-1) / 2.;
+ for(f = 0; f < l.n; ++f){
+ for(j = 0; j < l.size; ++j){
+ for(i = 0; i < l.size; ++i){
+ float val = (1 - fabs(i - center)) * (1 - fabs(j - center));
+ int c = f%l.c;
+ int ind = f*l.size*l.size*l.c + c*l.size*l.size + j*l.size + i;
+ l.weights[ind] = val;
+ }
+ }
+ }
+}
+
+
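+// Output spatial size follows the transposed-convolution formula:
+// out = (in - 1) * stride + size - 2 * pad.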
+layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam)
+{
+ int i;
+ layer l = {0};
+ l.type = DECONVOLUTIONAL;
+
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.n = n;
+ l.batch = batch;
+ l.stride = stride;
+ l.size = size;
+
+ l.nweights = c*n*size*size;
+ l.nbiases = n;
+
+ l.weights = calloc(c*n*size*size, sizeof(float));
+ l.weight_updates = calloc(c*n*size*size, sizeof(float));
+
+ l.biases = calloc(n, sizeof(float));
+ l.bias_updates = calloc(n, sizeof(float));
+ //float scale = n/(size*size*c);
+ //printf("scale: %f\n", scale);
+ float scale = .02;
+ for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_normal();
+ //bilinear_init(l);
+ for(i = 0; i < n; ++i){
+ l.biases[i] = 0;
+ }
+ l.pad = padding;
+
+ l.out_h = (l.h - 1) * l.stride + l.size - 2*l.pad;
+ l.out_w = (l.w - 1) * l.stride + l.size - 2*l.pad;
+ l.out_c = n;
+ l.outputs = l.out_w * l.out_h * l.out_c;
+ l.inputs = l.w * l.h * l.c;
+
+ scal_cpu(l.nweights, (float)l.out_w*l.out_h/(l.w*l.h), l.weights, 1);
+
+ l.output = calloc(l.batch*l.outputs, sizeof(float));
+ l.delta = calloc(l.batch*l.outputs, sizeof(float));
+
+ l.forward = forward_deconvolutional_layer;
+ l.backward = backward_deconvolutional_layer;
+ l.update = update_deconvolutional_layer;
+
+ l.batch_normalize = batch_normalize;
+
+ if(batch_normalize){
+ l.scales = calloc(n, sizeof(float));
+ l.scale_updates = calloc(n, sizeof(float));
+ for(i = 0; i < n; ++i){
+ l.scales[i] = 1;
+ }
+
+ l.mean = calloc(n, sizeof(float));
+ l.variance = calloc(n, sizeof(float));
+
+ l.mean_delta = calloc(n, sizeof(float));
+ l.variance_delta = calloc(n, sizeof(float));
+
+ l.rolling_mean = calloc(n, sizeof(float));
+ l.rolling_variance = calloc(n, sizeof(float));
+ l.x = calloc(l.batch*l.outputs, sizeof(float));
+ l.x_norm = calloc(l.batch*l.outputs, sizeof(float));
+ }
+ if(adam){
+ l.m = calloc(c*n*size*size, sizeof(float));
+ l.v = calloc(c*n*size*size, sizeof(float));
+ l.bias_m = calloc(n, sizeof(float));
+ l.scale_m = calloc(n, sizeof(float));
+ l.bias_v = calloc(n, sizeof(float));
+ l.scale_v = calloc(n, sizeof(float));
+ }
+
+#ifdef GPU
+ l.forward_gpu = forward_deconvolutional_layer_gpu;
+ l.backward_gpu = backward_deconvolutional_layer_gpu;
+ l.update_gpu = update_deconvolutional_layer_gpu;
+
+ if(gpu_index >= 0){
+
+ if (adam) {
+ l.m_gpu = cuda_make_array(l.m, c*n*size*size);
+ l.v_gpu = cuda_make_array(l.v, c*n*size*size);
+ l.bias_m_gpu = cuda_make_array(l.bias_m, n);
+ l.bias_v_gpu = cuda_make_array(l.bias_v, n);
+ l.scale_m_gpu = cuda_make_array(l.scale_m, n);
+ l.scale_v_gpu = cuda_make_array(l.scale_v, n);
+ }
+ l.weights_gpu = cuda_make_array(l.weights, c*n*size*size);
+ l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size);
+
+ l.biases_gpu = cuda_make_array(l.biases, n);
+ l.bias_updates_gpu = cuda_make_array(l.bias_updates, n);
+
+ l.delta_gpu = cuda_make_array(l.delta, l.batch*l.out_h*l.out_w*n);
+ l.output_gpu = cuda_make_array(l.output, l.batch*l.out_h*l.out_w*n);
+
+ if(batch_normalize){
+ l.mean_gpu = cuda_make_array(0, n);
+ l.variance_gpu = cuda_make_array(0, n);
+
+ l.rolling_mean_gpu = cuda_make_array(0, n);
+ l.rolling_variance_gpu = cuda_make_array(0, n);
+
+ l.mean_delta_gpu = cuda_make_array(0, n);
+ l.variance_delta_gpu = cuda_make_array(0, n);
+
+ l.scales_gpu = cuda_make_array(l.scales, n);
+ l.scale_updates_gpu = cuda_make_array(0, n);
+
+ l.x_gpu = cuda_make_array(0, l.batch*l.out_h*l.out_w*n);
+ l.x_norm_gpu = cuda_make_array(0, l.batch*l.out_h*l.out_w*n);
+ }
+ }
+ #ifdef CUDNN
+ cudnnCreateTensorDescriptor(&l.dstTensorDesc);
+ cudnnCreateTensorDescriptor(&l.normTensorDesc);
+ cudnnSetTensor4dDescriptor(l.dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l.batch, l.out_c, l.out_h, l.out_w);
+ cudnnSetTensor4dDescriptor(l.normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l.out_c, 1, 1);
+ #endif
+#endif
+
+ l.activation = activation;
+ l.workspace_size = get_workspace_size(l);
+
+ fprintf(stderr, "deconv%5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
+
+ return l;
+}
+
+void denormalize_deconvolutional_layer(layer l)
+{
+ int i, j;
+ for(i = 0; i < l.n; ++i){
+ float scale = l.scales[i]/sqrt(l.rolling_variance[i] + .00001);
+ for(j = 0; j < l.c*l.size*l.size; ++j){
+ l.weights[i*l.c*l.size*l.size + j] *= scale;
+ }
+ l.biases[i] -= l.rolling_mean[i] * scale;
+ l.scales[i] = 1;
+ l.rolling_mean[i] = 0;
+ l.rolling_variance[i] = 1;
+ }
+}
+
+void resize_deconvolutional_layer(layer *l, int h, int w)
+{
+ l->h = h;
+ l->w = w;
+ l->out_h = (l->h - 1) * l->stride + l->size - 2*l->pad;
+ l->out_w = (l->w - 1) * l->stride + l->size - 2*l->pad;
+
+ l->outputs = l->out_h * l->out_w * l->out_c;
+ l->inputs = l->w * l->h * l->c;
+
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+ if(l->batch_normalize){
+ l->x = realloc(l->x, l->batch*l->outputs*sizeof(float));
+ l->x_norm = realloc(l->x_norm, l->batch*l->outputs*sizeof(float));
+ }
+
+#ifdef GPU
+ cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
+
+ l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+ l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+
+ if(l->batch_normalize){
+ cuda_free(l->x_gpu);
+ cuda_free(l->x_norm_gpu);
+
+ l->x_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+ l->x_norm_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+ }
+ #ifdef CUDNN
+ cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
+ cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
+ #endif
+#endif
+ l->workspace_size = get_workspace_size(*l);
+}
+
+void forward_deconvolutional_layer(const layer l, network net)
+{
+ int i;
+
+ int m = l.size*l.size*l.n;
+ int n = l.h*l.w;
+ int k = l.c;
+
+ fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+
+ for(i = 0; i < l.batch; ++i){
+ float *a = l.weights;
+ float *b = net.input + i*l.c*l.h*l.w;
+ float *c = net.workspace;
+
+ gemm_cpu(1,0,m,n,k,1,a,m,b,n,0,c,n);
+
+ col2im_cpu(net.workspace, l.out_c, l.out_h, l.out_w, l.size, l.stride, l.pad, l.output+i*l.outputs);
+ }
+ if (l.batch_normalize) {
+ forward_batchnorm_layer(l, net);
+ } else {
+ add_bias(l.output, l.biases, l.batch, l.n, l.out_w*l.out_h);
+ }
+ activate_array(l.output, l.batch*l.n*l.out_w*l.out_h, l.activation);
+}
+
+void backward_deconvolutional_layer(layer l, network net)
+{
+ int i;
+
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+
+ if(l.batch_normalize){
+ backward_batchnorm_layer(l, net);
+ } else {
+ backward_bias(l.bias_updates, l.delta, l.batch, l.n, l.out_w*l.out_h);
+ }
+
+ //if(net.delta) memset(net.delta, 0, l.batch*l.h*l.w*l.c*sizeof(float));
+
+ for(i = 0; i < l.batch; ++i){
+ int m = l.c;
+ int n = l.size*l.size*l.n;
+ int k = l.h*l.w;
+
+ float *a = net.input + i*m*k;
+ float *b = net.workspace;
+ float *c = l.weight_updates;
+
+ im2col_cpu(l.delta + i*l.outputs, l.out_c, l.out_h, l.out_w,
+ l.size, l.stride, l.pad, b);
+ gemm_cpu(0,1,m,n,k,1,a,k,b,k,1,c,n);
+
+ if(net.delta){
+ int m = l.c;
+ int n = l.h*l.w;
+ int k = l.size*l.size*l.n;
+
+ float *a = l.weights;
+ float *b = net.workspace;
+ float *c = net.delta + i*n*m;
+
+ gemm_cpu(0,0,m,n,k,1,a,k,b,n,1,c,n);
+ }
+ }
+}
+
+void update_deconvolutional_layer(layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ int size = l.size*l.size*l.c*l.n;
+ axpy_cpu(l.n, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+ scal_cpu(l.n, momentum, l.bias_updates, 1);
+
+ if(l.scales){
+ axpy_cpu(l.n, learning_rate/batch, l.scale_updates, 1, l.scales, 1);
+ scal_cpu(l.n, momentum, l.scale_updates, 1);
+ }
+
+ axpy_cpu(size, -decay*batch, l.weights, 1, l.weight_updates, 1);
+ axpy_cpu(size, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+ scal_cpu(size, momentum, l.weight_updates, 1);
+}
+
+
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.h
new file mode 100644
index 00000000000..b254fb91e69
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/deconvolutional_layer.h
@@ -0,0 +1,25 @@
+#ifndef DECONVOLUTIONAL_LAYER_H
+#define DECONVOLUTIONAL_LAYER_H
+
+#include "cuda.h"
+#include "image.h"
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+#ifdef GPU
+void forward_deconvolutional_layer_gpu(layer l, network net);
+void backward_deconvolutional_layer_gpu(layer l, network net);
+void update_deconvolutional_layer_gpu(layer l, update_args a);
+void push_deconvolutional_layer(layer l);
+void pull_deconvolutional_layer(layer l);
+#endif
+
+layer make_deconvolutional_layer(int batch, int h, int w, int c, int n, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int adam);
+void resize_deconvolutional_layer(layer *l, int h, int w);
+void forward_deconvolutional_layer(const layer l, network net);
+void update_deconvolutional_layer(layer l, update_args a);
+void backward_deconvolutional_layer(layer l, network net);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.c
new file mode 100644
index 00000000000..492c360d4ba
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.c
@@ -0,0 +1,364 @@
+#include "network.h"
+#include "detection_layer.h"
+#include "region_layer.h"
+#include "cost_layer.h"
+#include "utils.h"
+#include "parser.h"
+#include "box.h"
+#include "image.h"
+#include "demo.h"
+#include <sys/time.h>
+
+#define DEMO 1
+
+#ifdef OPENCV
+
+static char **demo_names;
+static image **demo_alphabet;
+static int demo_classes;
+
+static network *net;
+static image buff [3];
+static image buff_letter[3];
+static int buff_index = 0;
+static CvCapture * cap;
+static IplImage * ipl;
+static float fps = 0;
+static float demo_thresh = 0;
+static float demo_hier = .5;
+static int running = 0;
+
+static int demo_frame = 3;
+static int demo_index = 0;
+static float **predictions;
+static float *avg;
+static int demo_done = 0;
+static int demo_total = 0;
+double demo_time;
+
+detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num);
+
+int size_network(network *net)
+{
+ int i;
+ int count = 0;
+ for(i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
+ count += l.outputs;
+ }
+ }
+ return count;
+}
+
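+// Copies the raw outputs of every YOLO/REGION/DETECTION layer into the ring-buffer
+// slot for the current frame so avg_predictions() can average over demo_frame frames.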
+void remember_network(network *net)
+{
+ int i;
+ int count = 0;
+ for(i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
+ memcpy(predictions[demo_index] + count, net->layers[i].output, sizeof(float) * l.outputs);
+ count += l.outputs;
+ }
+ }
+}
+
+detection *avg_predictions(network *net, int *nboxes)
+{
+ int i, j;
+ int count = 0;
+ fill_cpu(demo_total, 0, avg, 1);
+ for(j = 0; j < demo_frame; ++j){
+ axpy_cpu(demo_total, 1./demo_frame, predictions[j], 1, avg, 1);
+ }
+ for(i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ if(l.type == YOLO || l.type == REGION || l.type == DETECTION){
+ memcpy(l.output, avg + count, sizeof(float) * l.outputs);
+ count += l.outputs;
+ }
+ }
+ detection *dets = get_network_boxes(net, buff[0].w, buff[0].h, demo_thresh, demo_hier, 0, 1, nboxes);
+ return dets;
+}
+
+void *detect_in_thread(void *ptr)
+{
+ running = 1;
+ float nms = .4;
+
+ layer l = net->layers[net->n-1];
+ float *X = buff_letter[(buff_index+2)%3].data;
+ network_predict(net, X);
+
+ /*
+ if(l.type == DETECTION){
+ get_detection_boxes(l, 1, 1, demo_thresh, probs, boxes, 0);
+ } else */
+ remember_network(net);
+ detection *dets = 0;
+ int nboxes = 0;
+ dets = avg_predictions(net, &nboxes);
+
+
+ /*
+ int i,j;
+ box zero = {0};
+ int classes = l.classes;
+ for(i = 0; i < demo_detections; ++i){
+ avg[i].objectness = 0;
+ avg[i].bbox = zero;
+ memset(avg[i].prob, 0, classes*sizeof(float));
+ for(j = 0; j < demo_frame; ++j){
+ axpy_cpu(classes, 1./demo_frame, dets[j][i].prob, 1, avg[i].prob, 1);
+ avg[i].objectness += dets[j][i].objectness * 1./demo_frame;
+ avg[i].bbox.x += dets[j][i].bbox.x * 1./demo_frame;
+ avg[i].bbox.y += dets[j][i].bbox.y * 1./demo_frame;
+ avg[i].bbox.w += dets[j][i].bbox.w * 1./demo_frame;
+ avg[i].bbox.h += dets[j][i].bbox.h * 1./demo_frame;
+ }
+ //copy_cpu(classes, dets[0][i].prob, 1, avg[i].prob, 1);
+ //avg[i].objectness = dets[0][i].objectness;
+ }
+ */
+
+ if (nms > 0) do_nms_obj(dets, nboxes, l.classes, nms);
+
+ printf("\033[2J");
+ printf("\033[1;1H");
+ printf("\nFPS:%.1f\n",fps);
+ printf("Objects:\n\n");
+ image display = buff[(buff_index+2) % 3];
+ draw_detections(display, dets, nboxes, demo_thresh, demo_names, demo_alphabet, demo_classes);
+ free_detections(dets, nboxes);
+
+ demo_index = (demo_index + 1)%demo_frame;
+ running = 0;
+ return 0;
+}
+
+void *fetch_in_thread(void *ptr)
+{
+ int status = fill_image_from_stream(cap, buff[buff_index]);
+ letterbox_image_into(buff[buff_index], net->w, net->h, buff_letter[buff_index]);
+ if(status == 0) demo_done = 1;
+ return 0;
+}
+
+void *display_in_thread(void *ptr)
+{
+ show_image_cv(buff[(buff_index + 1)%3], "Demo", ipl);
+ int c = cvWaitKey(1);
+ if (c != -1) c = c%256;
+ if (c == 27) {
+ demo_done = 1;
+ return 0;
+ } else if (c == 82) {
+ demo_thresh += .02;
+ } else if (c == 84) {
+ demo_thresh -= .02;
+ if(demo_thresh <= .02) demo_thresh = .02;
+ } else if (c == 83) {
+ demo_hier += .02;
+ } else if (c == 81) {
+ demo_hier -= .02;
+ if(demo_hier <= .0) demo_hier = .0;
+ }
+ return 0;
+}
+
+void *display_loop(void *ptr)
+{
+ while(1){
+ display_in_thread(0);
+ }
+}
+
+void *detect_loop(void *ptr)
+{
+ while(1){
+ detect_in_thread(0);
+ }
+}
+
+void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg_frames, float hier, int w, int h, int frames, int fullscreen)
+{
+ //demo_frame = avg_frames;
+ image **alphabet = load_alphabet();
+ demo_names = names;
+ demo_alphabet = alphabet;
+ demo_classes = classes;
+ demo_thresh = thresh;
+ demo_hier = hier;
+ printf("Demo\n");
+ net = load_network(cfgfile, weightfile, 0);
+ set_batch_network(net, 1);
+ pthread_t detect_thread;
+ pthread_t fetch_thread;
+
+ srand(2222222);
+
+ int i;
+ demo_total = size_network(net);
+ predictions = calloc(demo_frame, sizeof(float*));
+ for (i = 0; i < demo_frame; ++i){
+ predictions[i] = calloc(demo_total, sizeof(float));
+ }
+ avg = calloc(demo_total, sizeof(float));
+
+ if(filename){
+ printf("video file: %s\n", filename);
+ cap = cvCaptureFromFile(filename);
+ }else{
+ cap = cvCaptureFromCAM(cam_index);
+
+ if(w){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
+ }
+ if(h){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
+ }
+ if(frames){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
+ }
+ }
+
+ if(!cap) error("Couldn't connect to webcam.\n");
+
+ buff[0] = get_image_from_stream(cap);
+ buff[1] = copy_image(buff[0]);
+ buff[2] = copy_image(buff[0]);
+ buff_letter[0] = letterbox_image(buff[0], net->w, net->h);
+ buff_letter[1] = letterbox_image(buff[0], net->w, net->h);
+ buff_letter[2] = letterbox_image(buff[0], net->w, net->h);
+ ipl = cvCreateImage(cvSize(buff[0].w,buff[0].h), IPL_DEPTH_8U, buff[0].c);
+
+ int count = 0;
+ if(!prefix){
+ cvNamedWindow("Demo", CV_WINDOW_NORMAL);
+ if(fullscreen){
+ cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
+ } else {
+ cvMoveWindow("Demo", 0, 0);
+ cvResizeWindow("Demo", 1352, 1013);
+ }
+ }
+
+ demo_time = what_time_is_it_now();
+
+ while(!demo_done){
+ buff_index = (buff_index + 1) %3;
+ if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
+ if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");
+ if(!prefix){
+ fps = 1./(what_time_is_it_now() - demo_time);
+ demo_time = what_time_is_it_now();
+ display_in_thread(0);
+ }else{
+ char name[256];
+ sprintf(name, "%s_%08d", prefix, count);
+ save_image(buff[(buff_index + 1)%3], name);
+ }
+ pthread_join(fetch_thread, 0);
+ pthread_join(detect_thread, 0);
+ ++count;
+ }
+}
+
+/*
+ void demo_compare(char *cfg1, char *weight1, char *cfg2, char *weight2, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg_frames, float hier, int w, int h, int frames, int fullscreen)
+ {
+ demo_frame = avg_frames;
+ predictions = calloc(demo_frame, sizeof(float*));
+ image **alphabet = load_alphabet();
+ demo_names = names;
+ demo_alphabet = alphabet;
+ demo_classes = classes;
+ demo_thresh = thresh;
+ demo_hier = hier;
+ printf("Demo\n");
+ net = load_network(cfg1, weight1, 0);
+ set_batch_network(net, 1);
+ pthread_t detect_thread;
+ pthread_t fetch_thread;
+
+ srand(2222222);
+
+ if(filename){
+ printf("video file: %s\n", filename);
+ cap = cvCaptureFromFile(filename);
+ }else{
+ cap = cvCaptureFromCAM(cam_index);
+
+ if(w){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
+ }
+ if(h){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
+ }
+ if(frames){
+ cvSetCaptureProperty(cap, CV_CAP_PROP_FPS, frames);
+ }
+ }
+
+ if(!cap) error("Couldn't connect to webcam.\n");
+
+ layer l = net->layers[net->n-1];
+ demo_detections = l.n*l.w*l.h;
+ int j;
+
+ avg = (float *) calloc(l.outputs, sizeof(float));
+ for(j = 0; j < demo_frame; ++j) predictions[j] = (float *) calloc(l.outputs, sizeof(float));
+
+ boxes = (box *)calloc(l.w*l.h*l.n, sizeof(box));
+ probs = (float **)calloc(l.w*l.h*l.n, sizeof(float *));
+ for(j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float *)calloc(l.classes+1, sizeof(float));
+
+ buff[0] = get_image_from_stream(cap);
+ buff[1] = copy_image(buff[0]);
+ buff[2] = copy_image(buff[0]);
+ buff_letter[0] = letterbox_image(buff[0], net->w, net->h);
+ buff_letter[1] = letterbox_image(buff[0], net->w, net->h);
+ buff_letter[2] = letterbox_image(buff[0], net->w, net->h);
+ ipl = cvCreateImage(cvSize(buff[0].w,buff[0].h), IPL_DEPTH_8U, buff[0].c);
+
+ int count = 0;
+ if(!prefix){
+ cvNamedWindow("Demo", CV_WINDOW_NORMAL);
+ if(fullscreen){
+ cvSetWindowProperty("Demo", CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
+ } else {
+ cvMoveWindow("Demo", 0, 0);
+ cvResizeWindow("Demo", 1352, 1013);
+ }
+ }
+
+ demo_time = what_time_is_it_now();
+
+ while(!demo_done){
+buff_index = (buff_index + 1) %3;
+if(pthread_create(&fetch_thread, 0, fetch_in_thread, 0)) error("Thread creation failed");
+if(pthread_create(&detect_thread, 0, detect_in_thread, 0)) error("Thread creation failed");
+if(!prefix){
+ fps = 1./(what_time_is_it_now() - demo_time);
+ demo_time = what_time_is_it_now();
+ display_in_thread(0);
+}else{
+ char name[256];
+ sprintf(name, "%s_%08d", prefix, count);
+ save_image(buff[(buff_index + 1)%3], name);
+}
+pthread_join(fetch_thread, 0);
+pthread_join(detect_thread, 0);
+++count;
+}
+}
+*/
+#else
+void demo(char *cfgfile, char *weightfile, float thresh, int cam_index, const char *filename, char **names, int classes, int delay, char *prefix, int avg, float hier, int w, int h, int frames, int fullscreen)
+{
+ fprintf(stderr, "Demo needs OpenCV for webcam images.\n");
+}
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.h
new file mode 100644
index 00000000000..86e46541d1a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/demo.h
@@ -0,0 +1,6 @@
+#ifndef DEMO_H
+#define DEMO_H
+
+#include "image.h"
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.c
new file mode 100644
index 00000000000..d0e0194b1db
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.c
@@ -0,0 +1,275 @@
+#include "detection_layer.h"
+#include "activations.h"
+#include "softmax_layer.h"
+#include "blas.h"
+#include "box.h"
+#include "cuda.h"
+#include "utils.h"
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
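+// YOLOv1-style detection layer: each of the side x side grid cells predicts l.classes
+// class scores plus l.n objectness scores and l.n boxes of l.coords values, so
+// inputs == side*side*((1 + coords)*n + classes).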
+detection_layer make_detection_layer(int batch, int inputs, int n, int side, int classes, int coords, int rescore)
+{
+ detection_layer l = {0};
+ l.type = DETECTION;
+
+ l.n = n;
+ l.batch = batch;
+ l.inputs = inputs;
+ l.classes = classes;
+ l.coords = coords;
+ l.rescore = rescore;
+ l.side = side;
+ l.w = side;
+ l.h = side;
+ assert(side*side*((1 + l.coords)*l.n + l.classes) == inputs);
+ l.cost = calloc(1, sizeof(float));
+ l.outputs = l.inputs;
+ l.truths = l.side*l.side*(1+l.coords+l.classes);
+ l.output = calloc(batch*l.outputs, sizeof(float));
+ l.delta = calloc(batch*l.outputs, sizeof(float));
+
+ l.forward = forward_detection_layer;
+ l.backward = backward_detection_layer;
+#ifdef GPU
+ l.forward_gpu = forward_detection_layer_gpu;
+ l.backward_gpu = backward_detection_layer_gpu;
+ l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
+ l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
+#endif
+
+ fprintf(stderr, "Detection Layer\n");
+ srand(0);
+
+ return l;
+}
+
+void forward_detection_layer(const detection_layer l, network net)
+{
+ int locations = l.side*l.side;
+ int i,j;
+ memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
+ //if(l.reorg) reorg(l.output, l.w*l.h, size*l.n, l.batch, 1);
+ int b;
+ if (l.softmax){
+ for(b = 0; b < l.batch; ++b){
+ int index = b*l.inputs;
+ for (i = 0; i < locations; ++i) {
+ int offset = i*l.classes;
+ softmax(l.output + index + offset, l.classes, 1, 1,
+ l.output + index + offset);
+ }
+ }
+ }
+ if(net.train){
+ float avg_iou = 0;
+ float avg_cat = 0;
+ float avg_allcat = 0;
+ float avg_obj = 0;
+ float avg_anyobj = 0;
+ int count = 0;
+ *(l.cost) = 0;
+ int size = l.inputs * l.batch;
+ memset(l.delta, 0, size * sizeof(float));
+ for (b = 0; b < l.batch; ++b){
+ int index = b*l.inputs;
+ for (i = 0; i < locations; ++i) {
+ int truth_index = (b*locations + i)*(1+l.coords+l.classes);
+ int is_obj = net.truth[truth_index];
+ for (j = 0; j < l.n; ++j) {
+ int p_index = index + locations*l.classes + i*l.n + j;
+ l.delta[p_index] = l.noobject_scale*(0 - l.output[p_index]);
+ *(l.cost) += l.noobject_scale*pow(l.output[p_index], 2);
+ avg_anyobj += l.output[p_index];
+ }
+
+ int best_index = -1;
+ float best_iou = 0;
+ float best_rmse = 20;
+
+ if (!is_obj){
+ continue;
+ }
+
+ int class_index = index + i*l.classes;
+ for(j = 0; j < l.classes; ++j) {
+ l.delta[class_index+j] = l.class_scale * (net.truth[truth_index+1+j] - l.output[class_index+j]);
+ *(l.cost) += l.class_scale * pow(net.truth[truth_index+1+j] - l.output[class_index+j], 2);
+ if(net.truth[truth_index + 1 + j]) avg_cat += l.output[class_index+j];
+ avg_allcat += l.output[class_index+j];
+ }
+
+ box truth = float_to_box(net.truth + truth_index + 1 + l.classes, 1);
+ truth.x /= l.side;
+ truth.y /= l.side;
+
+ for(j = 0; j < l.n; ++j){
+ int box_index = index + locations*(l.classes + l.n) + (i*l.n + j) * l.coords;
+ box out = float_to_box(l.output + box_index, 1);
+ out.x /= l.side;
+ out.y /= l.side;
+
+ if (l.sqrt){
+ out.w = out.w*out.w;
+ out.h = out.h*out.h;
+ }
+
+ float iou = box_iou(out, truth);
+ //iou = 0;
+ float rmse = box_rmse(out, truth);
+ if(best_iou > 0 || iou > 0){
+ if(iou > best_iou){
+ best_iou = iou;
+ best_index = j;
+ }
+ }else{
+ if(rmse < best_rmse){
+ best_rmse = rmse;
+ best_index = j;
+ }
+ }
+ }
+
+ if(l.forced){
+ if(truth.w*truth.h < .1){
+ best_index = 1;
+ }else{
+ best_index = 0;
+ }
+ }
+ if(l.random && *(net.seen) < 64000){
+ best_index = rand()%l.n;
+ }
+
+ int box_index = index + locations*(l.classes + l.n) + (i*l.n + best_index) * l.coords;
+ int tbox_index = truth_index + 1 + l.classes;
+
+ box out = float_to_box(l.output + box_index, 1);
+ out.x /= l.side;
+ out.y /= l.side;
+ if (l.sqrt) {
+ out.w = out.w*out.w;
+ out.h = out.h*out.h;
+ }
+ float iou = box_iou(out, truth);
+
+ //printf("%d,", best_index);
+ int p_index = index + locations*l.classes + i*l.n + best_index;
+ *(l.cost) -= l.noobject_scale * pow(l.output[p_index], 2);
+ *(l.cost) += l.object_scale * pow(1-l.output[p_index], 2);
+ avg_obj += l.output[p_index];
+ l.delta[p_index] = l.object_scale * (1.-l.output[p_index]);
+
+ if(l.rescore){
+ l.delta[p_index] = l.object_scale * (iou - l.output[p_index]);
+ }
+
+ l.delta[box_index+0] = l.coord_scale*(net.truth[tbox_index + 0] - l.output[box_index + 0]);
+ l.delta[box_index+1] = l.coord_scale*(net.truth[tbox_index + 1] - l.output[box_index + 1]);
+ l.delta[box_index+2] = l.coord_scale*(net.truth[tbox_index + 2] - l.output[box_index + 2]);
+ l.delta[box_index+3] = l.coord_scale*(net.truth[tbox_index + 3] - l.output[box_index + 3]);
+ if(l.sqrt){
+ l.delta[box_index+2] = l.coord_scale*(sqrt(net.truth[tbox_index + 2]) - l.output[box_index + 2]);
+ l.delta[box_index+3] = l.coord_scale*(sqrt(net.truth[tbox_index + 3]) - l.output[box_index + 3]);
+ }
+
+ *(l.cost) += pow(1-iou, 2);
+ avg_iou += iou;
+ ++count;
+ }
+ }
+
+ if(0){
+ float *costs = calloc(l.batch*locations*l.n, sizeof(float));
+ for (b = 0; b < l.batch; ++b) {
+ int index = b*l.inputs;
+ for (i = 0; i < locations; ++i) {
+ for (j = 0; j < l.n; ++j) {
+ int p_index = index + locations*l.classes + i*l.n + j;
+ costs[b*locations*l.n + i*l.n + j] = l.delta[p_index]*l.delta[p_index];
+ }
+ }
+ }
+ int indexes[100];
+ top_k(costs, l.batch*locations*l.n, 100, indexes);
+ float cutoff = costs[indexes[99]];
+ for (b = 0; b < l.batch; ++b) {
+ int index = b*l.inputs;
+ for (i = 0; i < locations; ++i) {
+ for (j = 0; j < l.n; ++j) {
+ int p_index = index + locations*l.classes + i*l.n + j;
+ if (l.delta[p_index]*l.delta[p_index] < cutoff) l.delta[p_index] = 0;
+ }
+ }
+ }
+ free(costs);
+ }
+
+
+ *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
+
+
+ printf("Detection Avg IOU: %f, Pos Cat: %f, All Cat: %f, Pos Obj: %f, Any Obj: %f, count: %d\n", avg_iou/count, avg_cat/count, avg_allcat/(count*l.classes), avg_obj/count, avg_anyobj/(l.batch*locations*l.n), count);
+ //if(l.reorg) reorg(l.delta, l.w*l.h, size*l.n, l.batch, 0);
+ }
+}
+
+void backward_detection_layer(const detection_layer l, network net)
+{
+ axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
+}
+
+void get_detection_detections(layer l, int w, int h, float thresh, detection *dets)
+{
+ int i,j,n;
+ float *predictions = l.output;
+ //int per_cell = 5*num+classes;
+ for (i = 0; i < l.side*l.side; ++i){
+ int row = i / l.side;
+ int col = i % l.side;
+ for(n = 0; n < l.n; ++n){
+ int index = i*l.n + n;
+ int p_index = l.side*l.side*l.classes + i*l.n + n;
+ float scale = predictions[p_index];
+ int box_index = l.side*l.side*(l.classes + l.n) + (i*l.n + n)*4;
+ box b;
+ b.x = (predictions[box_index + 0] + col) / l.side * w;
+ b.y = (predictions[box_index + 1] + row) / l.side * h;
+ b.w = pow(predictions[box_index + 2], (l.sqrt?2:1)) * w;
+ b.h = pow(predictions[box_index + 3], (l.sqrt?2:1)) * h;
+ dets[index].bbox = b;
+ dets[index].objectness = scale;
+ for(j = 0; j < l.classes; ++j){
+ int class_index = i*l.classes;
+ float prob = scale*predictions[class_index+j];
+ dets[index].prob[j] = (prob > thresh) ? prob : 0;
+ }
+ }
+ }
+}
+
+#ifdef GPU
+
+void forward_detection_layer_gpu(const detection_layer l, network net)
+{
+ if(!net.train){
+ copy_gpu(l.batch*l.inputs, net.input_gpu, 1, l.output_gpu, 1);
+ return;
+ }
+
+ cuda_pull_array(net.input_gpu, net.input, l.batch*l.inputs);
+ forward_detection_layer(l, net);
+ cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
+ cuda_push_array(l.delta_gpu, l.delta, l.batch*l.inputs);
+}
+
+void backward_detection_layer_gpu(detection_layer l, network net)
+{
+ axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
+ //copy_gpu(l.batch*l.inputs, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.h
new file mode 100644
index 00000000000..1c818535700
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/detection_layer.h
@@ -0,0 +1,18 @@
+#ifndef DETECTION_LAYER_H
+#define DETECTION_LAYER_H
+
+#include "layer.h"
+#include "network.h"
+
+typedef layer detection_layer;
+
+detection_layer make_detection_layer(int batch, int inputs, int n, int size, int classes, int coords, int rescore);
+void forward_detection_layer(const detection_layer l, network net);
+void backward_detection_layer(const detection_layer l, network net);
+
+#ifdef GPU
+void forward_detection_layer_gpu(const detection_layer l, network net);
+void backward_detection_layer_gpu(detection_layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.c
new file mode 100644
index 00000000000..780554fb371
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.c
@@ -0,0 +1,60 @@
+#include "dropout_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include <stdlib.h>
+#include <stdio.h>
+
+dropout_layer make_dropout_layer(int batch, int inputs, float probability)
+{
+ dropout_layer l = {0};
+ l.type = DROPOUT;
+ l.probability = probability;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.batch = batch;
+ l.rand = calloc(inputs*batch, sizeof(float));
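+ // inverted dropout: surviving activations are scaled by 1/(1-p) during training, so no rescaling is needed at inference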
+ l.scale = 1./(1.-probability);
+ l.forward = forward_dropout_layer;
+ l.backward = backward_dropout_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_dropout_layer_gpu;
+ l.backward_gpu = backward_dropout_layer_gpu;
+ l.rand_gpu = cuda_make_array(l.rand, inputs*batch);
+ #endif
+ fprintf(stderr, "dropout p = %.2f %4d -> %4d\n", probability, inputs, inputs);
+ return l;
+}
+
+void resize_dropout_layer(dropout_layer *l, int inputs)
+{
+ l->rand = realloc(l->rand, l->inputs*l->batch*sizeof(float));
+ #ifdef GPU
+ cuda_free(l->rand_gpu);
+
+ l->rand_gpu = cuda_make_array(l->rand, inputs*l->batch);
+ #endif
+}
+
+void forward_dropout_layer(dropout_layer l, network net)
+{
+ int i;
+ if (!net.train) return;
+ for(i = 0; i < l.batch * l.inputs; ++i){
+ float r = rand_uniform(0, 1);
+ l.rand[i] = r;
+ if(r < l.probability) net.input[i] = 0;
+ else net.input[i] *= l.scale;
+ }
+}
+
+void backward_dropout_layer(dropout_layer l, network net)
+{
+ int i;
+ if(!net.delta) return;
+ for(i = 0; i < l.batch * l.inputs; ++i){
+ float r = l.rand[i];
+ if(r < l.probability) net.delta[i] = 0;
+ else net.delta[i] *= l.scale;
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.h
new file mode 100644
index 00000000000..01f94d4d7d1
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer.h
@@ -0,0 +1,20 @@
+#ifndef DROPOUT_LAYER_H
+#define DROPOUT_LAYER_H
+
+#include "layer.h"
+#include "network.h"
+
+typedef layer dropout_layer;
+
+dropout_layer make_dropout_layer(int batch, int inputs, float probability);
+
+void forward_dropout_layer(dropout_layer l, network net);
+void backward_dropout_layer(dropout_layer l, network net);
+void resize_dropout_layer(dropout_layer *l, int inputs);
+
+#ifdef GPU
+void forward_dropout_layer_gpu(dropout_layer l, network net);
+void backward_dropout_layer_gpu(dropout_layer l, network net);
+
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer_kernels.cu
new file mode 100644
index 00000000000..bd12b678758
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/dropout_layer_kernels.cu
@@ -0,0 +1,41 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "dropout_layer.h"
+#include "cuda.h"
+#include "utils.h"
+}
+
+__global__ void yoloswag420blazeit360noscope(float *input, int size, float *rand, float prob, float scale)
+{
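+ // one thread per activation: zero it when its random draw falls below prob, otherwise multiply by the inverted-dropout scale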
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id < size) input[id] = (rand[id] < prob) ? 0 : input[id]*scale;
+}
+
+void forward_dropout_layer_gpu(dropout_layer layer, network net)
+{
+ if (!net.train) return;
+ int size = layer.inputs*layer.batch;
+ cuda_random(layer.rand_gpu, size);
+ /*
+ int i;
+ for(i = 0; i < size; ++i){
+ layer.rand[i] = rand_uniform();
+ }
+ cuda_push_array(layer.rand_gpu, layer.rand, size);
+ */
+
+ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.input_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
+ check_error(cudaPeekAtLastError());
+}
+
+void backward_dropout_layer_gpu(dropout_layer layer, network net)
+{
+ if(!net.delta_gpu) return;
+ int size = layer.inputs*layer.batch;
+
+ yoloswag420blazeit360noscope<<<cuda_gridsize(size), BLOCK>>>(net.delta_gpu, size, layer.rand_gpu, layer.probability, layer.scale);
+ check_error(cudaPeekAtLastError());
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gemm.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gemm.c
new file mode 100644
index 00000000000..648027f2cdf
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gemm.c
@@ -0,0 +1,324 @@
+#include "gemm.h"
+#include "utils.h"
+#include "cuda.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+
+void gemm_bin(int M, int N, int K, float ALPHA,
+ char *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
+{
+ int i,j,k;
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ char A_PART = A[i*lda+k];
+ if(A_PART){
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += B[k*ldb+j];
+ }
+ } else {
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] -= B[k*ldb+j];
+ }
+ }
+ }
+ }
+}
+
+float *random_matrix(int rows, int cols)
+{
+ int i;
+ float *m = calloc(rows*cols, sizeof(float));
+ for(i = 0; i < rows*cols; ++i){
+ m[i] = (float)rand()/RAND_MAX;
+ }
+ return m;
+}
+
+void time_random_matrix(int TA, int TB, int m, int k, int n)
+{
+ float *a;
+ if(!TA) a = random_matrix(m,k);
+ else a = random_matrix(k,m);
+ int lda = (!TA)?k:m;
+ float *b;
+ if(!TB) b = random_matrix(k,n);
+ else b = random_matrix(n,k);
+ int ldb = (!TB)?n:k;
+
+ float *c = random_matrix(m,n);
+ int i;
+ clock_t start = clock(), end;
+ for(i = 0; i<10; ++i){
+ gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
+ }
+ end = clock();
+ printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
+ free(a);
+ free(b);
+ free(c);
+}
+
+
+void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float BETA,
+ float *C, int ldc)
+{
+ gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
+}
+
+void gemm_nn(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
+{
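+ // i-k-j loop order: the inner loop streams contiguously over rows of B and C (cache friendly); OpenMP splits the output rows across threads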
+ int i,j,k;
+ #pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ register float A_PART = ALPHA*A[i*lda+k];
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += A_PART*B[k*ldb+j];
+ }
+ }
+ }
+}
+
+void gemm_nt(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
+{
+ int i,j,k;
+ #pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ register float sum = 0;
+ for(k = 0; k < K; ++k){
+ sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
+ }
+ C[i*ldc+j] += sum;
+ }
+ }
+}
+
+void gemm_tn(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
+{
+ int i,j,k;
+ #pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(k = 0; k < K; ++k){
+ register float A_PART = ALPHA*A[k*lda+i];
+ for(j = 0; j < N; ++j){
+ C[i*ldc+j] += A_PART*B[k*ldb+j];
+ }
+ }
+ }
+}
+
+void gemm_tt(int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float *C, int ldc)
+{
+ int i,j,k;
+ #pragma omp parallel for
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ register float sum = 0;
+ for(k = 0; k < K; ++k){
+ sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
+ }
+ C[i*ldc+j] += sum;
+ }
+ }
+}
+
+
+void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
+ float *A, int lda,
+ float *B, int ldb,
+ float BETA,
+ float *C, int ldc)
+{
+ //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
+ int i, j;
+ for(i = 0; i < M; ++i){
+ for(j = 0; j < N; ++j){
+ C[i*ldc + j] *= BETA;
+ }
+ }
+ if(!TA && !TB)
+ gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else if(TA && !TB)
+ gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else if(!TA && TB)
+ gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+ else
+ gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
+}
+
+#ifdef GPU
+
+#include <math.h>
+
+void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
+ float *A_gpu, int lda,
+ float *B_gpu, int ldb,
+ float BETA,
+ float *C_gpu, int ldc)
+{
+ cublasHandle_t handle = blas_handle();
+ cudaError_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
+ (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
+ check_error(status);
+}
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
+{
+ float *a;
+ if(!TA) a = random_matrix(m,k);
+ else a = random_matrix(k,m);
+ int lda = (!TA)?k:m;
+ float *b;
+ if(!TB) b = random_matrix(k,n);
+ else b = random_matrix(n,k);
+ int ldb = (!TB)?n:k;
+
+ float *c = random_matrix(m,n);
+ int i;
+ clock_t start = clock(), end;
+ for(i = 0; i<32; ++i){
+ gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
+ }
+ end = clock();
+ printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
+ free(a);
+ free(b);
+ free(c);
+}
+
+void time_gpu(int TA, int TB, int m, int k, int n)
+{
+ int iter = 10;
+ float *a = random_matrix(m,k);
+ float *b = random_matrix(k,n);
+
+ int lda = (!TA)?k:m;
+ int ldb = (!TB)?n:k;
+
+ float *c = random_matrix(m,n);
+
+ float *a_cl = cuda_make_array(a, m*k);
+ float *b_cl = cuda_make_array(b, k*n);
+ float *c_cl = cuda_make_array(c, m*n);
+
+ int i;
+ clock_t start = clock(), end;
+ for(i = 0; i<iter; ++i){
+ gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
+ cudaThreadSynchronize();
+ }
+ double flop = ((double)m)*n*(2.*k + 2.)*iter;
+ double gflop = flop/pow(10., 9);
+ end = clock();
+ double seconds = sec(end-start);
+ printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
+ cuda_free(a_cl);
+ cuda_free(b_cl);
+ cuda_free(c_cl);
+ free(a);
+ free(b);
+ free(c);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.c
new file mode 100644
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.c
+#include "gru_layer.h"
+#include "connected_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void increment_layer(layer *l, int steps)
+{
+ int num = l->outputs*l->batch*steps;
+ l->output += num;
+ l->delta += num;
+ l->x += num;
+ l->x_norm += num;
+
+#ifdef GPU
+ l->output_gpu += num;
+ l->delta_gpu += num;
+ l->x_gpu += num;
+ l->x_norm_gpu += num;
+#endif
+}
+
+layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
+{
+ fprintf(stderr, "GRU Layer: %d inputs, %d outputs\n", inputs, outputs);
+ batch = batch / steps;
+ layer l = {0};
+ l.batch = batch;
+ l.type = GRU;
+ l.steps = steps;
+ l.inputs = inputs;
+
+ l.uz = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.uz) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.uz->batch = batch;
+
+ l.wz = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wz) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wz->batch = batch;
+
+ l.ur = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.ur) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.ur->batch = batch;
+
+ l.wr = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wr) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wr->batch = batch;
+
+
+
+ l.uh = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.uh) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.uh->batch = batch;
+
+ l.wh = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wh) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wh->batch = batch;
+
+ l.batch_normalize = batch_normalize;
+
+
+ l.outputs = outputs;
+ l.output = calloc(outputs*batch*steps, sizeof(float));
+ l.delta = calloc(outputs*batch*steps, sizeof(float));
+ l.state = calloc(outputs*batch, sizeof(float));
+ l.prev_state = calloc(outputs*batch, sizeof(float));
+ l.forgot_state = calloc(outputs*batch, sizeof(float));
+ l.forgot_delta = calloc(outputs*batch, sizeof(float));
+
+ l.r_cpu = calloc(outputs*batch, sizeof(float));
+ l.z_cpu = calloc(outputs*batch, sizeof(float));
+ l.h_cpu = calloc(outputs*batch, sizeof(float));
+
+ l.forward = forward_gru_layer;
+ l.backward = backward_gru_layer;
+ l.update = update_gru_layer;
+
+#ifdef GPU
+ l.forward_gpu = forward_gru_layer_gpu;
+ l.backward_gpu = backward_gru_layer_gpu;
+ l.update_gpu = update_gru_layer_gpu;
+
+ l.forgot_state_gpu = cuda_make_array(0, batch*outputs);
+ l.forgot_delta_gpu = cuda_make_array(0, batch*outputs);
+ l.prev_state_gpu = cuda_make_array(0, batch*outputs);
+ l.state_gpu = cuda_make_array(0, batch*outputs);
+ l.output_gpu = cuda_make_array(0, batch*outputs*steps);
+ l.delta_gpu = cuda_make_array(0, batch*outputs*steps);
+ l.r_gpu = cuda_make_array(0, batch*outputs);
+ l.z_gpu = cuda_make_array(0, batch*outputs);
+ l.h_gpu = cuda_make_array(0, batch*outputs);
+
+#ifdef CUDNN
+ cudnnSetTensor4dDescriptor(l.uz->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uz->out_c, l.uz->out_h, l.uz->out_w);
+ cudnnSetTensor4dDescriptor(l.uh->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uh->out_c, l.uh->out_h, l.uh->out_w);
+ cudnnSetTensor4dDescriptor(l.ur->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.ur->out_c, l.ur->out_h, l.ur->out_w);
+ cudnnSetTensor4dDescriptor(l.wz->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wz->out_c, l.wz->out_h, l.wz->out_w);
+ cudnnSetTensor4dDescriptor(l.wh->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wh->out_c, l.wh->out_h, l.wh->out_w);
+ cudnnSetTensor4dDescriptor(l.wr->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wr->out_c, l.wr->out_h, l.wr->out_w);
+#endif
+#endif
+
+ return l;
+}
+
+void update_gru_layer(layer l, update_args a)
+{
+ update_connected_layer(*(l.ur), a);
+ update_connected_layer(*(l.uz), a);
+ update_connected_layer(*(l.uh), a);
+ update_connected_layer(*(l.wr), a);
+ update_connected_layer(*(l.wz), a);
+ update_connected_layer(*(l.wh), a);
+}
+
+void forward_gru_layer(layer l, network net)
+{
+ network s = net;
+ s.train = net.train;
+ int i;
+ layer uz = *(l.uz);
+ layer ur = *(l.ur);
+ layer uh = *(l.uh);
+
+ layer wz = *(l.wz);
+ layer wr = *(l.wr);
+ layer wh = *(l.wh);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, uz.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, ur.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, uh.delta, 1);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wz.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wr.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wh.delta, 1);
+ if(net.train) {
+ fill_cpu(l.outputs * l.batch * l.steps, 0, l.delta, 1);
+ copy_cpu(l.outputs*l.batch, l.state, 1, l.prev_state, 1);
+ }
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input = l.state;
+ forward_connected_layer(wz, s);
+ forward_connected_layer(wr, s);
+
+ s.input = net.input;
+ forward_connected_layer(uz, s);
+ forward_connected_layer(ur, s);
+ forward_connected_layer(uh, s);
+
+
+ copy_cpu(l.outputs*l.batch, uz.output, 1, l.z_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, wz.output, 1, l.z_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, ur.output, 1, l.r_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, wr.output, 1, l.r_cpu, 1);
+
+ activate_array(l.z_cpu, l.outputs*l.batch, LOGISTIC);
+ activate_array(l.r_cpu, l.outputs*l.batch, LOGISTIC);
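+ // z_cpu is the GRU update gate and r_cpu the reset gate; r scales how much of the previous state feeds the candidate activation below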
+
+ copy_cpu(l.outputs*l.batch, l.state, 1, l.forgot_state, 1);
+ mul_cpu(l.outputs*l.batch, l.r_cpu, 1, l.forgot_state, 1);
+
+ s.input = l.forgot_state;
+ forward_connected_layer(wh, s);
+
+ copy_cpu(l.outputs*l.batch, uh.output, 1, l.h_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, wh.output, 1, l.h_cpu, 1);
+
+ if(l.tanh){
+ activate_array(l.h_cpu, l.outputs*l.batch, TANH);
+ } else {
+ activate_array(l.h_cpu, l.outputs*l.batch, LOGISTIC);
+ }
+
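+ // mix the previous state and the candidate activation h_cpu according to the update gate z_cpu to form the new output/state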
+ weighted_sum_cpu(l.state, l.h_cpu, l.z_cpu, l.outputs*l.batch, l.output);
+
+ copy_cpu(l.outputs*l.batch, l.output, 1, l.state, 1);
+
+ net.input += l.inputs*l.batch;
+ l.output += l.outputs*l.batch;
+ increment_layer(&uz, 1);
+ increment_layer(&ur, 1);
+ increment_layer(&uh, 1);
+
+ increment_layer(&wz, 1);
+ increment_layer(&wr, 1);
+ increment_layer(&wh, 1);
+ }
+}
+
+void backward_gru_layer(layer l, network net)
+{
+}
+
+#ifdef GPU
+
+void pull_gru_layer(layer l)
+{
+}
+
+void push_gru_layer(layer l)
+{
+}
+
+void update_gru_layer_gpu(layer l, update_args a)
+{
+ update_connected_layer_gpu(*(l.ur), a);
+ update_connected_layer_gpu(*(l.uz), a);
+ update_connected_layer_gpu(*(l.uh), a);
+ update_connected_layer_gpu(*(l.wr), a);
+ update_connected_layer_gpu(*(l.wz), a);
+ update_connected_layer_gpu(*(l.wh), a);
+}
+
+void forward_gru_layer_gpu(layer l, network net)
+{
+ network s = {0};
+ s.train = net.train;
+ int i;
+ layer uz = *(l.uz);
+ layer ur = *(l.ur);
+ layer uh = *(l.uh);
+
+ layer wz = *(l.wz);
+ layer wr = *(l.wr);
+ layer wh = *(l.wh);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, uz.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, ur.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, uh.delta_gpu, 1);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wz.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wr.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wh.delta_gpu, 1);
+ if(net.train) {
+ fill_gpu(l.outputs * l.batch * l.steps, 0, l.delta_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.state_gpu, 1, l.prev_state_gpu, 1);
+ }
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input_gpu = l.state_gpu;
+ forward_connected_layer_gpu(wz, s);
+ forward_connected_layer_gpu(wr, s);
+
+ s.input_gpu = net.input_gpu;
+ forward_connected_layer_gpu(uz, s);
+ forward_connected_layer_gpu(ur, s);
+ forward_connected_layer_gpu(uh, s);
+
+ copy_gpu(l.outputs*l.batch, uz.output_gpu, 1, l.z_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wz.output_gpu, 1, l.z_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, ur.output_gpu, 1, l.r_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wr.output_gpu, 1, l.r_gpu, 1);
+
+ activate_array_gpu(l.z_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.r_gpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_gpu(l.outputs*l.batch, l.state_gpu, 1, l.forgot_state_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.r_gpu, 1, l.forgot_state_gpu, 1);
+
+ s.input_gpu = l.forgot_state_gpu;
+ forward_connected_layer_gpu(wh, s);
+
+ copy_gpu(l.outputs*l.batch, uh.output_gpu, 1, l.h_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wh.output_gpu, 1, l.h_gpu, 1);
+
+ if(l.tanh){
+ activate_array_gpu(l.h_gpu, l.outputs*l.batch, TANH);
+ } else {
+ activate_array_gpu(l.h_gpu, l.outputs*l.batch, LOGISTIC);
+ }
+
+ weighted_sum_gpu(l.state_gpu, l.h_gpu, l.z_gpu, l.outputs*l.batch, l.output_gpu);
+ copy_gpu(l.outputs*l.batch, l.output_gpu, 1, l.state_gpu, 1);
+
+ net.input_gpu += l.inputs*l.batch;
+ l.output_gpu += l.outputs*l.batch;
+ increment_layer(&uz, 1);
+ increment_layer(&ur, 1);
+ increment_layer(&uh, 1);
+
+ increment_layer(&wz, 1);
+ increment_layer(&wr, 1);
+ increment_layer(&wh, 1);
+ }
+}
+
+void backward_gru_layer_gpu(layer l, network net)
+{
+ network s = {0};
+ s.train = net.train;
+ int i;
+ layer uz = *(l.uz);
+ layer ur = *(l.ur);
+ layer uh = *(l.uh);
+
+ layer wz = *(l.wz);
+ layer wr = *(l.wr);
+ layer wh = *(l.wh);
+
+ increment_layer(&uz, l.steps - 1);
+ increment_layer(&ur, l.steps - 1);
+ increment_layer(&uh, l.steps - 1);
+
+ increment_layer(&wz, l.steps - 1);
+ increment_layer(&wr, l.steps - 1);
+ increment_layer(&wh, l.steps - 1);
+
+ net.input_gpu += l.inputs*l.batch*(l.steps-1);
+ if(net.delta_gpu) net.delta_gpu += l.inputs*l.batch*(l.steps-1);
+ l.output_gpu += l.outputs*l.batch*(l.steps-1);
+ l.delta_gpu += l.outputs*l.batch*(l.steps-1);
+ float *end_state = l.output_gpu;
+ for (i = l.steps-1; i >= 0; --i) {
+ if(i != 0) copy_gpu(l.outputs*l.batch, l.output_gpu - l.outputs*l.batch, 1, l.state_gpu, 1);
+ else copy_gpu(l.outputs*l.batch, l.prev_state_gpu, 1, l.state_gpu, 1);
+ float *prev_delta_gpu = (i == 0) ? 0 : l.delta_gpu - l.outputs*l.batch;
+
+ copy_gpu(l.outputs*l.batch, uz.output_gpu, 1, l.z_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wz.output_gpu, 1, l.z_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, ur.output_gpu, 1, l.r_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wr.output_gpu, 1, l.r_gpu, 1);
+
+ activate_array_gpu(l.z_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.r_gpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_gpu(l.outputs*l.batch, uh.output_gpu, 1, l.h_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, wh.output_gpu, 1, l.h_gpu, 1);
+
+ if(l.tanh){
+ activate_array_gpu(l.h_gpu, l.outputs*l.batch, TANH);
+ } else {
+ activate_array_gpu(l.h_gpu, l.outputs*l.batch, LOGISTIC);
+ }
+
+ weighted_delta_gpu(l.state_gpu, l.h_gpu, l.z_gpu, prev_delta_gpu, uh.delta_gpu, uz.delta_gpu, l.outputs*l.batch, l.delta_gpu);
+
+ if(l.tanh){
+ gradient_array_gpu(l.h_gpu, l.outputs*l.batch, TANH, uh.delta_gpu);
+ } else {
+ gradient_array_gpu(l.h_gpu, l.outputs*l.batch, LOGISTIC, uh.delta_gpu);
+ }
+
+ copy_gpu(l.outputs*l.batch, uh.delta_gpu, 1, wh.delta_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.state_gpu, 1, l.forgot_state_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.r_gpu, 1, l.forgot_state_gpu, 1);
+ fill_gpu(l.outputs*l.batch, 0, l.forgot_delta_gpu, 1);
+
+ s.input_gpu = l.forgot_state_gpu;
+ s.delta_gpu = l.forgot_delta_gpu;
+
+ backward_connected_layer_gpu(wh, s);
+ if(prev_delta_gpu) mult_add_into_gpu(l.outputs*l.batch, l.forgot_delta_gpu, l.r_gpu, prev_delta_gpu);
+ mult_add_into_gpu(l.outputs*l.batch, l.forgot_delta_gpu, l.state_gpu, ur.delta_gpu);
+
+ gradient_array_gpu(l.r_gpu, l.outputs*l.batch, LOGISTIC, ur.delta_gpu);
+ copy_gpu(l.outputs*l.batch, ur.delta_gpu, 1, wr.delta_gpu, 1);
+
+ gradient_array_gpu(l.z_gpu, l.outputs*l.batch, LOGISTIC, uz.delta_gpu);
+ copy_gpu(l.outputs*l.batch, uz.delta_gpu, 1, wz.delta_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+ s.delta_gpu = prev_delta_gpu;
+
+ backward_connected_layer_gpu(wr, s);
+ backward_connected_layer_gpu(wz, s);
+
+ s.input_gpu = net.input_gpu;
+ s.delta_gpu = net.delta_gpu;
+
+ backward_connected_layer_gpu(uh, s);
+ backward_connected_layer_gpu(ur, s);
+ backward_connected_layer_gpu(uz, s);
+
+
+ net.input_gpu -= l.inputs*l.batch;
+ if(net.delta_gpu) net.delta_gpu -= l.inputs*l.batch;
+ l.output_gpu -= l.outputs*l.batch;
+ l.delta_gpu -= l.outputs*l.batch;
+ increment_layer(&uz, -1);
+ increment_layer(&ur, -1);
+ increment_layer(&uh, -1);
+
+ increment_layer(&wz, -1);
+ increment_layer(&wr, -1);
+ increment_layer(&wh, -1);
+ }
+ copy_gpu(l.outputs*l.batch, end_state, 1, l.state_gpu, 1);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.h
new file mode 100644
index 00000000000..9067942e949
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/gru_layer.h
@@ -0,0 +1,24 @@
+
+#ifndef GRU_LAYER_H
+#define GRU_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
+
+void forward_gru_layer(layer l, network state);
+void backward_gru_layer(layer l, network state);
+void update_gru_layer(layer l, update_args a);
+
+#ifdef GPU
+void forward_gru_layer_gpu(layer l, network state);
+void backward_gru_layer_gpu(layer l, network state);
+void update_gru_layer_gpu(layer l, update_args a);
+void push_gru_layer(layer l);
+void pull_gru_layer(layer l);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.c
new file mode 100644
index 00000000000..69ec98a9d12
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.c
@@ -0,0 +1,40 @@
+#include "im2col.h"
+#include <stdio.h>
+float im2col_get_pixel(float *im, int height, int width, int channels,
+ int row, int col, int channel, int pad)
+{
+ row -= pad;
+ col -= pad;
+
+ if (row < 0 || col < 0 ||
+ row >= height || col >= width) return 0;
+ return im[col + width*(row + height*channel)];
+}
+
+//From Berkeley Vision's Caffe!
+//https://github.com/BVLC/caffe/blob/master/LICENSE
+void im2col_cpu(float* data_im,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float* data_col)
+{
+ int c,h,w;
+ int height_col = (height + 2*pad - ksize) / stride + 1;
+ int width_col = (width + 2*pad - ksize) / stride + 1;
+
+ int channels_col = channels * ksize * ksize;
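+ // each row c of data_col holds one (channel, ky, kx) kernel tap sampled at every output location, so the convolution can be computed as a single GEMM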
+ for (c = 0; c < channels_col; ++c) {
+ int w_offset = c % ksize;
+ int h_offset = (c / ksize) % ksize;
+ int c_im = c / ksize / ksize;
+ for (h = 0; h < height_col; ++h) {
+ for (w = 0; w < width_col; ++w) {
+ int im_row = h_offset + h * stride;
+ int im_col = w_offset + w * stride;
+ int col_index = (c * height_col + h) * width_col + w;
+ data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
+ im_row, im_col, c_im, pad);
+ }
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.h
new file mode 100644
index 00000000000..02c4247fad9
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col.h
@@ -0,0 +1,15 @@
+#ifndef IM2COL_H
+#define IM2COL_H
+
+void im2col_cpu(float* data_im,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float* data_col);
+
+#ifdef GPU
+
+void im2col_gpu(float *im,
+ int channels, int height, int width,
+ int ksize, int stride, int pad,float *data_col);
+
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col_kernels.cu
new file mode 100644
index 00000000000..07b5e67989a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/im2col_kernels.cu
@@ -0,0 +1,61 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "im2col.h"
+#include "cuda.h"
+}
+
+// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
+// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
+
+__global__ void im2col_gpu_kernel(const int n, const float* data_im,
+ const int height, const int width, const int ksize,
+ const int pad,
+ const int stride,
+ const int height_col, const int width_col,
+ float *data_col) {
+ int index = blockIdx.x*blockDim.x+threadIdx.x;
+ for(; index < n; index += blockDim.x*gridDim.x){
+ int w_out = index % width_col;
+ int h_index = index / width_col;
+ int h_out = h_index % height_col;
+ int channel_in = h_index / height_col;
+ int channel_out = channel_in * ksize * ksize;
+ int h_in = h_out * stride - pad;
+ int w_in = w_out * stride - pad;
+ float* data_col_ptr = data_col;
+ data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
+ const float* data_im_ptr = data_im;
+ data_im_ptr += (channel_in * height + h_in) * width + w_in;
+ for (int i = 0; i < ksize; ++i) {
+ for (int j = 0; j < ksize; ++j) {
+ int h = h_in + i;
+ int w = w_in + j;
+
+ *data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
+ data_im_ptr[i * width + j] : 0;
+
+ //*data_col_ptr = data_im_ptr[ii * width + jj];
+
+ data_col_ptr += height_col * width_col;
+ }
+ }
+ }
+}
+
+void im2col_gpu(float *im,
+ int channels, int height, int width,
+ int ksize, int stride, int pad, float *data_col){
+ // We are going to launch channels * height_col * width_col kernels, each
+ // kernel responsible for copying a single-channel grid.
+ int height_col = (height + 2 * pad - ksize) / stride + 1;
+ int width_col = (width + 2 * pad - ksize) / stride + 1;
+ int num_kernels = channels * height_col * width_col;
+ im2col_gpu_kernel<<<(num_kernels+BLOCK-1)/BLOCK,
+ BLOCK>>>(
+ num_kernels, im, height, width, ksize, pad,
+ stride, height_col,
+ width_col, data_col);
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.c
new file mode 100644
index 00000000000..786acb089e3
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.c
@@ -0,0 +1,1615 @@
+#include "image.h"
+#include "utils.h"
+#include "blas.h"
+#include "cuda.h"
+#include <stdio.h>
+#include <math.h>
+
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+#define STB_IMAGE_WRITE_IMPLEMENTATION
+#include "stb_image_write.h"
+
+int windows = 0;
+
+float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
+
+float get_color(int c, int x, int max)
+{
+ float ratio = ((float)x/max)*5;
+ int i = floor(ratio);
+ int j = ceil(ratio);
+ ratio -= i;
+ float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
+ //printf("%f\n", r);
+ return r;
+}
+
+image mask_to_rgb(image mask)
+{
+ int n = mask.c;
+ image im = make_image(mask.w, mask.h, 3);
+ int i, j;
+ for(j = 0; j < n; ++j){
+ int offset = j*123457 % n;
+ float red = get_color(2,offset,n);
+ float green = get_color(1,offset,n);
+ float blue = get_color(0,offset,n);
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i + 0*im.w*im.h] += mask.data[j*im.h*im.w + i]*red;
+ im.data[i + 1*im.w*im.h] += mask.data[j*im.h*im.w + i]*green;
+ im.data[i + 2*im.w*im.h] += mask.data[j*im.h*im.w + i]*blue;
+ }
+ }
+ return im;
+}
+
+static float get_pixel(image m, int x, int y, int c)
+{
+ assert(x < m.w && y < m.h && c < m.c);
+ return m.data[c*m.h*m.w + y*m.w + x];
+}
+static float get_pixel_extend(image m, int x, int y, int c)
+{
+ if(x < 0 || x >= m.w || y < 0 || y >= m.h) return 0;
+ /*
+ if(x < 0) x = 0;
+ if(x >= m.w) x = m.w-1;
+ if(y < 0) y = 0;
+ if(y >= m.h) y = m.h-1;
+ */
+ if(c < 0 || c >= m.c) return 0;
+ return get_pixel(m, x, y, c);
+}
+static void set_pixel(image m, int x, int y, int c, float val)
+{
+ if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
+ assert(x < m.w && y < m.h && c < m.c);
+ m.data[c*m.h*m.w + y*m.w + x] = val;
+}
+static void add_pixel(image m, int x, int y, int c, float val)
+{
+ assert(x < m.w && y < m.h && c < m.c);
+ m.data[c*m.h*m.w + y*m.w + x] += val;
+}
+
+static float bilinear_interpolate(image im, float x, float y, int c)
+{
+ int ix = (int) floorf(x);
+ int iy = (int) floorf(y);
+
+ float dx = x - ix;
+ float dy = y - iy;
+
+ float val = (1-dy) * (1-dx) * get_pixel_extend(im, ix, iy, c) +
+ dy * (1-dx) * get_pixel_extend(im, ix, iy+1, c) +
+ (1-dy) * dx * get_pixel_extend(im, ix+1, iy, c) +
+ dy * dx * get_pixel_extend(im, ix+1, iy+1, c);
+ return val;
+}
+
+
+void composite_image(image source, image dest, int dx, int dy)
+{
+ int x,y,k;
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float val = get_pixel(source, x, y, k);
+ float val2 = get_pixel_extend(dest, dx+x, dy+y, k);
+ set_pixel(dest, dx+x, dy+y, k, val * val2);
+ }
+ }
+ }
+}
+
+image border_image(image a, int border)
+{
+ image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
+ int x,y,k;
+ for(k = 0; k < b.c; ++k){
+ for(y = 0; y < b.h; ++y){
+ for(x = 0; x < b.w; ++x){
+ float val = get_pixel_extend(a, x - border, y - border, k);
+ if(x - border < 0 || x - border >= a.w || y - border < 0 || y - border >= a.h) val = 1;
+ set_pixel(b, x, y, k, val);
+ }
+ }
+ }
+ return b;
+}
+
+image tile_images(image a, image b, int dx)
+{
+ if(a.w == 0) return copy_image(b);
+ image c = make_image(a.w + b.w + dx, (a.h > b.h) ? a.h : b.h, (a.c > b.c) ? a.c : b.c);
+ fill_cpu(c.w*c.h*c.c, 1, c.data, 1);
+ embed_image(a, c, 0, 0);
+ composite_image(b, c, a.w + dx, 0);
+ return c;
+}
+
+image get_label(image **characters, char *string, int size)
+{
+ size = size/10;
+ if(size > 7) size = 7;
+ image label = make_empty_image(0,0,0);
+ while(*string){
+ image l = characters[size][(int)*string];
+ image n = tile_images(label, l, -size - 1 + (size+1)/2);
+ free_image(label);
+ label = n;
+ ++string;
+ }
+ image b = border_image(label, label.h*.25);
+ free_image(label);
+ return b;
+}
+
+void draw_label(image a, int r, int c, image label, const float *rgb)
+{
+ int w = label.w;
+ int h = label.h;
+ if (r - h >= 0) r = r - h;
+
+ int i, j, k;
+ for(j = 0; j < h && j + r < a.h; ++j){
+ for(i = 0; i < w && i + c < a.w; ++i){
+ for(k = 0; k < label.c; ++k){
+ float val = get_pixel(label, i, j, k);
+ set_pixel(a, i+c, j+r, k, rgb[k] * val);
+ }
+ }
+ }
+}
+
+void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
+{
+ //normalize_image(a);
+ int i;
+ if(x1 < 0) x1 = 0;
+ if(x1 >= a.w) x1 = a.w-1;
+ if(x2 < 0) x2 = 0;
+ if(x2 >= a.w) x2 = a.w-1;
+
+ if(y1 < 0) y1 = 0;
+ if(y1 >= a.h) y1 = a.h-1;
+ if(y2 < 0) y2 = 0;
+ if(y2 >= a.h) y2 = a.h-1;
+
+ for(i = x1; i <= x2; ++i){
+ a.data[i + y1*a.w + 0*a.w*a.h] = r;
+ a.data[i + y2*a.w + 0*a.w*a.h] = r;
+
+ a.data[i + y1*a.w + 1*a.w*a.h] = g;
+ a.data[i + y2*a.w + 1*a.w*a.h] = g;
+
+ a.data[i + y1*a.w + 2*a.w*a.h] = b;
+ a.data[i + y2*a.w + 2*a.w*a.h] = b;
+ }
+ for(i = y1; i <= y2; ++i){
+ a.data[x1 + i*a.w + 0*a.w*a.h] = r;
+ a.data[x2 + i*a.w + 0*a.w*a.h] = r;
+
+ a.data[x1 + i*a.w + 1*a.w*a.h] = g;
+ a.data[x2 + i*a.w + 1*a.w*a.h] = g;
+
+ a.data[x1 + i*a.w + 2*a.w*a.h] = b;
+ a.data[x2 + i*a.w + 2*a.w*a.h] = b;
+ }
+}
+
+void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
+{
+ int i;
+ for(i = 0; i < w; ++i){
+ draw_box(a, x1+i, y1+i, x2-i, y2-i, r, g, b);
+ }
+}
+
+void draw_bbox(image a, box bbox, int w, float r, float g, float b)
+{
+ int left = (bbox.x-bbox.w/2)*a.w;
+ int right = (bbox.x+bbox.w/2)*a.w;
+ int top = (bbox.y-bbox.h/2)*a.h;
+ int bot = (bbox.y+bbox.h/2)*a.h;
+
+ int i;
+ for(i = 0; i < w; ++i){
+ draw_box(a, left+i, top+i, right-i, bot-i, r, g, b);
+ }
+}
+
+image **load_alphabet()
+{
+ int i, j;
+ const int nsize = 8;
+ image **alphabets = calloc(nsize, sizeof(image));
+ for(j = 0; j < nsize; ++j){
+ alphabets[j] = calloc(128, sizeof(image));
+ for(i = 32; i < 127; ++i){
+ char buff[256];
+ sprintf(buff, "data/labels/%d_%d.png", i, j);
+ alphabets[j][i] = load_image_color(buff, 0, 0);
+ }
+ }
+ return alphabets;
+}
+
+void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
+{
+ int i,j;
+
+ for(i = 0; i < num; ++i){
+ char labelstr[4096] = {0};
+ int class = -1;
+ for(j = 0; j < classes; ++j){
+ if (dets[i].prob[j] > thresh){
+ if (class < 0) {
+ strcat(labelstr, names[j]);
+ class = j;
+ } else {
+ strcat(labelstr, ", ");
+ strcat(labelstr, names[j]);
+ }
+ printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
+ }
+ }
+ if(class >= 0){
+ int width = im.h * .006;
+
+ /*
+ if(0){
+ width = pow(prob, 1./2.)*10+1;
+ alphabet = 0;
+ }
+ */
+
+ //printf("%d %s: %.0f%%\n", i, names[class], prob*100);
+ int offset = class*123457 % classes;
+ float red = get_color(2,offset,classes);
+ float green = get_color(1,offset,classes);
+ float blue = get_color(0,offset,classes);
+ float rgb[3];
+
+ //width = prob*20+2;
+
+ rgb[0] = red;
+ rgb[1] = green;
+ rgb[2] = blue;
+ box b = dets[i].bbox;
+ //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
+
+ int left = (b.x-b.w/2.)*im.w;
+ int right = (b.x+b.w/2.)*im.w;
+ int top = (b.y-b.h/2.)*im.h;
+ int bot = (b.y+b.h/2.)*im.h;
+
+ if(left < 0) left = 0;
+ if(right > im.w-1) right = im.w-1;
+ if(top < 0) top = 0;
+ if(bot > im.h-1) bot = im.h-1;
+
+ draw_box_width(im, left, top, right, bot, width, red, green, blue);
+ if (alphabet) {
+ image label = get_label(alphabet, labelstr, (im.h*.03));
+ draw_label(im, top + width, left, label, rgb);
+ free_image(label);
+ }
+ if (dets[i].mask){
+ image mask = float_to_image(14, 14, 1, dets[i].mask);
+ image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
+ image tmask = threshold_image(resized_mask, .5);
+ embed_image(tmask, im, left, top);
+ free_image(mask);
+ free_image(resized_mask);
+ free_image(tmask);
+ }
+ }
+ }
+}
+
+void transpose_image(image im)
+{
+ assert(im.w == im.h);
+ int n, m;
+ int c;
+ for(c = 0; c < im.c; ++c){
+ for(n = 0; n < im.w-1; ++n){
+ for(m = n + 1; m < im.w; ++m){
+ float swap = im.data[m + im.w*(n + im.h*c)];
+ im.data[m + im.w*(n + im.h*c)] = im.data[n + im.w*(m + im.h*c)];
+ im.data[n + im.w*(m + im.h*c)] = swap;
+ }
+ }
+ }
+}
+
+void rotate_image_cw(image im, int times)
+{
+ assert(im.w == im.h);
+ times = (times + 400) % 4;
+ int i, x, y, c;
+ int n = im.w;
+ for(i = 0; i < times; ++i){
+ for(c = 0; c < im.c; ++c){
+ for(x = 0; x < n/2; ++x){
+ for(y = 0; y < (n-1)/2 + 1; ++y){
+ float temp = im.data[y + im.w*(x + im.h*c)];
+ im.data[y + im.w*(x + im.h*c)] = im.data[n-1-x + im.w*(y + im.h*c)];
+ im.data[n-1-x + im.w*(y + im.h*c)] = im.data[n-1-y + im.w*(n-1-x + im.h*c)];
+ im.data[n-1-y + im.w*(n-1-x + im.h*c)] = im.data[x + im.w*(n-1-y + im.h*c)];
+ im.data[x + im.w*(n-1-y + im.h*c)] = temp;
+ }
+ }
+ }
+ }
+}
+
+void flip_image(image a)
+{
+ int i,j,k;
+ for(k = 0; k < a.c; ++k){
+ for(i = 0; i < a.h; ++i){
+ for(j = 0; j < a.w/2; ++j){
+ int index = j + a.w*(i + a.h*(k));
+ int flip = (a.w - j - 1) + a.w*(i + a.h*(k));
+ float swap = a.data[flip];
+ a.data[flip] = a.data[index];
+ a.data[index] = swap;
+ }
+ }
+ }
+}
+
+image image_distance(image a, image b)
+{
+ int i,j;
+ image dist = make_image(a.w, a.h, 1);
+ for(i = 0; i < a.c; ++i){
+ for(j = 0; j < a.h*a.w; ++j){
+ dist.data[j] += pow(a.data[i*a.h*a.w+j]-b.data[i*a.h*a.w+j],2);
+ }
+ }
+ for(j = 0; j < a.h*a.w; ++j){
+ dist.data[j] = sqrt(dist.data[j]);
+ }
+ return dist;
+}
+
+void ghost_image(image source, image dest, int dx, int dy)
+{
+ int x,y,k;
+ float max_dist = sqrt((-source.w/2. + .5)*(-source.w/2. + .5));
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float dist = sqrt((x - source.w/2. + .5)*(x - source.w/2. + .5) + (y - source.h/2. + .5)*(y - source.h/2. + .5));
+ float alpha = (1 - dist/max_dist);
+ if(alpha < 0) alpha = 0;
+ float v1 = get_pixel(source, x,y,k);
+ float v2 = get_pixel(dest, dx+x,dy+y,k);
+ float val = alpha*v1 + (1-alpha)*v2;
+ set_pixel(dest, dx+x, dy+y, k, val);
+ }
+ }
+ }
+}
+
+void blocky_image(image im, int s)
+{
+ int i,j,k;
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
+ }
+ }
+ }
+}
+
+void censor_image(image im, int dx, int dy, int w, int h)
+{
+ int i,j,k;
+ int s = 32;
+ if(dx < 0) dx = 0;
+ if(dy < 0) dy = 0;
+
+ for(k = 0; k < im.c; ++k){
+ for(j = dy; j < dy + h && j < im.h; ++j){
+ for(i = dx; i < dx + w && i < im.w; ++i){
+ im.data[i + im.w*(j + im.h*k)] = im.data[i/s*s + im.w*(j/s*s + im.h*k)];
+ //im.data[i + j*im.w + k*im.w*im.h] = 0;
+ }
+ }
+ }
+}
+
+void embed_image(image source, image dest, int dx, int dy)
+{
+ int x,y,k;
+ for(k = 0; k < source.c; ++k){
+ for(y = 0; y < source.h; ++y){
+ for(x = 0; x < source.w; ++x){
+ float val = get_pixel(source, x,y,k);
+ set_pixel(dest, dx+x, dy+y, k, val);
+ }
+ }
+ }
+}
+
+image collapse_image_layers(image source, int border)
+{
+ int h = source.h;
+ h = (h+border)*source.c - border;
+ image dest = make_image(source.w, h, 1);
+ int i;
+ for(i = 0; i < source.c; ++i){
+ image layer = get_image_layer(source, i);
+ int h_offset = i*(source.h+border);
+ embed_image(layer, dest, 0, h_offset);
+ free_image(layer);
+ }
+ return dest;
+}
+
+void constrain_image(image im)
+{
+ int i;
+ for(i = 0; i < im.w*im.h*im.c; ++i){
+ if(im.data[i] < 0) im.data[i] = 0;
+ if(im.data[i] > 1) im.data[i] = 1;
+ }
+}
+
+void normalize_image(image p)
+{
+ int i;
+ float min = 9999999;
+ float max = -999999;
+
+ for(i = 0; i < p.h*p.w*p.c; ++i){
+ float v = p.data[i];
+ if(v < min) min = v;
+ if(v > max) max = v;
+ }
+ if(max - min < .000000001){
+ min = 0;
+ max = 1;
+ }
+ for(i = 0; i < p.c*p.w*p.h; ++i){
+ p.data[i] = (p.data[i] - min)/(max-min);
+ }
+}
+
+void normalize_image2(image p)
+{
+ float *min = calloc(p.c, sizeof(float));
+ float *max = calloc(p.c, sizeof(float));
+ int i,j;
+ for(i = 0; i < p.c; ++i) min[i] = max[i] = p.data[i*p.h*p.w];
+
+ for(j = 0; j < p.c; ++j){
+ for(i = 0; i < p.h*p.w; ++i){
+ float v = p.data[i+j*p.h*p.w];
+ if(v < min[j]) min[j] = v;
+ if(v > max[j]) max[j] = v;
+ }
+ }
+ for(i = 0; i < p.c; ++i){
+ if(max[i] - min[i] < .000000001){
+ min[i] = 0;
+ max[i] = 1;
+ }
+ }
+ for(j = 0; j < p.c; ++j){
+ for(i = 0; i < p.w*p.h; ++i){
+ p.data[i+j*p.h*p.w] = (p.data[i+j*p.h*p.w] - min[j])/(max[j]-min[j]);
+ }
+ }
+ free(min);
+ free(max);
+}
+
+void copy_image_into(image src, image dest)
+{
+ memcpy(dest.data, src.data, src.h*src.w*src.c*sizeof(float));
+}
+
+image copy_image(image p)
+{
+ image copy = p;
+ copy.data = calloc(p.h*p.w*p.c, sizeof(float));
+ memcpy(copy.data, p.data, p.h*p.w*p.c*sizeof(float));
+ return copy;
+}
+
+void rgbgr_image(image im)
+{
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ float swap = im.data[i];
+ im.data[i] = im.data[i+im.w*im.h*2];
+ im.data[i+im.w*im.h*2] = swap;
+ }
+}
+
+#ifdef OPENCV
+void show_image_cv(image p, const char *name, IplImage *disp)
+{
+ int x,y,k;
+ if(p.c == 3) rgbgr_image(p);
+ //normalize_image(copy);
+
+ char buff[256];
+ //sprintf(buff, "%s (%d)", name, windows);
+ sprintf(buff, "%s", name);
+
+ int step = disp->widthStep;
+ cvNamedWindow(buff, CV_WINDOW_NORMAL);
+ //cvMoveWindow(buff, 100*(windows%10) + 200*(windows/10), 100*(windows%10));
+ ++windows;
+ for(y = 0; y < p.h; ++y){
+ for(x = 0; x < p.w; ++x){
+ for(k= 0; k < p.c; ++k){
+ disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(p,x,y,k)*255);
+ }
+ }
+ }
+ if(0){
+ int w = 448;
+ int h = w*p.h/p.w;
+ if(h > 1000){
+ h = 1000;
+ w = h*p.w/p.h;
+ }
+ IplImage *buffer = disp;
+ disp = cvCreateImage(cvSize(w, h), buffer->depth, buffer->nChannels);
+ cvResize(buffer, disp, CV_INTER_LINEAR);
+ cvReleaseImage(&buffer);
+ }
+ cvShowImage(buff, disp);
+}
+#endif
+
+void show_image(image p, const char *name)
+{
+#ifdef OPENCV
+ IplImage *disp = cvCreateImage(cvSize(p.w,p.h), IPL_DEPTH_8U, p.c);
+ image copy = copy_image(p);
+ constrain_image(copy);
+ show_image_cv(copy, name, disp);
+ free_image(copy);
+ cvReleaseImage(&disp);
+#else
+ fprintf(stderr, "Not compiled with OpenCV, saving to %s.png instead\n", name);
+ save_image(p, name);
+#endif
+}
+
+#ifdef OPENCV
+
+void ipl_into_image(IplImage* src, image im)
+{
+ unsigned char *data = (unsigned char *)src->imageData;
+ int h = src->height;
+ int w = src->width;
+ int c = src->nChannels;
+ int step = src->widthStep;
+ int i, j, k;
+
+ for(i = 0; i < h; ++i){
+ for(k= 0; k < c; ++k){
+ for(j = 0; j < w; ++j){
+ im.data[k*w*h + i*w + j] = data[i*step + j*c + k]/255.;
+ }
+ }
+ }
+}
+
+image ipl_to_image(IplImage* src)
+{
+ int h = src->height;
+ int w = src->width;
+ int c = src->nChannels;
+ image out = make_image(w, h, c);
+ ipl_into_image(src, out);
+ return out;
+}
+
+image load_image_cv(char *filename, int channels)
+{
+ IplImage* src = 0;
+ int flag = -1;
+ if (channels == 0) flag = -1;
+ else if (channels == 1) flag = 0;
+ else if (channels == 3) flag = 1;
+ else {
+ fprintf(stderr, "OpenCV can't force load with %d channels\n", channels);
+ }
+
+ if( (src = cvLoadImage(filename, flag)) == 0 )
+ {
+ fprintf(stderr, "Cannot load image \"%s\"\n", filename);
+ char buff[256];
+ sprintf(buff, "echo %s >> bad.list", filename);
+ system(buff);
+ return make_image(10,10,3);
+ //exit(0);
+ }
+ image out = ipl_to_image(src);
+ cvReleaseImage(&src);
+ rgbgr_image(out);
+ return out;
+}
+
+void flush_stream_buffer(CvCapture *cap, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i) {
+ cvQueryFrame(cap);
+ }
+}
+
+image get_image_from_stream(CvCapture *cap)
+{
+ IplImage* src = cvQueryFrame(cap);
+ if (!src) return make_empty_image(0,0,0);
+ image im = ipl_to_image(src);
+ rgbgr_image(im);
+ return im;
+}
+
+int fill_image_from_stream(CvCapture *cap, image im)
+{
+ IplImage* src = cvQueryFrame(cap);
+ if (!src) return 0;
+ ipl_into_image(src, im);
+ rgbgr_image(im);
+ return 1;
+}
+
+void save_image_jpg(image p, const char *name)
+{
+ image copy = copy_image(p);
+ if(p.c == 3) rgbgr_image(copy);
+ int x,y,k;
+
+ char buff[256];
+ sprintf(buff, "%s.jpg", name);
+
+ IplImage *disp = cvCreateImage(cvSize(p.w,p.h), IPL_DEPTH_8U, p.c);
+ int step = disp->widthStep;
+ for(y = 0; y < p.h; ++y){
+ for(x = 0; x < p.w; ++x){
+ for(k= 0; k < p.c; ++k){
+ disp->imageData[y*step + x*p.c + k] = (unsigned char)(get_pixel(copy,x,y,k)*255);
+ }
+ }
+ }
+ cvSaveImage(buff, disp,0);
+ cvReleaseImage(&disp);
+ free_image(copy);
+}
+#endif
+
+void save_image_png(image im, const char *name)
+{
+ char buff[256];
+ //sprintf(buff, "%s (%d)", name, windows);
+ sprintf(buff, "%s.png", name);
+ unsigned char *data = calloc(im.w*im.h*im.c, sizeof(char));
+ int i,k;
+ for(k = 0; k < im.c; ++k){
+ for(i = 0; i < im.w*im.h; ++i){
+ data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
+ }
+ }
+ int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
+ free(data);
+ if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
+}
+
+void save_image(image im, const char *name)
+{
+#ifdef OPENCV
+ save_image_jpg(im, name);
+#else
+ save_image_png(im, name);
+#endif
+}
+
+
+void show_image_layers(image p, char *name)
+{
+ int i;
+ char buff[256];
+ for(i = 0; i < p.c; ++i){
+ sprintf(buff, "%s - Layer %d", name, i);
+ image layer = get_image_layer(p, i);
+ show_image(layer, buff);
+ free_image(layer);
+ }
+}
+
+void show_image_collapsed(image p, char *name)
+{
+ image c = collapse_image_layers(p, 1);
+ show_image(c, name);
+ free_image(c);
+}
+
+image make_empty_image(int w, int h, int c)
+{
+ image out;
+ out.data = 0;
+ out.h = h;
+ out.w = w;
+ out.c = c;
+ return out;
+}
+
+image make_image(int w, int h, int c)
+{
+ image out = make_empty_image(w,h,c);
+ out.data = calloc(h*w*c, sizeof(float));
+ return out;
+}
+
+image make_random_image(int w, int h, int c)
+{
+ image out = make_empty_image(w,h,c);
+ out.data = calloc(h*w*c, sizeof(float));
+ int i;
+ for(i = 0; i < w*h*c; ++i){
+ out.data[i] = (rand_normal() * .25) + .5;
+ }
+ return out;
+}
+
+image float_to_image(int w, int h, int c, float *data)
+{
+ image out = make_empty_image(w,h,c);
+ out.data = data;
+ return out;
+}
+
+void place_image(image im, int w, int h, int dx, int dy, image canvas)
+{
+ int x, y, c;
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < h; ++y){
+ for(x = 0; x < w; ++x){
+ float rx = ((float)x / w) * im.w;
+ float ry = ((float)y / h) * im.h;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(canvas, x + dx, y + dy, c, val);
+ }
+ }
+ }
+}
+
+image center_crop_image(image im, int w, int h)
+{
+ int m = (im.w < im.h) ? im.w : im.h;
+ image c = crop_image(im, (im.w - m) / 2, (im.h - m)/2, m, m);
+ image r = resize_image(c, w, h);
+ free_image(c);
+ return r;
+}
+
+image rotate_crop_image(image im, float rad, float s, int w, int h, float dx, float dy, float aspect)
+{
+ int x, y, c;
+ float cx = im.w/2.;
+ float cy = im.h/2.;
+ image rot = make_image(w, h, im.c);
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < h; ++y){
+ for(x = 0; x < w; ++x){
+ float rx = cos(rad)*((x - w/2.)/s*aspect + dx/s*aspect) - sin(rad)*((y - h/2.)/s + dy/s) + cx;
+ float ry = sin(rad)*((x - w/2.)/s*aspect + dx/s*aspect) + cos(rad)*((y - h/2.)/s + dy/s) + cy;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(rot, x, y, c, val);
+ }
+ }
+ }
+ return rot;
+}
+
+image rotate_image(image im, float rad)
+{
+ int x, y, c;
+ float cx = im.w/2.;
+ float cy = im.h/2.;
+ image rot = make_image(im.w, im.h, im.c);
+ for(c = 0; c < im.c; ++c){
+ for(y = 0; y < im.h; ++y){
+ for(x = 0; x < im.w; ++x){
+ float rx = cos(rad)*(x-cx) - sin(rad)*(y-cy) + cx;
+ float ry = sin(rad)*(x-cx) + cos(rad)*(y-cy) + cy;
+ float val = bilinear_interpolate(im, rx, ry, c);
+ set_pixel(rot, x, y, c, val);
+ }
+ }
+ }
+ return rot;
+}
+
+void fill_image(image m, float s)
+{
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] = s;
+}
+
+void translate_image(image m, float s)
+{
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] += s;
+}
+
+void scale_image(image m, float s)
+{
+ int i;
+ for(i = 0; i < m.h*m.w*m.c; ++i) m.data[i] *= s;
+}
+
+image crop_image(image im, int dx, int dy, int w, int h)
+{
+ image cropped = make_image(w, h, im.c);
+ int i, j, k;
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < h; ++j){
+ for(i = 0; i < w; ++i){
+ int r = j + dy;
+ int c = i + dx;
+ float val = 0;
+ r = constrain_int(r, 0, im.h-1);
+ c = constrain_int(c, 0, im.w-1);
+ val = get_pixel(im, c, r, k);
+ set_pixel(cropped, i, j, k, val);
+ }
+ }
+ }
+ return cropped;
+}
+
+int best_3d_shift_r(image a, image b, int min, int max)
+{
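+ // binary search over the vertical shift of b that minimizes the sampled pixel distance to a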
+ if(min == max) return min;
+ int mid = floor((min + max) / 2.);
+ image c1 = crop_image(b, 0, mid, b.w, b.h);
+ image c2 = crop_image(b, 0, mid+1, b.w, b.h);
+ float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 10);
+ float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 10);
+ free_image(c1);
+ free_image(c2);
+ if(d1 < d2) return best_3d_shift_r(a, b, min, mid);
+ else return best_3d_shift_r(a, b, mid+1, max);
+}
+
+int best_3d_shift(image a, image b, int min, int max)
+{
+ int i;
+ int best = 0;
+ float best_distance = FLT_MAX;
+ for(i = min; i <= max; i += 2){
+ image c = crop_image(b, 0, i, b.w, b.h);
+ float d = dist_array(c.data, a.data, a.w*a.h*a.c, 100);
+ if(d < best_distance){
+ best_distance = d;
+ best = i;
+ }
+ printf("%d %f\n", i, d);
+ free_image(c);
+ }
+ return best;
+}
+
+void composite_3d(char *f1, char *f2, char *out, int delta)
+{
+ if(!out) out = "out";
+ image a = load_image(f1, 0,0,0);
+ image b = load_image(f2, 0,0,0);
+ int shift = best_3d_shift_r(a, b, -a.h/100, a.h/100);
+
+ image c1 = crop_image(b, 10, shift, b.w, b.h);
+ float d1 = dist_array(c1.data, a.data, a.w*a.h*a.c, 100);
+ image c2 = crop_image(b, -10, shift, b.w, b.h);
+ float d2 = dist_array(c2.data, a.data, a.w*a.h*a.c, 100);
+
+ if(d2 < d1 && 0){
+ image swap = a;
+ a = b;
+ b = swap;
+ shift = -shift;
+ printf("swapped, %d\n", shift);
+ }
+ else{
+ printf("%d\n", shift);
+ }
+
+ image c = crop_image(b, delta, shift, a.w, a.h);
+ int i;
+ for(i = 0; i < c.w*c.h; ++i){
+ c.data[i] = a.data[i];
+ }
+#ifdef OPENCV
+ save_image_jpg(c, out);
+#else
+ save_image(c, out);
+#endif
+}
+
+void letterbox_image_into(image im, int w, int h, image boxed)
+{
+ int new_w = im.w;
+ int new_h = im.h;
+ if (((float)w/im.w) < ((float)h/im.h)) {
+ new_w = w;
+ new_h = (im.h * w)/im.w;
+ } else {
+ new_h = h;
+ new_w = (im.w * h)/im.h;
+ }
+ image resized = resize_image(im, new_w, new_h);
+ embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
+ free_image(resized);
+}
+
+image letterbox_image(image im, int w, int h)
+{
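+ // scale the image to fit inside w x h while keeping its aspect ratio, then pad the border with 0.5 gray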
+ int new_w = im.w;
+ int new_h = im.h;
+ if (((float)w/im.w) < ((float)h/im.h)) {
+ new_w = w;
+ new_h = (im.h * w)/im.w;
+ } else {
+ new_h = h;
+ new_w = (im.w * h)/im.h;
+ }
+ image resized = resize_image(im, new_w, new_h);
+ image boxed = make_image(w, h, im.c);
+ fill_image(boxed, .5);
+ //int i;
+ //for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
+ embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
+ free_image(resized);
+ return boxed;
+}
+
+image resize_max(image im, int max)
+{
+ int w = im.w;
+ int h = im.h;
+ if(w > h){
+ h = (h * max) / w;
+ w = max;
+ } else {
+ w = (w * max) / h;
+ h = max;
+ }
+ if(w == im.w && h == im.h) return im;
+ image resized = resize_image(im, w, h);
+ return resized;
+}
+
+image resize_min(image im, int min)
+{
+ int w = im.w;
+ int h = im.h;
+ if(w < h){
+ h = (h * min) / w;
+ w = min;
+ } else {
+ w = (w * min) / h;
+ h = min;
+ }
+ if(w == im.w && h == im.h) return im;
+ image resized = resize_image(im, w, h);
+ return resized;
+}
+
+image random_crop_image(image im, int w, int h)
+{
+ int dx = rand_int(0, im.w - w);
+ int dy = rand_int(0, im.h - h);
+ image crop = crop_image(im, dx, dy, w, h);
+ return crop;
+}
+
+augment_args random_augment_args(image im, float angle, float aspect, int low, int high, int w, int h)
+{
+ augment_args a = {0};
+ aspect = rand_scale(aspect);
+ int r = rand_int(low, high);
+ int min = (im.h < im.w*aspect) ? im.h : im.w*aspect;
+ float scale = (float)r / min;
+
+ float rad = rand_uniform(-angle, angle) * TWO_PI / 360.;
+
+ float dx = (im.w*scale/aspect - w) / 2.;
+ float dy = (im.h*scale - w) / 2.;
+ //if(dx < 0) dx = 0;
+ //if(dy < 0) dy = 0;
+ dx = rand_uniform(-dx, dx);
+ dy = rand_uniform(-dy, dy);
+
+ a.rad = rad;
+ a.scale = scale;
+ a.w = w;
+ a.h = h;
+ a.dx = dx;
+ a.dy = dy;
+ a.aspect = aspect;
+ return a;
+}
+
+image random_augment_image(image im, float angle, float aspect, int low, int high, int w, int h)
+{
+ augment_args a = random_augment_args(im, angle, aspect, low, high, w, h);
+ image crop = rotate_crop_image(im, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
+ return crop;
+}
+
+float three_way_max(float a, float b, float c)
+{
+ return (a > b) ? ( (a > c) ? a : c) : ( (b > c) ? b : c) ;
+}
+
+float three_way_min(float a, float b, float c)
+{
+ return (a < b) ? ( (a < c) ? a : c) : ( (b < c) ? b : c) ;
+}
+
+void yuv_to_rgb(image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float y, u, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ y = get_pixel(im, i , j, 0);
+ u = get_pixel(im, i , j, 1);
+ v = get_pixel(im, i , j, 2);
+
+ r = y + 1.13983*v;
+ g = y + -.39465*u + -.58060*v;
+ b = y + 2.03211*u;
+
+ set_pixel(im, i, j, 0, r);
+ set_pixel(im, i, j, 1, g);
+ set_pixel(im, i, j, 2, b);
+ }
+ }
+}
+
+void rgb_to_yuv(image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float y, u, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ r = get_pixel(im, i , j, 0);
+ g = get_pixel(im, i , j, 1);
+ b = get_pixel(im, i , j, 2);
+
+ y = .299*r + .587*g + .114*b;
+ u = -.14713*r + -.28886*g + .436*b;
+ v = .615*r + -.51499*g + -.10001*b;
+
+ set_pixel(im, i, j, 0, y);
+ set_pixel(im, i, j, 1, u);
+ set_pixel(im, i, j, 2, v);
+ }
+ }
+}
+
+// http://www.cs.rit.edu/~ncs/color/t_convert.html
+void rgb_to_hsv(image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float h, s, v;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ r = get_pixel(im, i , j, 0);
+ g = get_pixel(im, i , j, 1);
+ b = get_pixel(im, i , j, 2);
+ float max = three_way_max(r,g,b);
+ float min = three_way_min(r,g,b);
+ float delta = max - min;
+ v = max;
+ if(max == 0){
+ s = 0;
+ h = 0;
+ }else{
+ s = delta/max;
+ if(r == max){
+ h = (g - b) / delta;
+ } else if (g == max) {
+ h = 2 + (b - r) / delta;
+ } else {
+ h = 4 + (r - g) / delta;
+ }
+ if (h < 0) h += 6;
+ h = h/6.;
+ }
+ set_pixel(im, i, j, 0, h);
+ set_pixel(im, i, j, 1, s);
+ set_pixel(im, i, j, 2, v);
+ }
+ }
+}
+
+void hsv_to_rgb(image im)
+{
+ assert(im.c == 3);
+ int i, j;
+ float r, g, b;
+ float h, s, v;
+ float f, p, q, t;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ h = 6 * get_pixel(im, i , j, 0);
+ s = get_pixel(im, i , j, 1);
+ v = get_pixel(im, i , j, 2);
+ if (s == 0) {
+ r = g = b = v;
+ } else {
+ int index = floor(h);
+ f = h - index;
+ p = v*(1-s);
+ q = v*(1-s*f);
+ t = v*(1-s*(1-f));
+ if(index == 0){
+ r = v; g = t; b = p;
+ } else if(index == 1){
+ r = q; g = v; b = p;
+ } else if(index == 2){
+ r = p; g = v; b = t;
+ } else if(index == 3){
+ r = p; g = q; b = v;
+ } else if(index == 4){
+ r = t; g = p; b = v;
+ } else {
+ r = v; g = p; b = q;
+ }
+ }
+ set_pixel(im, i, j, 0, r);
+ set_pixel(im, i, j, 1, g);
+ set_pixel(im, i, j, 2, b);
+ }
+ }
+}
+
+void grayscale_image_3c(image im)
+{
+ assert(im.c == 3);
+ int i, j, k;
+ float scale[] = {0.299, 0.587, 0.114};
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float val = 0;
+ for(k = 0; k < 3; ++k){
+ val += scale[k]*get_pixel(im, i, j, k);
+ }
+ im.data[0*im.h*im.w + im.w*j + i] = val;
+ im.data[1*im.h*im.w + im.w*j + i] = val;
+ im.data[2*im.h*im.w + im.w*j + i] = val;
+ }
+ }
+}
+
+image grayscale_image(image im)
+{
+ assert(im.c == 3);
+ int i, j, k;
+ image gray = make_image(im.w, im.h, 1);
+ float scale[] = {0.299, 0.587, 0.114};
+ for(k = 0; k < im.c; ++k){
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ gray.data[i+im.w*j] += scale[k]*get_pixel(im, i, j, k);
+ }
+ }
+ }
+ return gray;
+}
+
+image threshold_image(image im, float thresh)
+{
+ int i;
+ image t = make_image(im.w, im.h, im.c);
+ for(i = 0; i < im.w*im.h*im.c; ++i){
+ t.data[i] = im.data[i]>thresh ? 1 : 0;
+ }
+ return t;
+}
+
+image blend_image(image fore, image back, float alpha)
+{
+ assert(fore.w == back.w && fore.h == back.h && fore.c == back.c);
+ image blend = make_image(fore.w, fore.h, fore.c);
+ int i, j, k;
+ for(k = 0; k < fore.c; ++k){
+ for(j = 0; j < fore.h; ++j){
+ for(i = 0; i < fore.w; ++i){
+ float val = alpha * get_pixel(fore, i, j, k) +
+ (1 - alpha)* get_pixel(back, i, j, k);
+ set_pixel(blend, i, j, k, val);
+ }
+ }
+ }
+ return blend;
+}
+
+void scale_image_channel(image im, int c, float v)
+{
+ int i, j;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float pix = get_pixel(im, i, j, c);
+ pix = pix*v;
+ set_pixel(im, i, j, c, pix);
+ }
+ }
+}
+
+void translate_image_channel(image im, int c, float v)
+{
+ int i, j;
+ for(j = 0; j < im.h; ++j){
+ for(i = 0; i < im.w; ++i){
+ float pix = get_pixel(im, i, j, c);
+ pix = pix+v;
+ set_pixel(im, i, j, c, pix);
+ }
+ }
+}
+
+image binarize_image(image im)
+{
+ image c = copy_image(im);
+ int i;
+ for(i = 0; i < im.w * im.h * im.c; ++i){
+ if(c.data[i] > .5) c.data[i] = 1;
+ else c.data[i] = 0;
+ }
+ return c;
+}
+
+void saturate_image(image im, float sat)
+{
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
+void hue_image(image im, float hue)
+{
+ rgb_to_hsv(im);
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i] = im.data[i] + hue;
+ if (im.data[i] > 1) im.data[i] -= 1;
+ if (im.data[i] < 0) im.data[i] += 1;
+ }
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
+void exposure_image(image im, float sat)
+{
+ rgb_to_hsv(im);
+ scale_image_channel(im, 2, sat);
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
+void distort_image(image im, float hue, float sat, float val)
+{
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ scale_image_channel(im, 2, val);
+ int i;
+ for(i = 0; i < im.w*im.h; ++i){
+ im.data[i] = im.data[i] + hue;
+ if (im.data[i] > 1) im.data[i] -= 1;
+ if (im.data[i] < 0) im.data[i] += 1;
+ }
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
+void random_distort_image(image im, float hue, float saturation, float exposure)
+{
+ float dhue = rand_uniform(-hue, hue);
+ float dsat = rand_scale(saturation);
+ float dexp = rand_scale(exposure);
+ distort_image(im, dhue, dsat, dexp);
+}
+
+void saturate_exposure_image(image im, float sat, float exposure)
+{
+ rgb_to_hsv(im);
+ scale_image_channel(im, 1, sat);
+ scale_image_channel(im, 2, exposure);
+ hsv_to_rgb(im);
+ constrain_image(im);
+}
+
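+// Bilinear resize: interpolate rows into a temporary w x im.h image, then interpolate columns into the final w x h image.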
+image resize_image(image im, int w, int h)
+{
+ image resized = make_image(w, h, im.c);
+ image part = make_image(w, im.h, im.c);
+ int r, c, k;
+ float w_scale = (float)(im.w - 1) / (w - 1);
+ float h_scale = (float)(im.h - 1) / (h - 1);
+ for(k = 0; k < im.c; ++k){
+ for(r = 0; r < im.h; ++r){
+ for(c = 0; c < w; ++c){
+ float val = 0;
+ if(c == w-1 || im.w == 1){
+ val = get_pixel(im, im.w-1, r, k);
+ } else {
+ float sx = c*w_scale;
+ int ix = (int) sx;
+ float dx = sx - ix;
+ val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
+ }
+ set_pixel(part, c, r, k, val);
+ }
+ }
+ }
+ for(k = 0; k < im.c; ++k){
+ for(r = 0; r < h; ++r){
+ float sy = r*h_scale;
+ int iy = (int) sy;
+ float dy = sy - iy;
+ for(c = 0; c < w; ++c){
+ float val = (1-dy) * get_pixel(part, c, iy, k);
+ set_pixel(resized, c, r, k, val);
+ }
+ if(r == h-1 || im.h == 1) continue;
+ for(c = 0; c < w; ++c){
+ float val = dy * get_pixel(part, c, iy+1, k);
+ add_pixel(resized, c, r, k, val);
+ }
+ }
+ }
+
+ free_image(part);
+ return resized;
+}
+
+
+void test_resize(char *filename)
+{
+ image im = load_image(filename, 0,0, 3);
+ float mag = mag_array(im.data, im.w*im.h*im.c);
+ printf("L2 Norm: %f\n", mag);
+ image gray = grayscale_image(im);
+
+ image c1 = copy_image(im);
+ image c2 = copy_image(im);
+ image c3 = copy_image(im);
+ image c4 = copy_image(im);
+ distort_image(c1, .1, 1.5, 1.5);
+ distort_image(c2, -.1, .66666, .66666);
+ distort_image(c3, .1, 1.5, .66666);
+ distort_image(c4, .1, .66666, 1.5);
+
+
+ show_image(im, "Original");
+ show_image(gray, "Gray");
+ show_image(c1, "C1");
+ show_image(c2, "C2");
+ show_image(c3, "C3");
+ show_image(c4, "C4");
+#ifdef OPENCV
+ while(1){
+ image aug = random_augment_image(im, 0, .75, 320, 448, 320, 320);
+ show_image(aug, "aug");
+ free_image(aug);
+
+
+ float exposure = 1.15;
+ float saturation = 1.15;
+ float hue = .05;
+
+ image c = copy_image(im);
+
+ float dexp = rand_scale(exposure);
+ float dsat = rand_scale(saturation);
+ float dhue = rand_uniform(-hue, hue);
+
+ distort_image(c, dhue, dsat, dexp);
+ show_image(c, "rand");
+ printf("%f %f %f\n", dhue, dsat, dexp);
+ free_image(c);
+ cvWaitKey(0);
+ }
+#endif
+}
+
+
+image load_image_stb(char *filename, int channels)
+{
+ int w, h, c;
+ unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
+ if (!data) {
+ fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
+ exit(0);
+ }
+ if(channels) c = channels;
+ int i,j,k;
+ image im = make_image(w, h, c);
+ for(k = 0; k < c; ++k){
+ for(j = 0; j < h; ++j){
+ for(i = 0; i < w; ++i){
+ int dst_index = i + w*j + w*h*k;
+ int src_index = k + c*i + c*w*j;
+ im.data[dst_index] = (float)data[src_index]/255.;
+ }
+ }
+ }
+ free(data);
+ return im;
+}
+
+image load_image(char *filename, int w, int h, int c)
+{
+#ifdef OPENCV
+ image out = load_image_cv(filename, c);
+#else
+ image out = load_image_stb(filename, c);
+#endif
+
+ if((h && w) && (h != out.h || w != out.w)){
+ image resized = resize_image(out, w, h);
+ free_image(out);
+ out = resized;
+ }
+ return out;
+}
+
+image load_image_color(char *filename, int w, int h)
+{
+ return load_image(filename, w, h, 3);
+}
+
+image get_image_layer(image m, int l)
+{
+ image out = make_image(m.w, m.h, 1);
+ int i;
+ for(i = 0; i < m.h*m.w; ++i){
+ out.data[i] = m.data[i+l*m.h*m.w];
+ }
+ return out;
+}
+void print_image(image m)
+{
+ int i, j, k;
+ for(i =0 ; i < m.c; ++i){
+ for(j =0 ; j < m.h; ++j){
+ for(k = 0; k < m.w; ++k){
+ printf("%.2lf, ", m.data[i*m.h*m.w + j*m.w + k]);
+ if(k > 30) break;
+ }
+ printf("\n");
+ if(j > 30) break;
+ }
+ printf("\n");
+ }
+ printf("\n");
+}
+
+image collapse_images_vert(image *ims, int n)
+{
+ int color = 1;
+ int border = 1;
+ int h,w,c;
+ w = ims[0].w;
+ h = (ims[0].h + border) * n - border;
+ c = ims[0].c;
+ if(c != 3 || !color){
+ w = (w+border)*c - border;
+ c = 1;
+ }
+
+ image filters = make_image(w, h, c);
+ int i,j;
+ for(i = 0; i < n; ++i){
+ int h_offset = i*(ims[0].h+border);
+ image copy = copy_image(ims[i]);
+ //normalize_image(copy);
+ if(c == 3 && color){
+ embed_image(copy, filters, 0, h_offset);
+ }
+ else{
+ for(j = 0; j < copy.c; ++j){
+ int w_offset = j*(ims[0].w+border);
+ image layer = get_image_layer(copy, j);
+ embed_image(layer, filters, w_offset, h_offset);
+ free_image(layer);
+ }
+ }
+ free_image(copy);
+ }
+ return filters;
+}
+
+image collapse_images_horz(image *ims, int n)
+{
+ int color = 1;
+ int border = 1;
+ int h,w,c;
+ int size = ims[0].h;
+ h = size;
+ w = (ims[0].w + border) * n - border;
+ c = ims[0].c;
+ if(c != 3 || !color){
+ h = (h+border)*c - border;
+ c = 1;
+ }
+
+ image filters = make_image(w, h, c);
+ int i,j;
+ for(i = 0; i < n; ++i){
+ int w_offset = i*(size+border);
+ image copy = copy_image(ims[i]);
+ //normalize_image(copy);
+ if(c == 3 && color){
+ embed_image(copy, filters, w_offset, 0);
+ }
+ else{
+ for(j = 0; j < copy.c; ++j){
+ int h_offset = j*(size+border);
+ image layer = get_image_layer(copy, j);
+ embed_image(layer, filters, w_offset, h_offset);
+ free_image(layer);
+ }
+ }
+ free_image(copy);
+ }
+ return filters;
+}
+
+void show_image_normalized(image im, const char *name)
+{
+ image c = copy_image(im);
+ normalize_image(c);
+ show_image(c, name);
+ free_image(c);
+}
+
+void show_images(image *ims, int n, char *window)
+{
+ image m = collapse_images_vert(ims, n);
+ /*
+ int w = 448;
+ int h = ((float)m.h/m.w) * 448;
+ if(h > 896){
+ h = 896;
+ w = ((float)m.w/m.h) * 896;
+ }
+ image sized = resize_image(m, w, h);
+ */
+ normalize_image(m);
+ save_image(m, window);
+ show_image(m, window);
+ free_image(m);
+}
+
+void free_image(image m)
+{
+ if(m.data){
+ free(m.data);
+ }
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.h
new file mode 100644
index 00000000000..789cf18da01
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/image.h
@@ -0,0 +1,64 @@
+#ifndef IMAGE_H
+#define IMAGE_H
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <float.h>
+#include <string.h>
+#include <math.h>
+#include "box.h"
+#include "darknet.h"
+
+#ifndef __cplusplus
+#ifdef OPENCV
+int fill_image_from_stream(CvCapture *cap, image im);
+image ipl_to_image(IplImage* src);
+void ipl_into_image(IplImage* src, image im);
+void flush_stream_buffer(CvCapture *cap, int n);
+void show_image_cv(image p, const char *name, IplImage *disp);
+#endif
+#endif
+
+float get_color(int c, int x, int max);
+void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b);
+void draw_bbox(image a, box bbox, int w, float r, float g, float b);
+void write_label(image a, int r, int c, image *characters, char *string, float *rgb);
+image image_distance(image a, image b);
+void scale_image(image m, float s);
+image rotate_crop_image(image im, float rad, float s, int w, int h, float dx, float dy, float aspect);
+image random_crop_image(image im, int w, int h);
+image random_augment_image(image im, float angle, float aspect, int low, int high, int w, int h);
+augment_args random_augment_args(image im, float angle, float aspect, int low, int high, int w, int h);
+void letterbox_image_into(image im, int w, int h, image boxed);
+image resize_max(image im, int max);
+void translate_image(image m, float s);
+void embed_image(image source, image dest, int dx, int dy);
+void place_image(image im, int w, int h, int dx, int dy, image canvas);
+void saturate_image(image im, float sat);
+void exposure_image(image im, float sat);
+void distort_image(image im, float hue, float sat, float val);
+void saturate_exposure_image(image im, float sat, float exposure);
+void rgb_to_hsv(image im);
+void hsv_to_rgb(image im);
+void yuv_to_rgb(image im);
+void rgb_to_yuv(image im);
+
+
+image collapse_image_layers(image source, int border);
+image collapse_images_horz(image *ims, int n);
+image collapse_images_vert(image *ims, int n);
+
+void show_image_normalized(image im, const char *name);
+void show_images(image *ims, int n, char *window);
+void show_image_layers(image p, char *name);
+void show_image_collapsed(image p, char *name);
+
+void print_image(image m);
+
+image make_empty_image(int w, int h, int c);
+void copy_image_into(image src, image dest);
+
+image get_image_layer(image m, int l);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.c
new file mode 100644
index 00000000000..d099479b4c0
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.c
@@ -0,0 +1,63 @@
+#include "l2norm_layer.h"
+#include "activations.h"
+#include "blas.h"
+#include "cuda.h"
+
+#include <float.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+layer make_l2norm_layer(int batch, int inputs)
+{
+ fprintf(stderr, "l2norm %4d\n", inputs);
+ layer l = {0};
+ l.type = L2NORM;
+ l.batch = batch;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.output = calloc(inputs*batch, sizeof(float));
+ l.scales = calloc(inputs*batch, sizeof(float));
+ l.delta = calloc(inputs*batch, sizeof(float));
+
+ l.forward = forward_l2norm_layer;
+ l.backward = backward_l2norm_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_l2norm_layer_gpu;
+ l.backward_gpu = backward_l2norm_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, inputs*batch);
+ l.scales_gpu = cuda_make_array(l.output, inputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+ #endif
+ return l;
+}
+
+void forward_l2norm_layer(const layer l, network net)
+{
+ copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
+ l2normalize_cpu(l.output, l.scales, l.batch, l.out_c, l.out_w*l.out_h);
+}
+
+void backward_l2norm_layer(const layer l, network net)
+{
+ //axpy_cpu(l.inputs*l.batch, 1, l.scales, 1, l.delta, 1);
+ axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void forward_l2norm_layer_gpu(const layer l, network net)
+{
+ copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
+ l2normalize_gpu(l.output_gpu, l.scales_gpu, l.batch, l.out_c, l.out_w*l.out_h);
+}
+
+void backward_l2norm_layer_gpu(const layer l, network net)
+{
+ axpy_gpu(l.batch*l.inputs, 1, l.scales_gpu, 1, l.delta_gpu, 1);
+ axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.h
new file mode 100644
index 00000000000..1ca6f710f01
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/l2norm_layer.h
@@ -0,0 +1,15 @@
+#ifndef L2NORM_LAYER_H
+#define L2NORM_LAYER_H
+#include "layer.h"
+#include "network.h"
+
+layer make_l2norm_layer(int batch, int inputs);
+void forward_l2norm_layer(const layer l, network net);
+void backward_l2norm_layer(const layer l, network net);
+
+#ifdef GPU
+void forward_l2norm_layer_gpu(const layer l, network net);
+void backward_l2norm_layer_gpu(const layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.c
new file mode 100644
index 00000000000..c27b4776421
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.c
@@ -0,0 +1,97 @@
+#include "layer.h"
+#include "cuda.h"
+
+#include <stdlib.h>
+
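+// Free every CPU (and, when built with GPU, device) buffer owned by a layer.
+// Dropout layers only own their rand buffer, so they return early.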
+void free_layer(layer l)
+{
+ if(l.type == DROPOUT){
+ if(l.rand) free(l.rand);
+#ifdef GPU
+ if(l.rand_gpu) cuda_free(l.rand_gpu);
+#endif
+ return;
+ }
+ if(l.cweights) free(l.cweights);
+ if(l.indexes) free(l.indexes);
+ if(l.input_layers) free(l.input_layers);
+ if(l.input_sizes) free(l.input_sizes);
+ if(l.map) free(l.map);
+ if(l.rand) free(l.rand);
+ if(l.cost) free(l.cost);
+ if(l.state) free(l.state);
+ if(l.prev_state) free(l.prev_state);
+ if(l.forgot_state) free(l.forgot_state);
+ if(l.forgot_delta) free(l.forgot_delta);
+ if(l.state_delta) free(l.state_delta);
+ if(l.concat) free(l.concat);
+ if(l.concat_delta) free(l.concat_delta);
+ if(l.binary_weights) free(l.binary_weights);
+ if(l.biases) free(l.biases);
+ if(l.bias_updates) free(l.bias_updates);
+ if(l.scales) free(l.scales);
+ if(l.scale_updates) free(l.scale_updates);
+ if(l.weights) free(l.weights);
+ if(l.weight_updates) free(l.weight_updates);
+ if(l.delta) free(l.delta);
+ if(l.output) free(l.output);
+ if(l.squared) free(l.squared);
+ if(l.norms) free(l.norms);
+ if(l.spatial_mean) free(l.spatial_mean);
+ if(l.mean) free(l.mean);
+ if(l.variance) free(l.variance);
+ if(l.mean_delta) free(l.mean_delta);
+ if(l.variance_delta) free(l.variance_delta);
+ if(l.rolling_mean) free(l.rolling_mean);
+ if(l.rolling_variance) free(l.rolling_variance);
+ if(l.x) free(l.x);
+ if(l.x_norm) free(l.x_norm);
+ if(l.m) free(l.m);
+ if(l.v) free(l.v);
+ if(l.z_cpu) free(l.z_cpu);
+ if(l.r_cpu) free(l.r_cpu);
+ if(l.h_cpu) free(l.h_cpu);
+ if(l.binary_input) free(l.binary_input);
+
+#ifdef GPU
+ if(l.indexes_gpu) cuda_free((float *)l.indexes_gpu);
+
+ if(l.z_gpu) cuda_free(l.z_gpu);
+ if(l.r_gpu) cuda_free(l.r_gpu);
+ if(l.h_gpu) cuda_free(l.h_gpu);
+ if(l.m_gpu) cuda_free(l.m_gpu);
+ if(l.v_gpu) cuda_free(l.v_gpu);
+ if(l.prev_state_gpu) cuda_free(l.prev_state_gpu);
+ if(l.forgot_state_gpu) cuda_free(l.forgot_state_gpu);
+ if(l.forgot_delta_gpu) cuda_free(l.forgot_delta_gpu);
+ if(l.state_gpu) cuda_free(l.state_gpu);
+ if(l.state_delta_gpu) cuda_free(l.state_delta_gpu);
+ if(l.gate_gpu) cuda_free(l.gate_gpu);
+ if(l.gate_delta_gpu) cuda_free(l.gate_delta_gpu);
+ if(l.save_gpu) cuda_free(l.save_gpu);
+ if(l.save_delta_gpu) cuda_free(l.save_delta_gpu);
+ if(l.concat_gpu) cuda_free(l.concat_gpu);
+ if(l.concat_delta_gpu) cuda_free(l.concat_delta_gpu);
+ if(l.binary_input_gpu) cuda_free(l.binary_input_gpu);
+ if(l.binary_weights_gpu) cuda_free(l.binary_weights_gpu);
+ if(l.mean_gpu) cuda_free(l.mean_gpu);
+ if(l.variance_gpu) cuda_free(l.variance_gpu);
+ if(l.rolling_mean_gpu) cuda_free(l.rolling_mean_gpu);
+ if(l.rolling_variance_gpu) cuda_free(l.rolling_variance_gpu);
+ if(l.variance_delta_gpu) cuda_free(l.variance_delta_gpu);
+ if(l.mean_delta_gpu) cuda_free(l.mean_delta_gpu);
+ if(l.x_gpu) cuda_free(l.x_gpu);
+ if(l.x_norm_gpu) cuda_free(l.x_norm_gpu);
+ if(l.weights_gpu) cuda_free(l.weights_gpu);
+ if(l.weight_updates_gpu) cuda_free(l.weight_updates_gpu);
+ if(l.biases_gpu) cuda_free(l.biases_gpu);
+ if(l.bias_updates_gpu) cuda_free(l.bias_updates_gpu);
+ if(l.scales_gpu) cuda_free(l.scales_gpu);
+ if(l.scale_updates_gpu) cuda_free(l.scale_updates_gpu);
+ if(l.output_gpu) cuda_free(l.output_gpu);
+ if(l.delta_gpu) cuda_free(l.delta_gpu);
+ if(l.rand_gpu) cuda_free(l.rand_gpu);
+ if(l.squared_gpu) cuda_free(l.squared_gpu);
+ if(l.norms_gpu) cuda_free(l.norms_gpu);
+#endif
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.h
new file mode 100644
index 00000000000..af6cd2ab505
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/layer.h
@@ -0,0 +1 @@
+#include "darknet.h"
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.c
new file mode 100644
index 00000000000..0e4165d3780
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.c
@@ -0,0 +1,92 @@
+#include <stdlib.h>
+#include <string.h>
+#include "list.h"
+
+list *make_list()
+{
+ list *l = malloc(sizeof(list));
+ l->size = 0;
+ l->front = 0;
+ l->back = 0;
+ return l;
+}
+
+/*
+void transfer_node(list *s, list *d, node *n)
+{
+ node *prev, *next;
+ prev = n->prev;
+ next = n->next;
+ if(prev) prev->next = next;
+ if(next) next->prev = prev;
+ --s->size;
+ if(s->front == n) s->front = next;
+ if(s->back == n) s->back = prev;
+}
+*/
+
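+// Remove and return the value stored at the back of the list, or 0 if the list is empty.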
+void *list_pop(list *l){
+ if(!l->back) return 0;
+ node *b = l->back;
+ void *val = b->val;
+ l->back = b->prev;
+ if(l->back) l->back->next = 0;
+ free(b);
+ --l->size;
+
+ return val;
+}
+
+void list_insert(list *l, void *val)
+{
+ node *new = malloc(sizeof(node));
+ new->val = val;
+ new->next = 0;
+
+ if(!l->back){
+ l->front = new;
+ new->prev = 0;
+ }else{
+ l->back->next = new;
+ new->prev = l->back;
+ }
+ l->back = new;
+ ++l->size;
+}
+
+void free_node(node *n)
+{
+ node *next;
+ while(n) {
+ next = n->next;
+ free(n);
+ n = next;
+ }
+}
+
+void free_list(list *l)
+{
+ free_node(l->front);
+ free(l);
+}
+
+void free_list_contents(list *l)
+{
+ node *n = l->front;
+ while(n){
+ free(n->val);
+ n = n->next;
+ }
+}
+
+void **list_to_array(list *l)
+{
+ void **a = calloc(l->size, sizeof(void*));
+ int count = 0;
+ node *n = l->front;
+ while(n){
+ a[count++] = n->val;
+ n = n->next;
+ }
+ return a;
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.h
new file mode 100644
index 00000000000..6b445c717c2
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/list.h
@@ -0,0 +1,13 @@
+#ifndef LIST_H
+#define LIST_H
+#include "darknet.h"
+
+list *make_list();
+int list_find(list *l, void *val);
+
+void list_insert(list *, void *);
+
+
+void free_list_contents(list *l);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.c
new file mode 100644
index 00000000000..74f6910a8fd
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.c
@@ -0,0 +1,293 @@
+#include "local_layer.h"
+#include "utils.h"
+#include "im2col.h"
+#include "col2im.h"
+#include "blas.h"
+#include "gemm.h"
+#include <stdio.h>
+#include <time.h>
+
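+// Output height of a locally connected layer: same formula as a convolution with optional padding.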
+int local_out_height(local_layer l)
+{
+ int h = l.h;
+ if (!l.pad) h -= l.size;
+ else h -= 1;
+ return h/l.stride + 1;
+}
+
+int local_out_width(local_layer l)
+{
+ int w = l.w;
+ if (!l.pad) w -= l.size;
+ else w -= 1;
+ return w/l.stride + 1;
+}
+
+local_layer make_local_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation)
+{
+ int i;
+ local_layer l = {0};
+ l.type = LOCAL;
+
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.n = n;
+ l.batch = batch;
+ l.stride = stride;
+ l.size = size;
+ l.pad = pad;
+
+ int out_h = local_out_height(l);
+ int out_w = local_out_width(l);
+ int locations = out_h*out_w;
+ l.out_h = out_h;
+ l.out_w = out_w;
+ l.out_c = n;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+ l.inputs = l.w * l.h * l.c;
+
+ l.weights = calloc(c*n*size*size*locations, sizeof(float));
+ l.weight_updates = calloc(c*n*size*size*locations, sizeof(float));
+
+ l.biases = calloc(l.outputs, sizeof(float));
+ l.bias_updates = calloc(l.outputs, sizeof(float));
+
+ // float scale = 1./sqrt(size*size*c);
+ float scale = sqrt(2./(size*size*c));
+ for(i = 0; i < c*n*size*size; ++i) l.weights[i] = scale*rand_uniform(-1,1);
+
+ l.output = calloc(l.batch*out_h * out_w * n, sizeof(float));
+ l.delta = calloc(l.batch*out_h * out_w * n, sizeof(float));
+
+ l.workspace_size = out_h*out_w*size*size*c;
+
+ l.forward = forward_local_layer;
+ l.backward = backward_local_layer;
+ l.update = update_local_layer;
+
+#ifdef GPU
+ l.forward_gpu = forward_local_layer_gpu;
+ l.backward_gpu = backward_local_layer_gpu;
+ l.update_gpu = update_local_layer_gpu;
+
+ l.weights_gpu = cuda_make_array(l.weights, c*n*size*size*locations);
+ l.weight_updates_gpu = cuda_make_array(l.weight_updates, c*n*size*size*locations);
+
+ l.biases_gpu = cuda_make_array(l.biases, l.outputs);
+ l.bias_updates_gpu = cuda_make_array(l.bias_updates, l.outputs);
+
+ l.delta_gpu = cuda_make_array(l.delta, l.batch*out_h*out_w*n);
+ l.output_gpu = cuda_make_array(l.output, l.batch*out_h*out_w*n);
+
+#endif
+ l.activation = activation;
+
+ fprintf(stderr, "Local Layer: %d x %d x %d image, %d filters -> %d x %d x %d image\n", h,w,c,n, out_h, out_w, n);
+
+ return l;
+}
+
+void forward_local_layer(const local_layer l, network net)
+{
+ int out_h = local_out_height(l);
+ int out_w = local_out_width(l);
+ int i, j;
+ int locations = out_h * out_w;
+
+ for(i = 0; i < l.batch; ++i){
+ copy_cpu(l.outputs, l.biases, 1, l.output + i*l.outputs, 1);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ float *input = net.input + i*l.w*l.h*l.c;
+ im2col_cpu(input, l.c, l.h, l.w,
+ l.size, l.stride, l.pad, net.workspace);
+ float *output = l.output + i*l.outputs;
+ for(j = 0; j < locations; ++j){
+ float *a = l.weights + j*l.size*l.size*l.c*l.n;
+ float *b = net.workspace + j;
+ float *c = output + j;
+
+ int m = l.n;
+ int n = 1;
+ int k = l.size*l.size*l.c;
+
+ gemm(0,0,m,n,k,1,a,k,b,locations,1,c,locations);
+ }
+ }
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+}
+
+void backward_local_layer(local_layer l, network net)
+{
+ int i, j;
+ int locations = l.out_w*l.out_h;
+
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+
+ for(i = 0; i < l.batch; ++i){
+ axpy_cpu(l.outputs, 1, l.delta + i*l.outputs, 1, l.bias_updates, 1);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ float *input = net.input + i*l.w*l.h*l.c;
+ im2col_cpu(input, l.c, l.h, l.w,
+ l.size, l.stride, l.pad, net.workspace);
+
+ for(j = 0; j < locations; ++j){
+ float *a = l.delta + i*l.outputs + j;
+ float *b = net.workspace + j;
+ float *c = l.weight_updates + j*l.size*l.size*l.c*l.n;
+ int m = l.n;
+ int n = l.size*l.size*l.c;
+ int k = 1;
+
+ gemm(0,1,m,n,k,1,a,locations,b,locations,1,c,n);
+ }
+
+ if(net.delta){
+ for(j = 0; j < locations; ++j){
+ float *a = l.weights + j*l.size*l.size*l.c*l.n;
+ float *b = l.delta + i*l.outputs + j;
+ float *c = net.workspace + j;
+
+ int m = l.size*l.size*l.c;
+ int n = 1;
+ int k = l.n;
+
+ gemm(1,0,m,n,k,1,a,m,b,locations,0,c,locations);
+ }
+
+ col2im_cpu(net.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, net.delta+i*l.c*l.h*l.w);
+ }
+ }
+}
+
+void update_local_layer(local_layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ axpy_cpu(l.outputs, learning_rate/batch, l.bias_updates, 1, l.biases, 1);
+ scal_cpu(l.outputs, momentum, l.bias_updates, 1);
+
+ axpy_cpu(size, -decay*batch, l.weights, 1, l.weight_updates, 1);
+ axpy_cpu(size, learning_rate/batch, l.weight_updates, 1, l.weights, 1);
+ scal_cpu(size, momentum, l.weight_updates, 1);
+}
+
+#ifdef GPU
+
+void forward_local_layer_gpu(const local_layer l, network net)
+{
+ int out_h = local_out_height(l);
+ int out_w = local_out_width(l);
+ int i, j;
+ int locations = out_h * out_w;
+
+ for(i = 0; i < l.batch; ++i){
+ copy_gpu(l.outputs, l.biases_gpu, 1, l.output_gpu + i*l.outputs, 1);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ float *input = net.input_gpu + i*l.w*l.h*l.c;
+ im2col_gpu(input, l.c, l.h, l.w,
+ l.size, l.stride, l.pad, net.workspace);
+ float *output = l.output_gpu + i*l.outputs;
+ for(j = 0; j < locations; ++j){
+ float *a = l.weights_gpu + j*l.size*l.size*l.c*l.n;
+ float *b = net.workspace + j;
+ float *c = output + j;
+
+ int m = l.n;
+ int n = 1;
+ int k = l.size*l.size*l.c;
+
+ gemm_gpu(0,0,m,n,k,1,a,k,b,locations,1,c,locations);
+ }
+ }
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
+}
+
+void backward_local_layer_gpu(local_layer l, network net)
+{
+ int i, j;
+ int locations = l.out_w*l.out_h;
+
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+ for(i = 0; i < l.batch; ++i){
+ axpy_gpu(l.outputs, 1, l.delta_gpu + i*l.outputs, 1, l.bias_updates_gpu, 1);
+ }
+
+ for(i = 0; i < l.batch; ++i){
+ float *input = net.input_gpu + i*l.w*l.h*l.c;
+ im2col_gpu(input, l.c, l.h, l.w,
+ l.size, l.stride, l.pad, net.workspace);
+
+ for(j = 0; j < locations; ++j){
+ float *a = l.delta_gpu + i*l.outputs + j;
+ float *b = net.workspace + j;
+ float *c = l.weight_updates_gpu + j*l.size*l.size*l.c*l.n;
+ int m = l.n;
+ int n = l.size*l.size*l.c;
+ int k = 1;
+
+ gemm_gpu(0,1,m,n,k,1,a,locations,b,locations,1,c,n);
+ }
+
+ if(net.delta_gpu){
+ for(j = 0; j < locations; ++j){
+ float *a = l.weights_gpu + j*l.size*l.size*l.c*l.n;
+ float *b = l.delta_gpu + i*l.outputs + j;
+ float *c = net.workspace + j;
+
+ int m = l.size*l.size*l.c;
+ int n = 1;
+ int k = l.n;
+
+ gemm_gpu(1,0,m,n,k,1,a,m,b,locations,0,c,locations);
+ }
+
+ col2im_gpu(net.workspace, l.c, l.h, l.w, l.size, l.stride, l.pad, net.delta_gpu+i*l.c*l.h*l.w);
+ }
+ }
+}
+
+void update_local_layer_gpu(local_layer l, update_args a)
+{
+ float learning_rate = a.learning_rate*l.learning_rate_scale;
+ float momentum = a.momentum;
+ float decay = a.decay;
+ int batch = a.batch;
+
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ axpy_gpu(l.outputs, learning_rate/batch, l.bias_updates_gpu, 1, l.biases_gpu, 1);
+ scal_gpu(l.outputs, momentum, l.bias_updates_gpu, 1);
+
+ axpy_gpu(size, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
+ axpy_gpu(size, learning_rate/batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
+ scal_gpu(size, momentum, l.weight_updates_gpu, 1);
+}
+
+void pull_local_layer(local_layer l)
+{
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ cuda_pull_array(l.weights_gpu, l.weights, size);
+ cuda_pull_array(l.biases_gpu, l.biases, l.outputs);
+}
+
+void push_local_layer(local_layer l)
+{
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ cuda_push_array(l.weights_gpu, l.weights, size);
+ cuda_push_array(l.biases_gpu, l.biases, l.outputs);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.h
new file mode 100644
index 00000000000..776e572f420
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/local_layer.h
@@ -0,0 +1,31 @@
+#ifndef LOCAL_LAYER_H
+#define LOCAL_LAYER_H
+
+#include "cuda.h"
+#include "image.h"
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+
+typedef layer local_layer;
+
+#ifdef GPU
+void forward_local_layer_gpu(local_layer layer, network net);
+void backward_local_layer_gpu(local_layer layer, network net);
+void update_local_layer_gpu(local_layer layer, update_args a);
+
+void push_local_layer(local_layer layer);
+void pull_local_layer(local_layer layer);
+#endif
+
+local_layer make_local_layer(int batch, int h, int w, int c, int n, int size, int stride, int pad, ACTIVATION activation);
+
+void forward_local_layer(const local_layer layer, network net);
+void backward_local_layer(local_layer layer, network net);
+void update_local_layer(local_layer layer, update_args a);
+
+void bias_output(float *output, float *biases, int batch, int n, int size);
+void backward_bias(float *bias_updates, float *delta, int batch, int n, int size);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.c
new file mode 100644
index 00000000000..b2b3d6b1ccf
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.c
@@ -0,0 +1,71 @@
+#include "logistic_layer.h"
+#include "activations.h"
+#include "blas.h"
+#include "cuda.h"
+
+#include <float.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+layer make_logistic_layer(int batch, int inputs)
+{
+ fprintf(stderr, "logistic x entropy %4d\n", inputs);
+ layer l = {0};
+ l.type = LOGXENT;
+ l.batch = batch;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.loss = calloc(inputs*batch, sizeof(float));
+ l.output = calloc(inputs*batch, sizeof(float));
+ l.delta = calloc(inputs*batch, sizeof(float));
+ l.cost = calloc(1, sizeof(float));
+
+ l.forward = forward_logistic_layer;
+ l.backward = backward_logistic_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_logistic_layer_gpu;
+ l.backward_gpu = backward_logistic_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, inputs*batch);
+ l.loss_gpu = cuda_make_array(l.loss, inputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+ #endif
+ return l;
+}
+
+void forward_logistic_layer(const layer l, network net)
+{
+ copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
+ activate_array(l.output, l.outputs*l.batch, LOGISTIC);
+ if(net.truth){
+ logistic_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss);
+ l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
+ }
+}
+
+void backward_logistic_layer(const layer l, network net)
+{
+ axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void forward_logistic_layer_gpu(const layer l, network net)
+{
+ copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, LOGISTIC);
+ if(net.truth){
+ logistic_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth_gpu, l.delta_gpu, l.loss_gpu);
+ cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs);
+ l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
+ }
+}
+
+void backward_logistic_layer_gpu(const layer l, network net)
+{
+ axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.h
new file mode 100644
index 00000000000..9c25bee3c2a
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/logistic_layer.h
@@ -0,0 +1,15 @@
+#ifndef LOGISTIC_LAYER_H
+#define LOGISTIC_LAYER_H
+#include "layer.h"
+#include "network.h"
+
+layer make_logistic_layer(int batch, int inputs);
+void forward_logistic_layer(const layer l, network net);
+void backward_logistic_layer(const layer l, network net);
+
+#ifdef GPU
+void forward_logistic_layer_gpu(const layer l, network net);
+void backward_logistic_layer_gpu(const layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.c
new file mode 100644
index 00000000000..fb07de20228
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.c
@@ -0,0 +1,626 @@
+#include "lstm_layer.h"
+#include "connected_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
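+// Advance the sub-layer's output/delta (and x/x_norm) pointers by the given number of time steps.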
+static void increment_layer(layer *l, int steps)
+{
+ int num = l->outputs*l->batch*steps;
+ l->output += num;
+ l->delta += num;
+ l->x += num;
+ l->x_norm += num;
+
+#ifdef GPU
+ l->output_gpu += num;
+ l->delta_gpu += num;
+ l->x_gpu += num;
+ l->x_norm_gpu += num;
+#endif
+}
+
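+// Build an LSTM layer from eight connected sub-layers: the U* layers act on the input, the W* layers on the recurrent hidden state.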
+layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
+{
+ fprintf(stderr, "LSTM Layer: %d inputs, %d outputs\n", inputs, outputs);
+ batch = batch / steps;
+ layer l = { 0 };
+ l.batch = batch;
+ l.type = LSTM;
+ l.steps = steps;
+ l.inputs = inputs;
+
+ l.uf = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.uf) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.uf->batch = batch;
+
+ l.ui = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.ui) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.ui->batch = batch;
+
+ l.ug = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.ug) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.ug->batch = batch;
+
+ l.uo = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.uo) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
+ l.uo->batch = batch;
+
+ l.wf = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wf) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wf->batch = batch;
+
+ l.wi = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wi) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wi->batch = batch;
+
+ l.wg = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wg) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wg->batch = batch;
+
+ l.wo = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.wo) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
+ l.wo->batch = batch;
+
+ l.batch_normalize = batch_normalize;
+ l.outputs = outputs;
+
+ l.output = calloc(outputs*batch*steps, sizeof(float));
+ l.state = calloc(outputs*batch, sizeof(float));
+
+ l.forward = forward_lstm_layer;
+ l.update = update_lstm_layer;
+
+ l.prev_state_cpu = calloc(batch*outputs, sizeof(float));
+ l.prev_cell_cpu = calloc(batch*outputs, sizeof(float));
+ l.cell_cpu = calloc(batch*outputs*steps, sizeof(float));
+
+ l.f_cpu = calloc(batch*outputs, sizeof(float));
+ l.i_cpu = calloc(batch*outputs, sizeof(float));
+ l.g_cpu = calloc(batch*outputs, sizeof(float));
+ l.o_cpu = calloc(batch*outputs, sizeof(float));
+ l.c_cpu = calloc(batch*outputs, sizeof(float));
+ l.h_cpu = calloc(batch*outputs, sizeof(float));
+ l.temp_cpu = calloc(batch*outputs, sizeof(float));
+ l.temp2_cpu = calloc(batch*outputs, sizeof(float));
+ l.temp3_cpu = calloc(batch*outputs, sizeof(float));
+ l.dc_cpu = calloc(batch*outputs, sizeof(float));
+ l.dh_cpu = calloc(batch*outputs, sizeof(float));
+
+#ifdef GPU
+ l.forward_gpu = forward_lstm_layer_gpu;
+ l.backward_gpu = backward_lstm_layer_gpu;
+ l.update_gpu = update_lstm_layer_gpu;
+
+ l.output_gpu = cuda_make_array(0, batch*outputs*steps);
+ l.delta_gpu = cuda_make_array(0, batch*l.outputs*steps);
+
+ l.prev_state_gpu = cuda_make_array(0, batch*outputs);
+ l.prev_cell_gpu = cuda_make_array(0, batch*outputs);
+ l.cell_gpu = cuda_make_array(0, batch*outputs*steps);
+
+ l.f_gpu = cuda_make_array(0, batch*outputs);
+ l.i_gpu = cuda_make_array(0, batch*outputs);
+ l.g_gpu = cuda_make_array(0, batch*outputs);
+ l.o_gpu = cuda_make_array(0, batch*outputs);
+ l.c_gpu = cuda_make_array(0, batch*outputs);
+ l.h_gpu = cuda_make_array(0, batch*outputs);
+ l.temp_gpu = cuda_make_array(0, batch*outputs);
+ l.temp2_gpu = cuda_make_array(0, batch*outputs);
+ l.temp3_gpu = cuda_make_array(0, batch*outputs);
+ l.dc_gpu = cuda_make_array(0, batch*outputs);
+ l.dh_gpu = cuda_make_array(0, batch*outputs);
+#ifdef CUDNN
+ cudnnSetTensor4dDescriptor(l.wf->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wf->out_c, l.wf->out_h, l.wf->out_w);
+ cudnnSetTensor4dDescriptor(l.wi->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wi->out_c, l.wi->out_h, l.wi->out_w);
+ cudnnSetTensor4dDescriptor(l.wg->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wg->out_c, l.wg->out_h, l.wg->out_w);
+ cudnnSetTensor4dDescriptor(l.wo->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.wo->out_c, l.wo->out_h, l.wo->out_w);
+
+ cudnnSetTensor4dDescriptor(l.uf->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uf->out_c, l.uf->out_h, l.uf->out_w);
+ cudnnSetTensor4dDescriptor(l.ui->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.ui->out_c, l.ui->out_h, l.ui->out_w);
+ cudnnSetTensor4dDescriptor(l.ug->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.ug->out_c, l.ug->out_h, l.ug->out_w);
+ cudnnSetTensor4dDescriptor(l.uo->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.uo->out_c, l.uo->out_h, l.uo->out_w);
+#endif
+
+#endif
+
+ return l;
+}
+
+void update_lstm_layer(layer l, update_args a)
+{
+ update_connected_layer(*(l.wf), a);
+ update_connected_layer(*(l.wi), a);
+ update_connected_layer(*(l.wg), a);
+ update_connected_layer(*(l.wo), a);
+ update_connected_layer(*(l.uf), a);
+ update_connected_layer(*(l.ui), a);
+ update_connected_layer(*(l.ug), a);
+ update_connected_layer(*(l.uo), a);
+}
+
+void forward_lstm_layer(layer l, network state)
+{
+ network s = { 0 };
+ s.train = state.train;
+ int i;
+ layer wf = *(l.wf);
+ layer wi = *(l.wi);
+ layer wg = *(l.wg);
+ layer wo = *(l.wo);
+
+ layer uf = *(l.uf);
+ layer ui = *(l.ui);
+ layer ug = *(l.ug);
+ layer uo = *(l.uo);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wf.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wi.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wg.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, wo.delta, 1);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, uf.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, ui.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, ug.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, uo.delta, 1);
+ if (state.train) {
+ fill_cpu(l.outputs * l.batch * l.steps, 0, l.delta, 1);
+ }
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input = l.h_cpu;
+ forward_connected_layer(wf, s);
+ forward_connected_layer(wi, s);
+ forward_connected_layer(wg, s);
+ forward_connected_layer(wo, s);
+
+ s.input = state.input;
+ forward_connected_layer(uf, s);
+ forward_connected_layer(ui, s);
+ forward_connected_layer(ug, s);
+ forward_connected_layer(uo, s);
+
+ copy_cpu(l.outputs*l.batch, wf.output, 1, l.f_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, uf.output, 1, l.f_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wi.output, 1, l.i_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, ui.output, 1, l.i_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wg.output, 1, l.g_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, ug.output, 1, l.g_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wo.output, 1, l.o_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, uo.output, 1, l.o_cpu, 1);
+
+ activate_array(l.f_cpu, l.outputs*l.batch, LOGISTIC);
+ activate_array(l.i_cpu, l.outputs*l.batch, LOGISTIC);
+ activate_array(l.g_cpu, l.outputs*l.batch, TANH);
+ activate_array(l.o_cpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_cpu(l.outputs*l.batch, l.i_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.g_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.f_cpu, 1, l.c_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, l.temp_cpu, 1, l.c_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, l.c_cpu, 1, l.h_cpu, 1);
+ activate_array(l.h_cpu, l.outputs*l.batch, TANH);
+ mul_cpu(l.outputs*l.batch, l.o_cpu, 1, l.h_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, l.c_cpu, 1, l.cell_cpu, 1);
+ copy_cpu(l.outputs*l.batch, l.h_cpu, 1, l.output, 1);
+
+ state.input += l.inputs*l.batch;
+ l.output += l.outputs*l.batch;
+ l.cell_cpu += l.outputs*l.batch;
+
+ increment_layer(&wf, 1);
+ increment_layer(&wi, 1);
+ increment_layer(&wg, 1);
+ increment_layer(&wo, 1);
+
+ increment_layer(&uf, 1);
+ increment_layer(&ui, 1);
+ increment_layer(&ug, 1);
+ increment_layer(&uo, 1);
+ }
+}
+
+void backward_lstm_layer(layer l, network state)
+{
+ network s = { 0 };
+ s.train = state.train;
+ int i;
+ layer wf = *(l.wf);
+ layer wi = *(l.wi);
+ layer wg = *(l.wg);
+ layer wo = *(l.wo);
+
+ layer uf = *(l.uf);
+ layer ui = *(l.ui);
+ layer ug = *(l.ug);
+ layer uo = *(l.uo);
+
+ increment_layer(&wf, l.steps - 1);
+ increment_layer(&wi, l.steps - 1);
+ increment_layer(&wg, l.steps - 1);
+ increment_layer(&wo, l.steps - 1);
+
+ increment_layer(&uf, l.steps - 1);
+ increment_layer(&ui, l.steps - 1);
+ increment_layer(&ug, l.steps - 1);
+ increment_layer(&uo, l.steps - 1);
+
+ state.input += l.inputs*l.batch*(l.steps - 1);
+ if (state.delta) state.delta += l.inputs*l.batch*(l.steps - 1);
+
+ l.output += l.outputs*l.batch*(l.steps - 1);
+ l.cell_cpu += l.outputs*l.batch*(l.steps - 1);
+ l.delta += l.outputs*l.batch*(l.steps - 1);
+
+ for (i = l.steps - 1; i >= 0; --i) {
+ if (i != 0) copy_cpu(l.outputs*l.batch, l.cell_cpu - l.outputs*l.batch, 1, l.prev_cell_cpu, 1);
+ copy_cpu(l.outputs*l.batch, l.cell_cpu, 1, l.c_cpu, 1);
+ if (i != 0) copy_cpu(l.outputs*l.batch, l.output - l.outputs*l.batch, 1, l.prev_state_cpu, 1);
+ copy_cpu(l.outputs*l.batch, l.output, 1, l.h_cpu, 1);
+
+ l.dh_cpu = (i == 0) ? 0 : l.delta - l.outputs*l.batch;
+
+ copy_cpu(l.outputs*l.batch, wf.output, 1, l.f_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, uf.output, 1, l.f_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wi.output, 1, l.i_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, ui.output, 1, l.i_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wg.output, 1, l.g_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, ug.output, 1, l.g_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, wo.output, 1, l.o_cpu, 1);
+ axpy_cpu(l.outputs*l.batch, 1, uo.output, 1, l.o_cpu, 1);
+
+ activate_array(l.f_cpu, l.outputs*l.batch, LOGISTIC);
+ activate_array(l.i_cpu, l.outputs*l.batch, LOGISTIC);
+ activate_array(l.g_cpu, l.outputs*l.batch, TANH);
+ activate_array(l.o_cpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_cpu(l.outputs*l.batch, l.delta, 1, l.temp3_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, l.c_cpu, 1, l.temp_cpu, 1);
+ activate_array(l.temp_cpu, l.outputs*l.batch, TANH);
+
+ copy_cpu(l.outputs*l.batch, l.temp3_cpu, 1, l.temp2_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.o_cpu, 1, l.temp2_cpu, 1);
+
+ gradient_array(l.temp_cpu, l.outputs*l.batch, TANH, l.temp2_cpu);
+ axpy_cpu(l.outputs*l.batch, 1, l.dc_cpu, 1, l.temp2_cpu, 1);
+
+ copy_cpu(l.outputs*l.batch, l.c_cpu, 1, l.temp_cpu, 1);
+ activate_array(l.temp_cpu, l.outputs*l.batch, TANH);
+ mul_cpu(l.outputs*l.batch, l.temp3_cpu, 1, l.temp_cpu, 1);
+ gradient_array(l.o_cpu, l.outputs*l.batch, LOGISTIC, l.temp_cpu);
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, wo.delta, 1);
+ s.input = l.prev_state_cpu;
+ s.delta = l.dh_cpu;
+ backward_connected_layer(wo, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, uo.delta, 1);
+ s.input = state.input;
+ s.delta = state.delta;
+ backward_connected_layer(uo, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.i_cpu, 1, l.temp_cpu, 1);
+ gradient_array(l.g_cpu, l.outputs*l.batch, TANH, l.temp_cpu);
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, wg.delta, 1);
+ s.input = l.prev_state_cpu;
+ s.delta = l.dh_cpu;
+ backward_connected_layer(wg, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, ug.delta, 1);
+ s.input = state.input;
+ s.delta = state.delta;
+ backward_connected_layer(ug, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.g_cpu, 1, l.temp_cpu, 1);
+ gradient_array(l.i_cpu, l.outputs*l.batch, LOGISTIC, l.temp_cpu);
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, wi.delta, 1);
+ s.input = l.prev_state_cpu;
+ s.delta = l.dh_cpu;
+ backward_connected_layer(wi, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, ui.delta, 1);
+ s.input = state.input;
+ s.delta = state.delta;
+ backward_connected_layer(ui, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.prev_cell_cpu, 1, l.temp_cpu, 1);
+ gradient_array(l.f_cpu, l.outputs*l.batch, LOGISTIC, l.temp_cpu);
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, wf.delta, 1);
+ s.input = l.prev_state_cpu;
+ s.delta = l.dh_cpu;
+ backward_connected_layer(wf, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, uf.delta, 1);
+ s.input = state.input;
+ s.delta = state.delta;
+ backward_connected_layer(uf, s);
+
+ copy_cpu(l.outputs*l.batch, l.temp2_cpu, 1, l.temp_cpu, 1);
+ mul_cpu(l.outputs*l.batch, l.f_cpu, 1, l.temp_cpu, 1);
+ copy_cpu(l.outputs*l.batch, l.temp_cpu, 1, l.dc_cpu, 1);
+
+ state.input -= l.inputs*l.batch;
+ if (state.delta) state.delta -= l.inputs*l.batch;
+ l.output -= l.outputs*l.batch;
+ l.cell_cpu -= l.outputs*l.batch;
+ l.delta -= l.outputs*l.batch;
+
+ increment_layer(&wf, -1);
+ increment_layer(&wi, -1);
+ increment_layer(&wg, -1);
+ increment_layer(&wo, -1);
+
+ increment_layer(&uf, -1);
+ increment_layer(&ui, -1);
+ increment_layer(&ug, -1);
+ increment_layer(&uo, -1);
+ }
+}
+
+#ifdef GPU
+void update_lstm_layer_gpu(layer l, update_args a)
+{
+ update_connected_layer_gpu(*(l.wf), a);
+ update_connected_layer_gpu(*(l.wi), a);
+ update_connected_layer_gpu(*(l.wg), a);
+ update_connected_layer_gpu(*(l.wo), a);
+ update_connected_layer_gpu(*(l.uf), a);
+ update_connected_layer_gpu(*(l.ui), a);
+ update_connected_layer_gpu(*(l.ug), a);
+ update_connected_layer_gpu(*(l.uo), a);
+}
+
+void forward_lstm_layer_gpu(layer l, network state)
+{
+ network s = { 0 };
+ s.train = state.train;
+ int i;
+ layer wf = *(l.wf);
+ layer wi = *(l.wi);
+ layer wg = *(l.wg);
+ layer wo = *(l.wo);
+
+ layer uf = *(l.uf);
+ layer ui = *(l.ui);
+ layer ug = *(l.ug);
+ layer uo = *(l.uo);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wf.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wi.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wg.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, wo.delta_gpu, 1);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, uf.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, ui.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, ug.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, uo.delta_gpu, 1);
+ if (state.train) {
+ fill_gpu(l.outputs * l.batch * l.steps, 0, l.delta_gpu, 1);
+ }
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input_gpu = l.h_gpu;
+ forward_connected_layer_gpu(wf, s);
+ forward_connected_layer_gpu(wi, s);
+ forward_connected_layer_gpu(wg, s);
+ forward_connected_layer_gpu(wo, s);
+
+ s.input_gpu = state.input_gpu;
+ forward_connected_layer_gpu(uf, s);
+ forward_connected_layer_gpu(ui, s);
+ forward_connected_layer_gpu(ug, s);
+ forward_connected_layer_gpu(uo, s);
+
+ copy_gpu(l.outputs*l.batch, wf.output_gpu, 1, l.f_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wi.output_gpu, 1, l.i_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wg.output_gpu, 1, l.g_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wo.output_gpu, 1, l.o_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1);
+
+ activate_array_gpu(l.f_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.i_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.g_gpu, l.outputs*l.batch, TANH);
+ activate_array_gpu(l.o_gpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_gpu(l.outputs*l.batch, l.i_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.g_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.f_gpu, 1, l.c_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, l.temp_gpu, 1, l.c_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.c_gpu, 1, l.h_gpu, 1);
+ activate_array_gpu(l.h_gpu, l.outputs*l.batch, TANH);
+ mul_gpu(l.outputs*l.batch, l.o_gpu, 1, l.h_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.c_gpu, 1, l.cell_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.h_gpu, 1, l.output_gpu, 1);
+
+ state.input_gpu += l.inputs*l.batch;
+ l.output_gpu += l.outputs*l.batch;
+ l.cell_gpu += l.outputs*l.batch;
+
+ increment_layer(&wf, 1);
+ increment_layer(&wi, 1);
+ increment_layer(&wg, 1);
+ increment_layer(&wo, 1);
+
+ increment_layer(&uf, 1);
+ increment_layer(&ui, 1);
+ increment_layer(&ug, 1);
+ increment_layer(&uo, 1);
+ }
+}
+
+void backward_lstm_layer_gpu(layer l, network state)
+{
+ network s = { 0 };
+ s.train = state.train;
+ int i;
+ layer wf = *(l.wf);
+ layer wi = *(l.wi);
+ layer wg = *(l.wg);
+ layer wo = *(l.wo);
+
+ layer uf = *(l.uf);
+ layer ui = *(l.ui);
+ layer ug = *(l.ug);
+ layer uo = *(l.uo);
+
+ increment_layer(&wf, l.steps - 1);
+ increment_layer(&wi, l.steps - 1);
+ increment_layer(&wg, l.steps - 1);
+ increment_layer(&wo, l.steps - 1);
+
+ increment_layer(&uf, l.steps - 1);
+ increment_layer(&ui, l.steps - 1);
+ increment_layer(&ug, l.steps - 1);
+ increment_layer(&uo, l.steps - 1);
+
+ state.input_gpu += l.inputs*l.batch*(l.steps - 1);
+ if (state.delta_gpu) state.delta_gpu += l.inputs*l.batch*(l.steps - 1);
+
+ l.output_gpu += l.outputs*l.batch*(l.steps - 1);
+ l.cell_gpu += l.outputs*l.batch*(l.steps - 1);
+ l.delta_gpu += l.outputs*l.batch*(l.steps - 1);
+
+ for (i = l.steps - 1; i >= 0; --i) {
+ if (i != 0) copy_gpu(l.outputs*l.batch, l.cell_gpu - l.outputs*l.batch, 1, l.prev_cell_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.cell_gpu, 1, l.c_gpu, 1);
+ if (i != 0) copy_gpu(l.outputs*l.batch, l.output_gpu - l.outputs*l.batch, 1, l.prev_state_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.output_gpu, 1, l.h_gpu, 1);
+
+ l.dh_gpu = (i == 0) ? 0 : l.delta_gpu - l.outputs*l.batch;
+
+ copy_gpu(l.outputs*l.batch, wf.output_gpu, 1, l.f_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, uf.output_gpu, 1, l.f_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wi.output_gpu, 1, l.i_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, ui.output_gpu, 1, l.i_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wg.output_gpu, 1, l.g_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, ug.output_gpu, 1, l.g_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, wo.output_gpu, 1, l.o_gpu, 1);
+ axpy_gpu(l.outputs*l.batch, 1, uo.output_gpu, 1, l.o_gpu, 1);
+
+ activate_array_gpu(l.f_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.i_gpu, l.outputs*l.batch, LOGISTIC);
+ activate_array_gpu(l.g_gpu, l.outputs*l.batch, TANH);
+ activate_array_gpu(l.o_gpu, l.outputs*l.batch, LOGISTIC);
+
+ copy_gpu(l.outputs*l.batch, l.delta_gpu, 1, l.temp3_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.c_gpu, 1, l.temp_gpu, 1);
+ activate_array_gpu(l.temp_gpu, l.outputs*l.batch, TANH);
+
+ copy_gpu(l.outputs*l.batch, l.temp3_gpu, 1, l.temp2_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.o_gpu, 1, l.temp2_gpu, 1);
+
+ gradient_array_gpu(l.temp_gpu, l.outputs*l.batch, TANH, l.temp2_gpu);
+ axpy_gpu(l.outputs*l.batch, 1, l.dc_gpu, 1, l.temp2_gpu, 1);
+
+ copy_gpu(l.outputs*l.batch, l.c_gpu, 1, l.temp_gpu, 1);
+ activate_array_gpu(l.temp_gpu, l.outputs*l.batch, TANH);
+ mul_gpu(l.outputs*l.batch, l.temp3_gpu, 1, l.temp_gpu, 1);
+ gradient_array_gpu(l.o_gpu, l.outputs*l.batch, LOGISTIC, l.temp_gpu);
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, wo.delta_gpu, 1);
+ s.input_gpu = l.prev_state_gpu;
+ s.delta_gpu = l.dh_gpu;
+ backward_connected_layer_gpu(wo, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, uo.delta_gpu, 1);
+ s.input_gpu = state.input_gpu;
+ s.delta_gpu = state.delta_gpu;
+ backward_connected_layer_gpu(uo, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp2_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.i_gpu, 1, l.temp_gpu, 1);
+ gradient_array_gpu(l.g_gpu, l.outputs*l.batch, TANH, l.temp_gpu);
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, wg.delta_gpu, 1);
+ s.input_gpu = l.prev_state_gpu;
+ s.delta_gpu = l.dh_gpu;
+ backward_connected_layer_gpu(wg, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, ug.delta_gpu, 1);
+ s.input_gpu = state.input_gpu;
+ s.delta_gpu = state.delta_gpu;
+ backward_connected_layer_gpu(ug, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp2_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.g_gpu, 1, l.temp_gpu, 1);
+ gradient_array_gpu(l.i_gpu, l.outputs*l.batch, LOGISTIC, l.temp_gpu);
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, wi.delta_gpu, 1);
+ s.input_gpu = l.prev_state_gpu;
+ s.delta_gpu = l.dh_gpu;
+ backward_connected_layer_gpu(wi, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, ui.delta_gpu, 1);
+ s.input_gpu = state.input_gpu;
+ s.delta_gpu = state.delta_gpu;
+ backward_connected_layer_gpu(ui, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp2_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.prev_cell_gpu, 1, l.temp_gpu, 1);
+ gradient_array_gpu(l.f_gpu, l.outputs*l.batch, LOGISTIC, l.temp_gpu);
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, wf.delta_gpu, 1);
+ s.input_gpu = l.prev_state_gpu;
+ s.delta_gpu = l.dh_gpu;
+ backward_connected_layer_gpu(wf, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, uf.delta_gpu, 1);
+ s.input_gpu = state.input_gpu;
+ s.delta_gpu = state.delta_gpu;
+ backward_connected_layer_gpu(uf, s);
+
+ copy_gpu(l.outputs*l.batch, l.temp2_gpu, 1, l.temp_gpu, 1);
+ mul_gpu(l.outputs*l.batch, l.f_gpu, 1, l.temp_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.temp_gpu, 1, l.dc_gpu, 1);
+
+ state.input_gpu -= l.inputs*l.batch;
+ if (state.delta_gpu) state.delta_gpu -= l.inputs*l.batch;
+ l.output_gpu -= l.outputs*l.batch;
+ l.cell_gpu -= l.outputs*l.batch;
+ l.delta_gpu -= l.outputs*l.batch;
+
+ increment_layer(&wf, -1);
+ increment_layer(&wi, -1);
+ increment_layer(&wg, -1);
+ increment_layer(&wo, -1);
+
+ increment_layer(&uf, -1);
+ increment_layer(&ui, -1);
+ increment_layer(&ug, -1);
+ increment_layer(&uo, -1);
+ }
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.h
new file mode 100644
index 00000000000..b9f07e6424b
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/lstm_layer.h
@@ -0,0 +1,20 @@
+#ifndef LSTM_LAYER_H
+#define LSTM_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+#define USET
+
+layer make_lstm_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam);
+
+void forward_lstm_layer(layer l, network net);
+void update_lstm_layer(layer l, update_args a);
+
+#ifdef GPU
+void forward_lstm_layer_gpu(layer l, network net);
+void backward_lstm_layer_gpu(layer l, network net);
+void update_lstm_layer_gpu(layer l, update_args a);
+
+#endif
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.c
new file mode 100644
index 00000000000..799916bff01
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.c
@@ -0,0 +1,196 @@
+#include "matrix.h"
+#include "utils.h"
+#include "blas.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <math.h>
+
+void free_matrix(matrix m)
+{
+ int i;
+ for(i = 0; i < m.rows; ++i) free(m.vals[i]);
+ free(m.vals);
+}
+
+float matrix_topk_accuracy(matrix truth, matrix guess, int k)
+{
+ int *indexes = calloc(k, sizeof(int));
+ int n = truth.cols;
+ int i,j;
+ int correct = 0;
+ for(i = 0; i < truth.rows; ++i){
+ top_k(guess.vals[i], n, k, indexes);
+ for(j = 0; j < k; ++j){
+ int class = indexes[j];
+ if(truth.vals[i][class]){
+ ++correct;
+ break;
+ }
+ }
+ }
+ free(indexes);
+ return (float)correct/truth.rows;
+}
+
+void scale_matrix(matrix m, float scale)
+{
+ int i,j;
+ for(i = 0; i < m.rows; ++i){
+ for(j = 0; j < m.cols; ++j){
+ m.vals[i][j] *= scale;
+ }
+ }
+}
+
+matrix resize_matrix(matrix m, int size)
+{
+ int i;
+ if (m.rows == size) return m;
+ if (m.rows < size) {
+ m.vals = realloc(m.vals, size*sizeof(float*));
+ for (i = m.rows; i < size; ++i) {
+ m.vals[i] = calloc(m.cols, sizeof(float));
+ }
+ } else if (m.rows > size) {
+ for (i = size; i < m.rows; ++i) {
+ free(m.vals[i]);
+ }
+ m.vals = realloc(m.vals, size*sizeof(float*));
+ }
+ m.rows = size;
+ return m;
+}
+
+void matrix_add_matrix(matrix from, matrix to)
+{
+ assert(from.rows == to.rows && from.cols == to.cols);
+ int i,j;
+ for(i = 0; i < from.rows; ++i){
+ for(j = 0; j < from.cols; ++j){
+ to.vals[i][j] += from.vals[i][j];
+ }
+ }
+}
+
+matrix copy_matrix(matrix m)
+{
+ matrix c = {0};
+ c.rows = m.rows;
+ c.cols = m.cols;
+ c.vals = calloc(c.rows, sizeof(float *));
+ int i;
+ for(i = 0; i < c.rows; ++i){
+ c.vals[i] = calloc(c.cols, sizeof(float));
+ copy_cpu(c.cols, m.vals[i], 1, c.vals[i], 1);
+ }
+ return c;
+}
+
+matrix make_matrix(int rows, int cols)
+{
+ int i;
+ matrix m;
+ m.rows = rows;
+ m.cols = cols;
+ m.vals = calloc(m.rows, sizeof(float *));
+ for(i = 0; i < m.rows; ++i){
+ m.vals[i] = calloc(m.cols, sizeof(float));
+ }
+ return m;
+}
+
+matrix hold_out_matrix(matrix *m, int n)
+{
+ int i;
+ matrix h;
+ h.rows = n;
+ h.cols = m->cols;
+ h.vals = calloc(h.rows, sizeof(float *));
+ for(i = 0; i < n; ++i){
+ int index = rand()%m->rows;
+ h.vals[i] = m->vals[index];
+ m->vals[index] = m->vals[--(m->rows)];
+ }
+ return h;
+}
+
+float *pop_column(matrix *m, int c)
+{
+ float *col = calloc(m->rows, sizeof(float));
+ int i, j;
+ for(i = 0; i < m->rows; ++i){
+ col[i] = m->vals[i][c];
+ for(j = c; j < m->cols-1; ++j){
+ m->vals[i][j] = m->vals[i][j+1];
+ }
+ }
+ --m->cols;
+ return col;
+}
+
+matrix csv_to_matrix(char *filename)
+{
+ FILE *fp = fopen(filename, "r");
+ if(!fp) file_error(filename);
+
+ matrix m;
+ m.cols = -1;
+
+ char *line;
+
+ int n = 0;
+ int size = 1024;
+ m.vals = calloc(size, sizeof(float*));
+ while((line = fgetl(fp))){
+ if(m.cols == -1) m.cols = count_fields(line);
+ if(n == size){
+ size *= 2;
+ m.vals = realloc(m.vals, size*sizeof(float*));
+ }
+ m.vals[n] = parse_fields(line, m.cols);
+ free(line);
+ ++n;
+ }
+ m.vals = realloc(m.vals, n*sizeof(float*));
+ m.rows = n;
+ return m;
+}
+
+void matrix_to_csv(matrix m)
+{
+ int i, j;
+
+ for(i = 0; i < m.rows; ++i){
+ for(j = 0; j < m.cols; ++j){
+ if(j > 0) printf(",");
+ printf("%.17g", m.vals[i][j]);
+ }
+ printf("\n");
+ }
+}
+
+void print_matrix(matrix m)
+{
+ int i, j;
+ printf("%d X %d Matrix:\n",m.rows, m.cols);
+ printf(" __");
+ for(j = 0; j < 16*m.cols-1; ++j) printf(" ");
+ printf("__ \n");
+
+ printf("| ");
+ for(j = 0; j < 16*m.cols-1; ++j) printf(" ");
+ printf(" |\n");
+
+ for(i = 0; i < m.rows; ++i){
+ printf("| ");
+ for(j = 0; j < m.cols; ++j){
+ printf("%15.7f ", m.vals[i][j]);
+ }
+ printf(" |\n");
+ }
+ printf("|__");
+ for(j = 0; j < 16*m.cols-1; ++j) printf(" ");
+ printf("__|\n");
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.h
new file mode 100644
index 00000000000..879acd70d26
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/matrix.h
@@ -0,0 +1,13 @@
+#ifndef MATRIX_H
+#define MATRIX_H
+#include "darknet.h"
+
+matrix copy_matrix(matrix m);
+void print_matrix(matrix m);
+
+matrix hold_out_matrix(matrix *m, int n);
+matrix resize_matrix(matrix m, int size);
+
+float *pop_column(matrix *m, int c);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.c
new file mode 100644
index 00000000000..17dedf7a14c
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.c
@@ -0,0 +1,127 @@
+#include "maxpool_layer.h"
+#include "cuda.h"
+#include <stdio.h>
+#include <float.h>
+
+image get_maxpool_image(maxpool_layer l)
+{
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.c;
+ return float_to_image(w,h,c,l.output);
+}
+
+image get_maxpool_delta(maxpool_layer l)
+{
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.c;
+ return float_to_image(w,h,c,l.delta);
+}
+
+maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
+{
+ maxpool_layer l = {0};
+ l.type = MAXPOOL;
+ l.batch = batch;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.pad = padding;
+ l.out_w = (w + 2*padding)/stride;
+ l.out_h = (h + 2*padding)/stride;
+ l.out_c = c;
+ l.outputs = l.out_h * l.out_w * l.out_c;
+ l.inputs = h*w*c;
+ l.size = size;
+ l.stride = stride;
+ int output_size = l.out_h * l.out_w * l.out_c * batch;
+ l.indexes = calloc(output_size, sizeof(int));
+ l.output = calloc(output_size, sizeof(float));
+ l.delta = calloc(output_size, sizeof(float));
+ l.forward = forward_maxpool_layer;
+ l.backward = backward_maxpool_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_maxpool_layer_gpu;
+ l.backward_gpu = backward_maxpool_layer_gpu;
+ l.indexes_gpu = cuda_make_int_array(0, output_size);
+ l.output_gpu = cuda_make_array(l.output, output_size);
+ l.delta_gpu = cuda_make_array(l.delta, output_size);
+ #endif
+ fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
+ return l;
+}
+
+void resize_maxpool_layer(maxpool_layer *l, int w, int h)
+{
+ l->h = h;
+ l->w = w;
+ l->inputs = h*w*l->c;
+
+ l->out_w = (w + 2*l->pad)/l->stride;
+ l->out_h = (h + 2*l->pad)/l->stride;
+ l->outputs = l->out_w * l->out_h * l->c;
+ int output_size = l->outputs * l->batch;
+
+ l->indexes = realloc(l->indexes, output_size * sizeof(int));
+ l->output = realloc(l->output, output_size * sizeof(float));
+ l->delta = realloc(l->delta, output_size * sizeof(float));
+
+ #ifdef GPU
+ cuda_free((float *)l->indexes_gpu);
+ cuda_free(l->output_gpu);
+ cuda_free(l->delta_gpu);
+ l->indexes_gpu = cuda_make_int_array(0, output_size);
+ l->output_gpu = cuda_make_array(l->output, output_size);
+ l->delta_gpu = cuda_make_array(l->delta, output_size);
+ #endif
+}
+
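+/*
+ * CPU forward pass for max pooling: every output cell scans its size x size
+ * window, writes the maximum into l.output and records the winning input
+ * index in l.indexes so the backward pass can route gradients.
+ */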
+void forward_maxpool_layer(const maxpool_layer l, network net)
+{
+ int b,i,j,k,m,n;
+ int w_offset = -l.pad;
+ int h_offset = -l.pad;
+
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.c;
+
+ for(b = 0; b < l.batch; ++b){
+ for(k = 0; k < c; ++k){
+ for(i = 0; i < h; ++i){
+ for(j = 0; j < w; ++j){
+ int out_index = j + w*(i + h*(k + c*b));
+ float max = -FLT_MAX;
+ int max_i = -1;
+ for(n = 0; n < l.size; ++n){
+ for(m = 0; m < l.size; ++m){
+ int cur_h = h_offset + i*l.stride + n;
+ int cur_w = w_offset + j*l.stride + m;
+ int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
+ int valid = (cur_h >= 0 && cur_h < l.h &&
+ cur_w >= 0 && cur_w < l.w);
+ float val = (valid != 0) ? net.input[index] : -FLT_MAX;
+ max_i = (val > max) ? index : max_i;
+ max = (val > max) ? val : max;
+ }
+ }
+ l.output[out_index] = max;
+ l.indexes[out_index] = max_i;
+ }
+ }
+ }
+ }
+}
+
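+/*
+ * CPU backward pass for max pooling: each output delta is accumulated into
+ * the input element that produced the maximum, using the indexes saved above.
+ */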
+void backward_maxpool_layer(const maxpool_layer l, network net)
+{
+ int i;
+ int h = l.out_h;
+ int w = l.out_w;
+ int c = l.c;
+ for(i = 0; i < h*w*c*l.batch; ++i){
+ int index = l.indexes[i];
+ net.delta[index] += l.delta[i];
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.h
new file mode 100644
index 00000000000..ceb5190716c
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer.h
@@ -0,0 +1,23 @@
+#ifndef MAXPOOL_LAYER_H
+#define MAXPOOL_LAYER_H
+
+#include "image.h"
+#include "cuda.h"
+#include "layer.h"
+#include "network.h"
+
+typedef layer maxpool_layer;
+
+image get_maxpool_image(maxpool_layer l);
+maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding);
+void resize_maxpool_layer(maxpool_layer *l, int w, int h);
+void forward_maxpool_layer(const maxpool_layer l, network net);
+void backward_maxpool_layer(const maxpool_layer l, network net);
+
+#ifdef GPU
+void forward_maxpool_layer_gpu(maxpool_layer l, network net);
+void backward_maxpool_layer_gpu(maxpool_layer l, network net);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer_kernels.cu b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer_kernels.cu
new file mode 100644
index 00000000000..3202e84b695
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/maxpool_layer_kernels.cu
@@ -0,0 +1,106 @@
+#include "cuda_runtime.h"
+#include "curand.h"
+#include "cublas_v2.h"
+
+extern "C" {
+#include "maxpool_layer.h"
+#include "cuda.h"
+}
+
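+/*
+ * One thread per output element: the linear thread id is decomposed into
+ * (batch, channel, row, col), the pooling window is scanned and the maximum
+ * value plus its argmax index are written to global memory.
+ */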
+__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *input, float *output, int *indexes)
+{
+ int h = (in_h + 2*pad)/stride;
+ int w = (in_w + 2*pad)/stride;
+ int c = in_c;
+
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= n) return;
+
+ int j = id % w;
+ id /= w;
+ int i = id % h;
+ id /= h;
+ int k = id % c;
+ id /= c;
+ int b = id;
+
+ int w_offset = -pad;
+ int h_offset = -pad;
+
+ int out_index = j + w*(i + h*(k + c*b));
+ float max = -INFINITY;
+ int max_i = -1;
+ int l, m;
+ for(l = 0; l < size; ++l){
+ for(m = 0; m < size; ++m){
+ int cur_h = h_offset + i*stride + l;
+ int cur_w = w_offset + j*stride + m;
+ int index = cur_w + in_w*(cur_h + in_h*(k + b*in_c));
+ int valid = (cur_h >= 0 && cur_h < in_h &&
+ cur_w >= 0 && cur_w < in_w);
+ float val = (valid != 0) ? input[index] : -INFINITY;
+ max_i = (val > max) ? index : max_i;
+ max = (val > max) ? val : max;
+ }
+ }
+ output[out_index] = max;
+ indexes[out_index] = max_i;
+}
+
+__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w, int in_c, int stride, int size, int pad, float *delta, float *prev_delta, int *indexes)
+{
+ int h = (in_h + 2*pad)/stride;
+ int w = (in_w + 2*pad)/stride;
+ int c = in_c;
+ int area = (size-1)/stride;
+
+ int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
+ if(id >= n) return;
+
+ int index = id;
+ int j = id % in_w;
+ id /= in_w;
+ int i = id % in_h;
+ id /= in_h;
+ int k = id % in_c;
+ id /= in_c;
+ int b = id;
+
+ int w_offset = -pad;
+ int h_offset = -pad;
+
+ float d = 0;
+ int l, m;
+ for(l = -area; l < area+1; ++l){
+ for(m = -area; m < area+1; ++m){
+ int out_w = (j-w_offset)/stride + m;
+ int out_h = (i-h_offset)/stride + l;
+ int out_index = out_w + w*(out_h + h*(k + c*b));
+ int valid = (out_w >= 0 && out_w < w &&
+ out_h >= 0 && out_h < h);
+ d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
+ }
+ }
+ prev_delta[index] += d;
+}
+
+extern "C" void forward_maxpool_layer_gpu(maxpool_layer layer, network net)
+{
+ int h = layer.out_h;
+ int w = layer.out_w;
+ int c = layer.c;
+
+ size_t n = h*w*c*layer.batch;
+
+ forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, net.input_gpu, layer.output_gpu, layer.indexes_gpu);
+ check_error(cudaPeekAtLastError());
+}
+
+extern "C" void backward_maxpool_layer_gpu(maxpool_layer layer, network net)
+{
+ size_t n = layer.h*layer.w*layer.c*layer.batch;
+
+ backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK>>>(n, layer.h, layer.w, layer.c, layer.stride, layer.size, layer.pad, layer.delta_gpu, net.delta_gpu, layer.indexes_gpu);
+ check_error(cudaPeekAtLastError());
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.c
new file mode 100644
index 00000000000..aaab7997b5e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.c
@@ -0,0 +1,1129 @@
+#include <stdio.h>
+#include <time.h>
+#include <assert.h>
+#include "network.h"
+#include "image.h"
+#include "data.h"
+#include "utils.h"
+#include "blas.h"
+
+#include "crop_layer.h"
+#include "connected_layer.h"
+#include "gru_layer.h"
+#include "rnn_layer.h"
+#include "crnn_layer.h"
+#include "local_layer.h"
+#include "convolutional_layer.h"
+#include "activation_layer.h"
+#include "detection_layer.h"
+#include "region_layer.h"
+#include "yolo_layer.h"
+#include "normalization_layer.h"
+#include "batchnorm_layer.h"
+#include "maxpool_layer.h"
+#include "reorg_layer.h"
+#include "avgpool_layer.h"
+#include "cost_layer.h"
+#include "softmax_layer.h"
+#include "dropout_layer.h"
+#include "route_layer.h"
+#include "upsample_layer.h"
+#include "shortcut_layer.h"
+#include "parser.h"
+#include "data.h"
+
+load_args get_base_args(network *net)
+{
+ load_args args = {0};
+ args.w = net->w;
+ args.h = net->h;
+ args.size = net->w;
+
+ args.min = net->min_crop;
+ args.max = net->max_crop;
+ args.angle = net->angle;
+ args.aspect = net->aspect;
+ args.exposure = net->exposure;
+ args.center = net->center;
+ args.saturation = net->saturation;
+ args.hue = net->hue;
+ return args;
+}
+
+network *load_network(char *cfg, char *weights, int clear)
+{
+ network *net = parse_network_cfg(cfg);
+ if(weights && weights[0] != 0){
+ load_weights(net, weights);
+ }
+ if(clear) (*net->seen) = 0;
+ return net;
+}
+
+size_t get_current_batch(network *net)
+{
+ size_t batch_num = (*net->seen)/(net->batch*net->subdivisions);
+ return batch_num;
+}
+
+void reset_network_state(network *net, int b)
+{
+ int i;
+ for (i = 0; i < net->n; ++i) {
+ #ifdef GPU
+ layer l = net->layers[i];
+ if(l.state_gpu){
+ fill_gpu(l.outputs, 0, l.state_gpu + l.outputs*b, 1);
+ }
+ if(l.h_gpu){
+ fill_gpu(l.outputs, 0, l.h_gpu + l.outputs*b, 1);
+ }
+ #endif
+ }
+}
+
+void reset_rnn(network *net)
+{
+ reset_network_state(net, 0);
+}
+
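+/*
+ * Returns the learning rate for the current batch: during burn-in the rate is
+ * ramped up with pow(batch/burn_in, power); afterwards the configured policy
+ * (CONSTANT, STEP, STEPS, EXP, POLY, RANDOM, SIG) is applied.
+ */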
+float get_current_rate(network *net)
+{
+ size_t batch_num = get_current_batch(net);
+ int i;
+ float rate;
+ if (batch_num < net->burn_in) return net->learning_rate * pow((float)batch_num / net->burn_in, net->power);
+ switch (net->policy) {
+ case CONSTANT:
+ return net->learning_rate;
+ case STEP:
+ return net->learning_rate * pow(net->scale, batch_num/net->step);
+ case STEPS:
+ rate = net->learning_rate;
+ for(i = 0; i < net->num_steps; ++i){
+ if(net->steps[i] > batch_num) return rate;
+ rate *= net->scales[i];
+ }
+ return rate;
+ case EXP:
+ return net->learning_rate * pow(net->gamma, batch_num);
+ case POLY:
+ return net->learning_rate * pow(1 - (float)batch_num / net->max_batches, net->power);
+ case RANDOM:
+ return net->learning_rate * pow(rand_uniform(0,1), net->power);
+ case SIG:
+ return net->learning_rate * (1./(1.+exp(net->gamma*(batch_num - net->step))));
+ default:
+ fprintf(stderr, "Policy is weird!\n");
+ return net->learning_rate;
+ }
+}
+
+char *get_layer_string(LAYER_TYPE a)
+{
+ switch(a){
+ case CONVOLUTIONAL:
+ return "convolutional";
+ case ACTIVE:
+ return "activation";
+ case LOCAL:
+ return "local";
+ case DECONVOLUTIONAL:
+ return "deconvolutional";
+ case CONNECTED:
+ return "connected";
+ case RNN:
+ return "rnn";
+ case GRU:
+ return "gru";
+ case LSTM:
+ return "lstm";
+ case CRNN:
+ return "crnn";
+ case MAXPOOL:
+ return "maxpool";
+ case REORG:
+ return "reorg";
+ case AVGPOOL:
+ return "avgpool";
+ case SOFTMAX:
+ return "softmax";
+ case DETECTION:
+ return "detection";
+ case REGION:
+ return "region";
+ case YOLO:
+ return "yolo";
+ case DROPOUT:
+ return "dropout";
+ case CROP:
+ return "crop";
+ case COST:
+ return "cost";
+ case ROUTE:
+ return "route";
+ case SHORTCUT:
+ return "shortcut";
+ case NORMALIZATION:
+ return "normalization";
+ case BATCHNORM:
+ return "batchnorm";
+ default:
+ break;
+ }
+ return "none";
+}
+
+network *make_network(int n)
+{
+ network *net = calloc(1, sizeof(network));
+ net->n = n;
+ net->layers = calloc(net->n, sizeof(layer));
+ net->seen = calloc(1, sizeof(size_t));
+ net->t = calloc(1, sizeof(int));
+ net->cost = calloc(1, sizeof(float));
+ return net;
+}
+
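+/*
+ * CPU forward pass: zeroes each layer's delta, calls its forward() and feeds
+ * l.output to the next layer via net.input; delegates to forward_network_gpu
+ * when a GPU device is selected.
+ */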
+void forward_network(network *netp)
+{
+#ifdef GPU
+ if(netp->gpu_index >= 0){
+ forward_network_gpu(netp);
+ return;
+ }
+#endif
+ network net = *netp;
+ int i;
+ for(i = 0; i < net.n; ++i){
+ net.index = i;
+ layer l = net.layers[i];
+ if(l.delta){
+ fill_cpu(l.outputs * l.batch, 0, l.delta, 1);
+ }
+ l.forward(l, net);
+ net.input = l.output;
+ if(l.truth) {
+ net.truth = l.output;
+ }
+ }
+ calc_network_cost(netp);
+}
+
+void update_network(network *netp)
+{
+#ifdef GPU
+ if(netp->gpu_index >= 0){
+ update_network_gpu(netp);
+ return;
+ }
+#endif
+ network net = *netp;
+ int i;
+ update_args a = {0};
+ a.batch = net.batch*net.subdivisions;
+ a.learning_rate = get_current_rate(netp);
+ a.momentum = net.momentum;
+ a.decay = net.decay;
+ a.adam = net.adam;
+ a.B1 = net.B1;
+ a.B2 = net.B2;
+ a.eps = net.eps;
+ ++*net.t;
+ a.t = *net.t;
+
+ for(i = 0; i < net.n; ++i){
+ layer l = net.layers[i];
+ if(l.update){
+ l.update(l, a);
+ }
+ }
+}
+
+void calc_network_cost(network *netp)
+{
+ network net = *netp;
+ int i;
+ float sum = 0;
+ int count = 0;
+ for(i = 0; i < net.n; ++i){
+ if(net.layers[i].cost){
+ sum += net.layers[i].cost[0];
+ ++count;
+ }
+ }
+ *net.cost = sum/count;
+}
+
+int get_predicted_class_network(network *net)
+{
+ return max_index(net->output, net->outputs);
+}
+
+void backward_network(network *netp)
+{
+#ifdef GPU
+ if(netp->gpu_index >= 0){
+ backward_network_gpu(netp);
+ return;
+ }
+#endif
+ network net = *netp;
+ int i;
+ network orig = net;
+ for(i = net.n-1; i >= 0; --i){
+ layer l = net.layers[i];
+ if(l.stopbackward) break;
+ if(i == 0){
+ net = orig;
+ }else{
+ layer prev = net.layers[i-1];
+ net.input = prev.output;
+ net.delta = prev.delta;
+ }
+ net.index = i;
+ l.backward(l, net);
+ }
+}
+
+float train_network_datum(network *net)
+{
+ *net->seen += net->batch;
+ net->train = 1;
+ forward_network(net);
+ backward_network(net);
+ float error = *net->cost;
+ if(((*net->seen)/net->batch)%net->subdivisions == 0) update_network(net);
+ return error;
+}
+
+float train_network_sgd(network *net, data d, int n)
+{
+ int batch = net->batch;
+
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; ++i){
+ get_random_batch(d, batch, net->input, net->truth);
+ float err = train_network_datum(net);
+ sum += err;
+ }
+ return (float)sum/(n*batch);
+}
+
+float train_network(network *net, data d)
+{
+ assert(d.X.rows % net->batch == 0);
+ int batch = net->batch;
+ int n = d.X.rows / batch;
+
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; ++i){
+ get_next_batch(d, batch, i*batch, net->input, net->truth);
+ float err = train_network_datum(net);
+ sum += err;
+ }
+ return (float)sum/(n*batch);
+}
+
+void set_temp_network(network *net, float t)
+{
+ int i;
+ for(i = 0; i < net->n; ++i){
+ net->layers[i].temperature = t;
+ }
+}
+
+
+void set_batch_network(network *net, int b)
+{
+ net->batch = b;
+ int i;
+ for(i = 0; i < net->n; ++i){
+ net->layers[i].batch = b;
+#ifdef CUDNN
+ if(net->layers[i].type == CONVOLUTIONAL){
+ cudnn_convolutional_setup(net->layers + i);
+ }
+ if(net->layers[i].type == DECONVOLUTIONAL){
+ layer *l = net->layers + i;
+ cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, l->out_h, l->out_w);
+ cudnnSetTensor4dDescriptor(l->normTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, 1, l->out_c, 1, 1);
+ }
+#endif
+ }
+}
+
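+/*
+ * Propagates a new input width/height through every layer, tracks the largest
+ * workspace any layer requires and reallocates the input/truth buffers (and
+ * their GPU mirrors) to match.
+ */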
+int resize_network(network *net, int w, int h)
+{
+#ifdef GPU
+ cuda_set_device(net->gpu_index);
+ cuda_free(net->workspace);
+#endif
+ int i;
+ //if(w == net->w && h == net->h) return 0;
+ net->w = w;
+ net->h = h;
+ int inputs = 0;
+ size_t workspace_size = 0;
+ //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
+ //fflush(stderr);
+ for (i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ if(l.type == CONVOLUTIONAL){
+ resize_convolutional_layer(&l, w, h);
+ }else if(l.type == CROP){
+ resize_crop_layer(&l, w, h);
+ }else if(l.type == MAXPOOL){
+ resize_maxpool_layer(&l, w, h);
+ }else if(l.type == REGION){
+ resize_region_layer(&l, w, h);
+ }else if(l.type == YOLO){
+ resize_yolo_layer(&l, w, h);
+ }else if(l.type == ROUTE){
+ resize_route_layer(&l, net);
+ }else if(l.type == SHORTCUT){
+ resize_shortcut_layer(&l, w, h);
+ }else if(l.type == UPSAMPLE){
+ resize_upsample_layer(&l, w, h);
+ }else if(l.type == REORG){
+ resize_reorg_layer(&l, w, h);
+ }else if(l.type == AVGPOOL){
+ resize_avgpool_layer(&l, w, h);
+ }else if(l.type == NORMALIZATION){
+ resize_normalization_layer(&l, w, h);
+ }else if(l.type == COST){
+ resize_cost_layer(&l, inputs);
+ }else{
+ error("Cannot resize this type of layer");
+ }
+ if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
+ if(l.workspace_size > 2000000000) assert(0);
+ inputs = l.outputs;
+ net->layers[i] = l;
+ w = l.out_w;
+ h = l.out_h;
+ if(l.type == AVGPOOL) break;
+ }
+ layer out = get_network_output_layer(net);
+ net->inputs = net->layers[0].inputs;
+ net->outputs = out.outputs;
+ net->truths = out.outputs;
+ if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
+ net->output = out.output;
+ free(net->input);
+ free(net->truth);
+ net->input = calloc(net->inputs*net->batch, sizeof(float));
+ net->truth = calloc(net->truths*net->batch, sizeof(float));
+#ifdef GPU
+ if(gpu_index >= 0){
+ cuda_free(net->input_gpu);
+ cuda_free(net->truth_gpu);
+ net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
+ net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
+ if(workspace_size){
+ net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
+ }
+ }else {
+ free(net->workspace);
+ net->workspace = calloc(1, workspace_size);
+ }
+#else
+ free(net->workspace);
+ net->workspace = calloc(1, workspace_size);
+#endif
+ //fprintf(stderr, " Done!\n");
+ return 0;
+}
+
+layer get_network_detection_layer(network *net)
+{
+ int i;
+ for(i = 0; i < net->n; ++i){
+ if(net->layers[i].type == DETECTION){
+ return net->layers[i];
+ }
+ }
+ fprintf(stderr, "Detection layer not found!!\n");
+ layer l = {0};
+ return l;
+}
+
+image get_network_image_layer(network *net, int i)
+{
+ layer l = net->layers[i];
+#ifdef GPU
+ //cuda_pull_array(l.output_gpu, l.output, l.outputs);
+#endif
+ if (l.out_w && l.out_h && l.out_c){
+ return float_to_image(l.out_w, l.out_h, l.out_c, l.output);
+ }
+ image def = {0};
+ return def;
+}
+
+image get_network_image(network *net)
+{
+ int i;
+ for(i = net->n-1; i >= 0; --i){
+ image m = get_network_image_layer(net, i);
+ if(m.h != 0) return m;
+ }
+ image def = {0};
+ return def;
+}
+
+void visualize_network(network *net)
+{
+ image *prev = 0;
+ int i;
+ char buff[256];
+ for(i = 0; i < net->n; ++i){
+ sprintf(buff, "Layer %d", i);
+ layer l = net->layers[i];
+ if(l.type == CONVOLUTIONAL){
+ prev = visualize_convolutional_layer(l, buff, prev);
+ }
+ }
+}
+
+void top_predictions(network *net, int k, int *index)
+{
+ top_k(net->output, net->outputs, k, index);
+}
+
+
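+/*
+ * Runs one forward pass in inference mode; the network struct is copied and
+ * restored afterwards so the caller's training state is untouched.
+ */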
+float *network_predict(network *net, float *input)
+{
+ network orig = *net;
+ net->input = input;
+ net->truth = 0;
+ net->train = 0;
+ net->delta = 0;
+ forward_network(net);
+ float *out = net->output;
+ *net = orig;
+ return out;
+}
+
+int num_detections(network *net, float thresh)
+{
+ int i;
+ int s = 0;
+ for(i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ if(l.type == YOLO){
+ s += yolo_num_detections(l, thresh);
+ }
+ if(l.type == DETECTION || l.type == REGION){
+ s += l.w*l.h*l.n;
+ }
+ }
+ return s;
+}
+
+detection *make_network_boxes(network *net, float thresh, int *num)
+{
+ layer l = net->layers[net->n - 1];
+ int i;
+ int nboxes = num_detections(net, thresh);
+ if(num) *num = nboxes;
+ detection *dets = calloc(nboxes, sizeof(detection));
+ for(i = 0; i < nboxes; ++i){
+ dets[i].prob = calloc(l.classes, sizeof(float));
+ if(l.coords > 4){
+ dets[i].mask = calloc(l.coords-4, sizeof(float));
+ }
+ }
+ return dets;
+}
+
+void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
+{
+ int j;
+ for(j = 0; j < net->n; ++j){
+ layer l = net->layers[j];
+ if(l.type == YOLO){
+ int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets);
+ dets += count;
+ }
+ if(l.type == REGION){
+ get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
+ dets += l.w*l.h*l.n;
+ }
+ if(l.type == DETECTION){
+ get_detection_detections(l, w, h, thresh, dets);
+ dets += l.w*l.h*l.n;
+ }
+ }
+}
+
+detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
+{
+ detection *dets = make_network_boxes(net, thresh, num);
+ fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
+ return dets;
+}
+
+void free_detections(detection *dets, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ free(dets[i].prob);
+ if(dets[i].mask) free(dets[i].mask);
+ }
+ free(dets);
+}
+
+float *network_predict_image(network *net, image im)
+{
+ image imr = letterbox_image(im, net->w, net->h);
+ set_batch_network(net, 1);
+ float *p = network_predict(net, imr.data);
+ free_image(imr);
+ return p;
+}
+
+int network_width(network *net){return net->w;}
+int network_height(network *net){return net->h;}
+
+matrix network_predict_data_multi(network *net, data test, int n)
+{
+ int i,j,b,m;
+ int k = net->outputs;
+ matrix pred = make_matrix(test.X.rows, k);
+ float *X = calloc(net->batch*test.X.rows, sizeof(float));
+ for(i = 0; i < test.X.rows; i += net->batch){
+ for(b = 0; b < net->batch; ++b){
+ if(i+b == test.X.rows) break;
+ memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
+ }
+ for(m = 0; m < n; ++m){
+ float *out = network_predict(net, X);
+ for(b = 0; b < net->batch; ++b){
+ if(i+b == test.X.rows) break;
+ for(j = 0; j < k; ++j){
+ pred.vals[i+b][j] += out[j+b*k]/n;
+ }
+ }
+ }
+ }
+ free(X);
+ return pred;
+}
+
+matrix network_predict_data(network *net, data test)
+{
+ int i,j,b;
+ int k = net->outputs;
+ matrix pred = make_matrix(test.X.rows, k);
+ float *X = calloc(net->batch*test.X.cols, sizeof(float));
+ for(i = 0; i < test.X.rows; i += net->batch){
+ for(b = 0; b < net->batch; ++b){
+ if(i+b == test.X.rows) break;
+ memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
+ }
+ float *out = network_predict(net, X);
+ for(b = 0; b < net->batch; ++b){
+ if(i+b == test.X.rows) break;
+ for(j = 0; j < k; ++j){
+ pred.vals[i+b][j] = out[j+b*k];
+ }
+ }
+ }
+ free(X);
+ return pred;
+}
+
+void print_network(network *net)
+{
+ int i,j;
+ for(i = 0; i < net->n; ++i){
+ layer l = net->layers[i];
+ float *output = l.output;
+ int n = l.outputs;
+ float mean = mean_array(output, n);
+ float vari = variance_array(output, n);
+ fprintf(stderr, "Layer %d - Mean: %f, Variance: %f\n",i,mean, vari);
+ if(n > 100) n = 100;
+ for(j = 0; j < n; ++j) fprintf(stderr, "%f, ", output[j]);
+ if(n == 100)fprintf(stderr,".....\n");
+ fprintf(stderr, "\n");
+ }
+}
+
+void compare_networks(network *n1, network *n2, data test)
+{
+ matrix g1 = network_predict_data(n1, test);
+ matrix g2 = network_predict_data(n2, test);
+ int i;
+ int a,b,c,d;
+ a = b = c = d = 0;
+ for(i = 0; i < g1.rows; ++i){
+ int truth = max_index(test.y.vals[i], test.y.cols);
+ int p1 = max_index(g1.vals[i], g1.cols);
+ int p2 = max_index(g2.vals[i], g2.cols);
+ if(p1 == truth){
+ if(p2 == truth) ++d;
+ else ++c;
+ }else{
+ if(p2 == truth) ++b;
+ else ++a;
+ }
+ }
+ printf("%5d %5d\n%5d %5d\n", a, b, c, d);
+ float num = pow((abs(b - c) - 1.), 2.);
+ float den = b + c;
+ printf("%f\n", num/den);
+}
+
+float network_accuracy(network *net, data d)
+{
+ matrix guess = network_predict_data(net, d);
+ float acc = matrix_topk_accuracy(d.y, guess,1);
+ free_matrix(guess);
+ return acc;
+}
+
+float *network_accuracies(network *net, data d, int n)
+{
+ static float acc[2];
+ matrix guess = network_predict_data(net, d);
+ acc[0] = matrix_topk_accuracy(d.y, guess, 1);
+ acc[1] = matrix_topk_accuracy(d.y, guess, n);
+ free_matrix(guess);
+ return acc;
+}
+
+layer get_network_output_layer(network *net)
+{
+ int i;
+ for(i = net->n - 1; i >= 0; --i){
+ if(net->layers[i].type != COST) break;
+ }
+ return net->layers[i];
+}
+
+float network_accuracy_multi(network *net, data d, int n)
+{
+ matrix guess = network_predict_data_multi(net, d, n);
+ float acc = matrix_topk_accuracy(d.y, guess,1);
+ free_matrix(guess);
+ return acc;
+}
+
+void free_network(network *net)
+{
+ int i;
+ for(i = 0; i < net->n; ++i){
+ free_layer(net->layers[i]);
+ }
+ free(net->layers);
+ if(net->input) free(net->input);
+ if(net->truth) free(net->truth);
+#ifdef GPU
+ if(net->input_gpu) cuda_free(net->input_gpu);
+ if(net->truth_gpu) cuda_free(net->truth_gpu);
+#endif
+ free(net);
+}
+
+// Some day...
+// ^ What the hell is this comment for?
+
+
+layer network_output_layer(network *net)
+{
+ int i;
+ for(i = net->n - 1; i >= 0; --i){
+ if(net->layers[i].type != COST) break;
+ }
+ return net->layers[i];
+}
+
+int network_inputs(network *net)
+{
+ return net->layers[0].inputs;
+}
+
+int network_outputs(network *net)
+{
+ return network_output_layer(net).outputs;
+}
+
+float *network_output(network *net)
+{
+ return network_output_layer(net).output;
+}
+
+#ifdef GPU
+
+void forward_network_gpu(network *netp)
+{
+ network net = *netp;
+ cuda_set_device(net.gpu_index);
+ cuda_push_array(net.input_gpu, net.input, net.inputs*net.batch);
+ if(net.truth){
+ cuda_push_array(net.truth_gpu, net.truth, net.truths*net.batch);
+ }
+
+ int i;
+ for(i = 0; i < net.n; ++i){
+ net.index = i;
+ layer l = net.layers[i];
+ if(l.delta_gpu){
+ fill_gpu(l.outputs * l.batch, 0, l.delta_gpu, 1);
+ }
+ l.forward_gpu(l, net);
+ net.input_gpu = l.output_gpu;
+ net.input = l.output;
+ if(l.truth) {
+ net.truth_gpu = l.output_gpu;
+ net.truth = l.output;
+ }
+ }
+ pull_network_output(netp);
+ calc_network_cost(netp);
+}
+
+void backward_network_gpu(network *netp)
+{
+ int i;
+ network net = *netp;
+ network orig = net;
+ cuda_set_device(net.gpu_index);
+ for(i = net.n-1; i >= 0; --i){
+ layer l = net.layers[i];
+ if(l.stopbackward) break;
+ if(i == 0){
+ net = orig;
+ }else{
+ layer prev = net.layers[i-1];
+ net.input = prev.output;
+ net.delta = prev.delta;
+ net.input_gpu = prev.output_gpu;
+ net.delta_gpu = prev.delta_gpu;
+ }
+ net.index = i;
+ l.backward_gpu(l, net);
+ }
+}
+
+void update_network_gpu(network *netp)
+{
+ network net = *netp;
+ cuda_set_device(net.gpu_index);
+ int i;
+ update_args a = {0};
+ a.batch = net.batch*net.subdivisions;
+ a.learning_rate = get_current_rate(netp);
+ a.momentum = net.momentum;
+ a.decay = net.decay;
+ a.adam = net.adam;
+ a.B1 = net.B1;
+ a.B2 = net.B2;
+ a.eps = net.eps;
+ ++*net.t;
+ a.t = (*net.t);
+
+ for(i = 0; i < net.n; ++i){
+ layer l = net.layers[i];
+ if(l.update_gpu){
+ l.update_gpu(l, a);
+ }
+ }
+}
+
+void harmless_update_network_gpu(network *netp)
+{
+ network net = *netp;
+ cuda_set_device(net.gpu_index);
+ int i;
+ for(i = 0; i < net.n; ++i){
+ layer l = net.layers[i];
+ if(l.weight_updates_gpu) fill_gpu(l.nweights, 0, l.weight_updates_gpu, 1);
+ if(l.bias_updates_gpu) fill_gpu(l.nbiases, 0, l.bias_updates_gpu, 1);
+ if(l.scale_updates_gpu) fill_gpu(l.nbiases, 0, l.scale_updates_gpu, 1);
+ }
+}
+
+typedef struct {
+ network *net;
+ data d;
+ float *err;
+} train_args;
+
+void *train_thread(void *ptr)
+{
+ train_args args = *(train_args*)ptr;
+ free(ptr);
+ cuda_set_device(args.net->gpu_index);
+ *args.err = train_network(args.net, args.d);
+ return 0;
+}
+
+pthread_t train_network_in_thread(network *net, data d, float *err)
+{
+ pthread_t thread;
+ train_args *ptr = (train_args *)calloc(1, sizeof(train_args));
+ ptr->net = net;
+ ptr->d = d;
+ ptr->err = err;
+ if(pthread_create(&thread, 0, train_thread, ptr)) error("Thread creation failed");
+ return thread;
+}
+
+void merge_weights(layer l, layer base)
+{
+ if (l.type == CONVOLUTIONAL) {
+ axpy_cpu(l.n, 1, l.bias_updates, 1, base.biases, 1);
+ axpy_cpu(l.nweights, 1, l.weight_updates, 1, base.weights, 1);
+ if (l.scales) {
+ axpy_cpu(l.n, 1, l.scale_updates, 1, base.scales, 1);
+ }
+ } else if(l.type == CONNECTED) {
+ axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.biases, 1);
+ axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weights, 1);
+ }
+}
+
+void scale_weights(layer l, float s)
+{
+ if (l.type == CONVOLUTIONAL) {
+ scal_cpu(l.n, s, l.biases, 1);
+ scal_cpu(l.nweights, s, l.weights, 1);
+ if (l.scales) {
+ scal_cpu(l.n, s, l.scales, 1);
+ }
+ } else if(l.type == CONNECTED) {
+ scal_cpu(l.outputs, s, l.biases, 1);
+ scal_cpu(l.outputs*l.inputs, s, l.weights, 1);
+ }
+}
+
+
+void pull_weights(layer l)
+{
+ if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
+ cuda_pull_array(l.biases_gpu, l.bias_updates, l.n);
+ cuda_pull_array(l.weights_gpu, l.weight_updates, l.nweights);
+ if(l.scales) cuda_pull_array(l.scales_gpu, l.scale_updates, l.n);
+ } else if(l.type == CONNECTED){
+ cuda_pull_array(l.biases_gpu, l.bias_updates, l.outputs);
+ cuda_pull_array(l.weights_gpu, l.weight_updates, l.outputs*l.inputs);
+ }
+}
+
+void push_weights(layer l)
+{
+ if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
+ cuda_push_array(l.biases_gpu, l.biases, l.n);
+ cuda_push_array(l.weights_gpu, l.weights, l.nweights);
+ if(l.scales) cuda_push_array(l.scales_gpu, l.scales, l.n);
+ } else if(l.type == CONNECTED){
+ cuda_push_array(l.biases_gpu, l.biases, l.outputs);
+ cuda_push_array(l.weights_gpu, l.weights, l.outputs*l.inputs);
+ }
+}
+
+void distribute_weights(layer l, layer base)
+{
+ if (l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL) {
+ cuda_push_array(l.biases_gpu, base.biases, l.n);
+ cuda_push_array(l.weights_gpu, base.weights, l.nweights);
+ if (base.scales) cuda_push_array(l.scales_gpu, base.scales, l.n);
+ } else if (l.type == CONNECTED) {
+ cuda_push_array(l.biases_gpu, base.biases, l.outputs);
+ cuda_push_array(l.weights_gpu, base.weights, l.outputs*l.inputs);
+ }
+}
+
+
+/*
+
+ void pull_updates(layer l)
+ {
+ if(l.type == CONVOLUTIONAL){
+ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
+ if(l.scale_updates) cuda_pull_array(l.scale_updates_gpu, l.scale_updates, l.n);
+ } else if(l.type == CONNECTED){
+ cuda_pull_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+ cuda_pull_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
+ }
+ }
+
+ void push_updates(layer l)
+ {
+ if(l.type == CONVOLUTIONAL){
+ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.n);
+ cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.nweights);
+ if(l.scale_updates) cuda_push_array(l.scale_updates_gpu, l.scale_updates, l.n);
+ } else if(l.type == CONNECTED){
+ cuda_push_array(l.bias_updates_gpu, l.bias_updates, l.outputs);
+ cuda_push_array(l.weight_updates_gpu, l.weight_updates, l.outputs*l.inputs);
+ }
+ }
+
+ void update_layer(layer l, network net)
+ {
+ int update_batch = net.batch*net.subdivisions;
+ float rate = get_current_rate(net);
+ l.t = get_current_batch(net);
+ if(l.update_gpu){
+ l.update_gpu(l, update_batch, rate*l.learning_rate_scale, net.momentum, net.decay);
+ }
+ }
+ void merge_updates(layer l, layer base)
+ {
+ if (l.type == CONVOLUTIONAL) {
+ axpy_cpu(l.n, 1, l.bias_updates, 1, base.bias_updates, 1);
+ axpy_cpu(l.nweights, 1, l.weight_updates, 1, base.weight_updates, 1);
+ if (l.scale_updates) {
+ axpy_cpu(l.n, 1, l.scale_updates, 1, base.scale_updates, 1);
+ }
+ } else if(l.type == CONNECTED) {
+ axpy_cpu(l.outputs, 1, l.bias_updates, 1, base.bias_updates, 1);
+ axpy_cpu(l.outputs*l.inputs, 1, l.weight_updates, 1, base.weight_updates, 1);
+ }
+ }
+
+ void distribute_updates(layer l, layer base)
+ {
+ if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
+ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.n);
+ cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.nweights);
+ if(base.scale_updates) cuda_push_array(l.scale_updates_gpu, base.scale_updates, l.n);
+ } else if(l.type == CONNECTED){
+ cuda_push_array(l.bias_updates_gpu, base.bias_updates, l.outputs);
+ cuda_push_array(l.weight_updates_gpu, base.weight_updates, l.outputs*l.inputs);
+ }
+ }
+ */
+
+/*
+ void sync_layer(network *nets, int n, int j)
+ {
+ int i;
+ network net = nets[0];
+ layer base = net.layers[j];
+ scale_weights(base, 0);
+ for (i = 0; i < n; ++i) {
+ cuda_set_device(nets[i].gpu_index);
+ layer l = nets[i].layers[j];
+ pull_weights(l);
+ merge_weights(l, base);
+ }
+ scale_weights(base, 1./n);
+ for (i = 0; i < n; ++i) {
+ cuda_set_device(nets[i].gpu_index);
+ layer l = nets[i].layers[j];
+ distribute_weights(l, base);
+ }
+ }
+ */
+
+void sync_layer(network **nets, int n, int j)
+{
+ int i;
+ network *net = nets[0];
+ layer base = net->layers[j];
+ scale_weights(base, 0);
+ for (i = 0; i < n; ++i) {
+ cuda_set_device(nets[i]->gpu_index);
+ layer l = nets[i]->layers[j];
+ pull_weights(l);
+ merge_weights(l, base);
+ }
+ scale_weights(base, 1./n);
+ for (i = 0; i < n; ++i) {
+ cuda_set_device(nets[i]->gpu_index);
+ layer l = nets[i]->layers[j];
+ distribute_weights(l, base);
+ }
+}
+
+typedef struct{
+ network **nets;
+ int n;
+ int j;
+} sync_args;
+
+void *sync_layer_thread(void *ptr)
+{
+ sync_args args = *(sync_args*)ptr;
+ sync_layer(args.nets, args.n, args.j);
+ free(ptr);
+ return 0;
+}
+
+pthread_t sync_layer_in_thread(network **nets, int n, int j)
+{
+ pthread_t thread;
+ sync_args *ptr = (sync_args *)calloc(1, sizeof(sync_args));
+ ptr->nets = nets;
+ ptr->n = n;
+ ptr->j = j;
+ if(pthread_create(&thread, 0, sync_layer_thread, ptr)) error("Thread creation failed");
+ return thread;
+}
+
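+/*
+ * Multi-GPU synchronization: for every layer the replica weights are pulled,
+ * averaged into nets[0] and redistributed, one worker thread per layer.
+ */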
+void sync_nets(network **nets, int n, int interval)
+{
+ int j;
+ int layers = nets[0]->n;
+ pthread_t *threads = (pthread_t *) calloc(layers, sizeof(pthread_t));
+
+ *(nets[0]->seen) += interval * (n-1) * nets[0]->batch * nets[0]->subdivisions;
+ for (j = 0; j < n; ++j){
+ *(nets[j]->seen) = *(nets[0]->seen);
+ }
+ for (j = 0; j < layers; ++j) {
+ threads[j] = sync_layer_in_thread(nets, n, j);
+ }
+ for (j = 0; j < layers; ++j) {
+ pthread_join(threads[j], 0);
+ }
+ free(threads);
+}
+
+float train_networks(network **nets, int n, data d, int interval)
+{
+ int i;
+ int batch = nets[0]->batch;
+ int subdivisions = nets[0]->subdivisions;
+ assert(batch * subdivisions * n == d.X.rows);
+ pthread_t *threads = (pthread_t *) calloc(n, sizeof(pthread_t));
+ float *errors = (float *) calloc(n, sizeof(float));
+
+ float sum = 0;
+ for(i = 0; i < n; ++i){
+ data p = get_data_part(d, i, n);
+ threads[i] = train_network_in_thread(nets[i], p, errors + i);
+ }
+ for(i = 0; i < n; ++i){
+ pthread_join(threads[i], 0);
+ //printf("%f\n", errors[i]);
+ sum += errors[i];
+ }
+ //cudaDeviceSynchronize();
+ if (get_current_batch(nets[0]) % interval == 0) {
+ printf("Syncing... ");
+ fflush(stdout);
+ sync_nets(nets, n, interval);
+ printf("Done!\n");
+ }
+ //cudaDeviceSynchronize();
+ free(threads);
+ free(errors);
+ return (float)sum/(n);
+}
+
+void pull_network_output(network *net)
+{
+ layer l = get_network_output_layer(net);
+ cuda_pull_array(l.output_gpu, l.output, l.outputs*l.batch);
+}
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.h
new file mode 100644
index 00000000000..1b0dfd1aaa3
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/network.h
@@ -0,0 +1,29 @@
+// Oh boy, why am I about to do this....
+#ifndef NETWORK_H
+#define NETWORK_H
+#include "darknet.h"
+
+#include "image.h"
+#include "layer.h"
+#include "data.h"
+#include "tree.h"
+
+
+#ifdef GPU
+void pull_network_output(network *net);
+#endif
+
+void compare_networks(network *n1, network *n2, data d);
+char *get_layer_string(LAYER_TYPE a);
+
+network *make_network(int n);
+
+
+float network_accuracy_multi(network *net, data d, int n);
+int get_predicted_class_network(network *net);
+void print_network(network *net);
+int resize_network(network *net, int w, int h);
+void calc_network_cost(network *net);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.c
new file mode 100644
index 00000000000..424714fe865
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.c
@@ -0,0 +1,151 @@
+#include "normalization_layer.h"
+#include "blas.h"
+
+#include <stdio.h>
+
+layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa)
+{
+ fprintf(stderr, "Local Response Normalization Layer: %d x %d x %d image, %d size\n", w,h,c,size);
+ layer layer = {0};
+ layer.type = NORMALIZATION;
+ layer.batch = batch;
+ layer.h = layer.out_h = h;
+ layer.w = layer.out_w = w;
+ layer.c = layer.out_c = c;
+ layer.kappa = kappa;
+ layer.size = size;
+ layer.alpha = alpha;
+ layer.beta = beta;
+ layer.output = calloc(h * w * c * batch, sizeof(float));
+ layer.delta = calloc(h * w * c * batch, sizeof(float));
+ layer.squared = calloc(h * w * c * batch, sizeof(float));
+ layer.norms = calloc(h * w * c * batch, sizeof(float));
+ layer.inputs = w*h*c;
+ layer.outputs = layer.inputs;
+
+ layer.forward = forward_normalization_layer;
+ layer.backward = backward_normalization_layer;
+ #ifdef GPU
+ layer.forward_gpu = forward_normalization_layer_gpu;
+ layer.backward_gpu = backward_normalization_layer_gpu;
+
+ layer.output_gpu = cuda_make_array(layer.output, h * w * c * batch);
+ layer.delta_gpu = cuda_make_array(layer.delta, h * w * c * batch);
+ layer.squared_gpu = cuda_make_array(layer.squared, h * w * c * batch);
+ layer.norms_gpu = cuda_make_array(layer.norms, h * w * c * batch);
+ #endif
+ return layer;
+}
+
+void resize_normalization_layer(layer *layer, int w, int h)
+{
+ int c = layer->c;
+ int batch = layer->batch;
+ layer->h = h;
+ layer->w = w;
+ layer->out_h = h;
+ layer->out_w = w;
+ layer->inputs = w*h*c;
+ layer->outputs = layer->inputs;
+ layer->output = realloc(layer->output, h * w * c * batch * sizeof(float));
+ layer->delta = realloc(layer->delta, h * w * c * batch * sizeof(float));
+ layer->squared = realloc(layer->squared, h * w * c * batch * sizeof(float));
+ layer->norms = realloc(layer->norms, h * w * c * batch * sizeof(float));
+#ifdef GPU
+ cuda_free(layer->output_gpu);
+ cuda_free(layer->delta_gpu);
+ cuda_free(layer->squared_gpu);
+ cuda_free(layer->norms_gpu);
+ layer->output_gpu = cuda_make_array(layer->output, h * w * c * batch);
+ layer->delta_gpu = cuda_make_array(layer->delta, h * w * c * batch);
+ layer->squared_gpu = cuda_make_array(layer->squared, h * w * c * batch);
+ layer->norms_gpu = cuda_make_array(layer->norms, h * w * c * batch);
+#endif
+}
+
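+/*
+ * Local Response Normalization: a sliding window of `size` channels sums the
+ * squared activations into l.norms (scaled by alpha, offset by kappa) and the
+ * output is input * norms^(-beta).
+ */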
+void forward_normalization_layer(const layer layer, network net)
+{
+ int k,b;
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ scal_cpu(w*h*c*layer.batch, 0, layer.squared, 1);
+
+ for(b = 0; b < layer.batch; ++b){
+ float *squared = layer.squared + w*h*c*b;
+ float *norms = layer.norms + w*h*c*b;
+ float *input = net.input + w*h*c*b;
+ pow_cpu(w*h*c, 2, input, 1, squared, 1);
+
+ const_cpu(w*h, layer.kappa, norms, 1);
+ for(k = 0; k < layer.size/2; ++k){
+ axpy_cpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
+ }
+
+ for(k = 1; k < layer.c; ++k){
+ copy_cpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
+ int prev = k - ((layer.size-1)/2) - 1;
+ int next = k + (layer.size/2);
+ if(prev >= 0) axpy_cpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
+ if(next < layer.c) axpy_cpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
+ }
+ }
+ pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, layer.output, 1);
+ mul_cpu(w*h*c*layer.batch, net.input, 1, layer.output, 1);
+}
+
+void backward_normalization_layer(const layer layer, network net)
+{
+ // TODO This is approximate ;-)
+ // Also this should add into delta instead of overwriting it.
+
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ pow_cpu(w*h*c*layer.batch, -layer.beta, layer.norms, 1, net.delta, 1);
+ mul_cpu(w*h*c*layer.batch, layer.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+void forward_normalization_layer_gpu(const layer layer, network net)
+{
+ int k,b;
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ scal_gpu(w*h*c*layer.batch, 0, layer.squared_gpu, 1);
+
+ for(b = 0; b < layer.batch; ++b){
+ float *squared = layer.squared_gpu + w*h*c*b;
+ float *norms = layer.norms_gpu + w*h*c*b;
+ float *input = net.input_gpu + w*h*c*b;
+ pow_gpu(w*h*c, 2, input, 1, squared, 1);
+
+ const_gpu(w*h, layer.kappa, norms, 1);
+ for(k = 0; k < layer.size/2; ++k){
+ axpy_gpu(w*h, layer.alpha, squared + w*h*k, 1, norms, 1);
+ }
+
+ for(k = 1; k < layer.c; ++k){
+ copy_gpu(w*h, norms + w*h*(k-1), 1, norms + w*h*k, 1);
+ int prev = k - ((layer.size-1)/2) - 1;
+ int next = k + (layer.size/2);
+ if(prev >= 0) axpy_gpu(w*h, -layer.alpha, squared + w*h*prev, 1, norms + w*h*k, 1);
+ if(next < layer.c) axpy_gpu(w*h, layer.alpha, squared + w*h*next, 1, norms + w*h*k, 1);
+ }
+ }
+ pow_gpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, layer.output_gpu, 1);
+ mul_gpu(w*h*c*layer.batch, net.input_gpu, 1, layer.output_gpu, 1);
+}
+
+void backward_normalization_layer_gpu(const layer layer, network net)
+{
+ // TODO This is approximate ;-)
+
+ int w = layer.w;
+ int h = layer.h;
+ int c = layer.c;
+ pow_gpu(w*h*c*layer.batch, -layer.beta, layer.norms_gpu, 1, net.delta_gpu, 1);
+ mul_gpu(w*h*c*layer.batch, layer.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.h
new file mode 100644
index 00000000000..665baa50662
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/normalization_layer.h
@@ -0,0 +1,19 @@
+#ifndef NORMALIZATION_LAYER_H
+#define NORMALIZATION_LAYER_H
+
+#include "image.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_normalization_layer(int batch, int w, int h, int c, int size, float alpha, float beta, float kappa);
+void resize_normalization_layer(layer *layer, int h, int w);
+void forward_normalization_layer(const layer layer, network net);
+void backward_normalization_layer(const layer layer, network net);
+void visualize_normalization_layer(layer layer, char *window);
+
+#ifdef GPU
+void forward_normalization_layer_gpu(const layer layer, network net);
+void backward_normalization_layer_gpu(const layer layer, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.c
new file mode 100644
index 00000000000..2f52781f809
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.c
@@ -0,0 +1,140 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "option_list.h"
+#include "utils.h"
+
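+/*
+ * Reads a config file of key=value lines into a kvp list, skipping blank
+ * lines and lines starting with '#' or ';'.
+ */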
+list *read_data_cfg(char *filename)
+{
+ FILE *file = fopen(filename, "r");
+ if(file == 0) file_error(filename);
+ char *line;
+ int nu = 0;
+ list *options = make_list();
+ while((line=fgetl(file)) != 0){
+ ++ nu;
+ strip(line);
+ switch(line[0]){
+ case '\0':
+ case '#':
+ case ';':
+ free(line);
+ break;
+ default:
+ if(!read_option(line, options)){
+ fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
+ free(line);
+ }
+ break;
+ }
+ }
+ fclose(file);
+ return options;
+}
+
+metadata get_metadata(char *file)
+{
+ metadata m = {0};
+ list *options = read_data_cfg(file);
+
+ char *name_list = option_find_str(options, "names", 0);
+ if(!name_list) name_list = option_find_str(options, "labels", 0);
+ if(!name_list) {
+ fprintf(stderr, "No names or labels found\n");
+ } else {
+ m.names = get_labels(name_list);
+ }
+ m.classes = option_find_int(options, "classes", 2);
+ free_list(options);
+ return m;
+}
+
+int read_option(char *s, list *options)
+{
+ size_t i;
+ size_t len = strlen(s);
+ char *val = 0;
+ for(i = 0; i < len; ++i){
+ if(s[i] == '='){
+ s[i] = '\0';
+ val = s+i+1;
+ break;
+ }
+ }
+ if(i == len-1) return 0;
+ char *key = s;
+ option_insert(options, key, val);
+ return 1;
+}
+
+void option_insert(list *l, char *key, char *val)
+{
+ kvp *p = malloc(sizeof(kvp));
+ p->key = key;
+ p->val = val;
+ p->used = 0;
+ list_insert(l, p);
+}
+
+void option_unused(list *l)
+{
+ node *n = l->front;
+ while(n){
+ kvp *p = (kvp *)n->val;
+ if(!p->used){
+ fprintf(stderr, "Unused field: '%s = %s'\n", p->key, p->val);
+ }
+ n = n->next;
+ }
+}
+
+char *option_find(list *l, char *key)
+{
+ node *n = l->front;
+ while(n){
+ kvp *p = (kvp *)n->val;
+ if(strcmp(p->key, key) == 0){
+ p->used = 1;
+ return p->val;
+ }
+ n = n->next;
+ }
+ return 0;
+}
+char *option_find_str(list *l, char *key, char *def)
+{
+ char *v = option_find(l, key);
+ if(v) return v;
+ if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
+ return def;
+}
+
+int option_find_int(list *l, char *key, int def)
+{
+ char *v = option_find(l, key);
+ if(v) return atoi(v);
+ fprintf(stderr, "%s: Using default '%d'\n", key, def);
+ return def;
+}
+
+int option_find_int_quiet(list *l, char *key, int def)
+{
+ char *v = option_find(l, key);
+ if(v) return atoi(v);
+ return def;
+}
+
+float option_find_float_quiet(list *l, char *key, float def)
+{
+ char *v = option_find(l, key);
+ if(v) return atof(v);
+ return def;
+}
+
+float option_find_float(list *l, char *key, float def)
+{
+ char *v = option_find(l, key);
+ if(v) return atof(v);
+ fprintf(stderr, "%s: Using default '%lf'\n", key, def);
+ return def;
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.h
new file mode 100644
index 00000000000..844bd8724b7
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/option_list.h
@@ -0,0 +1,19 @@
+#ifndef OPTION_LIST_H
+#define OPTION_LIST_H
+#include "list.h"
+
+typedef struct{
+ char *key;
+ char *val;
+ int used;
+} kvp;
+
+
+int read_option(char *s, list *options);
+void option_insert(list *l, char *key, char *val);
+char *option_find(list *l, char *key);
+float option_find_float(list *l, char *key, float def);
+float option_find_float_quiet(list *l, char *key, float def);
+void option_unused(list *l);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.c
new file mode 100644
index 00000000000..8b43f3c540d
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.c
@@ -0,0 +1,1296 @@
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#include "activation_layer.h"
+#include "logistic_layer.h"
+#include "l2norm_layer.h"
+#include "activations.h"
+#include "avgpool_layer.h"
+#include "batchnorm_layer.h"
+#include "blas.h"
+#include "connected_layer.h"
+#include "deconvolutional_layer.h"
+#include "convolutional_layer.h"
+#include "cost_layer.h"
+#include "crnn_layer.h"
+#include "crop_layer.h"
+#include "detection_layer.h"
+#include "dropout_layer.h"
+#include "gru_layer.h"
+#include "list.h"
+#include "local_layer.h"
+#include "maxpool_layer.h"
+#include "normalization_layer.h"
+#include "option_list.h"
+#include "parser.h"
+#include "region_layer.h"
+#include "yolo_layer.h"
+#include "reorg_layer.h"
+#include "rnn_layer.h"
+#include "route_layer.h"
+#include "upsample_layer.h"
+#include "shortcut_layer.h"
+#include "softmax_layer.h"
+#include "lstm_layer.h"
+#include "utils.h"
+
+typedef struct{
+ char *type;
+ list *options;
+}section;
+
+list *read_cfg(char *filename);
+
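+/*
+ * Maps a section header from the network .cfg (e.g. "[convolutional]",
+ * "[yolo]", "[maxpool]") to its LAYER_TYPE; unknown headers return BLANK.
+ */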
+LAYER_TYPE string_to_layer_type(char * type)
+{
+
+ if (strcmp(type, "[shortcut]")==0) return SHORTCUT;
+ if (strcmp(type, "[crop]")==0) return CROP;
+ if (strcmp(type, "[cost]")==0) return COST;
+ if (strcmp(type, "[detection]")==0) return DETECTION;
+ if (strcmp(type, "[region]")==0) return REGION;
+ if (strcmp(type, "[yolo]")==0) return YOLO;
+ if (strcmp(type, "[local]")==0) return LOCAL;
+ if (strcmp(type, "[conv]")==0
+ || strcmp(type, "[convolutional]")==0) return CONVOLUTIONAL;
+ if (strcmp(type, "[deconv]")==0
+ || strcmp(type, "[deconvolutional]")==0) return DECONVOLUTIONAL;
+ if (strcmp(type, "[activation]")==0) return ACTIVE;
+ if (strcmp(type, "[logistic]")==0) return LOGXENT;
+ if (strcmp(type, "[l2norm]")==0) return L2NORM;
+ if (strcmp(type, "[net]")==0
+ || strcmp(type, "[network]")==0) return NETWORK;
+ if (strcmp(type, "[crnn]")==0) return CRNN;
+ if (strcmp(type, "[gru]")==0) return GRU;
+ if (strcmp(type, "[lstm]") == 0) return LSTM;
+ if (strcmp(type, "[rnn]")==0) return RNN;
+ if (strcmp(type, "[conn]")==0
+ || strcmp(type, "[connected]")==0) return CONNECTED;
+ if (strcmp(type, "[max]")==0
+ || strcmp(type, "[maxpool]")==0) return MAXPOOL;
+ if (strcmp(type, "[reorg]")==0) return REORG;
+ if (strcmp(type, "[avg]")==0
+ || strcmp(type, "[avgpool]")==0) return AVGPOOL;
+ if (strcmp(type, "[dropout]")==0) return DROPOUT;
+ if (strcmp(type, "[lrn]")==0
+ || strcmp(type, "[normalization]")==0) return NORMALIZATION;
+ if (strcmp(type, "[batchnorm]")==0) return BATCHNORM;
+ if (strcmp(type, "[soft]")==0
+ || strcmp(type, "[softmax]")==0) return SOFTMAX;
+ if (strcmp(type, "[route]")==0) return ROUTE;
+ if (strcmp(type, "[upsample]")==0) return UPSAMPLE;
+ return BLANK;
+}
+
+void free_section(section *s)
+{
+ free(s->type);
+ node *n = s->options->front;
+ while(n){
+ kvp *pair = (kvp *)n->val;
+ free(pair->key);
+ free(pair);
+ node *next = n->next;
+ free(n);
+ n = next;
+ }
+ free(s->options);
+ free(s);
+}
+
+void parse_data(char *data, float *a, int n)
+{
+ int i;
+ if(!data) return;
+ char *curr = data;
+ char *next = data;
+ int done = 0;
+ for(i = 0; i < n && !done; ++i){
+ while(*++next !='\0' && *next != ',');
+ if(*next == '\0') done = 1;
+ *next = '\0';
+ sscanf(curr, "%g", &a[i]);
+ curr = next+1;
+ }
+}
+
+typedef struct size_params{
+ int batch;
+ int inputs;
+ int h;
+ int w;
+ int c;
+ int index;
+ int time_steps;
+ network *net;
+} size_params;
+
+local_layer parse_local(list *options, size_params params)
+{
+ int n = option_find_int(options, "filters",1);
+ int size = option_find_int(options, "size",1);
+ int stride = option_find_int(options, "stride",1);
+ int pad = option_find_int(options, "pad",0);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before local layer must output image.");
+
+ local_layer layer = make_local_layer(batch,h,w,c,n,size,stride,pad,activation);
+
+ return layer;
+}
+
+layer parse_deconvolutional(list *options, size_params params)
+{
+ int n = option_find_int(options, "filters",1);
+ int size = option_find_int(options, "size",1);
+ int stride = option_find_int(options, "stride",1);
+
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before deconvolutional layer must output image.");
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+ int pad = option_find_int_quiet(options, "pad",0);
+ int padding = option_find_int_quiet(options, "padding",0);
+ if(pad) padding = size/2;
+
+ layer l = make_deconvolutional_layer(batch,h,w,c,n,size,stride,padding, activation, batch_normalize, params.net->adam);
+
+ return l;
+}
+
+
+convolutional_layer parse_convolutional(list *options, size_params params)
+{
+ int n = option_find_int(options, "filters",1);
+ int size = option_find_int(options, "size",1);
+ int stride = option_find_int(options, "stride",1);
+ int pad = option_find_int_quiet(options, "pad",0);
+ int padding = option_find_int_quiet(options, "padding",0);
+ int groups = option_find_int_quiet(options, "groups", 1);
+ if(pad) padding = size/2;
+
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before convolutional layer must output image.");
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+ int binary = option_find_int_quiet(options, "binary", 0);
+ int xnor = option_find_int_quiet(options, "xnor", 0);
+
+ convolutional_layer layer = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
+ layer.flipped = option_find_int_quiet(options, "flipped", 0);
+ layer.dot = option_find_float_quiet(options, "dot", 0);
+
+ return layer;
+}
+
+layer parse_crnn(list *options, size_params params)
+{
+ int output_filters = option_find_int(options, "output_filters",1);
+ int hidden_filters = option_find_int(options, "hidden_filters",1);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_crnn_layer(params.batch, params.w, params.h, params.c, hidden_filters, output_filters, params.time_steps, activation, batch_normalize);
+
+ l.shortcut = option_find_int_quiet(options, "shortcut", 0);
+
+ return l;
+}
+
+layer parse_rnn(list *options, size_params params)
+{
+ int output = option_find_int(options, "output",1);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_rnn_layer(params.batch, params.inputs, output, params.time_steps, activation, batch_normalize, params.net->adam);
+
+ l.shortcut = option_find_int_quiet(options, "shortcut", 0);
+
+ return l;
+}
+
+layer parse_gru(list *options, size_params params)
+{
+ int output = option_find_int(options, "output",1);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_gru_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
+ l.tanh = option_find_int_quiet(options, "tanh", 0);
+
+ return l;
+}
+
+layer parse_lstm(list *options, size_params params)
+{
+ int output = option_find_int(options, "output", 1);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_lstm_layer(params.batch, params.inputs, output, params.time_steps, batch_normalize, params.net->adam);
+
+ return l;
+}
+
+layer parse_connected(list *options, size_params params)
+{
+ int output = option_find_int(options, "output",1);
+ char *activation_s = option_find_str(options, "activation", "logistic");
+ ACTIVATION activation = get_activation(activation_s);
+ int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
+
+ layer l = make_connected_layer(params.batch, params.inputs, output, activation, batch_normalize, params.net->adam);
+ return l;
+}
+
+softmax_layer parse_softmax(list *options, size_params params)
+{
+ int groups = option_find_int_quiet(options, "groups",1);
+ softmax_layer layer = make_softmax_layer(params.batch, params.inputs, groups);
+ layer.temperature = option_find_float_quiet(options, "temperature", 1);
+ char *tree_file = option_find_str(options, "tree", 0);
+ if (tree_file) layer.softmax_tree = read_tree(tree_file);
+ layer.w = params.w;
+ layer.h = params.h;
+ layer.c = params.c;
+ layer.spatial = option_find_float_quiet(options, "spatial", 0);
+ return layer;
+}
+
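+/* Parse the comma-separated "mask" option of a [yolo] section into an int array;
+ * the element count (number of commas + 1) is returned through *num. */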
+int *parse_yolo_mask(char *a, int *num)
+{
+ int *mask = 0;
+ if(a){
+ int len = strlen(a);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (a[i] == ',') ++n;
+ }
+ mask = calloc(n, sizeof(int));
+ for(i = 0; i < n; ++i){
+ int val = atoi(a);
+ mask[i] = val;
+ a = strchr(a, ',')+1;
+ }
+ *num = n;
+ }
+ return mask;
+}
+
+layer parse_yolo(list *options, size_params params)
+{
+ int classes = option_find_int(options, "classes", 20);
+ int total = option_find_int(options, "num", 1);
+ int num = total;
+
+ char *a = option_find_str(options, "mask", 0);
+ int *mask = parse_yolo_mask(a, &num);
+ layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
+ assert(l.outputs == params.inputs);
+
+ l.max_boxes = option_find_int_quiet(options, "max",90);
+ l.jitter = option_find_float(options, "jitter", .2);
+
+ l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
+ l.truth_thresh = option_find_float(options, "truth_thresh", 1);
+ l.random = option_find_int_quiet(options, "random", 0);
+
+ char *map_file = option_find_str(options, "map", 0);
+ if (map_file) l.map = read_map(map_file);
+
+ a = option_find_str(options, "anchors", 0);
+ if(a){
+ int len = strlen(a);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (a[i] == ',') ++n;
+ }
+ for(i = 0; i < n; ++i){
+ float bias = atof(a);
+ l.biases[i] = bias;
+ a = strchr(a, ',')+1;
+ }
+ }
+ return l;
+}
+
+layer parse_region(list *options, size_params params)
+{
+ int coords = option_find_int(options, "coords", 4);
+ int classes = option_find_int(options, "classes", 20);
+ int num = option_find_int(options, "num", 1);
+
+ layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
+ assert(l.outputs == params.inputs);
+
+ l.log = option_find_int_quiet(options, "log", 0);
+ l.sqrt = option_find_int_quiet(options, "sqrt", 0);
+
+ l.softmax = option_find_int(options, "softmax", 0);
+ l.background = option_find_int_quiet(options, "background", 0);
+ l.max_boxes = option_find_int_quiet(options, "max",30);
+ l.jitter = option_find_float(options, "jitter", .2);
+ l.rescore = option_find_int_quiet(options, "rescore",0);
+
+ l.thresh = option_find_float(options, "thresh", .5);
+ l.classfix = option_find_int_quiet(options, "classfix", 0);
+ l.absolute = option_find_int_quiet(options, "absolute", 0);
+ l.random = option_find_int_quiet(options, "random", 0);
+
+ l.coord_scale = option_find_float(options, "coord_scale", 1);
+ l.object_scale = option_find_float(options, "object_scale", 1);
+ l.noobject_scale = option_find_float(options, "noobject_scale", 1);
+ l.mask_scale = option_find_float(options, "mask_scale", 1);
+ l.class_scale = option_find_float(options, "class_scale", 1);
+ l.bias_match = option_find_int_quiet(options, "bias_match",0);
+
+ char *tree_file = option_find_str(options, "tree", 0);
+ if (tree_file) l.softmax_tree = read_tree(tree_file);
+ char *map_file = option_find_str(options, "map", 0);
+ if (map_file) l.map = read_map(map_file);
+
+ char *a = option_find_str(options, "anchors", 0);
+ if(a){
+ int len = strlen(a);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (a[i] == ',') ++n;
+ }
+ for(i = 0; i < n; ++i){
+ float bias = atof(a);
+ l.biases[i] = bias;
+ a = strchr(a, ',')+1;
+ }
+ }
+ return l;
+}
+
+detection_layer parse_detection(list *options, size_params params)
+{
+ int coords = option_find_int(options, "coords", 1);
+ int classes = option_find_int(options, "classes", 1);
+ int rescore = option_find_int(options, "rescore", 0);
+ int num = option_find_int(options, "num", 1);
+ int side = option_find_int(options, "side", 7);
+ detection_layer layer = make_detection_layer(params.batch, params.inputs, num, side, classes, coords, rescore);
+
+ layer.softmax = option_find_int(options, "softmax", 0);
+ layer.sqrt = option_find_int(options, "sqrt", 0);
+
+ layer.max_boxes = option_find_int_quiet(options, "max",90);
+ layer.coord_scale = option_find_float(options, "coord_scale", 1);
+ layer.forced = option_find_int(options, "forced", 0);
+ layer.object_scale = option_find_float(options, "object_scale", 1);
+ layer.noobject_scale = option_find_float(options, "noobject_scale", 1);
+ layer.class_scale = option_find_float(options, "class_scale", 1);
+ layer.jitter = option_find_float(options, "jitter", .2);
+ layer.random = option_find_int_quiet(options, "random", 0);
+ layer.reorg = option_find_int_quiet(options, "reorg", 0);
+ return layer;
+}
+
+cost_layer parse_cost(list *options, size_params params)
+{
+ char *type_s = option_find_str(options, "type", "sse");
+ COST_TYPE type = get_cost_type(type_s);
+ float scale = option_find_float_quiet(options, "scale",1);
+ cost_layer layer = make_cost_layer(params.batch, params.inputs, type, scale);
+ layer.ratio = option_find_float_quiet(options, "ratio",0);
+ layer.noobject_scale = option_find_float_quiet(options, "noobj", 1);
+ layer.thresh = option_find_float_quiet(options, "thresh",0);
+ return layer;
+}
+
+crop_layer parse_crop(list *options, size_params params)
+{
+ int crop_height = option_find_int(options, "crop_height",1);
+ int crop_width = option_find_int(options, "crop_width",1);
+ int flip = option_find_int(options, "flip",0);
+ float angle = option_find_float(options, "angle",0);
+ float saturation = option_find_float(options, "saturation",1);
+ float exposure = option_find_float(options, "exposure",1);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before crop layer must output image.");
+
+ int noadjust = option_find_int_quiet(options, "noadjust",0);
+
+ crop_layer l = make_crop_layer(batch,h,w,c,crop_height,crop_width,flip, angle, saturation, exposure);
+ l.shift = option_find_float(options, "shift", 0);
+ l.noadjust = noadjust;
+ return l;
+}
+
+layer parse_reorg(list *options, size_params params)
+{
+ int stride = option_find_int(options, "stride",1);
+ int reverse = option_find_int_quiet(options, "reverse",0);
+ int flatten = option_find_int_quiet(options, "flatten",0);
+ int extra = option_find_int_quiet(options, "extra",0);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before reorg layer must output image.");
+
+ layer layer = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
+ return layer;
+}
+
+maxpool_layer parse_maxpool(list *options, size_params params)
+{
+ int stride = option_find_int(options, "stride",1);
+ int size = option_find_int(options, "size",stride);
+ int padding = option_find_int_quiet(options, "padding", (size-1)/2);
+
+ int batch,h,w,c;
+ h = params.h;
+ w = params.w;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before maxpool layer must output image.");
+
+ maxpool_layer layer = make_maxpool_layer(batch,h,w,c,size,stride,padding);
+ return layer;
+}
+
+avgpool_layer parse_avgpool(list *options, size_params params)
+{
+ int batch,w,h,c;
+ w = params.w;
+ h = params.h;
+ c = params.c;
+ batch=params.batch;
+ if(!(h && w && c)) error("Layer before avgpool layer must output image.");
+
+ avgpool_layer layer = make_avgpool_layer(batch,w,h,c);
+ return layer;
+}
+
+dropout_layer parse_dropout(list *options, size_params params)
+{
+ float probability = option_find_float(options, "probability", .5);
+ dropout_layer layer = make_dropout_layer(params.batch, params.inputs, probability);
+ layer.out_w = params.w;
+ layer.out_h = params.h;
+ layer.out_c = params.c;
+ return layer;
+}
+
+layer parse_normalization(list *options, size_params params)
+{
+ float alpha = option_find_float(options, "alpha", .0001);
+ float beta = option_find_float(options, "beta" , .75);
+ float kappa = option_find_float(options, "kappa", 1);
+ int size = option_find_int(options, "size", 5);
+ layer l = make_normalization_layer(params.batch, params.w, params.h, params.c, size, alpha, beta, kappa);
+ return l;
+}
+
+layer parse_batchnorm(list *options, size_params params)
+{
+ layer l = make_batchnorm_layer(params.batch, params.w, params.h, params.c);
+ return l;
+}
+
+layer parse_shortcut(list *options, size_params params, network *net)
+{
+ char *l = option_find(options, "from");
+ int index = atoi(l);
+ if(index < 0) index = params.index + index;
+
+ int batch = params.batch;
+ layer from = net->layers[index];
+
+ layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
+
+ char *activation_s = option_find_str(options, "activation", "linear");
+ ACTIVATION activation = get_activation(activation_s);
+ s.activation = activation;
+ s.alpha = option_find_float_quiet(options, "alpha", 1);
+ s.beta = option_find_float_quiet(options, "beta", 1);
+ return s;
+}
+
+
+layer parse_l2norm(list *options, size_params params)
+{
+ layer l = make_l2norm_layer(params.batch, params.inputs);
+ l.h = l.out_h = params.h;
+ l.w = l.out_w = params.w;
+ l.c = l.out_c = params.c;
+ return l;
+}
+
+
+layer parse_logistic(list *options, size_params params)
+{
+ layer l = make_logistic_layer(params.batch, params.inputs);
+ l.h = l.out_h = params.h;
+ l.w = l.out_w = params.w;
+ l.c = l.out_c = params.c;
+ return l;
+}
+
+layer parse_activation(list *options, size_params params)
+{
+ char *activation_s = option_find_str(options, "activation", "linear");
+ ACTIVATION activation = get_activation(activation_s);
+
+ layer l = make_activation_layer(params.batch, params.inputs, activation);
+
+ l.h = l.out_h = params.h;
+ l.w = l.out_w = params.w;
+ l.c = l.out_c = params.c;
+
+ return l;
+}
+
+layer parse_upsample(list *options, size_params params, network *net)
+{
+
+ int stride = option_find_int(options, "stride",2);
+ layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
+ l.scale = option_find_float_quiet(options, "scale", 1);
+ return l;
+}
+
+route_layer parse_route(list *options, size_params params, network *net)
+{
+ char *l = option_find(options, "layers");
+ if(!l) error("Route Layer must specify input layers");
+ int len = strlen(l);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (l[i] == ',') ++n;
+ }
+
+ int *layers = calloc(n, sizeof(int));
+ int *sizes = calloc(n, sizeof(int));
+ for(i = 0; i < n; ++i){
+ int index = atoi(l);
+ l = strchr(l, ',')+1;
+ if(index < 0) index = params.index + index;
+ layers[i] = index;
+ sizes[i] = net->layers[index].outputs;
+ }
+ int batch = params.batch;
+
+ route_layer layer = make_route_layer(batch, n, layers, sizes);
+
+ convolutional_layer first = net->layers[layers[0]];
+ layer.out_w = first.out_w;
+ layer.out_h = first.out_h;
+ layer.out_c = first.out_c;
+ for(i = 1; i < n; ++i){
+ int index = layers[i];
+ convolutional_layer next = net->layers[index];
+ if(next.out_w == first.out_w && next.out_h == first.out_h){
+ layer.out_c += next.out_c;
+ }else{
+ layer.out_h = layer.out_w = layer.out_c = 0;
+ }
+ }
+
+ return layer;
+}
+
+learning_rate_policy get_policy(char *s)
+{
+ if (strcmp(s, "random")==0) return RANDOM;
+ if (strcmp(s, "poly")==0) return POLY;
+ if (strcmp(s, "constant")==0) return CONSTANT;
+ if (strcmp(s, "step")==0) return STEP;
+ if (strcmp(s, "exp")==0) return EXP;
+ if (strcmp(s, "sigmoid")==0) return SIG;
+ if (strcmp(s, "steps")==0) return STEPS;
+ fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
+ return CONSTANT;
+}
+
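+/* Read the [net] section: solver hyperparameters, input geometry, augmentation and
+ * learning-rate policy. Note the effective batch is batch/subdivisions, then scaled
+ * by time_steps for recurrent networks. */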
+void parse_net_options(list *options, network *net)
+{
+ net->batch = option_find_int(options, "batch",1);
+ net->learning_rate = option_find_float(options, "learning_rate", .001);
+ net->momentum = option_find_float(options, "momentum", .9);
+ net->decay = option_find_float(options, "decay", .0001);
+ int subdivs = option_find_int(options, "subdivisions",1);
+ net->time_steps = option_find_int_quiet(options, "time_steps",1);
+ net->notruth = option_find_int_quiet(options, "notruth",0);
+ net->batch /= subdivs;
+ net->batch *= net->time_steps;
+ net->subdivisions = subdivs;
+ net->random = option_find_int_quiet(options, "random", 0);
+
+ net->adam = option_find_int_quiet(options, "adam", 0);
+ if(net->adam){
+ net->B1 = option_find_float(options, "B1", .9);
+ net->B2 = option_find_float(options, "B2", .999);
+ net->eps = option_find_float(options, "eps", .0000001);
+ }
+
+ net->h = option_find_int_quiet(options, "height",0);
+ net->w = option_find_int_quiet(options, "width",0);
+ net->c = option_find_int_quiet(options, "channels",0);
+ net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
+ net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
+ net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
+ net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
+ net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
+ net->center = option_find_int_quiet(options, "center",0);
+ net->clip = option_find_float_quiet(options, "clip", 0);
+
+ net->angle = option_find_float_quiet(options, "angle", 0);
+ net->aspect = option_find_float_quiet(options, "aspect", 1);
+ net->saturation = option_find_float_quiet(options, "saturation", 1);
+ net->exposure = option_find_float_quiet(options, "exposure", 1);
+ net->hue = option_find_float_quiet(options, "hue", 0);
+
+ if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
+
+ char *policy_s = option_find_str(options, "policy", "constant");
+ net->policy = get_policy(policy_s);
+ net->burn_in = option_find_int_quiet(options, "burn_in", 0);
+ net->power = option_find_float_quiet(options, "power", 4);
+ if(net->policy == STEP){
+ net->step = option_find_int(options, "step", 1);
+ net->scale = option_find_float(options, "scale", 1);
+ } else if (net->policy == STEPS){
+ char *l = option_find(options, "steps");
+ char *p = option_find(options, "scales");
+ if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
+
+ int len = strlen(l);
+ int n = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (l[i] == ',') ++n;
+ }
+ int *steps = calloc(n, sizeof(int));
+ float *scales = calloc(n, sizeof(float));
+ for(i = 0; i < n; ++i){
+ int step = atoi(l);
+ float scale = atof(p);
+ l = strchr(l, ',')+1;
+ p = strchr(p, ',')+1;
+ steps[i] = step;
+ scales[i] = scale;
+ }
+ net->scales = scales;
+ net->steps = steps;
+ net->num_steps = n;
+ } else if (net->policy == EXP){
+ net->gamma = option_find_float(options, "gamma", 1);
+ } else if (net->policy == SIG){
+ net->gamma = option_find_float(options, "gamma", 1);
+ net->step = option_find_int(options, "step", 1);
+ } else if (net->policy == POLY || net->policy == RANDOM){
+ }
+ net->max_batches = option_find_int(options, "max_batches", 0);
+}
+
+int is_network(section *s)
+{
+ return (strcmp(s->type, "[net]")==0
+ || strcmp(s->type, "[network]")==0);
+}
+
+network *parse_network_cfg(char *filename)
+{
+ list *sections = read_cfg(filename);
+ node *n = sections->front;
+ if(!n) error("Config file has no sections");
+ network *net = make_network(sections->size - 1);
+ net->gpu_index = gpu_index;
+ size_params params;
+
+ section *s = (section *)n->val;
+ list *options = s->options;
+ if(!is_network(s)) error("First section must be [net] or [network]");
+ parse_net_options(options, net);
+
+ params.h = net->h;
+ params.w = net->w;
+ params.c = net->c;
+ params.inputs = net->inputs;
+ params.batch = net->batch;
+ params.time_steps = net->time_steps;
+ params.net = net;
+
+ size_t workspace_size = 0;
+ n = n->next;
+ int count = 0;
+ free_section(s);
+ fprintf(stderr, "layer filters size input output\n");
+ while(n){
+ params.index = count;
+ fprintf(stderr, "%5d ", count);
+ s = (section *)n->val;
+ options = s->options;
+ layer l = {0};
+ LAYER_TYPE lt = string_to_layer_type(s->type);
+ if(lt == CONVOLUTIONAL){
+ l = parse_convolutional(options, params);
+ }else if(lt == DECONVOLUTIONAL){
+ l = parse_deconvolutional(options, params);
+ }else if(lt == LOCAL){
+ l = parse_local(options, params);
+ }else if(lt == ACTIVE){
+ l = parse_activation(options, params);
+ }else if(lt == LOGXENT){
+ l = parse_logistic(options, params);
+ }else if(lt == L2NORM){
+ l = parse_l2norm(options, params);
+ }else if(lt == RNN){
+ l = parse_rnn(options, params);
+ }else if(lt == GRU){
+ l = parse_gru(options, params);
+ }else if (lt == LSTM) {
+ l = parse_lstm(options, params);
+ }else if(lt == CRNN){
+ l = parse_crnn(options, params);
+ }else if(lt == CONNECTED){
+ l = parse_connected(options, params);
+ }else if(lt == CROP){
+ l = parse_crop(options, params);
+ }else if(lt == COST){
+ l = parse_cost(options, params);
+ }else if(lt == REGION){
+ l = parse_region(options, params);
+ }else if(lt == YOLO){
+ l = parse_yolo(options, params);
+ }else if(lt == DETECTION){
+ l = parse_detection(options, params);
+ }else if(lt == SOFTMAX){
+ l = parse_softmax(options, params);
+ net->hierarchy = l.softmax_tree;
+ }else if(lt == NORMALIZATION){
+ l = parse_normalization(options, params);
+ }else if(lt == BATCHNORM){
+ l = parse_batchnorm(options, params);
+ }else if(lt == MAXPOOL){
+ l = parse_maxpool(options, params);
+ }else if(lt == REORG){
+ l = parse_reorg(options, params);
+ }else if(lt == AVGPOOL){
+ l = parse_avgpool(options, params);
+ }else if(lt == ROUTE){
+ l = parse_route(options, params, net);
+ }else if(lt == UPSAMPLE){
+ l = parse_upsample(options, params, net);
+ }else if(lt == SHORTCUT){
+ l = parse_shortcut(options, params, net);
+ }else if(lt == DROPOUT){
+ l = parse_dropout(options, params);
+ l.output = net->layers[count-1].output;
+ l.delta = net->layers[count-1].delta;
+#ifdef GPU
+ l.output_gpu = net->layers[count-1].output_gpu;
+ l.delta_gpu = net->layers[count-1].delta_gpu;
+#endif
+ }else{
+ fprintf(stderr, "Type not recognized: %s\n", s->type);
+ }
+ l.clip = net->clip;
+ l.truth = option_find_int_quiet(options, "truth", 0);
+ l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
+ l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
+ l.dontsave = option_find_int_quiet(options, "dontsave", 0);
+ l.dontload = option_find_int_quiet(options, "dontload", 0);
+ l.dontloadscales = option_find_int_quiet(options, "dontloadscales", 0);
+ l.learning_rate_scale = option_find_float_quiet(options, "learning_rate", 1);
+ l.smooth = option_find_float_quiet(options, "smooth", 0);
+ option_unused(options);
+ net->layers[count] = l;
+ if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
+ free_section(s);
+ n = n->next;
+ ++count;
+ if(n){
+ params.h = l.out_h;
+ params.w = l.out_w;
+ params.c = l.out_c;
+ params.inputs = l.outputs;
+ }
+ }
+ free_list(sections);
+ layer out = get_network_output_layer(net);
+ net->outputs = out.outputs;
+ net->truths = out.outputs;
+ if(net->layers[net->n-1].truths) net->truths = net->layers[net->n-1].truths;
+ net->output = out.output;
+ net->input = calloc(net->inputs*net->batch, sizeof(float));
+ net->truth = calloc(net->truths*net->batch, sizeof(float));
+#ifdef GPU
+ net->output_gpu = out.output_gpu;
+ net->input_gpu = cuda_make_array(net->input, net->inputs*net->batch);
+ net->truth_gpu = cuda_make_array(net->truth, net->truths*net->batch);
+#endif
+ if(workspace_size){
+ //printf("%ld\n", workspace_size);
+#ifdef GPU
+ if(gpu_index >= 0){
+ net->workspace = cuda_make_array(0, (workspace_size-1)/sizeof(float)+1);
+ }else {
+ net->workspace = calloc(1, workspace_size);
+ }
+#else
+ net->workspace = calloc(1, workspace_size);
+#endif
+ }
+ return net;
+}
+
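+/* Read a darknet cfg file into a list of sections. A '[' line starts a new section,
+ * '#', ';' and empty lines are skipped, and every other line is a key=value option. */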
+list *read_cfg(char *filename)
+{
+ FILE *file = fopen(filename, "r");
+ if(file == 0) file_error(filename);
+ char *line;
+ int nu = 0;
+ list *options = make_list();
+ section *current = 0;
+ while((line=fgetl(file)) != 0){
+ ++ nu;
+ strip(line);
+ switch(line[0]){
+ case '[':
+ current = malloc(sizeof(section));
+ list_insert(options, current);
+ current->options = make_list();
+ current->type = line;
+ break;
+ case '\0':
+ case '#':
+ case ';':
+ free(line);
+ break;
+ default:
+ if(!read_option(line, current->options)){
+ fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
+ free(line);
+ }
+ break;
+ }
+ }
+ fclose(file);
+ return options;
+}
+
+void save_convolutional_weights_binary(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_convolutional_layer(l);
+ }
+#endif
+ binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.binary_weights);
+ int size = l.c*l.size*l.size;
+ int i, j, k;
+ fwrite(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.n, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.n, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.n, fp);
+ }
+ for(i = 0; i < l.n; ++i){
+ float mean = l.binary_weights[i*size];
+ if(mean < 0) mean = -mean;
+ fwrite(&mean, sizeof(float), 1, fp);
+ for(j = 0; j < size/8; ++j){
+ int index = i*size + j*8;
+ unsigned char c = 0;
+ for(k = 0; k < 8; ++k){
+ if (j*8 + k >= size) break;
+ if (l.binary_weights[index + k] > 0) c = (c | 1<<k);
+ }
+ fwrite(&c, sizeof(char), 1, fp);
+ }
+ }
+}
+
+void save_convolutional_weights(layer l, FILE *fp)
+{
+ if(l.binary){
+ //save_convolutional_weights_binary(l, fp);
+ //return;
+ }
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_convolutional_layer(l);
+ }
+#endif
+ int num = l.nweights;
+ fwrite(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.n, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.n, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.n, fp);
+ }
+ fwrite(l.weights, sizeof(float), num, fp);
+}
+
+void save_batchnorm_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_batchnorm_layer(l);
+ }
+#endif
+ fwrite(l.scales, sizeof(float), l.c, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.c, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.c, fp);
+}
+
+void save_connected_weights(layer l, FILE *fp)
+{
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_connected_layer(l);
+ }
+#endif
+ fwrite(l.biases, sizeof(float), l.outputs, fp);
+ fwrite(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+ if (l.batch_normalize){
+ fwrite(l.scales, sizeof(float), l.outputs, fp);
+ fwrite(l.rolling_mean, sizeof(float), l.outputs, fp);
+ fwrite(l.rolling_variance, sizeof(float), l.outputs, fp);
+ }
+}
+
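+/* Write weights for the first `cutoff` layers. The file starts with a small header
+ * (major/minor/revision plus the `seen` image counter) followed by per-layer weight
+ * blobs in network order. */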
+void save_weights_upto(network *net, char *filename, int cutoff)
+{
+#ifdef GPU
+ if(net->gpu_index >= 0){
+ cuda_set_device(net->gpu_index);
+ }
+#endif
+ fprintf(stderr, "Saving weights to %s\n", filename);
+ FILE *fp = fopen(filename, "wb");
+ if(!fp) file_error(filename);
+
+ int major = 0;
+ int minor = 2;
+ int revision = 0;
+ fwrite(&major, sizeof(int), 1, fp);
+ fwrite(&minor, sizeof(int), 1, fp);
+ fwrite(&revision, sizeof(int), 1, fp);
+ fwrite(net->seen, sizeof(size_t), 1, fp);
+
+ int i;
+ for(i = 0; i < net->n && i < cutoff; ++i){
+ layer l = net->layers[i];
+ if (l.dontsave) continue;
+ if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
+ save_convolutional_weights(l, fp);
+ } if(l.type == CONNECTED){
+ save_connected_weights(l, fp);
+ } if(l.type == BATCHNORM){
+ save_batchnorm_weights(l, fp);
+ } if(l.type == RNN){
+ save_connected_weights(*(l.input_layer), fp);
+ save_connected_weights(*(l.self_layer), fp);
+ save_connected_weights(*(l.output_layer), fp);
+ } if (l.type == LSTM) {
+ save_connected_weights(*(l.wi), fp);
+ save_connected_weights(*(l.wf), fp);
+ save_connected_weights(*(l.wo), fp);
+ save_connected_weights(*(l.wg), fp);
+ save_connected_weights(*(l.ui), fp);
+ save_connected_weights(*(l.uf), fp);
+ save_connected_weights(*(l.uo), fp);
+ save_connected_weights(*(l.ug), fp);
+ } if (l.type == GRU) {
+ if(1){
+ save_connected_weights(*(l.wz), fp);
+ save_connected_weights(*(l.wr), fp);
+ save_connected_weights(*(l.wh), fp);
+ save_connected_weights(*(l.uz), fp);
+ save_connected_weights(*(l.ur), fp);
+ save_connected_weights(*(l.uh), fp);
+ }else{
+ save_connected_weights(*(l.reset_layer), fp);
+ save_connected_weights(*(l.update_layer), fp);
+ save_connected_weights(*(l.state_layer), fp);
+ }
+ } if(l.type == CRNN){
+ save_convolutional_weights(*(l.input_layer), fp);
+ save_convolutional_weights(*(l.self_layer), fp);
+ save_convolutional_weights(*(l.output_layer), fp);
+ } if(l.type == LOCAL){
+#ifdef GPU
+ if(gpu_index >= 0){
+ pull_local_layer(l);
+ }
+#endif
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ fwrite(l.biases, sizeof(float), l.outputs, fp);
+ fwrite(l.weights, sizeof(float), size, fp);
+ }
+ }
+ fclose(fp);
+}
+void save_weights(network *net, char *filename)
+{
+ save_weights_upto(net, filename, net->n);
+}
+
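+/* Transpose a rows x cols matrix in place via a temporary buffer; used when loading
+ * connected-layer weights that were saved in the transposed layout. */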
+void transpose_matrix(float *a, int rows, int cols)
+{
+ float *transpose = calloc(rows*cols, sizeof(float));
+ int x, y;
+ for(x = 0; x < rows; ++x){
+ for(y = 0; y < cols; ++y){
+ transpose[y*rows + x] = a[x*cols + y];
+ }
+ }
+ memcpy(a, transpose, rows*cols*sizeof(float));
+ free(transpose);
+}
+
+void load_connected_weights(layer l, FILE *fp, int transpose)
+{
+ fread(l.biases, sizeof(float), l.outputs, fp);
+ fread(l.weights, sizeof(float), l.outputs*l.inputs, fp);
+ if(transpose){
+ transpose_matrix(l.weights, l.inputs, l.outputs);
+ }
+ //printf("Biases: %f mean %f variance\n", mean_array(l.biases, l.outputs), variance_array(l.biases, l.outputs));
+ //printf("Weights: %f mean %f variance\n", mean_array(l.weights, l.outputs*l.inputs), variance_array(l.weights, l.outputs*l.inputs));
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.outputs, fp);
+ fread(l.rolling_mean, sizeof(float), l.outputs, fp);
+ fread(l.rolling_variance, sizeof(float), l.outputs, fp);
+ //printf("Scales: %f mean %f variance\n", mean_array(l.scales, l.outputs), variance_array(l.scales, l.outputs));
+ //printf("rolling_mean: %f mean %f variance\n", mean_array(l.rolling_mean, l.outputs), variance_array(l.rolling_mean, l.outputs));
+ //printf("rolling_variance: %f mean %f variance\n", mean_array(l.rolling_variance, l.outputs), variance_array(l.rolling_variance, l.outputs));
+ }
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_connected_layer(l);
+ }
+#endif
+}
+
+void load_batchnorm_weights(layer l, FILE *fp)
+{
+ fread(l.scales, sizeof(float), l.c, fp);
+ fread(l.rolling_mean, sizeof(float), l.c, fp);
+ fread(l.rolling_variance, sizeof(float), l.c, fp);
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_batchnorm_layer(l);
+ }
+#endif
+}
+
+void load_convolutional_weights_binary(layer l, FILE *fp)
+{
+ fread(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.n, fp);
+ fread(l.rolling_mean, sizeof(float), l.n, fp);
+ fread(l.rolling_variance, sizeof(float), l.n, fp);
+ }
+ int size = l.c*l.size*l.size;
+ int i, j, k;
+ for(i = 0; i < l.n; ++i){
+ float mean = 0;
+ fread(&mean, sizeof(float), 1, fp);
+ for(j = 0; j < size/8; ++j){
+ int index = i*size + j*8;
+ unsigned char c = 0;
+ fread(&c, sizeof(char), 1, fp);
+ for(k = 0; k < 8; ++k){
+ if (j*8 + k >= size) break;
+ l.weights[index + k] = (c & 1<= 0){
+ push_convolutional_layer(l);
+ }
+#endif
+}
+
+void load_convolutional_weights(layer l, FILE *fp)
+{
+ if(l.binary){
+ //load_convolutional_weights_binary(l, fp);
+ //return;
+ }
+ int num = l.nweights;
+ fread(l.biases, sizeof(float), l.n, fp);
+ if (l.batch_normalize && (!l.dontloadscales)){
+ fread(l.scales, sizeof(float), l.n, fp);
+ fread(l.rolling_mean, sizeof(float), l.n, fp);
+ fread(l.rolling_variance, sizeof(float), l.n, fp);
+ if(0){
+ int i;
+ for(i = 0; i < l.n; ++i){
+ printf("%g, ", l.rolling_mean[i]);
+ }
+ printf("\n");
+ for(i = 0; i < l.n; ++i){
+ printf("%g, ", l.rolling_variance[i]);
+ }
+ printf("\n");
+ }
+ if(0){
+ fill_cpu(l.n, 0, l.rolling_mean, 1);
+ fill_cpu(l.n, 0, l.rolling_variance, 1);
+ }
+ if(0){
+ int i;
+ for(i = 0; i < l.n; ++i){
+ printf("%g, ", l.rolling_mean[i]);
+ }
+ printf("\n");
+ for(i = 0; i < l.n; ++i){
+ printf("%g, ", l.rolling_variance[i]);
+ }
+ printf("\n");
+ }
+ }
+ fread(l.weights, sizeof(float), num, fp);
+ //if(l.c == 3) scal_cpu(num, 1./256, l.weights, 1);
+ if (l.flipped) {
+ transpose_matrix(l.weights, l.c*l.size*l.size, l.n);
+ }
+ //if (l.binary) binarize_weights(l.weights, l.n, l.c*l.size*l.size, l.weights);
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_convolutional_layer(l);
+ }
+#endif
+}
+
+
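+/* Load weights for layers [start, cutoff). The header version decides whether the
+ * `seen` counter was stored as size_t or int, and very old files (major or minor
+ * above 1000) store connected-layer weights transposed. */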
+void load_weights_upto(network *net, char *filename, int start, int cutoff)
+{
+#ifdef GPU
+ if(net->gpu_index >= 0){
+ cuda_set_device(net->gpu_index);
+ }
+#endif
+ fprintf(stderr, "Loading weights from %s...", filename);
+ fflush(stdout);
+ FILE *fp = fopen(filename, "rb");
+ if(!fp) file_error(filename);
+
+ int major;
+ int minor;
+ int revision;
+ fread(&major, sizeof(int), 1, fp);
+ fread(&minor, sizeof(int), 1, fp);
+ fread(&revision, sizeof(int), 1, fp);
+ if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
+ fread(net->seen, sizeof(size_t), 1, fp);
+ } else {
+ int iseen = 0;
+ fread(&iseen, sizeof(int), 1, fp);
+ *net->seen = iseen;
+ }
+ int transpose = (major > 1000) || (minor > 1000);
+
+ int i;
+ for(i = start; i < net->n && i < cutoff; ++i){
+ layer l = net->layers[i];
+ if (l.dontload) continue;
+ if(l.type == CONVOLUTIONAL || l.type == DECONVOLUTIONAL){
+ load_convolutional_weights(l, fp);
+ }
+ if(l.type == CONNECTED){
+ load_connected_weights(l, fp, transpose);
+ }
+ if(l.type == BATCHNORM){
+ load_batchnorm_weights(l, fp);
+ }
+ if(l.type == CRNN){
+ load_convolutional_weights(*(l.input_layer), fp);
+ load_convolutional_weights(*(l.self_layer), fp);
+ load_convolutional_weights(*(l.output_layer), fp);
+ }
+ if(l.type == RNN){
+ load_connected_weights(*(l.input_layer), fp, transpose);
+ load_connected_weights(*(l.self_layer), fp, transpose);
+ load_connected_weights(*(l.output_layer), fp, transpose);
+ }
+ if (l.type == LSTM) {
+ load_connected_weights(*(l.wi), fp, transpose);
+ load_connected_weights(*(l.wf), fp, transpose);
+ load_connected_weights(*(l.wo), fp, transpose);
+ load_connected_weights(*(l.wg), fp, transpose);
+ load_connected_weights(*(l.ui), fp, transpose);
+ load_connected_weights(*(l.uf), fp, transpose);
+ load_connected_weights(*(l.uo), fp, transpose);
+ load_connected_weights(*(l.ug), fp, transpose);
+ }
+ if (l.type == GRU) {
+ if(1){
+ load_connected_weights(*(l.wz), fp, transpose);
+ load_connected_weights(*(l.wr), fp, transpose);
+ load_connected_weights(*(l.wh), fp, transpose);
+ load_connected_weights(*(l.uz), fp, transpose);
+ load_connected_weights(*(l.ur), fp, transpose);
+ load_connected_weights(*(l.uh), fp, transpose);
+ }else{
+ load_connected_weights(*(l.reset_layer), fp, transpose);
+ load_connected_weights(*(l.update_layer), fp, transpose);
+ load_connected_weights(*(l.state_layer), fp, transpose);
+ }
+ }
+ if(l.type == LOCAL){
+ int locations = l.out_w*l.out_h;
+ int size = l.size*l.size*l.c*l.n*locations;
+ fread(l.biases, sizeof(float), l.outputs, fp);
+ fread(l.weights, sizeof(float), size, fp);
+#ifdef GPU
+ if(gpu_index >= 0){
+ push_local_layer(l);
+ }
+#endif
+ }
+ }
+ fprintf(stderr, "Done!\n");
+ fclose(fp);
+}
+
+void load_weights(network *net, char *filename)
+{
+ load_weights_upto(net, filename, 0, net->n);
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.h
new file mode 100644
index 00000000000..81aef2c86f3
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/parser.h
@@ -0,0 +1,9 @@
+#ifndef PARSER_H
+#define PARSER_H
+#include "darknet.h"
+#include "network.h"
+
+void save_network(network net, char *filename);
+void save_weights_double(network net, char *filename);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.c
new file mode 100644
index 00000000000..179f5e32a60
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.c
@@ -0,0 +1,507 @@
+#include "region_layer.h"
+#include "activations.h"
+#include "blas.h"
+#include "box.h"
+#include "cuda.h"
+#include "utils.h"
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
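+/* Region (YOLOv2-style) detection layer: each of the n anchors at every grid cell
+ * predicts `coords` box values, an objectness score and `classes` class scores, so
+ * the input has n*(classes + coords + 1) channels. Anchor biases default to 0.5
+ * until overwritten by the cfg "anchors" option. */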
+layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
+{
+ layer l = {0};
+ l.type = REGION;
+
+ l.n = n;
+ l.batch = batch;
+ l.h = h;
+ l.w = w;
+ l.c = n*(classes + coords + 1);
+ l.out_w = l.w;
+ l.out_h = l.h;
+ l.out_c = l.c;
+ l.classes = classes;
+ l.coords = coords;
+ l.cost = calloc(1, sizeof(float));
+ l.biases = calloc(n*2, sizeof(float));
+ l.bias_updates = calloc(n*2, sizeof(float));
+ l.outputs = h*w*n*(classes + coords + 1);
+ l.inputs = l.outputs;
+ l.truths = 30*(l.coords + 1);
+ l.delta = calloc(batch*l.outputs, sizeof(float));
+ l.output = calloc(batch*l.outputs, sizeof(float));
+ int i;
+ for(i = 0; i < n*2; ++i){
+ l.biases[i] = .5;
+ }
+
+ l.forward = forward_region_layer;
+ l.backward = backward_region_layer;
+#ifdef GPU
+ l.forward_gpu = forward_region_layer_gpu;
+ l.backward_gpu = backward_region_layer_gpu;
+ l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
+ l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
+#endif
+
+ fprintf(stderr, "detection\n");
+ srand(0);
+
+ return l;
+}
+
+void resize_region_layer(layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+
+ l->outputs = h*w*l->n*(l->classes + l->coords + 1);
+ l->inputs = l->outputs;
+
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
+
+ l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+ l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+#endif
+}
+
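+/* Decode one predicted box: x,y are offsets within cell (i,j) normalized by the grid
+ * size; w,h are exp() of the prediction scaled by the anchor prior biases[2n..2n+1]. */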
+box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
+{
+ box b;
+ b.x = (i + x[index + 0*stride]) / w;
+ b.y = (j + x[index + 1*stride]) / h;
+ b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
+ b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
+ return b;
+}
+
+float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale, int stride)
+{
+ box pred = get_region_box(x, biases, n, index, i, j, w, h, stride);
+ float iou = box_iou(pred, truth);
+
+ float tx = (truth.x*w - i);
+ float ty = (truth.y*h - j);
+ float tw = log(truth.w*w / biases[2*n]);
+ float th = log(truth.h*h / biases[2*n + 1]);
+
+ delta[index + 0*stride] = scale * (tx - x[index + 0*stride]);
+ delta[index + 1*stride] = scale * (ty - x[index + 1*stride]);
+ delta[index + 2*stride] = scale * (tw - x[index + 2*stride]);
+ delta[index + 3*stride] = scale * (th - x[index + 3*stride]);
+ return iou;
+}
+
+void delta_region_mask(float *truth, float *x, int n, int index, float *delta, int stride, int scale)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ delta[index + i*stride] = scale*(truth[i] - x[index + i*stride]);
+ }
+}
+
+
+void delta_region_class(float *output, float *delta, int index, int class, int classes, tree *hier, float scale, int stride, float *avg_cat, int tag)
+{
+ int i, n;
+ if(hier){
+ float pred = 1;
+ while(class >= 0){
+ pred *= output[index + stride*class];
+ int g = hier->group[class];
+ int offset = hier->group_offset[g];
+ for(i = 0; i < hier->group_size[g]; ++i){
+ delta[index + stride*(offset + i)] = scale * (0 - output[index + stride*(offset + i)]);
+ }
+ delta[index + stride*class] = scale * (1 - output[index + stride*class]);
+
+ class = hier->parent[class];
+ }
+ *avg_cat += pred;
+ } else {
+ if (delta[index] && tag){
+ delta[index + stride*class] = scale * (1 - output[index + stride*class]);
+ return;
+ }
+ for(n = 0; n < classes; ++n){
+ delta[index + stride*n] = scale * (((n == class)?1 : 0) - output[index + stride*n]);
+ if(n == class) *avg_cat += output[index + stride*n];
+ }
+ }
+}
+
+float logit(float x)
+{
+ return log(x/(1.-x));
+}
+
+float tisnan(float x)
+{
+ return (x != x);
+}
+
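+/* Flat index into the output tensor for (batch, anchor, entry, spatial location);
+ * per anchor the layout is [coords+classes+1][h*w], i.e. channel-major over the grid. */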
+int entry_index(layer l, int batch, int location, int entry)
+{
+ int n = location / (l.w*l.h);
+ int loc = location % (l.w*l.h);
+ return batch*l.outputs + n*l.w*l.h*(l.coords+l.classes+1) + entry*l.w*l.h + loc;
+}
+
+void forward_region_layer(const layer l, network net)
+{
+ int i,j,b,t,n;
+ memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
+
+#ifndef GPU
+ for (b = 0; b < l.batch; ++b){
+ for(n = 0; n < l.n; ++n){
+ int index = entry_index(l, b, n*l.w*l.h, 0);
+ activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
+ index = entry_index(l, b, n*l.w*l.h, l.coords);
+ if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
+ index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
+ if(!l.softmax && !l.softmax_tree) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
+ }
+ }
+ if (l.softmax_tree){
+ int i;
+ int count = l.coords + 1;
+ for (i = 0; i < l.softmax_tree->groups; ++i) {
+ int group_size = l.softmax_tree->group_size[i];
+ softmax_cpu(net.input + count, group_size, l.batch, l.inputs, l.n*l.w*l.h, 1, l.n*l.w*l.h, l.temperature, l.output + count);
+ count += group_size;
+ }
+ } else if (l.softmax){
+ int index = entry_index(l, 0, 0, l.coords + !l.background);
+ softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
+ }
+#endif
+
+ memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
+ if(!net.train) return;
+ float avg_iou = 0;
+ float recall = 0;
+ float avg_cat = 0;
+ float avg_obj = 0;
+ float avg_anyobj = 0;
+ int count = 0;
+ int class_count = 0;
+ *(l.cost) = 0;
+ for (b = 0; b < l.batch; ++b) {
+ if(l.softmax_tree){
+ int onlyclass = 0;
+ for(t = 0; t < 30; ++t){
+ box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+ if(!truth.x) break;
+ int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
+ float maxp = 0;
+ int maxi = 0;
+ if(truth.x > 100000 && truth.y > 100000){
+ for(n = 0; n < l.n*l.w*l.h; ++n){
+ int class_index = entry_index(l, b, n, l.coords + 1);
+ int obj_index = entry_index(l, b, n, l.coords);
+ float scale = l.output[obj_index];
+ l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
+ float p = scale*get_hierarchy_probability(l.output + class_index, l.softmax_tree, class, l.w*l.h);
+ if(p > maxp){
+ maxp = p;
+ maxi = n;
+ }
+ }
+ int class_index = entry_index(l, b, maxi, l.coords + 1);
+ int obj_index = entry_index(l, b, maxi, l.coords);
+ delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat, !l.softmax);
+ if(l.output[obj_index] < .3) l.delta[obj_index] = l.object_scale * (.3 - l.output[obj_index]);
+ else l.delta[obj_index] = 0;
+ l.delta[obj_index] = 0;
+ ++class_count;
+ onlyclass = 1;
+ break;
+ }
+ }
+ if(onlyclass) continue;
+ }
+ for (j = 0; j < l.h; ++j) {
+ for (i = 0; i < l.w; ++i) {
+ for (n = 0; n < l.n; ++n) {
+ int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
+ box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
+ float best_iou = 0;
+ for(t = 0; t < 30; ++t){
+ box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+ if(!truth.x) break;
+ float iou = box_iou(pred, truth);
+ if (iou > best_iou) {
+ best_iou = iou;
+ }
+ }
+ int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, l.coords);
+ avg_anyobj += l.output[obj_index];
+ l.delta[obj_index] = l.noobject_scale * (0 - l.output[obj_index]);
+ if(l.background) l.delta[obj_index] = l.noobject_scale * (1 - l.output[obj_index]);
+ if (best_iou > l.thresh) {
+ l.delta[obj_index] = 0;
+ }
+
+ if(*(net.seen) < 12800){
+ box truth = {0};
+ truth.x = (i + .5)/l.w;
+ truth.y = (j + .5)/l.h;
+ truth.w = l.biases[2*n]/l.w;
+ truth.h = l.biases[2*n+1]/l.h;
+ delta_region_box(truth, l.output, l.biases, n, box_index, i, j, l.w, l.h, l.delta, .01, l.w*l.h);
+ }
+ }
+ }
+ }
+ for(t = 0; t < 30; ++t){
+ box truth = float_to_box(net.truth + t*(l.coords + 1) + b*l.truths, 1);
+
+ if(!truth.x) break;
+ float best_iou = 0;
+ int best_n = 0;
+ i = (truth.x * l.w);
+ j = (truth.y * l.h);
+ box truth_shift = truth;
+ truth_shift.x = 0;
+ truth_shift.y = 0;
+ for(n = 0; n < l.n; ++n){
+ int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
+ box pred = get_region_box(l.output, l.biases, n, box_index, i, j, l.w, l.h, l.w*l.h);
+ if(l.bias_match){
+ pred.w = l.biases[2*n]/l.w;
+ pred.h = l.biases[2*n+1]/l.h;
+ }
+ pred.x = 0;
+ pred.y = 0;
+ float iou = box_iou(pred, truth_shift);
+ if (iou > best_iou){
+ best_iou = iou;
+ best_n = n;
+ }
+ }
+
+ int box_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, 0);
+ float iou = delta_region_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, l.delta, l.coord_scale * (2 - truth.w*truth.h), l.w*l.h);
+ if(l.coords > 4){
+ int mask_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, 4);
+ delta_region_mask(net.truth + t*(l.coords + 1) + b*l.truths + 5, l.output, l.coords - 4, mask_index, l.delta, l.w*l.h, l.mask_scale);
+ }
+ if(iou > .5) recall += 1;
+ avg_iou += iou;
+
+ int obj_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords);
+ avg_obj += l.output[obj_index];
+ l.delta[obj_index] = l.object_scale * (1 - l.output[obj_index]);
+ if (l.rescore) {
+ l.delta[obj_index] = l.object_scale * (iou - l.output[obj_index]);
+ }
+ if(l.background){
+ l.delta[obj_index] = l.object_scale * (0 - l.output[obj_index]);
+ }
+
+ int class = net.truth[t*(l.coords + 1) + b*l.truths + l.coords];
+ if (l.map) class = l.map[class];
+ int class_index = entry_index(l, b, best_n*l.w*l.h + j*l.w + i, l.coords + 1);
+ delta_region_class(l.output, l.delta, class_index, class, l.classes, l.softmax_tree, l.class_scale, l.w*l.h, &avg_cat, !l.softmax);
+ ++count;
+ ++class_count;
+ }
+ }
+ *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
+ printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
+}
+
+void backward_region_layer(const layer l, network net)
+{
+ /*
+ int b;
+ int size = l.coords + l.classes + 1;
+ for (b = 0; b < l.batch*l.n; ++b){
+ int index = (b*size + 4)*l.w*l.h;
+ gradient_array(l.output + index, l.w*l.h, LOGISTIC, l.delta + index);
+ }
+ axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
+ */
+}
+
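+/* Undo the letterbox resize: map boxes from network-input coordinates back to the
+ * original w x h image, optionally converting from relative to absolute units. */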
+void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
+{
+ int i;
+ int new_w=0;
+ int new_h=0;
+ if (((float)netw/w) < ((float)neth/h)) {
+ new_w = netw;
+ new_h = (h * netw)/w;
+ } else {
+ new_h = neth;
+ new_w = (w * neth)/h;
+ }
+ for (i = 0; i < n; ++i){
+ box b = dets[i].bbox;
+ b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
+ b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
+ b.w *= (float)netw/new_w;
+ b.h *= (float)neth/new_h;
+ if(!relative){
+ b.x *= w;
+ b.w *= w;
+ b.y *= h;
+ b.h *= h;
+ }
+ dets[i].bbox = b;
+ }
+}
+
+void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
+{
+ int i,j,n,z;
+ float *predictions = l.output;
+ if (l.batch == 2) {
+ float *flip = l.output + l.outputs;
+ for (j = 0; j < l.h; ++j) {
+ for (i = 0; i < l.w/2; ++i) {
+ for (n = 0; n < l.n; ++n) {
+ for(z = 0; z < l.classes + l.coords + 1; ++z){
+ int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
+ int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
+ float swap = flip[i1];
+ flip[i1] = flip[i2];
+ flip[i2] = swap;
+ if(z == 0){
+ flip[i1] = -flip[i1];
+ flip[i2] = -flip[i2];
+ }
+ }
+ }
+ }
+ }
+ for(i = 0; i < l.outputs; ++i){
+ l.output[i] = (l.output[i] + flip[i])/2.;
+ }
+ }
+ for (i = 0; i < l.w*l.h; ++i){
+ int row = i / l.w;
+ int col = i % l.w;
+ for(n = 0; n < l.n; ++n){
+ int index = n*l.w*l.h + i;
+ for(j = 0; j < l.classes; ++j){
+ dets[index].prob[j] = 0;
+ }
+ int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
+ int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
+ int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
+ float scale = l.background ? 1 : predictions[obj_index];
+ dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
+ dets[index].objectness = scale > thresh ? scale : 0;
+ if(dets[index].mask){
+ for(j = 0; j < l.coords - 4; ++j){
+ dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
+ }
+ }
+
+ int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
+ if(l.softmax_tree){
+
+ hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0, l.w*l.h);
+ if(map){
+ for(j = 0; j < 200; ++j){
+ int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
+ float prob = scale*predictions[class_index];
+ dets[index].prob[j] = (prob > thresh) ? prob : 0;
+ }
+ } else {
+ int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
+ dets[index].prob[j] = (scale > thresh) ? scale : 0;
+ }
+ } else {
+ if(dets[index].objectness){
+ for(j = 0; j < l.classes; ++j){
+ int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
+ float prob = scale*predictions[class_index];
+ dets[index].prob[j] = (prob > thresh) ? prob : 0;
+ }
+ }
+ }
+ }
+ }
+ correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
+}
+
+#ifdef GPU
+
+void forward_region_layer_gpu(const layer l, network net)
+{
+ copy_gpu(l.batch*l.inputs, net.input_gpu, 1, l.output_gpu, 1);
+ int b, n;
+ for (b = 0; b < l.batch; ++b){
+ for(n = 0; n < l.n; ++n){
+ int index = entry_index(l, b, n*l.w*l.h, 0);
+ activate_array_gpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC);
+ if(l.coords > 4){
+ index = entry_index(l, b, n*l.w*l.h, 4);
+ activate_array_gpu(l.output_gpu + index, (l.coords - 4)*l.w*l.h, LOGISTIC);
+ }
+ index = entry_index(l, b, n*l.w*l.h, l.coords);
+ if(!l.background) activate_array_gpu(l.output_gpu + index, l.w*l.h, LOGISTIC);
+ index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
+ if(!l.softmax && !l.softmax_tree) activate_array_gpu(l.output_gpu + index, l.classes*l.w*l.h, LOGISTIC);
+ }
+ }
+ if (l.softmax_tree){
+ int index = entry_index(l, 0, 0, l.coords + 1);
+ softmax_tree(net.input_gpu + index, l.w*l.h, l.batch*l.n, l.inputs/l.n, 1, l.output_gpu + index, *l.softmax_tree);
+ } else if (l.softmax) {
+ int index = entry_index(l, 0, 0, l.coords + !l.background);
+ softmax_gpu(net.input_gpu + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu + index);
+ }
+ if(!net.train || l.onlyforward){
+ cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
+ return;
+ }
+
+ cuda_pull_array(l.output_gpu, net.input, l.batch*l.inputs);
+ forward_region_layer(l, net);
+ //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
+ if(!net.train) return;
+ cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
+}
+
+void backward_region_layer_gpu(const layer l, network net)
+{
+ int b, n;
+ for (b = 0; b < l.batch; ++b){
+ for(n = 0; n < l.n; ++n){
+ int index = entry_index(l, b, n*l.w*l.h, 0);
+ gradient_array_gpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC, l.delta_gpu + index);
+ if(l.coords > 4){
+ index = entry_index(l, b, n*l.w*l.h, 4);
+ gradient_array_gpu(l.output_gpu + index, (l.coords - 4)*l.w*l.h, LOGISTIC, l.delta_gpu + index);
+ }
+ index = entry_index(l, b, n*l.w*l.h, l.coords);
+ if(!l.background) gradient_array_gpu(l.output_gpu + index, l.w*l.h, LOGISTIC, l.delta_gpu + index);
+ }
+ }
+ axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
+
+void zero_objectness(layer l)
+{
+ int i, n;
+ for (i = 0; i < l.w*l.h; ++i){
+ for(n = 0; n < l.n; ++n){
+ int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
+ l.output[obj_index] = 0;
+ }
+ }
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.h
new file mode 100644
index 00000000000..9f12fd187fd
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/region_layer.h
@@ -0,0 +1,18 @@
+#ifndef REGION_LAYER_H
+#define REGION_LAYER_H
+
+#include "darknet.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_region_layer(int batch, int w, int h, int n, int classes, int coords);
+void forward_region_layer(const layer l, network net);
+void backward_region_layer(const layer l, network net);
+void resize_region_layer(layer *l, int w, int h);
+
+#ifdef GPU
+void forward_region_layer_gpu(const layer l, network net);
+void backward_region_layer_gpu(layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.c
new file mode 100644
index 00000000000..31d6b843676
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.c
@@ -0,0 +1,173 @@
+#include "reorg_layer.h"
+#include "cuda.h"
+#include "blas.h"
+
+#include <stdio.h>
+
+
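+/* Reorg layer: space-to-depth rearrangement of an h x w x c volume into
+ * (h/stride) x (w/stride) x (c*stride^2), or the reverse when `reverse` is set;
+ * with `extra` it instead passes the inputs through into inputs+extra outputs. */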
+layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
+{
+ layer l = {0};
+ l.type = REORG;
+ l.batch = batch;
+ l.stride = stride;
+ l.extra = extra;
+ l.h = h;
+ l.w = w;
+ l.c = c;
+ l.flatten = flatten;
+ if(reverse){
+ l.out_w = w*stride;
+ l.out_h = h*stride;
+ l.out_c = c/(stride*stride);
+ }else{
+ l.out_w = w/stride;
+ l.out_h = h/stride;
+ l.out_c = c*(stride*stride);
+ }
+ l.reverse = reverse;
+
+ l.outputs = l.out_h * l.out_w * l.out_c;
+ l.inputs = h*w*c;
+ if(l.extra){
+ l.out_w = l.out_h = l.out_c = 0;
+ l.outputs = l.inputs + l.extra;
+ }
+
+ if(extra){
+ fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
+ } else {
+ fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
+ }
+ int output_size = l.outputs * batch;
+ l.output = calloc(output_size, sizeof(float));
+ l.delta = calloc(output_size, sizeof(float));
+
+ l.forward = forward_reorg_layer;
+ l.backward = backward_reorg_layer;
+#ifdef GPU
+ l.forward_gpu = forward_reorg_layer_gpu;
+ l.backward_gpu = backward_reorg_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, output_size);
+ l.delta_gpu = cuda_make_array(l.delta, output_size);
+#endif
+ return l;
+}
+
+void resize_reorg_layer(layer *l, int w, int h)
+{
+ int stride = l->stride;
+ int c = l->c;
+
+ l->h = h;
+ l->w = w;
+
+ if(l->reverse){
+ l->out_w = w*stride;
+ l->out_h = h*stride;
+ l->out_c = c/(stride*stride);
+ }else{
+ l->out_w = w/stride;
+ l->out_h = h/stride;
+ l->out_c = c*(stride*stride);
+ }
+
+ l->outputs = l->out_h * l->out_w * l->out_c;
+ l->inputs = l->outputs;
+ int output_size = l->outputs * l->batch;
+
+ l->output = realloc(l->output, output_size * sizeof(float));
+ l->delta = realloc(l->delta, output_size * sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->output_gpu);
+ cuda_free(l->delta_gpu);
+ l->output_gpu = cuda_make_array(l->output, output_size);
+ l->delta_gpu = cuda_make_array(l->delta, output_size);
+#endif
+}
+
+void forward_reorg_layer(const layer l, network net)
+{
+ int i;
+ if(l.flatten){
+ memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
+ if(l.reverse){
+ flatten(l.output, l.w*l.h, l.c, l.batch, 0);
+ }else{
+ flatten(l.output, l.w*l.h, l.c, l.batch, 1);
+ }
+ } else if (l.extra) {
+ for(i = 0; i < l.batch; ++i){
+ copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
+ }
+ } else if (l.reverse){
+ reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
+ } else {
+ reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
+ }
+}
+
+void backward_reorg_layer(const layer l, network net)
+{
+ int i;
+ if(l.flatten){
+ memcpy(net.delta, l.delta, l.outputs*l.batch*sizeof(float));
+ if(l.reverse){
+ flatten(net.delta, l.w*l.h, l.c, l.batch, 1);
+ }else{
+ flatten(net.delta, l.w*l.h, l.c, l.batch, 0);
+ }
+ } else if(l.reverse){
+ reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta);
+ } else if (l.extra) {
+ for(i = 0; i < l.batch; ++i){
+ copy_cpu(l.inputs, l.delta + i*l.outputs, 1, net.delta + i*l.inputs, 1);
+ }
+ }else{
+ reorg_cpu(l.delta, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta);
+ }
+}
+
+#ifdef GPU
+void forward_reorg_layer_gpu(layer l, network net)
+{
+ int i;
+ if(l.flatten){
+ if(l.reverse){
+ flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 0, l.output_gpu);
+ }else{
+ flatten_gpu(net.input_gpu, l.w*l.h, l.c, l.batch, 1, l.output_gpu);
+ }
+ } else if (l.extra) {
+ for(i = 0; i < l.batch; ++i){
+ copy_gpu(l.inputs, net.input_gpu + i*l.inputs, 1, l.output_gpu + i*l.outputs, 1);
+ }
+ } else if (l.reverse) {
+ reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.output_gpu);
+ }else {
+ reorg_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.output_gpu);
+ }
+}
+
+void backward_reorg_layer_gpu(layer l, network net)
+{
+ if(l.flatten){
+ if(l.reverse){
+ flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 1, net.delta_gpu);
+ }else{
+ flatten_gpu(l.delta_gpu, l.w*l.h, l.c, l.batch, 0, net.delta_gpu);
+ }
+ } else if (l.extra) {
+ int i;
+ for(i = 0; i < l.batch; ++i){
+ copy_gpu(l.inputs, l.delta_gpu + i*l.outputs, 1, net.delta_gpu + i*l.inputs, 1);
+ }
+ } else if(l.reverse){
+ reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, net.delta_gpu);
+ } else {
+ reorg_gpu(l.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, net.delta_gpu);
+ }
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.h
new file mode 100644
index 00000000000..e6513a5f441
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/reorg_layer.h
@@ -0,0 +1,20 @@
+#ifndef REORG_LAYER_H
+#define REORG_LAYER_H
+
+#include "image.h"
+#include "cuda.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra);
+void resize_reorg_layer(layer *l, int w, int h);
+void forward_reorg_layer(const layer l, network net);
+void backward_reorg_layer(const layer l, network net);
+
+#ifdef GPU
+void forward_reorg_layer_gpu(layer l, network net);
+void backward_reorg_layer_gpu(layer l, network net);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.c
new file mode 100644
index 00000000000..8c9b457e26e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.c
@@ -0,0 +1,292 @@
+#include "rnn_layer.h"
+#include "connected_layer.h"
+#include "utils.h"
+#include "cuda.h"
+#include "blas.h"
+#include "gemm.h"
+
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+static void increment_layer(layer *l, int steps)
+{
+ int num = l->outputs*l->batch*steps;
+ l->output += num;
+ l->delta += num;
+ l->x += num;
+ l->x_norm += num;
+
+#ifdef GPU
+ l->output_gpu += num;
+ l->delta_gpu += num;
+ l->x_gpu += num;
+ l->x_norm_gpu += num;
+#endif
+}
+
+layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam)
+{
+ fprintf(stderr, "RNN Layer: %d inputs, %d outputs\n", inputs, outputs);
+ batch = batch / steps;
+ layer l = {0};
+ l.batch = batch;
+ l.type = RNN;
+ l.steps = steps;
+ l.inputs = inputs;
+
+ l.state = calloc(batch*outputs, sizeof(float));
+ l.prev_state = calloc(batch*outputs, sizeof(float));
+
+ l.input_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.input_layer) = make_connected_layer(batch*steps, inputs, outputs, activation, batch_normalize, adam);
+ l.input_layer->batch = batch;
+
+ l.self_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.self_layer) = make_connected_layer(batch*steps, outputs, outputs, activation, batch_normalize, adam);
+ l.self_layer->batch = batch;
+
+ l.output_layer = malloc(sizeof(layer));
+ fprintf(stderr, "\t\t");
+ *(l.output_layer) = make_connected_layer(batch*steps, outputs, outputs, activation, batch_normalize, adam);
+ l.output_layer->batch = batch;
+
+ l.outputs = outputs;
+ l.output = l.output_layer->output;
+ l.delta = l.output_layer->delta;
+
+ l.forward = forward_rnn_layer;
+ l.backward = backward_rnn_layer;
+ l.update = update_rnn_layer;
+#ifdef GPU
+ l.forward_gpu = forward_rnn_layer_gpu;
+ l.backward_gpu = backward_rnn_layer_gpu;
+ l.update_gpu = update_rnn_layer_gpu;
+ l.state_gpu = cuda_make_array(0, batch*outputs);
+ l.prev_state_gpu = cuda_make_array(0, batch*outputs);
+ l.output_gpu = l.output_layer->output_gpu;
+ l.delta_gpu = l.output_layer->delta_gpu;
+#ifdef CUDNN
+ cudnnSetTensor4dDescriptor(l.input_layer->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.input_layer->out_c, l.input_layer->out_h, l.input_layer->out_w);
+ cudnnSetTensor4dDescriptor(l.self_layer->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.self_layer->out_c, l.self_layer->out_h, l.self_layer->out_w);
+ cudnnSetTensor4dDescriptor(l.output_layer->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch, l.output_layer->out_c, l.output_layer->out_h, l.output_layer->out_w);
+#endif
+#endif
+
+ return l;
+}
+
+void update_rnn_layer(layer l, update_args a)
+{
+ update_connected_layer(*(l.input_layer), a);
+ update_connected_layer(*(l.self_layer), a);
+ update_connected_layer(*(l.output_layer), a);
+}
+
+void forward_rnn_layer(layer l, network net)
+{
+ network s = net;
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_cpu(l.outputs * l.batch * l.steps, 0, output_layer.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, self_layer.delta, 1);
+ fill_cpu(l.outputs * l.batch * l.steps, 0, input_layer.delta, 1);
+ if(net.train) fill_cpu(l.outputs * l.batch, 0, l.state, 1);
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input = net.input;
+ forward_connected_layer(input_layer, s);
+
+ s.input = l.state;
+ forward_connected_layer(self_layer, s);
+
+ float *old_state = l.state;
+ if(net.train) l.state += l.outputs*l.batch;
+ if(l.shortcut){
+ copy_cpu(l.outputs * l.batch, old_state, 1, l.state, 1);
+ }else{
+ fill_cpu(l.outputs * l.batch, 0, l.state, 1);
+ }
+ axpy_cpu(l.outputs * l.batch, 1, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.outputs * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ forward_connected_layer(output_layer, s);
+
+ net.input += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_rnn_layer(layer l, network net)
+{
+ network s = net;
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ increment_layer(&input_layer, l.steps-1);
+ increment_layer(&self_layer, l.steps-1);
+ increment_layer(&output_layer, l.steps-1);
+
+ l.state += l.outputs*l.batch*l.steps;
+ for (i = l.steps-1; i >= 0; --i) {
+ copy_cpu(l.outputs * l.batch, input_layer.output, 1, l.state, 1);
+ axpy_cpu(l.outputs * l.batch, 1, self_layer.output, 1, l.state, 1);
+
+ s.input = l.state;
+ s.delta = self_layer.delta;
+ backward_connected_layer(output_layer, s);
+
+ l.state -= l.outputs*l.batch;
+ /*
+ if(i > 0){
+ copy_cpu(l.outputs * l.batch, input_layer.output - l.outputs*l.batch, 1, l.state, 1);
+ axpy_cpu(l.outputs * l.batch, 1, self_layer.output - l.outputs*l.batch, 1, l.state, 1);
+ }else{
+ fill_cpu(l.outputs * l.batch, 0, l.state, 1);
+ }
+ */
+
+ s.input = l.state;
+ s.delta = self_layer.delta - l.outputs*l.batch;
+ if (i == 0) s.delta = 0;
+ backward_connected_layer(self_layer, s);
+
+ copy_cpu(l.outputs*l.batch, self_layer.delta, 1, input_layer.delta, 1);
+ if (i > 0 && l.shortcut) axpy_cpu(l.outputs*l.batch, 1, self_layer.delta, 1, self_layer.delta - l.outputs*l.batch, 1);
+ s.input = net.input + i*l.inputs*l.batch;
+ if(net.delta) s.delta = net.delta + i*l.inputs*l.batch;
+ else s.delta = 0;
+ backward_connected_layer(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+}
+
+#ifdef GPU
+
+void pull_rnn_layer(layer l)
+{
+ pull_connected_layer(*(l.input_layer));
+ pull_connected_layer(*(l.self_layer));
+ pull_connected_layer(*(l.output_layer));
+}
+
+void push_rnn_layer(layer l)
+{
+ push_connected_layer(*(l.input_layer));
+ push_connected_layer(*(l.self_layer));
+ push_connected_layer(*(l.output_layer));
+}
+
+void update_rnn_layer_gpu(layer l, update_args a)
+{
+ update_connected_layer_gpu(*(l.input_layer), a);
+ update_connected_layer_gpu(*(l.self_layer), a);
+ update_connected_layer_gpu(*(l.output_layer), a);
+}
+
+void forward_rnn_layer_gpu(layer l, network net)
+{
+ network s = {0};
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+
+ fill_gpu(l.outputs * l.batch * l.steps, 0, output_layer.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, self_layer.delta_gpu, 1);
+ fill_gpu(l.outputs * l.batch * l.steps, 0, input_layer.delta_gpu, 1);
+
+ if(net.train) {
+ fill_gpu(l.outputs * l.batch * l.steps, 0, l.delta_gpu, 1);
+ copy_gpu(l.outputs*l.batch, l.state_gpu, 1, l.prev_state_gpu, 1);
+ }
+
+ for (i = 0; i < l.steps; ++i) {
+ s.input_gpu = net.input_gpu;
+ forward_connected_layer_gpu(input_layer, s);
+
+ s.input_gpu = l.state_gpu;
+ forward_connected_layer_gpu(self_layer, s);
+
+ fill_gpu(l.outputs * l.batch, 0, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+ forward_connected_layer_gpu(output_layer, s);
+
+ net.input_gpu += l.inputs*l.batch;
+ increment_layer(&input_layer, 1);
+ increment_layer(&self_layer, 1);
+ increment_layer(&output_layer, 1);
+ }
+}
+
+void backward_rnn_layer_gpu(layer l, network net)
+{
+ network s = {0};
+ s.train = net.train;
+ int i;
+ layer input_layer = *(l.input_layer);
+ layer self_layer = *(l.self_layer);
+ layer output_layer = *(l.output_layer);
+ increment_layer(&input_layer, l.steps - 1);
+ increment_layer(&self_layer, l.steps - 1);
+ increment_layer(&output_layer, l.steps - 1);
+ float *last_input = input_layer.output_gpu;
+ float *last_self = self_layer.output_gpu;
+ for (i = l.steps-1; i >= 0; --i) {
+ fill_gpu(l.outputs * l.batch, 0, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, input_layer.output_gpu, 1, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, self_layer.output_gpu, 1, l.state_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+ s.delta_gpu = self_layer.delta_gpu;
+ backward_connected_layer_gpu(output_layer, s);
+
+ if(i != 0) {
+ fill_gpu(l.outputs * l.batch, 0, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, input_layer.output_gpu - l.outputs*l.batch, 1, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, self_layer.output_gpu - l.outputs*l.batch, 1, l.state_gpu, 1);
+ }else {
+ copy_gpu(l.outputs*l.batch, l.prev_state_gpu, 1, l.state_gpu, 1);
+ }
+
+ copy_gpu(l.outputs*l.batch, self_layer.delta_gpu, 1, input_layer.delta_gpu, 1);
+
+ s.input_gpu = l.state_gpu;
+        s.delta_gpu = (i > 0) ? self_layer.delta_gpu - l.outputs*l.batch : 0;
+ backward_connected_layer_gpu(self_layer, s);
+
+ s.input_gpu = net.input_gpu + i*l.inputs*l.batch;
+ if(net.delta_gpu) s.delta_gpu = net.delta_gpu + i*l.inputs*l.batch;
+ else s.delta_gpu = 0;
+ backward_connected_layer_gpu(input_layer, s);
+
+ increment_layer(&input_layer, -1);
+ increment_layer(&self_layer, -1);
+ increment_layer(&output_layer, -1);
+ }
+ fill_gpu(l.outputs * l.batch, 0, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, last_input, 1, l.state_gpu, 1);
+ axpy_gpu(l.outputs * l.batch, 1, last_self, 1, l.state_gpu, 1);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.h
new file mode 100644
index 00000000000..270a63ffafc
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/rnn_layer.h
@@ -0,0 +1,25 @@
+
+#ifndef RNN_LAYER_H
+#define RNN_LAYER_H
+
+#include "activations.h"
+#include "layer.h"
+#include "network.h"
+#define USET
+
+layer make_rnn_layer(int batch, int inputs, int outputs, int steps, ACTIVATION activation, int batch_normalize, int adam);
+
+void forward_rnn_layer(layer l, network net);
+void backward_rnn_layer(layer l, network net);
+void update_rnn_layer(layer l, update_args a);
+
+#ifdef GPU
+void forward_rnn_layer_gpu(layer l, network net);
+void backward_rnn_layer_gpu(layer l, network net);
+void update_rnn_layer_gpu(layer l, update_args a);
+void push_rnn_layer(layer l);
+void pull_rnn_layer(layer l);
+#endif
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.c
new file mode 100644
index 00000000000..a8970a46001
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.c
@@ -0,0 +1,134 @@
+#include "route_layer.h"
+#include "cuda.h"
+#include "blas.h"
+
+#include <stdio.h>
+
+route_layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
+{
+ fprintf(stderr,"route ");
+ route_layer l = {0};
+ l.type = ROUTE;
+ l.batch = batch;
+ l.n = n;
+ l.input_layers = input_layers;
+ l.input_sizes = input_sizes;
+ int i;
+ int outputs = 0;
+ for(i = 0; i < n; ++i){
+ fprintf(stderr," %d", input_layers[i]);
+ outputs += input_sizes[i];
+ }
+ fprintf(stderr, "\n");
+ l.outputs = outputs;
+ l.inputs = outputs;
+ l.delta = calloc(outputs*batch, sizeof(float));
+    l.output = calloc(outputs*batch, sizeof(float));
+
+ l.forward = forward_route_layer;
+ l.backward = backward_route_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_route_layer_gpu;
+ l.backward_gpu = backward_route_layer_gpu;
+
+ l.delta_gpu = cuda_make_array(l.delta, outputs*batch);
+ l.output_gpu = cuda_make_array(l.output, outputs*batch);
+ #endif
+ return l;
+}
+
+void resize_route_layer(route_layer *l, network *net)
+{
+ int i;
+ layer first = net->layers[l->input_layers[0]];
+ l->out_w = first.out_w;
+ l->out_h = first.out_h;
+ l->out_c = first.out_c;
+ l->outputs = first.outputs;
+ l->input_sizes[0] = first.outputs;
+ for(i = 1; i < l->n; ++i){
+ int index = l->input_layers[i];
+ layer next = net->layers[index];
+ l->outputs += next.outputs;
+ l->input_sizes[i] = next.outputs;
+ if(next.out_w == first.out_w && next.out_h == first.out_h){
+ l->out_c += next.out_c;
+ }else{
+ printf("%d %d, %d %d\n", next.out_w, next.out_h, first.out_w, first.out_h);
+ l->out_h = l->out_w = l->out_c = 0;
+ }
+ }
+ l->inputs = l->outputs;
+ l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float));
+ l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->output_gpu);
+ cuda_free(l->delta_gpu);
+ l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
+ l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
+#endif
+
+}
+
+void forward_route_layer(const route_layer l, network net)
+{
+ int i, j;
+ int offset = 0;
+ for(i = 0; i < l.n; ++i){
+ int index = l.input_layers[i];
+ float *input = net.layers[index].output;
+ int input_size = l.input_sizes[i];
+ for(j = 0; j < l.batch; ++j){
+ copy_cpu(input_size, input + j*input_size, 1, l.output + offset + j*l.outputs, 1);
+ }
+ offset += input_size;
+ }
+}
+
+void backward_route_layer(const route_layer l, network net)
+{
+ int i, j;
+ int offset = 0;
+ for(i = 0; i < l.n; ++i){
+ int index = l.input_layers[i];
+ float *delta = net.layers[index].delta;
+ int input_size = l.input_sizes[i];
+ for(j = 0; j < l.batch; ++j){
+ axpy_cpu(input_size, 1, l.delta + offset + j*l.outputs, 1, delta + j*input_size, 1);
+ }
+ offset += input_size;
+ }
+}
+
+#ifdef GPU
+void forward_route_layer_gpu(const route_layer l, network net)
+{
+ int i, j;
+ int offset = 0;
+ for(i = 0; i < l.n; ++i){
+ int index = l.input_layers[i];
+ float *input = net.layers[index].output_gpu;
+ int input_size = l.input_sizes[i];
+ for(j = 0; j < l.batch; ++j){
+ copy_gpu(input_size, input + j*input_size, 1, l.output_gpu + offset + j*l.outputs, 1);
+ }
+ offset += input_size;
+ }
+}
+
+void backward_route_layer_gpu(const route_layer l, network net)
+{
+ int i, j;
+ int offset = 0;
+ for(i = 0; i < l.n; ++i){
+ int index = l.input_layers[i];
+ float *delta = net.layers[index].delta_gpu;
+ int input_size = l.input_sizes[i];
+ for(j = 0; j < l.batch; ++j){
+ axpy_gpu(input_size, 1, l.delta_gpu + offset + j*l.outputs, 1, delta + j*input_size, 1);
+ }
+ offset += input_size;
+ }
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.h
new file mode 100644
index 00000000000..1d40330ff30
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/route_layer.h
@@ -0,0 +1,18 @@
+#ifndef ROUTE_LAYER_H
+#define ROUTE_LAYER_H
+#include "network.h"
+#include "layer.h"
+
+typedef layer route_layer;
+
+route_layer make_route_layer(int batch, int n, int *input_layers, int *input_size);
+void forward_route_layer(const route_layer l, network net);
+void backward_route_layer(const route_layer l, network net);
+void resize_route_layer(route_layer *l, network *net);
+
+#ifdef GPU
+void forward_route_layer_gpu(const route_layer l, network net);
+void backward_route_layer_gpu(const route_layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.c
new file mode 100644
index 00000000000..49d17f56f66
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.c
@@ -0,0 +1,90 @@
+#include "shortcut_layer.h"
+#include "cuda.h"
+#include "blas.h"
+#include "activations.h"
+
+#include <stdio.h>
+#include <assert.h>
+
+layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
+{
+ fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
+ layer l = {0};
+ l.type = SHORTCUT;
+ l.batch = batch;
+ l.w = w2;
+ l.h = h2;
+ l.c = c2;
+ l.out_w = w;
+ l.out_h = h;
+ l.out_c = c;
+ l.outputs = w*h*c;
+ l.inputs = l.outputs;
+
+ l.index = index;
+
+ l.delta = calloc(l.outputs*batch, sizeof(float));
+    l.output = calloc(l.outputs*batch, sizeof(float));
+
+ l.forward = forward_shortcut_layer;
+ l.backward = backward_shortcut_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_shortcut_layer_gpu;
+ l.backward_gpu = backward_shortcut_layer_gpu;
+
+ l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
+ l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
+ #endif
+ return l;
+}
+
+void resize_shortcut_layer(layer *l, int w, int h)
+{
+ assert(l->w == l->out_w);
+ assert(l->h == l->out_h);
+ l->w = l->out_w = w;
+ l->h = l->out_h = h;
+ l->outputs = w*h*l->out_c;
+ l->inputs = l->outputs;
+ l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float));
+ l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->output_gpu);
+ cuda_free(l->delta_gpu);
+ l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
+ l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
+#endif
+
+}
+
+
+void forward_shortcut_layer(const layer l, network net)
+{
+ copy_cpu(l.outputs*l.batch, net.input, 1, l.output, 1);
+ shortcut_cpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output);
+ activate_array(l.output, l.outputs*l.batch, l.activation);
+}
+
+void backward_shortcut_layer(const layer l, network net)
+{
+ gradient_array(l.output, l.outputs*l.batch, l.activation, l.delta);
+ axpy_cpu(l.outputs*l.batch, l.alpha, l.delta, 1, net.delta, 1);
+ shortcut_cpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta);
+}
+
+#ifdef GPU
+void forward_shortcut_layer_gpu(const layer l, network net)
+{
+ copy_gpu(l.outputs*l.batch, net.input_gpu, 1, l.output_gpu, 1);
+ shortcut_gpu(l.batch, l.w, l.h, l.c, net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.alpha, l.beta, l.output_gpu);
+ activate_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation);
+}
+
+void backward_shortcut_layer_gpu(const layer l, network net)
+{
+ gradient_array_gpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);
+ axpy_gpu(l.outputs*l.batch, l.alpha, l.delta_gpu, 1, net.delta_gpu, 1);
+ shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, 1, l.beta, net.layers[l.index].delta_gpu);
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.h
new file mode 100644
index 00000000000..5f684fc1ead
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/shortcut_layer.h
@@ -0,0 +1,17 @@
+#ifndef SHORTCUT_LAYER_H
+#define SHORTCUT_LAYER_H
+
+#include "layer.h"
+#include "network.h"
+
+layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2);
+void forward_shortcut_layer(const layer l, network net);
+void backward_shortcut_layer(const layer l, network net);
+void resize_shortcut_layer(layer *l, int w, int h);
+
+#ifdef GPU
+void forward_shortcut_layer_gpu(const layer l, network net);
+void backward_shortcut_layer_gpu(const layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.c
new file mode 100644
index 00000000000..afcc6342483
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.c
@@ -0,0 +1,107 @@
+#include "softmax_layer.h"
+#include "blas.h"
+#include "cuda.h"
+
+#include <float.h>
+#include <math.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+softmax_layer make_softmax_layer(int batch, int inputs, int groups)
+{
+ assert(inputs%groups == 0);
+ fprintf(stderr, "softmax %4d\n", inputs);
+ softmax_layer l = {0};
+ l.type = SOFTMAX;
+ l.batch = batch;
+ l.groups = groups;
+ l.inputs = inputs;
+ l.outputs = inputs;
+ l.loss = calloc(inputs*batch, sizeof(float));
+ l.output = calloc(inputs*batch, sizeof(float));
+ l.delta = calloc(inputs*batch, sizeof(float));
+ l.cost = calloc(1, sizeof(float));
+
+ l.forward = forward_softmax_layer;
+ l.backward = backward_softmax_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_softmax_layer_gpu;
+ l.backward_gpu = backward_softmax_layer_gpu;
+
+ l.output_gpu = cuda_make_array(l.output, inputs*batch);
+ l.loss_gpu = cuda_make_array(l.loss, inputs*batch);
+ l.delta_gpu = cuda_make_array(l.delta, inputs*batch);
+ #endif
+ return l;
+}
+
+void forward_softmax_layer(const softmax_layer l, network net)
+{
+ if(l.softmax_tree){
+ int i;
+ int count = 0;
+ for (i = 0; i < l.softmax_tree->groups; ++i) {
+ int group_size = l.softmax_tree->group_size[i];
+ softmax_cpu(net.input + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + count);
+ count += group_size;
+ }
+ } else {
+ softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output);
+ }
+
+ if(net.truth){
+ softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss);
+ l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
+ }
+}
+
+void backward_softmax_layer(const softmax_layer l, network net)
+{
+ axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1);
+}
+
+#ifdef GPU
+
+void pull_softmax_layer_output(const softmax_layer layer)
+{
+ cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch);
+}
+
+void forward_softmax_layer_gpu(const softmax_layer l, network net)
+{
+ if(l.softmax_tree){
+ softmax_tree(net.input_gpu, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree);
+ /*
+ int i;
+ int count = 0;
+ for (i = 0; i < l.softmax_tree->groups; ++i) {
+ int group_size = l.softmax_tree->group_size[i];
+ softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count);
+ count += group_size;
+ }
+ */
+ } else {
+ if(l.spatial){
+ softmax_gpu(net.input_gpu, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu);
+ }else{
+ softmax_gpu(net.input_gpu, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu);
+ }
+ }
+ if(net.truth){
+ softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth_gpu, l.delta_gpu, l.loss_gpu);
+ if(l.softmax_tree){
+ mask_gpu(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth_gpu, 0);
+ mask_gpu(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth_gpu, 0);
+ }
+ cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs);
+ l.cost[0] = sum_array(l.loss, l.batch*l.inputs);
+ }
+}
+
+void backward_softmax_layer_gpu(const softmax_layer layer, network net)
+{
+ axpy_gpu(layer.batch*layer.inputs, 1, layer.delta_gpu, 1, net.delta_gpu, 1);
+}
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.h
new file mode 100644
index 00000000000..2e3ffe01a6c
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/softmax_layer.h
@@ -0,0 +1,19 @@
+#ifndef SOFTMAX_LAYER_H
+#define SOFTMAX_LAYER_H
+#include "layer.h"
+#include "network.h"
+
+typedef layer softmax_layer;
+
+void softmax_array(float *input, int n, float temp, float *output);
+softmax_layer make_softmax_layer(int batch, int inputs, int groups);
+void forward_softmax_layer(const softmax_layer l, network net);
+void backward_softmax_layer(const softmax_layer l, network net);
+
+#ifdef GPU
+void pull_softmax_layer_output(const softmax_layer l);
+void forward_softmax_layer_gpu(const softmax_layer l, network net);
+void backward_softmax_layer_gpu(const softmax_layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image.h
new file mode 100644
index 00000000000..d9c21bc813f
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image.h
@@ -0,0 +1,7462 @@
+/* stb_image - v2.19 - public domain image loader - http://nothings.org/stb
+ no warranty implied; use at your own risk
+
+ Do this:
+ #define STB_IMAGE_IMPLEMENTATION
+ before you include this file in *one* C or C++ file to create the implementation.
+
+ // i.e. it should look like this:
+ #include ...
+ #include ...
+ #include ...
+ #define STB_IMAGE_IMPLEMENTATION
+ #include "stb_image.h"
+
+ You can #define STBI_ASSERT(x) before the #include to avoid using assert.h.
+ And #define STBI_MALLOC, STBI_REALLOC, and STBI_FREE to avoid using malloc,realloc,free
+
+
+ QUICK NOTES:
+ Primarily of interest to game developers and other people who can
+ avoid problematic images and only need the trivial interface
+
+ JPEG baseline & progressive (12 bpc/arithmetic not supported, same as stock IJG lib)
+ PNG 1/2/4/8/16-bit-per-channel
+
+ TGA (not sure what subset, if a subset)
+ BMP non-1bpp, non-RLE
+ PSD (composited view only, no extra channels, 8/16 bit-per-channel)
+
+ GIF (*comp always reports as 4-channel)
+ HDR (radiance rgbE format)
+ PIC (Softimage PIC)
+ PNM (PPM and PGM binary only)
+
+ Animated GIF still needs a proper API, but here's one way to do it:
+ http://gist.github.com/urraka/685d9a6340b26b830d49
+
+ - decode from memory or through FILE (define STBI_NO_STDIO to remove code)
+ - decode from arbitrary I/O callbacks
+ - SIMD acceleration on x86/x64 (SSE2) and ARM (NEON)
+
+ Full documentation under "DOCUMENTATION" below.
+
+
+LICENSE
+
+ See end of file for license information.
+
+RECENT REVISION HISTORY:
+
+ 2.19 (2018-02-11) fix warning
+ 2.18 (2018-01-30) fix warnings
+ 2.17 (2018-01-29) bugfix, 1-bit BMP, 16-bitness query, fix warnings
+ 2.16 (2017-07-23) all functions have 16-bit variants; optimizations; bugfixes
+ 2.15 (2017-03-18) fix png-1,2,4; all Imagenet JPGs; no runtime SSE detection on GCC
+ 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
+ 2.13 (2016-12-04) experimental 16-bit API, only for PNG so far; fixes
+ 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
+ 2.11 (2016-04-02) 16-bit PNGS; enable SSE2 in non-gcc x64
+ RGB-format JPEG; remove white matting in PSD;
+ allocate large structures on the stack;
+ correct channel count for PNG & BMP
+ 2.10 (2016-01-22) avoid warning introduced in 2.09
+ 2.09 (2016-01-16) 16-bit TGA; comments in PNM files; STBI_REALLOC_SIZED
+
+ See end of file for full revision history.
+
+
+ ============================ Contributors =========================
+
+ Image formats Extensions, features
+ Sean Barrett (jpeg, png, bmp) Jetro Lauha (stbi_info)
+ Nicolas Schulz (hdr, psd) Martin "SpartanJ" Golini (stbi_info)
+ Jonathan Dummer (tga) James "moose2000" Brown (iPhone PNG)
+ Jean-Marc Lienher (gif) Ben "Disch" Wenger (io callbacks)
+ Tom Seddon (pic) Omar Cornut (1/2/4-bit PNG)
+ Thatcher Ulrich (psd) Nicolas Guillemot (vertical flip)
+ Ken Miller (pgm, ppm) Richard Mitton (16-bit PSD)
+ github:urraka (animated gif) Junggon Kim (PNM comments)
+ Christopher Forseth (animated gif) Daniel Gibson (16-bit TGA)
+ socks-the-fox (16-bit PNG)
+ Jeremy Sawicki (handle all ImageNet JPGs)
+ Optimizations & bugfixes Mikhail Morozov (1-bit BMP)
+ Fabian "ryg" Giesen Anael Seghezzi (is-16-bit query)
+ Arseny Kapoulkine
+ John-Mark Allen
+
+ Bug & warning fixes
+ Marc LeBlanc David Woo Guillaume George Martins Mozeiko
+ Christpher Lloyd Jerry Jansson Joseph Thomson Phil Jordan
+ Dave Moore Roy Eltham Hayaki Saito Nathan Reed
+ Won Chun Luke Graham Johan Duparc Nick Verigakis
+ the Horde3D community Thomas Ruf Ronny Chevalier github:rlyeh
+ Janez Zemva John Bartholomew Michal Cichon github:romigrou
+ Jonathan Blow Ken Hamada Tero Hanninen github:svdijk
+ Laurent Gomila Cort Stratton Sergio Gonzalez github:snagar
+ Aruelien Pocheville Thibault Reuille Cass Everitt github:Zelex
+ Ryamond Barbiero Paul Du Bois Engin Manap github:grim210
+ Aldo Culquicondor Philipp Wiesemann Dale Weiler github:sammyhw
+ Oriol Ferrer Mesia Josh Tobin Matthew Gregan github:phprus
+ Julian Raschke Gregory Mullen Baldur Karlsson github:poppolopoppo
+ Christian Floisand Kevin Schmidt github:darealshinji
+ Blazej Dariusz Roszkowski github:Michaelangel007
+*/
+
+#ifndef STBI_INCLUDE_STB_IMAGE_H
+#define STBI_INCLUDE_STB_IMAGE_H
+
+// DOCUMENTATION
+//
+// Limitations:
+// - no 12-bit-per-channel JPEG
+// - no JPEGs with arithmetic coding
+// - GIF always returns *comp=4
+//
+// Basic usage (see HDR discussion below for HDR usage):
+// int x,y,n;
+// unsigned char *data = stbi_load(filename, &x, &y, &n, 0);
+// // ... process data if not NULL ...
+// // ... x = width, y = height, n = # 8-bit components per pixel ...
+// // ... replace '0' with '1'..'4' to force that many components per pixel
+// // ... but 'n' will always be the number that it would have been if you said 0
+// stbi_image_free(data)
+//
+// Standard parameters:
+// int *x -- outputs image width in pixels
+// int *y -- outputs image height in pixels
+// int *channels_in_file -- outputs # of image components in image file
+// int desired_channels -- if non-zero, # of image components requested in result
+//
+// The return value from an image loader is an 'unsigned char *' which points
+// to the pixel data, or NULL on an allocation failure or if the image is
+// corrupt or invalid. The pixel data consists of *y scanlines of *x pixels,
+// with each pixel consisting of N interleaved 8-bit components; the first
+// pixel pointed to is top-left-most in the image. There is no padding between
+// image scanlines or between pixels, regardless of format. The number of
+// components N is 'desired_channels' if desired_channels is non-zero, or
+// *channels_in_file otherwise. If desired_channels is non-zero,
+// *channels_in_file has the number of components that _would_ have been
+// output otherwise. E.g. if you set desired_channels to 4, you will always
+// get RGBA output, but you can check *channels_in_file to see if it's trivially
+// opaque because e.g. there were only 3 channels in the source image.
+//
+// An output image with N components has the following components interleaved
+// in this order in each pixel:
+//
+// N=#comp components
+// 1 grey
+// 2 grey, alpha
+// 3 red, green, blue
+// 4 red, green, blue, alpha
+//
+// If image loading fails for any reason, the return value will be NULL,
+// and *x, *y, *channels_in_file will be unchanged. The function
+// stbi_failure_reason() can be queried for an extremely brief, end-user
+// unfriendly explanation of why the load failed. Define STBI_NO_FAILURE_STRINGS
+// to avoid compiling these strings at all, and STBI_FAILURE_USERMSG to get slightly
+// more user-friendly ones.
+//
+// Paletted PNG, BMP, GIF, and PIC images are automatically depalettized.
+//
+// ===========================================================================
+//
+// Philosophy
+//
+// stb libraries are designed with the following priorities:
+//
+// 1. easy to use
+// 2. easy to maintain
+// 3. good performance
+//
+// Sometimes I let "good performance" creep up in priority over "easy to maintain",
+// and for best performance I may provide less-easy-to-use APIs that give higher
+// performance, in addition to the easy to use ones. Nevertheless, it's important
+// to keep in mind that from the standpoint of you, a client of this library,
+// all you care about is #1 and #3, and stb libraries DO NOT emphasize #3 above all.
+//
+// Some secondary priorities arise directly from the first two, some of which
+// make more explicit reasons why performance can't be emphasized.
+//
+// - Portable ("ease of use")
+// - Small source code footprint ("easy to maintain")
+// - No dependencies ("ease of use")
+//
+// ===========================================================================
+//
+// I/O callbacks
+//
+// I/O callbacks allow you to read from arbitrary sources, like packaged
+// files or some other source. Data read from callbacks are processed
+// through a small internal buffer (currently 128 bytes) to try to reduce
+// overhead.
+//
+// The three functions you must define are "read" (reads some bytes of data),
+// "skip" (skips some bytes of data), "eof" (reports if the stream is at the end).
+//
+// ===========================================================================
+//
+// SIMD support
+//
+// The JPEG decoder will try to automatically use SIMD kernels on x86 when
+// supported by the compiler. For ARM Neon support, you must explicitly
+// request it.
+//
+// (The old do-it-yourself SIMD API is no longer supported in the current
+// code.)
+//
+// On x86, SSE2 will automatically be used when available based on a run-time
+// test; if not, the generic C versions are used as a fall-back. On ARM targets,
+// the typical path is to have separate builds for NEON and non-NEON devices
+// (at least this is true for iOS and Android). Therefore, the NEON support is
+// toggled by a build flag: define STBI_NEON to get NEON loops.
+//
+// If for some reason you do not want to use any of the SIMD code, or if
+// you have issues compiling it, you can disable it entirely by
+// defining STBI_NO_SIMD.
+//
+// ===========================================================================
+//
+// HDR image support (disable by defining STBI_NO_HDR)
+//
+// stb_image now supports loading HDR images in general, and currently
+// the Radiance .HDR file format, although the support is provided
+// generically. You can still load any file through the existing interface;
+// if you attempt to load an HDR file, it will be automatically remapped to
+// LDR, assuming gamma 2.2 and an arbitrary scale factor defaulting to 1;
+// both of these constants can be reconfigured through this interface:
+//
+// stbi_hdr_to_ldr_gamma(2.2f);
+// stbi_hdr_to_ldr_scale(1.0f);
+//
+// (note, do not use _inverse_ constants; stbi_image will invert them
+// appropriately).
+//
+// Additionally, there is a new, parallel interface for loading files as
+// (linear) floats to preserve the full dynamic range:
+//
+// float *data = stbi_loadf(filename, &x, &y, &n, 0);
+//
+// If you load LDR images through this interface, those images will
+// be promoted to floating point values, run through the inverse of
+// constants corresponding to the above:
+//
+// stbi_ldr_to_hdr_scale(1.0f);
+// stbi_ldr_to_hdr_gamma(2.2f);
+//
+// Finally, given a filename (or an open file or memory block--see header
+// file for details) containing image data, you can query for the "most
+// appropriate" interface to use (that is, whether the image is HDR or
+// not), using:
+//
+// stbi_is_hdr(char *filename);
+//
+// ===========================================================================
+//
+// iPhone PNG support:
+//
+// By default we convert iphone-formatted PNGs back to RGB, even though
+// they are internally encoded differently. You can disable this conversion
+// by calling stbi_convert_iphone_png_to_rgb(0), in which case
+// you will always just get the native iphone "format" through (which
+// is BGR stored in RGB).
+//
+// Call stbi_set_unpremultiply_on_load(1) as well to force a divide per
+// pixel to remove any premultiplied alpha *only* if the image file explicitly
+// says there's premultiplied data (currently only happens in iPhone images,
+// and only if iPhone convert-to-rgb processing is on).
+//
+// ===========================================================================
+//
+// ADDITIONAL CONFIGURATION
+//
+// - You can suppress implementation of any of the decoders to reduce
+// your code footprint by #defining one or more of the following
+// symbols before creating the implementation.
+//
+// STBI_NO_JPEG
+// STBI_NO_PNG
+// STBI_NO_BMP
+// STBI_NO_PSD
+// STBI_NO_TGA
+// STBI_NO_GIF
+// STBI_NO_HDR
+// STBI_NO_PIC
+// STBI_NO_PNM (.ppm and .pgm)
+//
+// - You can request *only* certain decoders and suppress all other ones
+// (this will be more forward-compatible, as addition of new decoders
+// doesn't require you to disable them explicitly):
+//
+// STBI_ONLY_JPEG
+// STBI_ONLY_PNG
+// STBI_ONLY_BMP
+// STBI_ONLY_PSD
+// STBI_ONLY_TGA
+// STBI_ONLY_GIF
+// STBI_ONLY_HDR
+// STBI_ONLY_PIC
+// STBI_ONLY_PNM (.ppm and .pgm)
+//
+// - If you use STBI_NO_PNG (or _ONLY_ without PNG), and you still
+// want the zlib decoder to be available, #define STBI_SUPPORT_ZLIB
+//
+
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif // STBI_NO_STDIO
+
+#define STBI_VERSION 1
+
+enum
+{
+ STBI_default = 0, // only used for desired_channels
+
+ STBI_grey = 1,
+ STBI_grey_alpha = 2,
+ STBI_rgb = 3,
+ STBI_rgb_alpha = 4
+};
+
+typedef unsigned char stbi_uc;
+typedef unsigned short stbi_us;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef STB_IMAGE_STATIC
+#define STBIDEF static
+#else
+#define STBIDEF extern
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PRIMARY API - works on images of any type
+//
+
+//
+// load image by filename, open file, or memory buffer
+//
+
+typedef struct
+{
+ int (*read) (void *user,char *data,int size); // fill 'data' with 'size' bytes. return number of bytes actually read
+ void (*skip) (void *user,int n); // skip the next 'n' bytes, or 'unget' the last -n bytes if negative
+ int (*eof) (void *user); // returns nonzero if we are at end of file/data
+} stbi_io_callbacks;
+
+////////////////////////////////////
+//
+// 8-bits-per-channel interface
+//
+
+STBIDEF stbi_uc *stbi_load_from_memory (stbi_uc const *buffer, int len , int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk , void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+#endif
+
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_uc *stbi_load (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_uc *stbi_load_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+// for stbi_load_from_file, file pointer is left pointing immediately after image
+#endif
+
+////////////////////////////////////
+//
+// 16-bits-per-channel interface
+//
+
+STBIDEF stbi_us *stbi_load_16_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+#ifndef STBI_NO_STDIO
+STBIDEF stbi_us *stbi_load_16 (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+STBIDEF stbi_us *stbi_load_from_file_16(FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+#endif
+
+////////////////////////////////////
+//
+// float-per-channel interface
+//
+#ifndef STBI_NO_LINEAR
+ STBIDEF float *stbi_loadf_from_memory (stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float *stbi_loadf_from_callbacks (stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels);
+
+ #ifndef STBI_NO_STDIO
+ STBIDEF float *stbi_loadf (char const *filename, int *x, int *y, int *channels_in_file, int desired_channels);
+ STBIDEF float *stbi_loadf_from_file (FILE *f, int *x, int *y, int *channels_in_file, int desired_channels);
+ #endif
+#endif
+
+#ifndef STBI_NO_HDR
+ STBIDEF void stbi_hdr_to_ldr_gamma(float gamma);
+ STBIDEF void stbi_hdr_to_ldr_scale(float scale);
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_LINEAR
+ STBIDEF void stbi_ldr_to_hdr_gamma(float gamma);
+ STBIDEF void stbi_ldr_to_hdr_scale(float scale);
+#endif // STBI_NO_LINEAR
+
+// stbi_is_hdr is always defined, but always returns false if STBI_NO_HDR
+STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len);
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_is_hdr (char const *filename);
+STBIDEF int stbi_is_hdr_from_file(FILE *f);
+#endif // STBI_NO_STDIO
+
+
+// get a VERY brief reason for failure
+// NOT THREADSAFE
+STBIDEF const char *stbi_failure_reason (void);
+
+// free the loaded image -- this is just free()
+STBIDEF void stbi_image_free (void *retval_from_stbi_load);
+
+// get image dimensions & components without fully decoding
+STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len);
+STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *clbk, void *user);
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_info (char const *filename, int *x, int *y, int *comp);
+STBIDEF int stbi_info_from_file (FILE *f, int *x, int *y, int *comp);
+STBIDEF int stbi_is_16_bit (char const *filename);
+STBIDEF int stbi_is_16_bit_from_file(FILE *f);
+#endif
+
+
+
+// for image formats that explicitly notate that they have premultiplied alpha,
+// we just return the colors as stored in the file. set this flag to force
+// unpremultiplication. results are undefined if the unpremultiply overflows.
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply);
+
+// indicate whether we should process iphone images back to canonical format,
+// or just pass them through "as-is"
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert);
+
+// flip the image vertically, so the first pixel in the output array is the bottom left
+STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip);
+
+// ZLIB client - used by PNG, available for other purposes
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen);
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header);
+STBIDEF char *stbi_zlib_decode_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+STBIDEF char *stbi_zlib_decode_noheader_malloc(const char *buffer, int len, int *outlen);
+STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+//
+//
+//// end header file /////////////////////////////////////////////////////
+#endif // STBI_INCLUDE_STB_IMAGE_H
+
+#ifdef STB_IMAGE_IMPLEMENTATION
+
+#if defined(STBI_ONLY_JPEG) || defined(STBI_ONLY_PNG) || defined(STBI_ONLY_BMP) \
+ || defined(STBI_ONLY_TGA) || defined(STBI_ONLY_GIF) || defined(STBI_ONLY_PSD) \
+ || defined(STBI_ONLY_HDR) || defined(STBI_ONLY_PIC) || defined(STBI_ONLY_PNM) \
+ || defined(STBI_ONLY_ZLIB)
+ #ifndef STBI_ONLY_JPEG
+ #define STBI_NO_JPEG
+ #endif
+ #ifndef STBI_ONLY_PNG
+ #define STBI_NO_PNG
+ #endif
+ #ifndef STBI_ONLY_BMP
+ #define STBI_NO_BMP
+ #endif
+ #ifndef STBI_ONLY_PSD
+ #define STBI_NO_PSD
+ #endif
+ #ifndef STBI_ONLY_TGA
+ #define STBI_NO_TGA
+ #endif
+ #ifndef STBI_ONLY_GIF
+ #define STBI_NO_GIF
+ #endif
+ #ifndef STBI_ONLY_HDR
+ #define STBI_NO_HDR
+ #endif
+ #ifndef STBI_ONLY_PIC
+ #define STBI_NO_PIC
+ #endif
+ #ifndef STBI_ONLY_PNM
+ #define STBI_NO_PNM
+ #endif
+#endif
+
+#if defined(STBI_NO_PNG) && !defined(STBI_SUPPORT_ZLIB) && !defined(STBI_NO_ZLIB)
+#define STBI_NO_ZLIB
+#endif
+
+
+#include <stdarg.h>
+#include <stddef.h> // ptrdiff_t on osx
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+#include <math.h>  // ldexp, pow
+#endif
+
+#ifndef STBI_NO_STDIO
+#include <stdio.h>
+#endif
+
+#ifndef STBI_ASSERT
+#include <assert.h>
+#define STBI_ASSERT(x) assert(x)
+#endif
+
+
+#ifndef _MSC_VER
+ #ifdef __cplusplus
+ #define stbi_inline inline
+ #else
+ #define stbi_inline
+ #endif
+#else
+ #define stbi_inline __forceinline
+#endif
+
+
+#ifdef _MSC_VER
+typedef unsigned short stbi__uint16;
+typedef signed short stbi__int16;
+typedef unsigned int stbi__uint32;
+typedef signed int stbi__int32;
+#else
+#include <stdint.h>
+typedef uint16_t stbi__uint16;
+typedef int16_t stbi__int16;
+typedef uint32_t stbi__uint32;
+typedef int32_t stbi__int32;
+#endif
+
+// should produce compiler error if size is wrong
+typedef unsigned char validate_uint32[sizeof(stbi__uint32)==4 ? 1 : -1];
+
+#ifdef _MSC_VER
+#define STBI_NOTUSED(v) (void)(v)
+#else
+#define STBI_NOTUSED(v) (void)sizeof(v)
+#endif
+
+#ifdef _MSC_VER
+#define STBI_HAS_LROTL
+#endif
+
+#ifdef STBI_HAS_LROTL
+ #define stbi_lrot(x,y) _lrotl(x,y)
+#else
+ #define stbi_lrot(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
+#endif
+
+#if defined(STBI_MALLOC) && defined(STBI_FREE) && (defined(STBI_REALLOC) || defined(STBI_REALLOC_SIZED))
+// ok
+#elif !defined(STBI_MALLOC) && !defined(STBI_FREE) && !defined(STBI_REALLOC) && !defined(STBI_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBI_MALLOC, STBI_FREE, and STBI_REALLOC (or STBI_REALLOC_SIZED)."
+#endif
+
+#ifndef STBI_MALLOC
+#define STBI_MALLOC(sz) malloc(sz)
+#define STBI_REALLOC(p,newsz) realloc(p,newsz)
+#define STBI_FREE(p) free(p)
+#endif
+
+#ifndef STBI_REALLOC_SIZED
+#define STBI_REALLOC_SIZED(p,oldsz,newsz) STBI_REALLOC(p,newsz)
+#endif
+
+// x86/x64 detection
+#if defined(__x86_64__) || defined(_M_X64)
+#define STBI__X64_TARGET
+#elif defined(__i386) || defined(_M_IX86)
+#define STBI__X86_TARGET
+#endif
+
+#if defined(__GNUC__) && defined(STBI__X86_TARGET) && !defined(__SSE2__) && !defined(STBI_NO_SIMD)
+// gcc doesn't support sse2 intrinsics unless you compile with -msse2,
+// which in turn means it gets to use SSE2 everywhere. This is unfortunate,
+// but previous attempts to provide the SSE2 functions with runtime
+// detection caused numerous issues. The way architecture extensions are
+// exposed in GCC/Clang is, sadly, not really suited for one-file libs.
+// New behavior: if compiled with -msse2, we use SSE2 without any
+// detection; if not, we don't use it at all.
+#define STBI_NO_SIMD
+#endif
+
+#if defined(__MINGW32__) && defined(STBI__X86_TARGET) && !defined(STBI_MINGW_ENABLE_SSE2) && !defined(STBI_NO_SIMD)
+// Note that __MINGW32__ doesn't actually mean 32-bit, so we have to avoid STBI__X64_TARGET
+//
+// 32-bit MinGW wants ESP to be 16-byte aligned, but this is not in the
+// Windows ABI and VC++ as well as Windows DLLs don't maintain that invariant.
+// As a result, enabling SSE2 on 32-bit MinGW is dangerous when not
+// simultaneously enabling "-mstackrealign".
+//
+// See https://github.com/nothings/stb/issues/81 for more information.
+//
+// So default to no SSE2 on 32-bit MinGW. If you've read this far and added
+// -mstackrealign to your build settings, feel free to #define STBI_MINGW_ENABLE_SSE2.
+#define STBI_NO_SIMD
+#endif
+
+#if !defined(STBI_NO_SIMD) && (defined(STBI__X86_TARGET) || defined(STBI__X64_TARGET))
+#define STBI_SSE2
+#include <emmintrin.h>
+
+#ifdef _MSC_VER
+
+#if _MSC_VER >= 1400 // not VC6
+#include <intrin.h> // __cpuid
+static int stbi__cpuid3(void)
+{
+ int info[4];
+ __cpuid(info,1);
+ return info[3];
+}
+#else
+static int stbi__cpuid3(void)
+{
+ int res;
+ __asm {
+ mov eax,1
+ cpuid
+ mov res,edx
+ }
+ return res;
+}
+#endif
+
+#define STBI_SIMD_ALIGN(type, name) __declspec(align(16)) type name
+
+static int stbi__sse2_available(void)
+{
+ int info3 = stbi__cpuid3();
+ return ((info3 >> 26) & 1) != 0;
+}
+#else // assume GCC-style if not VC++
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+
+static int stbi__sse2_available(void)
+{
+ // If we're even attempting to compile this on GCC/Clang, that means
+ // -msse2 is on, which means the compiler is allowed to use SSE2
+ // instructions at will, and so are we.
+ return 1;
+}
+#endif
+#endif
+
+// ARM NEON
+#if defined(STBI_NO_SIMD) && defined(STBI_NEON)
+#undef STBI_NEON
+#endif
+
+#ifdef STBI_NEON
+#include <arm_neon.h>
+// assume GCC or Clang on ARM targets
+#define STBI_SIMD_ALIGN(type, name) type name __attribute__((aligned(16)))
+#endif
+
+#ifndef STBI_SIMD_ALIGN
+#define STBI_SIMD_ALIGN(type, name) type name
+#endif
+
+///////////////////////////////////////////////
+//
+// stbi__context struct and start_xxx functions
+
+// stbi__context structure is our basic context used by all images, so it
+// contains all the IO context, plus some basic image information
+typedef struct
+{
+ stbi__uint32 img_x, img_y;
+ int img_n, img_out_n;
+
+ stbi_io_callbacks io;
+ void *io_user_data;
+
+ int read_from_callbacks;
+ int buflen;
+ stbi_uc buffer_start[128];
+
+ stbi_uc *img_buffer, *img_buffer_end;
+ stbi_uc *img_buffer_original, *img_buffer_original_end;
+} stbi__context;
+
+
+static void stbi__refill_buffer(stbi__context *s);
+
+// initialize a memory-decode context
+static void stbi__start_mem(stbi__context *s, stbi_uc const *buffer, int len)
+{
+ s->io.read = NULL;
+ s->read_from_callbacks = 0;
+ s->img_buffer = s->img_buffer_original = (stbi_uc *) buffer;
+ s->img_buffer_end = s->img_buffer_original_end = (stbi_uc *) buffer+len;
+}
+
+// initialize a callback-based context
+static void stbi__start_callbacks(stbi__context *s, stbi_io_callbacks *c, void *user)
+{
+ s->io = *c;
+ s->io_user_data = user;
+ s->buflen = sizeof(s->buffer_start);
+ s->read_from_callbacks = 1;
+ s->img_buffer_original = s->buffer_start;
+ stbi__refill_buffer(s);
+ s->img_buffer_original_end = s->img_buffer_end;
+}
+
+#ifndef STBI_NO_STDIO
+
+static int stbi__stdio_read(void *user, char *data, int size)
+{
+ return (int) fread(data,1,size,(FILE*) user);
+}
+
+static void stbi__stdio_skip(void *user, int n)
+{
+ fseek((FILE*) user, n, SEEK_CUR);
+}
+
+static int stbi__stdio_eof(void *user)
+{
+ return feof((FILE*) user);
+}
+
+static stbi_io_callbacks stbi__stdio_callbacks =
+{
+ stbi__stdio_read,
+ stbi__stdio_skip,
+ stbi__stdio_eof,
+};
+
+static void stbi__start_file(stbi__context *s, FILE *f)
+{
+ stbi__start_callbacks(s, &stbi__stdio_callbacks, (void *) f);
+}
+
+//static void stop_file(stbi__context *s) { }
+
+#endif // !STBI_NO_STDIO
+
+static void stbi__rewind(stbi__context *s)
+{
+ // conceptually rewind SHOULD rewind to the beginning of the stream,
+ // but we just rewind to the beginning of the initial buffer, because
+ // we only use it after doing 'test', which only ever looks at at most 92 bytes
+ s->img_buffer = s->img_buffer_original;
+ s->img_buffer_end = s->img_buffer_original_end;
+}
+
+enum
+{
+ STBI_ORDER_RGB,
+ STBI_ORDER_BGR
+};
+
+typedef struct
+{
+ int bits_per_channel;
+ int num_channels;
+ int channel_order;
+} stbi__result_info;
+
+#ifndef STBI_NO_JPEG
+static int stbi__jpeg_test(stbi__context *s);
+static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PNG
+static int stbi__png_test(stbi__context *s);
+static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp);
+static int stbi__png_is16(stbi__context *s);
+#endif
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_test(stbi__context *s);
+static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_TGA
+static int stbi__tga_test(stbi__context *s);
+static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PSD
+static int stbi__psd_test(stbi__context *s);
+static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc);
+static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp);
+static int stbi__psd_is16(stbi__context *s);
+#endif
+
+#ifndef STBI_NO_HDR
+static int stbi__hdr_test(stbi__context *s);
+static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PIC
+static int stbi__pic_test(stbi__context *s);
+static void *stbi__pic_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_GIF
+static int stbi__gif_test(stbi__context *s);
+static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp);
+static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+#ifndef STBI_NO_PNM
+static int stbi__pnm_test(stbi__context *s);
+static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri);
+static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp);
+#endif
+
+// this is not threadsafe
+static const char *stbi__g_failure_reason;
+
+STBIDEF const char *stbi_failure_reason(void)
+{
+ return stbi__g_failure_reason;
+}
+
+static int stbi__err(const char *str)
+{
+ stbi__g_failure_reason = str;
+ return 0;
+}
+
+static void *stbi__malloc(size_t size)
+{
+ return STBI_MALLOC(size);
+}
+
+// stb_image uses ints pervasively, including for offset calculations.
+// therefore the largest decoded image size we can support with the
+// current code, even on 64-bit targets, is INT_MAX. this is not a
+// significant limitation for the intended use case.
+//
+// we do, however, need to make sure our size calculations don't
+// overflow. hence a few helper functions for size calculations that
+// multiply integers together, making sure that they're non-negative
+// and no overflow occurs.
+
+// return 1 if the sum is valid, 0 on overflow.
+// negative terms are considered invalid.
+static int stbi__addsizes_valid(int a, int b)
+{
+ if (b < 0) return 0;
+ // now 0 <= b <= INT_MAX, hence also
+ // 0 <= INT_MAX - b <= INT_MAX.
+ // And "a + b <= INT_MAX" (which might overflow) is the
+ // same as a <= INT_MAX - b (no overflow)
+ return a <= INT_MAX - b;
+}
+
+// returns 1 if the product is valid, 0 on overflow.
+// negative factors are considered invalid.
+static int stbi__mul2sizes_valid(int a, int b)
+{
+ if (a < 0 || b < 0) return 0;
+ if (b == 0) return 1; // mul-by-0 is always safe
+ // portable way to check for no overflows in a*b
+ return a <= INT_MAX/b;
+}
+
+// returns 1 if "a*b + add" has no negative terms/factors and doesn't overflow
+static int stbi__mad2sizes_valid(int a, int b, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__addsizes_valid(a*b, add);
+}
+
+// returns 1 if "a*b*c + add" has no negative terms/factors and doesn't overflow
+static int stbi__mad3sizes_valid(int a, int b, int c, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
+ stbi__addsizes_valid(a*b*c, add);
+}
+
+// returns 1 if "a*b*c*d + add" has no negative terms/factors and doesn't overflow
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+static int stbi__mad4sizes_valid(int a, int b, int c, int d, int add)
+{
+ return stbi__mul2sizes_valid(a, b) && stbi__mul2sizes_valid(a*b, c) &&
+ stbi__mul2sizes_valid(a*b*c, d) && stbi__addsizes_valid(a*b*c*d, add);
+}
+#endif
+
+// mallocs with size overflow checking
+static void *stbi__malloc_mad2(int a, int b, int add)
+{
+ if (!stbi__mad2sizes_valid(a, b, add)) return NULL;
+ return stbi__malloc(a*b + add);
+}
+
+static void *stbi__malloc_mad3(int a, int b, int c, int add)
+{
+ if (!stbi__mad3sizes_valid(a, b, c, add)) return NULL;
+ return stbi__malloc(a*b*c + add);
+}
+
+#if !defined(STBI_NO_LINEAR) || !defined(STBI_NO_HDR)
+static void *stbi__malloc_mad4(int a, int b, int c, int d, int add)
+{
+ if (!stbi__mad4sizes_valid(a, b, c, d, add)) return NULL;
+ return stbi__malloc(a*b*c*d + add);
+}
+#endif
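+
+// Illustrative usage sketch (not part of the decoder itself): the checked
+// allocators above are meant to be called like the image paths further down;
+// 'w', 'h' and 'channels' here are hypothetical caller-supplied dimensions.
+//
+//    unsigned char *pixels = (unsigned char *) stbi__malloc_mad3(w, h, channels, 0);
+//    if (pixels == NULL) return stbi__errpuc("outofmem", "Out of memory");
+//    // w*h*channels is only multiplied out after stbi__mad3sizes_valid() has
+//    // confirmed the product fits in an int, so the size can't silently wrap.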
+
+// stbi__err - error
+// stbi__errpf - error returning pointer to float
+// stbi__errpuc - error returning pointer to unsigned char
+
+#ifdef STBI_NO_FAILURE_STRINGS
+ #define stbi__err(x,y) 0
+#elif defined(STBI_FAILURE_USERMSG)
+ #define stbi__err(x,y) stbi__err(y)
+#else
+ #define stbi__err(x,y) stbi__err(x)
+#endif
+
+#define stbi__errpf(x,y) ((float *)(size_t) (stbi__err(x,y)?NULL:NULL))
+#define stbi__errpuc(x,y) ((unsigned char *)(size_t) (stbi__err(x,y)?NULL:NULL))
+
+STBIDEF void stbi_image_free(void *retval_from_stbi_load)
+{
+ STBI_FREE(retval_from_stbi_load);
+}
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp);
+#endif
+
+#ifndef STBI_NO_HDR
+static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp);
+#endif
+
+static int stbi__vertically_flip_on_load = 0;
+
+STBIDEF void stbi_set_flip_vertically_on_load(int flag_true_if_should_flip)
+{
+ stbi__vertically_flip_on_load = flag_true_if_should_flip;
+}
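+
+// Illustrative usage (not part of the library): renderers that want the first
+// pixel to be the bottom-left (e.g. for OpenGL texture upload) typically set
+// the flag once before loading; 'w', 'h' and 'n' are hypothetical outputs.
+//
+//    stbi_set_flip_vertically_on_load(1);             // affects all later loads
+//    unsigned char *pixels = stbi_load("texture.png", &w, &h, &n, 0);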
+
+static void *stbi__load_main(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
+{
+ memset(ri, 0, sizeof(*ri)); // make sure it's initialized if we add new fields
+ ri->bits_per_channel = 8; // default is 8 so most paths don't have to be changed
+ ri->channel_order = STBI_ORDER_RGB; // all current input & output are this, but this is here so we can add BGR order
+ ri->num_channels = 0;
+
+ #ifndef STBI_NO_JPEG
+ if (stbi__jpeg_test(s)) return stbi__jpeg_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PNG
+ if (stbi__png_test(s)) return stbi__png_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_BMP
+ if (stbi__bmp_test(s)) return stbi__bmp_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_GIF
+ if (stbi__gif_test(s)) return stbi__gif_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_test(s)) return stbi__psd_load(s,x,y,comp,req_comp, ri, bpc);
+ #endif
+ #ifndef STBI_NO_PIC
+ if (stbi__pic_test(s)) return stbi__pic_load(s,x,y,comp,req_comp, ri);
+ #endif
+ #ifndef STBI_NO_PNM
+ if (stbi__pnm_test(s)) return stbi__pnm_load(s,x,y,comp,req_comp, ri);
+ #endif
+
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_test(s)) {
+ float *hdr = stbi__hdr_load(s, x,y,comp,req_comp, ri);
+ return stbi__hdr_to_ldr(hdr, *x, *y, req_comp ? req_comp : *comp);
+ }
+ #endif
+
+ #ifndef STBI_NO_TGA
+ // test tga last because it's a crappy test!
+ if (stbi__tga_test(s))
+ return stbi__tga_load(s,x,y,comp,req_comp, ri);
+ #endif
+
+ return stbi__errpuc("unknown image type", "Image not of any known type, or corrupt");
+}
+
+static stbi_uc *stbi__convert_16_to_8(stbi__uint16 *orig, int w, int h, int channels)
+{
+ int i;
+ int img_len = w * h * channels;
+ stbi_uc *reduced;
+
+ reduced = (stbi_uc *) stbi__malloc(img_len);
+ if (reduced == NULL) return stbi__errpuc("outofmem", "Out of memory");
+
+ for (i = 0; i < img_len; ++i)
+ reduced[i] = (stbi_uc)((orig[i] >> 8) & 0xFF); // top half of each byte is sufficient approx of 16->8 bit scaling
+
+ STBI_FREE(orig);
+ return reduced;
+}
+
+static stbi__uint16 *stbi__convert_8_to_16(stbi_uc *orig, int w, int h, int channels)
+{
+ int i;
+ int img_len = w * h * channels;
+ stbi__uint16 *enlarged;
+
+ enlarged = (stbi__uint16 *) stbi__malloc(img_len*2);
+ if (enlarged == NULL) return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
+
+ for (i = 0; i < img_len; ++i)
+ enlarged[i] = (stbi__uint16)((orig[i] << 8) + orig[i]); // replicate to high and low byte, maps 0->0, 255->0xffff
+
+ STBI_FREE(orig);
+ return enlarged;
+}
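+
+// Worked example for the two conversions above: 16->8 keeps only the high byte
+// of each sample (0x1234 -> 0x12), while 8->16 replicates the byte into both
+// halves (0xAB -> 0xABAB), so 0 maps to 0 and 255 maps to 0xffff exactly.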
+
+static void stbi__vertical_flip(void *image, int w, int h, int bytes_per_pixel)
+{
+ int row;
+ size_t bytes_per_row = (size_t)w * bytes_per_pixel;
+ stbi_uc temp[2048];
+ stbi_uc *bytes = (stbi_uc *)image;
+
+ for (row = 0; row < (h>>1); row++) {
+ stbi_uc *row0 = bytes + row*bytes_per_row;
+ stbi_uc *row1 = bytes + (h - row - 1)*bytes_per_row;
+ // swap row0 with row1
+ size_t bytes_left = bytes_per_row;
+ while (bytes_left) {
+ size_t bytes_copy = (bytes_left < sizeof(temp)) ? bytes_left : sizeof(temp);
+ memcpy(temp, row0, bytes_copy);
+ memcpy(row0, row1, bytes_copy);
+ memcpy(row1, temp, bytes_copy);
+ row0 += bytes_copy;
+ row1 += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+ }
+}
+
+static void stbi__vertical_flip_slices(void *image, int w, int h, int z, int bytes_per_pixel)
+{
+ int slice;
+ int slice_size = w * h * bytes_per_pixel;
+
+ stbi_uc *bytes = (stbi_uc *)image;
+ for (slice = 0; slice < z; ++slice) {
+ stbi__vertical_flip(bytes, w, h, bytes_per_pixel);
+ bytes += slice_size;
+ }
+}
+
+static unsigned char *stbi__load_and_postprocess_8bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__result_info ri;
+ void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 8);
+
+ if (result == NULL)
+ return NULL;
+
+ if (ri.bits_per_channel != 8) {
+ STBI_ASSERT(ri.bits_per_channel == 16);
+ result = stbi__convert_16_to_8((stbi__uint16 *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
+ ri.bits_per_channel = 8;
+ }
+
+ // @TODO: move stbi__convert_format to here
+
+ if (stbi__vertically_flip_on_load) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi_uc));
+ }
+
+ return (unsigned char *) result;
+}
+
+static stbi__uint16 *stbi__load_and_postprocess_16bit(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__result_info ri;
+ void *result = stbi__load_main(s, x, y, comp, req_comp, &ri, 16);
+
+ if (result == NULL)
+ return NULL;
+
+ if (ri.bits_per_channel != 16) {
+ STBI_ASSERT(ri.bits_per_channel == 8);
+ result = stbi__convert_8_to_16((stbi_uc *) result, *x, *y, req_comp == 0 ? *comp : req_comp);
+ ri.bits_per_channel = 16;
+ }
+
+ // @TODO: move stbi__convert_format16 to here
+ // @TODO: special case RGB-to-Y (and RGBA-to-YA) for 8-bit-to-16-bit case to keep more precision
+
+ if (stbi__vertically_flip_on_load) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(stbi__uint16));
+ }
+
+ return (stbi__uint16 *) result;
+}
+
+#if !defined(STBI_NO_HDR) || !defined(STBI_NO_LINEAR)
+static void stbi__float_postprocess(float *result, int *x, int *y, int *comp, int req_comp)
+{
+ if (stbi__vertically_flip_on_load && result != NULL) {
+ int channels = req_comp ? req_comp : *comp;
+ stbi__vertical_flip(result, *x, *y, channels * sizeof(float));
+ }
+}
+#endif
+
+#ifndef STBI_NO_STDIO
+
+static FILE *stbi__fopen(char const *filename, char const *mode)
+{
+ FILE *f;
+#if defined(_MSC_VER) && _MSC_VER >= 1400
+ if (0 != fopen_s(&f, filename, mode))
+ f=0;
+#else
+ f = fopen(filename, mode);
+#endif
+ return f;
+}
+
+
+STBIDEF stbi_uc *stbi_load(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ unsigned char *result;
+ if (!f) return stbi__errpuc("can't fopen", "Unable to open file");
+ result = stbi_load_from_file(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
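+
+// Illustrative usage (not part of the library): the common 8-bit path; the
+// filename and output variables here are hypothetical.
+//
+//    int w, h, channels_in_file;
+//    unsigned char *data = stbi_load("photo.jpg", &w, &h, &channels_in_file, 3); // force RGB
+//    if (!data) printf("load failed: %s\n", stbi_failure_reason());
+//    ...
+//    stbi_image_free(data);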
+
+STBIDEF stbi_uc *stbi_load_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ unsigned char *result;
+ stbi__context s;
+ stbi__start_file(&s,f);
+ result = stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+ if (result) {
+ // need to 'unget' all the characters in the IO buffer
+ fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
+ }
+ return result;
+}
+
+STBIDEF stbi__uint16 *stbi_load_from_file_16(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__uint16 *result;
+ stbi__context s;
+ stbi__start_file(&s,f);
+ result = stbi__load_and_postprocess_16bit(&s,x,y,comp,req_comp);
+ if (result) {
+ // need to 'unget' all the characters in the IO buffer
+ fseek(f, - (int) (s.img_buffer_end - s.img_buffer), SEEK_CUR);
+ }
+ return result;
+}
+
+STBIDEF stbi_us *stbi_load_16(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ stbi__uint16 *result;
+ if (!f) return (stbi_us *) stbi__errpuc("can't fopen", "Unable to open file");
+ result = stbi_load_from_file_16(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+
+#endif //!STBI_NO_STDIO
+
+STBIDEF stbi_us *stbi_load_16_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *channels_in_file, int desired_channels)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
+}
+
+STBIDEF stbi_us *stbi_load_16_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *channels_in_file, int desired_channels)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *)clbk, user);
+ return stbi__load_and_postprocess_16bit(&s,x,y,channels_in_file,desired_channels);
+}
+
+STBIDEF stbi_uc *stbi_load_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+}
+
+STBIDEF stbi_uc *stbi_load_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__load_and_postprocess_8bit(&s,x,y,comp,req_comp);
+}
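+
+// Illustrative sketch (not part of the library) of driving the callback API
+// from a plain FILE*; only the stbi_io_callbacks fields (read/skip/eof) come
+// from this header, the my_* helper names are hypothetical.
+//
+//    static int my_read(void *user, char *data, int size) { return (int) fread(data, 1, size, (FILE *) user); }
+//    static void my_skip(void *user, int n) { fseek((FILE *) user, n, SEEK_CUR); }
+//    static int my_eof(void *user) { return feof((FILE *) user); }
+//
+//    stbi_io_callbacks cb = { my_read, my_skip, my_eof };
+//    unsigned char *data = stbi_load_from_callbacks(&cb, fp, &w, &h, &n, 0);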
+
+#ifndef STBI_NO_GIF
+STBIDEF stbi_uc *stbi_load_gif_from_memory(stbi_uc const *buffer, int len, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+ unsigned char *result;
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+
+ result = (unsigned char*) stbi__load_gif_main(&s, delays, x, y, z, comp, req_comp);
+ if (stbi__vertically_flip_on_load) {
+ stbi__vertical_flip_slices( result, *x, *y, *z, *comp );
+ }
+
+ return result;
+}
+#endif
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__loadf_main(stbi__context *s, int *x, int *y, int *comp, int req_comp)
+{
+ unsigned char *data;
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_test(s)) {
+ stbi__result_info ri;
+ float *hdr_data = stbi__hdr_load(s,x,y,comp,req_comp, &ri);
+ if (hdr_data)
+ stbi__float_postprocess(hdr_data,x,y,comp,req_comp);
+ return hdr_data;
+ }
+ #endif
+ data = stbi__load_and_postprocess_8bit(s, x, y, comp, req_comp);
+ if (data)
+ return stbi__ldr_to_hdr(data, *x, *y, req_comp ? req_comp : *comp);
+ return stbi__errpf("unknown image type", "Image not of any known type, or corrupt");
+}
+
+STBIDEF float *stbi_loadf_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+STBIDEF float *stbi_loadf_from_callbacks(stbi_io_callbacks const *clbk, void *user, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF float *stbi_loadf(char const *filename, int *x, int *y, int *comp, int req_comp)
+{
+ float *result;
+ FILE *f = stbi__fopen(filename, "rb");
+ if (!f) return stbi__errpf("can't fopen", "Unable to open file");
+ result = stbi_loadf_from_file(f,x,y,comp,req_comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF float *stbi_loadf_from_file(FILE *f, int *x, int *y, int *comp, int req_comp)
+{
+ stbi__context s;
+ stbi__start_file(&s,f);
+ return stbi__loadf_main(&s,x,y,comp,req_comp);
+}
+#endif // !STBI_NO_STDIO
+
+#endif // !STBI_NO_LINEAR
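+
+// Illustrative usage (not part of the library): loading image data as linear
+// floats; the filename and output variables are hypothetical.
+//
+//    float *hdr = stbi_loadf("probe.hdr", &w, &h, &n, 0);
+//    // non-HDR inputs are promoted through stbi__ldr_to_hdr using the
+//    // stbi_ldr_to_hdr_gamma/scale settings (defaults: gamma 2.2, scale 1.0).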
+
+// these is-hdr-or-not queries are defined independently of whether
+// STBI_NO_LINEAR is defined, for API simplicity; if STBI_NO_LINEAR is defined,
+// they always report false!
+
+STBIDEF int stbi_is_hdr_from_memory(stbi_uc const *buffer, int len)
+{
+ #ifndef STBI_NO_HDR
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__hdr_test(&s);
+ #else
+ STBI_NOTUSED(buffer);
+ STBI_NOTUSED(len);
+ return 0;
+ #endif
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_is_hdr (char const *filename)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result=0;
+ if (f) {
+ result = stbi_is_hdr_from_file(f);
+ fclose(f);
+ }
+ return result;
+}
+
+STBIDEF int stbi_is_hdr_from_file(FILE *f)
+{
+ #ifndef STBI_NO_HDR
+ long pos = ftell(f);
+ int res;
+ stbi__context s;
+ stbi__start_file(&s,f);
+ res = stbi__hdr_test(&s);
+ fseek(f, pos, SEEK_SET);
+ return res;
+ #else
+ STBI_NOTUSED(f);
+ return 0;
+ #endif
+}
+#endif // !STBI_NO_STDIO
+
+STBIDEF int stbi_is_hdr_from_callbacks(stbi_io_callbacks const *clbk, void *user)
+{
+ #ifndef STBI_NO_HDR
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) clbk, user);
+ return stbi__hdr_test(&s);
+ #else
+ STBI_NOTUSED(clbk);
+ STBI_NOTUSED(user);
+ return 0;
+ #endif
+}
+
+#ifndef STBI_NO_LINEAR
+static float stbi__l2h_gamma=2.2f, stbi__l2h_scale=1.0f;
+
+STBIDEF void stbi_ldr_to_hdr_gamma(float gamma) { stbi__l2h_gamma = gamma; }
+STBIDEF void stbi_ldr_to_hdr_scale(float scale) { stbi__l2h_scale = scale; }
+#endif
+
+static float stbi__h2l_gamma_i=1.0f/2.2f, stbi__h2l_scale_i=1.0f;
+
+STBIDEF void stbi_hdr_to_ldr_gamma(float gamma) { stbi__h2l_gamma_i = 1/gamma; }
+STBIDEF void stbi_hdr_to_ldr_scale(float scale) { stbi__h2l_scale_i = 1/scale; }
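+
+// In other words, stbi__hdr_to_ldr below maps each non-alpha component as
+// ldr = pow(hdr / scale, 1 / gamma) * 255 + 0.5, clamped to 0..255, using the
+// defaults gamma = 2.2 and scale = 1.0 unless changed by the setters above.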
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Common code used by all image loaders
+//
+
+enum
+{
+ STBI__SCAN_load=0,
+ STBI__SCAN_type,
+ STBI__SCAN_header
+};
+
+static void stbi__refill_buffer(stbi__context *s)
+{
+ int n = (s->io.read)(s->io_user_data,(char*)s->buffer_start,s->buflen);
+ if (n == 0) {
+ // at end of file, treat same as if from memory, but need to handle case
+ // where s->img_buffer isn't pointing to safe memory, e.g. 0-byte file
+ s->read_from_callbacks = 0;
+ s->img_buffer = s->buffer_start;
+ s->img_buffer_end = s->buffer_start+1;
+ *s->img_buffer = 0;
+ } else {
+ s->img_buffer = s->buffer_start;
+ s->img_buffer_end = s->buffer_start + n;
+ }
+}
+
+stbi_inline static stbi_uc stbi__get8(stbi__context *s)
+{
+ if (s->img_buffer < s->img_buffer_end)
+ return *s->img_buffer++;
+ if (s->read_from_callbacks) {
+ stbi__refill_buffer(s);
+ return *s->img_buffer++;
+ }
+ return 0;
+}
+
+stbi_inline static int stbi__at_eof(stbi__context *s)
+{
+ if (s->io.read) {
+ if (!(s->io.eof)(s->io_user_data)) return 0;
+ // if feof() is true, check if buffer = end
+ // special case: we've only got the special 0 character at the end
+ if (s->read_from_callbacks == 0) return 1;
+ }
+
+ return s->img_buffer >= s->img_buffer_end;
+}
+
+static void stbi__skip(stbi__context *s, int n)
+{
+ if (n < 0) {
+ s->img_buffer = s->img_buffer_end;
+ return;
+ }
+ if (s->io.read) {
+ int blen = (int) (s->img_buffer_end - s->img_buffer);
+ if (blen < n) {
+ s->img_buffer = s->img_buffer_end;
+ (s->io.skip)(s->io_user_data, n - blen);
+ return;
+ }
+ }
+ s->img_buffer += n;
+}
+
+static int stbi__getn(stbi__context *s, stbi_uc *buffer, int n)
+{
+ if (s->io.read) {
+ int blen = (int) (s->img_buffer_end - s->img_buffer);
+ if (blen < n) {
+ int res, count;
+
+ memcpy(buffer, s->img_buffer, blen);
+
+ count = (s->io.read)(s->io_user_data, (char*) buffer + blen, n - blen);
+ res = (count == (n-blen));
+ s->img_buffer = s->img_buffer_end;
+ return res;
+ }
+ }
+
+ if (s->img_buffer+n <= s->img_buffer_end) {
+ memcpy(buffer, s->img_buffer, n);
+ s->img_buffer += n;
+ return 1;
+ } else
+ return 0;
+}
+
+static int stbi__get16be(stbi__context *s)
+{
+ int z = stbi__get8(s);
+ return (z << 8) + stbi__get8(s);
+}
+
+static stbi__uint32 stbi__get32be(stbi__context *s)
+{
+ stbi__uint32 z = stbi__get16be(s);
+ return (z << 16) + stbi__get16be(s);
+}
+
+#if defined(STBI_NO_BMP) && defined(STBI_NO_TGA) && defined(STBI_NO_GIF)
+// nothing
+#else
+static int stbi__get16le(stbi__context *s)
+{
+ int z = stbi__get8(s);
+ return z + (stbi__get8(s) << 8);
+}
+#endif
+
+#ifndef STBI_NO_BMP
+static stbi__uint32 stbi__get32le(stbi__context *s)
+{
+ stbi__uint32 z = stbi__get16le(s);
+ return z + (stbi__get16le(s) << 16);
+}
+#endif
+
+#define STBI__BYTECAST(x) ((stbi_uc) ((x) & 255)) // truncate int to byte without warnings
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// generic converter from built-in img_n to req_comp
+// individual types do this automatically as much as possible (e.g. jpeg
+// does all cases internally since it needs to colorspace convert anyway,
+// and it never has alpha, so very few cases ). png can automatically
+// interleave an alpha=255 channel, but falls back to this for other cases
+//
+// assume data buffer is malloced, so malloc a new one and free that one
+// only failure mode is malloc failing
+
+static stbi_uc stbi__compute_y(int r, int g, int b)
+{
+ return (stbi_uc) (((r*77) + (g*150) + (29*b)) >> 8);
+}
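+
+// The integer weights above are a fixed-point form of the usual Rec. 601 luma,
+// (77*r + 150*g + 29*b) >> 8 ~= 0.299*r + 0.587*g + 0.114*b,
+// since 77/256 ~= 0.301, 150/256 ~= 0.586 and 29/256 ~= 0.113.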
+
+static unsigned char *stbi__convert_format(unsigned char *data, int img_n, int req_comp, unsigned int x, unsigned int y)
+{
+ int i,j;
+ unsigned char *good;
+
+ if (req_comp == img_n) return data;
+ STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
+
+ good = (unsigned char *) stbi__malloc_mad3(req_comp, x, y, 0);
+ if (good == NULL) {
+ STBI_FREE(data);
+ return stbi__errpuc("outofmem", "Out of memory");
+ }
+
+ for (j=0; j < (int) y; ++j) {
+ unsigned char *src = data + j * x * img_n ;
+ unsigned char *dest = good + j * x * req_comp;
+
+ #define STBI__COMBO(a,b) ((a)*8+(b))
+ #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
+ // convert source image with img_n components to one with req_comp components;
+ // avoid switch per pixel, so use switch per scanline and massive macros
+ switch (STBI__COMBO(img_n, req_comp)) {
+ STBI__CASE(1,2) { dest[0]=src[0], dest[1]=255; } break;
+ STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=255; } break;
+ STBI__CASE(2,1) { dest[0]=src[0]; } break;
+ STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break;
+ STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=255; } break;
+ STBI__CASE(3,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break;
+ STBI__CASE(3,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = 255; } break;
+ STBI__CASE(4,1) { dest[0]=stbi__compute_y(src[0],src[1],src[2]); } break;
+ STBI__CASE(4,2) { dest[0]=stbi__compute_y(src[0],src[1],src[2]), dest[1] = src[3]; } break;
+ STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break;
+ default: STBI_ASSERT(0);
+ }
+ #undef STBI__CASE
+ }
+
+ STBI_FREE(data);
+ return good;
+}
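+
+// How the dispatch above works: STBI__COMBO packs the (source, destination)
+// channel counts into a single small integer (a*8 + b), so converting 3-channel
+// RGB to 4-channel RGBA selects case STBI__COMBO(3,4) == 28, which runs one
+// tight per-scanline loop that copies RGB and fills in alpha = 255.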
+
+static stbi__uint16 stbi__compute_y_16(int r, int g, int b)
+{
+ return (stbi__uint16) (((r*77) + (g*150) + (29*b)) >> 8);
+}
+
+static stbi__uint16 *stbi__convert_format16(stbi__uint16 *data, int img_n, int req_comp, unsigned int x, unsigned int y)
+{
+ int i,j;
+ stbi__uint16 *good;
+
+ if (req_comp == img_n) return data;
+ STBI_ASSERT(req_comp >= 1 && req_comp <= 4);
+
+ good = (stbi__uint16 *) stbi__malloc(req_comp * x * y * 2);
+ if (good == NULL) {
+ STBI_FREE(data);
+ return (stbi__uint16 *) stbi__errpuc("outofmem", "Out of memory");
+ }
+
+ for (j=0; j < (int) y; ++j) {
+ stbi__uint16 *src = data + j * x * img_n ;
+ stbi__uint16 *dest = good + j * x * req_comp;
+
+ #define STBI__COMBO(a,b) ((a)*8+(b))
+ #define STBI__CASE(a,b) case STBI__COMBO(a,b): for(i=x-1; i >= 0; --i, src += a, dest += b)
+ // convert source image with img_n components to one with req_comp components;
+ // avoid switch per pixel, so use switch per scanline and massive macros
+ switch (STBI__COMBO(img_n, req_comp)) {
+ STBI__CASE(1,2) { dest[0]=src[0], dest[1]=0xffff; } break;
+ STBI__CASE(1,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(1,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=0xffff; } break;
+ STBI__CASE(2,1) { dest[0]=src[0]; } break;
+ STBI__CASE(2,3) { dest[0]=dest[1]=dest[2]=src[0]; } break;
+ STBI__CASE(2,4) { dest[0]=dest[1]=dest[2]=src[0], dest[3]=src[1]; } break;
+ STBI__CASE(3,4) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2],dest[3]=0xffff; } break;
+ STBI__CASE(3,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
+ STBI__CASE(3,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = 0xffff; } break;
+ STBI__CASE(4,1) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]); } break;
+ STBI__CASE(4,2) { dest[0]=stbi__compute_y_16(src[0],src[1],src[2]), dest[1] = src[3]; } break;
+ STBI__CASE(4,3) { dest[0]=src[0],dest[1]=src[1],dest[2]=src[2]; } break;
+ default: STBI_ASSERT(0);
+ }
+ #undef STBI__CASE
+ }
+
+ STBI_FREE(data);
+ return good;
+}
+
+#ifndef STBI_NO_LINEAR
+static float *stbi__ldr_to_hdr(stbi_uc *data, int x, int y, int comp)
+{
+ int i,k,n;
+ float *output;
+ if (!data) return NULL;
+ output = (float *) stbi__malloc_mad4(x, y, comp, sizeof(float), 0);
+ if (output == NULL) { STBI_FREE(data); return stbi__errpf("outofmem", "Out of memory"); }
+ // compute number of non-alpha components
+ if (comp & 1) n = comp; else n = comp-1;
+ for (i=0; i < x*y; ++i) {
+ for (k=0; k < n; ++k) {
+ output[i*comp + k] = (float) (pow(data[i*comp+k]/255.0f, stbi__l2h_gamma) * stbi__l2h_scale);
+ }
+ if (k < comp) output[i*comp + k] = data[i*comp+k]/255.0f;
+ }
+ STBI_FREE(data);
+ return output;
+}
+#endif
+
+#ifndef STBI_NO_HDR
+#define stbi__float2int(x) ((int) (x))
+static stbi_uc *stbi__hdr_to_ldr(float *data, int x, int y, int comp)
+{
+ int i,k,n;
+ stbi_uc *output;
+ if (!data) return NULL;
+ output = (stbi_uc *) stbi__malloc_mad3(x, y, comp, 0);
+ if (output == NULL) { STBI_FREE(data); return stbi__errpuc("outofmem", "Out of memory"); }
+ // compute number of non-alpha components
+ if (comp & 1) n = comp; else n = comp-1;
+ for (i=0; i < x*y; ++i) {
+ for (k=0; k < n; ++k) {
+ float z = (float) pow(data[i*comp+k]*stbi__h2l_scale_i, stbi__h2l_gamma_i) * 255 + 0.5f;
+ if (z < 0) z = 0;
+ if (z > 255) z = 255;
+ output[i*comp + k] = (stbi_uc) stbi__float2int(z);
+ }
+ if (k < comp) {
+ float z = data[i*comp+k] * 255 + 0.5f;
+ if (z < 0) z = 0;
+ if (z > 255) z = 255;
+ output[i*comp + k] = (stbi_uc) stbi__float2int(z);
+ }
+ }
+ STBI_FREE(data);
+ return output;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// "baseline" JPEG/JFIF decoder
+//
+// simple implementation
+// - doesn't support delayed output of y-dimension
+// - simple interface (only one output format: 8-bit interleaved RGB)
+// - doesn't try to recover corrupt jpegs
+// - doesn't allow partial loading, loading multiple at once
+// - still fast on x86 (copying globals into locals doesn't help x86)
+// - allocates lots of intermediate memory (full size of all components)
+// - non-interleaved case requires this anyway
+// - allows good upsampling (see next)
+// high-quality
+// - upsampled channels are bilinearly interpolated, even across blocks
+// - quality integer IDCT derived from IJG's 'slow'
+// performance
+// - fast huffman; reasonable integer IDCT
+// - some SIMD kernels for common paths on targets with SSE2/NEON
+// - uses a lot of intermediate memory, could cache poorly
+
+#ifndef STBI_NO_JPEG
+
+// huffman decoding acceleration
+#define FAST_BITS 9 // larger handles more cases; smaller stomps less cache
+
+typedef struct
+{
+ stbi_uc fast[1 << FAST_BITS];
+ // weirdly, repacking this into AoS is a 10% speed loss, instead of a win
+ stbi__uint16 code[256];
+ stbi_uc values[256];
+ stbi_uc size[257];
+ unsigned int maxcode[18];
+ int delta[17]; // old 'firstsymbol' - old 'firstcode'
+} stbi__huffman;
+
+typedef struct
+{
+ stbi__context *s;
+ stbi__huffman huff_dc[4];
+ stbi__huffman huff_ac[4];
+ stbi__uint16 dequant[4][64];
+ stbi__int16 fast_ac[4][1 << FAST_BITS];
+
+// sizes for components, interleaved MCUs
+ int img_h_max, img_v_max;
+ int img_mcu_x, img_mcu_y;
+ int img_mcu_w, img_mcu_h;
+
+// definition of jpeg image component
+ struct
+ {
+ int id;
+ int h,v;
+ int tq;
+ int hd,ha;
+ int dc_pred;
+
+ int x,y,w2,h2;
+ stbi_uc *data;
+ void *raw_data, *raw_coeff;
+ stbi_uc *linebuf;
+ short *coeff; // progressive only
+ int coeff_w, coeff_h; // number of 8x8 coefficient blocks
+ } img_comp[4];
+
+ stbi__uint32 code_buffer; // jpeg entropy-coded buffer
+ int code_bits; // number of valid bits
+ unsigned char marker; // marker seen while filling entropy buffer
+ int nomore; // flag if we saw a marker so must stop
+
+ int progressive;
+ int spec_start;
+ int spec_end;
+ int succ_high;
+ int succ_low;
+ int eob_run;
+ int jfif;
+ int app14_color_transform; // Adobe APP14 tag
+ int rgb;
+
+ int scan_n, order[4];
+ int restart_interval, todo;
+
+// kernels
+ void (*idct_block_kernel)(stbi_uc *out, int out_stride, short data[64]);
+ void (*YCbCr_to_RGB_kernel)(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step);
+ stbi_uc *(*resample_row_hv_2_kernel)(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs);
+} stbi__jpeg;
+
+static int stbi__build_huffman(stbi__huffman *h, int *count)
+{
+ int i,j,k=0;
+ unsigned int code;
+ // build size list for each symbol (from JPEG spec)
+ for (i=0; i < 16; ++i)
+ for (j=0; j < count[i]; ++j)
+ h->size[k++] = (stbi_uc) (i+1);
+ h->size[k] = 0;
+
+ // compute actual symbols (from jpeg spec)
+ code = 0;
+ k = 0;
+ for(j=1; j <= 16; ++j) {
+ // compute delta to add to code to compute symbol id
+ h->delta[j] = k - code;
+ if (h->size[k] == j) {
+ while (h->size[k] == j)
+ h->code[k++] = (stbi__uint16) (code++);
+ if (code-1 >= (1u << j)) return stbi__err("bad code lengths","Corrupt JPEG");
+ }
+ // compute largest code + 1 for this size, preshifted as needed later
+ h->maxcode[j] = code << (16-j);
+ code <<= 1;
+ }
+ h->maxcode[j] = 0xffffffff;
+
+ // build non-spec acceleration table; 255 is flag for not-accelerated
+ memset(h->fast, 255, 1 << FAST_BITS);
+ for (i=0; i < k; ++i) {
+ int s = h->size[i];
+ if (s <= FAST_BITS) {
+ int c = h->code[i] << (FAST_BITS-s);
+ int m = 1 << (FAST_BITS-s);
+ for (j=0; j < m; ++j) {
+ h->fast[c+j] = (stbi_uc) i;
+ }
+ }
+ }
+ return 1;
+}
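+
+// What the acceleration table built above holds: a symbol whose code is s bits
+// long (s <= FAST_BITS) owns every FAST_BITS-wide bit pattern beginning with
+// that code, i.e. 1 << (FAST_BITS - s) consecutive 'fast' slots; with
+// FAST_BITS = 9 a 3-bit code fills 64 of the 512 slots and decodes with a
+// single table lookup instead of the maxcode search.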
+
+// build a table that decodes both magnitude and value of small ACs in
+// one go.
+static void stbi__build_fast_ac(stbi__int16 *fast_ac, stbi__huffman *h)
+{
+ int i;
+ for (i=0; i < (1 << FAST_BITS); ++i) {
+ stbi_uc fast = h->fast[i];
+ fast_ac[i] = 0;
+ if (fast < 255) {
+ int rs = h->values[fast];
+ int run = (rs >> 4) & 15;
+ int magbits = rs & 15;
+ int len = h->size[fast];
+
+ if (magbits && len + magbits <= FAST_BITS) {
+ // magnitude code followed by receive_extend code
+ int k = ((i << len) & ((1 << FAST_BITS) - 1)) >> (FAST_BITS - magbits);
+ int m = 1 << (magbits - 1);
+ if (k < m) k += (~0U << magbits) + 1;
+ // if the result is small enough, we can fit it in fast_ac table
+ if (k >= -128 && k <= 127)
+ fast_ac[i] = (stbi__int16) ((k * 256) + (run * 16) + (len + magbits));
+ }
+ }
+ }
+}
+
+static void stbi__grow_buffer_unsafe(stbi__jpeg *j)
+{
+ do {
+ unsigned int b = j->nomore ? 0 : stbi__get8(j->s);
+ if (b == 0xff) {
+ int c = stbi__get8(j->s);
+ while (c == 0xff) c = stbi__get8(j->s); // consume fill bytes
+ if (c != 0) {
+ j->marker = (unsigned char) c;
+ j->nomore = 1;
+ return;
+ }
+ }
+ j->code_buffer |= b << (24 - j->code_bits);
+ j->code_bits += 8;
+ } while (j->code_bits <= 24);
+}
+
+// (1 << n) - 1
+static const stbi__uint32 stbi__bmask[17]={0,1,3,7,15,31,63,127,255,511,1023,2047,4095,8191,16383,32767,65535};
+
+// decode a jpeg huffman value from the bitstream
+stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h)
+{
+ unsigned int temp;
+ int c,k;
+
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+
+ // look at the top FAST_BITS and determine what symbol ID it is,
+ // if the code is <= FAST_BITS
+ c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
+ k = h->fast[c];
+ if (k < 255) {
+ int s = h->size[k];
+ if (s > j->code_bits)
+ return -1;
+ j->code_buffer <<= s;
+ j->code_bits -= s;
+ return h->values[k];
+ }
+
+ // naive test is to shift the code_buffer down so k bits are
+ // valid, then test against maxcode. To speed this up, we've
+ // preshifted maxcode left so that it has (16-k) 0s at the
+ // end; in other words, regardless of the number of bits, it
+ // wants to be compared against something shifted to have 16;
+ // that way we don't need to shift inside the loop.
+ temp = j->code_buffer >> 16;
+ for (k=FAST_BITS+1 ; ; ++k)
+ if (temp < h->maxcode[k])
+ break;
+ if (k == 17) {
+ // error! code not found
+ j->code_bits -= 16;
+ return -1;
+ }
+
+ if (k > j->code_bits)
+ return -1;
+
+ // convert the huffman code to the symbol id
+ c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k];
+ STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]);
+
+ // convert the id to a symbol
+ j->code_bits -= k;
+ j->code_buffer <<= k;
+ return h->values[c];
+}
+
+// bias[n] = (-1<<n) + 1
+static const int stbi__jbias[16] = {0,-1,-3,-7,-15,-31,-63,-127,-255,-511,-1023,-2047,-4095,-8191,-16383,-32767};
+
+// combined JPEG 'receive' and JPEG 'extend', since baseline
+// always extends everything it receives.
+stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n)
+{
+ unsigned int k;
+ int sgn;
+ if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+
+ sgn = (stbi__int32)j->code_buffer >> 31; // sign bit is always in MSB
+ k = stbi_lrot(j->code_buffer, n);
+ STBI_ASSERT(n >= 0 && n < (int) (sizeof(stbi__bmask)/sizeof(*stbi__bmask)));
+ j->code_buffer = k & ~stbi__bmask[n];
+ k &= stbi__bmask[n];
+ j->code_bits -= n;
+ return k + (stbi__jbias[n] & ~sgn);
+}
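+
+// Worked example of receive/extend for n = 3: raw bit patterns 000..011 decode
+// to -7..-4 (raw + stbi__jbias[3] = raw - 7), while 100..111 decode to +4..+7
+// unchanged, which is the JPEG "extend" rule for magnitude category 3.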
+
+// get some unsigned bits
+stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n)
+{
+ unsigned int k;
+ if (j->code_bits < n) stbi__grow_buffer_unsafe(j);
+ k = stbi_lrot(j->code_buffer, n);
+ j->code_buffer = k & ~stbi__bmask[n];
+ k &= stbi__bmask[n];
+ j->code_bits -= n;
+ return k;
+}
+
+stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j)
+{
+ unsigned int k;
+ if (j->code_bits < 1) stbi__grow_buffer_unsafe(j);
+ k = j->code_buffer;
+ j->code_buffer <<= 1;
+ --j->code_bits;
+ return k & 0x80000000;
+}
+
+// given a value that's at position X in the zigzag stream,
+// where does it appear in the 8x8 matrix coded as row-major?
+static const stbi_uc stbi__jpeg_dezigzag[64+15] =
+{
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63,
+ // let corrupt input sample past end
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63
+};
+
+// decode one 64-entry block--
+static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman *hdc, stbi__huffman *hac, stbi__int16 *fac, int b, stbi__uint16 *dequant)
+{
+ int diff,dc,k;
+ int t;
+
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+ t = stbi__jpeg_huff_decode(j, hdc);
+ if (t < 0) return stbi__err("bad huffman code","Corrupt JPEG");
+
+ // 0 all the ac values now so we can do it 32-bits at a time
+ memset(data,0,64*sizeof(data[0]));
+
+ diff = t ? stbi__extend_receive(j, t) : 0;
+ dc = j->img_comp[b].dc_pred + diff;
+ j->img_comp[b].dc_pred = dc;
+ data[0] = (short) (dc * dequant[0]);
+
+ // decode AC components, see JPEG spec
+ k = 1;
+ do {
+ unsigned int zig;
+ int c,r,s;
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+ c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
+ r = fac[c];
+ if (r) { // fast-AC path
+ k += (r >> 4) & 15; // run
+ s = r & 15; // combined length
+ j->code_buffer <<= s;
+ j->code_bits -= s;
+ // decode into unzigzag'd location
+ zig = stbi__jpeg_dezigzag[k++];
+ data[zig] = (short) ((r >> 8) * dequant[zig]);
+ } else {
+ int rs = stbi__jpeg_huff_decode(j, hac);
+ if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
+ s = rs & 15;
+ r = rs >> 4;
+ if (s == 0) {
+ if (rs != 0xf0) break; // end block
+ k += 16;
+ } else {
+ k += r;
+ // decode into unzigzag'd location
+ zig = stbi__jpeg_dezigzag[k++];
+ data[zig] = (short) (stbi__extend_receive(j,s) * dequant[zig]);
+ }
+ }
+ } while (k < 64);
+ return 1;
+}
+
+static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__huffman *hdc, int b)
+{
+ int diff,dc;
+ int t;
+ if (j->spec_end != 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");
+
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+
+ if (j->succ_high == 0) {
+ // first scan for DC coefficient, must be first
+ memset(data,0,64*sizeof(data[0])); // 0 all the ac values now
+ t = stbi__jpeg_huff_decode(j, hdc);
+ diff = t ? stbi__extend_receive(j, t) : 0;
+
+ dc = j->img_comp[b].dc_pred + diff;
+ j->img_comp[b].dc_pred = dc;
+ data[0] = (short) (dc << j->succ_low);
+ } else {
+ // refinement scan for DC coefficient
+ if (stbi__jpeg_get_bit(j))
+ data[0] += (short) (1 << j->succ_low);
+ }
+ return 1;
+}
+
+// @OPTIMIZE: store non-zigzagged during the decode passes,
+// and only de-zigzag when dequantizing
+static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__huffman *hac, stbi__int16 *fac)
+{
+ int k;
+ if (j->spec_start == 0) return stbi__err("can't merge dc and ac", "Corrupt JPEG");
+
+ if (j->succ_high == 0) {
+ int shift = j->succ_low;
+
+ if (j->eob_run) {
+ --j->eob_run;
+ return 1;
+ }
+
+ k = j->spec_start;
+ do {
+ unsigned int zig;
+ int c,r,s;
+ if (j->code_bits < 16) stbi__grow_buffer_unsafe(j);
+ c = (j->code_buffer >> (32 - FAST_BITS)) & ((1 << FAST_BITS)-1);
+ r = fac[c];
+ if (r) { // fast-AC path
+ k += (r >> 4) & 15; // run
+ s = r & 15; // combined length
+ j->code_buffer <<= s;
+ j->code_bits -= s;
+ zig = stbi__jpeg_dezigzag[k++];
+ data[zig] = (short) ((r >> 8) << shift);
+ } else {
+ int rs = stbi__jpeg_huff_decode(j, hac);
+ if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
+ s = rs & 15;
+ r = rs >> 4;
+ if (s == 0) {
+ if (r < 15) {
+ j->eob_run = (1 << r);
+ if (r)
+ j->eob_run += stbi__jpeg_get_bits(j, r);
+ --j->eob_run;
+ break;
+ }
+ k += 16;
+ } else {
+ k += r;
+ zig = stbi__jpeg_dezigzag[k++];
+ data[zig] = (short) (stbi__extend_receive(j,s) << shift);
+ }
+ }
+ } while (k <= j->spec_end);
+ } else {
+ // refinement scan for these AC coefficients
+
+ short bit = (short) (1 << j->succ_low);
+
+ if (j->eob_run) {
+ --j->eob_run;
+ for (k = j->spec_start; k <= j->spec_end; ++k) {
+ short *p = &data[stbi__jpeg_dezigzag[k]];
+ if (*p != 0)
+ if (stbi__jpeg_get_bit(j))
+ if ((*p & bit)==0) {
+ if (*p > 0)
+ *p += bit;
+ else
+ *p -= bit;
+ }
+ }
+ } else {
+ k = j->spec_start;
+ do {
+ int r,s;
+ int rs = stbi__jpeg_huff_decode(j, hac); // @OPTIMIZE see if we can use the fast path here, advance-by-r is so slow, eh
+ if (rs < 0) return stbi__err("bad huffman code","Corrupt JPEG");
+ s = rs & 15;
+ r = rs >> 4;
+ if (s == 0) {
+ if (r < 15) {
+ j->eob_run = (1 << r) - 1;
+ if (r)
+ j->eob_run += stbi__jpeg_get_bits(j, r);
+ r = 64; // force end of block
+ } else {
+ // r=15 s=0 should write 16 0s, so we just do
+ // a run of 15 0s and then write s (which is 0),
+ // so we don't have to do anything special here
+ }
+ } else {
+ if (s != 1) return stbi__err("bad huffman code", "Corrupt JPEG");
+ // sign bit
+ if (stbi__jpeg_get_bit(j))
+ s = bit;
+ else
+ s = -bit;
+ }
+
+ // advance by r
+ while (k <= j->spec_end) {
+ short *p = &data[stbi__jpeg_dezigzag[k++]];
+ if (*p != 0) {
+ if (stbi__jpeg_get_bit(j))
+ if ((*p & bit)==0) {
+ if (*p > 0)
+ *p += bit;
+ else
+ *p -= bit;
+ }
+ } else {
+ if (r == 0) {
+ *p = (short) s;
+ break;
+ }
+ --r;
+ }
+ }
+ } while (k <= j->spec_end);
+ }
+ }
+ return 1;
+}
+
+// take a -128..127 value and stbi__clamp it and convert to 0..255
+stbi_inline static stbi_uc stbi__clamp(int x)
+{
+ // trick to use a single test to catch both cases
+ if ((unsigned int) x > 255) {
+ if (x < 0) return 0;
+ if (x > 255) return 255;
+ }
+ return (stbi_uc) x;
+}
+
+#define stbi__f2f(x) ((int) (((x) * 4096 + 0.5)))
+#define stbi__fsh(x) ((x) * 4096)
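+
+// stbi__f2f turns a float constant into 20.12 fixed point (multiply by 4096 =
+// 1<<12 and round), e.g. stbi__f2f(0.5411961f) evaluates to 2217; stbi__fsh
+// pre-scales an already-integer term by the same 1<<12 so every term in the
+// IDCT below shares one fixed-point scale.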
+
+// derived from jidctint -- DCT_ISLOW
+#define STBI__IDCT_1D(s0,s1,s2,s3,s4,s5,s6,s7) \
+ int t0,t1,t2,t3,p1,p2,p3,p4,p5,x0,x1,x2,x3; \
+ p2 = s2; \
+ p3 = s6; \
+ p1 = (p2+p3) * stbi__f2f(0.5411961f); \
+ t2 = p1 + p3*stbi__f2f(-1.847759065f); \
+ t3 = p1 + p2*stbi__f2f( 0.765366865f); \
+ p2 = s0; \
+ p3 = s4; \
+ t0 = stbi__fsh(p2+p3); \
+ t1 = stbi__fsh(p2-p3); \
+ x0 = t0+t3; \
+ x3 = t0-t3; \
+ x1 = t1+t2; \
+ x2 = t1-t2; \
+ t0 = s7; \
+ t1 = s5; \
+ t2 = s3; \
+ t3 = s1; \
+ p3 = t0+t2; \
+ p4 = t1+t3; \
+ p1 = t0+t3; \
+ p2 = t1+t2; \
+ p5 = (p3+p4)*stbi__f2f( 1.175875602f); \
+ t0 = t0*stbi__f2f( 0.298631336f); \
+ t1 = t1*stbi__f2f( 2.053119869f); \
+ t2 = t2*stbi__f2f( 3.072711026f); \
+ t3 = t3*stbi__f2f( 1.501321110f); \
+ p1 = p5 + p1*stbi__f2f(-0.899976223f); \
+ p2 = p5 + p2*stbi__f2f(-2.562915447f); \
+ p3 = p3*stbi__f2f(-1.961570560f); \
+ p4 = p4*stbi__f2f(-0.390180644f); \
+ t3 += p1+p4; \
+ t2 += p2+p3; \
+ t1 += p2+p4; \
+ t0 += p1+p3;
+
+static void stbi__idct_block(stbi_uc *out, int out_stride, short data[64])
+{
+ int i,val[64],*v=val;
+ stbi_uc *o;
+ short *d = data;
+
+ // columns
+ for (i=0; i < 8; ++i,++d, ++v) {
+ // if all zeroes, shortcut -- this avoids dequantizing 0s and IDCTing
+ if (d[ 8]==0 && d[16]==0 && d[24]==0 && d[32]==0
+ && d[40]==0 && d[48]==0 && d[56]==0) {
+ //    no shortcut                 0     seconds
+ //    (1|2|3|4|5|6|7)==0          0     seconds
+ //    all separate               -0.047 seconds
+ //    1 && 2|3 && 4|5 && 6|7:    -0.047 seconds
+ int dcterm = d[0]*4;
+ v[0] = v[8] = v[16] = v[24] = v[32] = v[40] = v[48] = v[56] = dcterm;
+ } else {
+ STBI__IDCT_1D(d[ 0],d[ 8],d[16],d[24],d[32],d[40],d[48],d[56])
+ // constants scaled things up by 1<<12; let's bring them back
+ // down, but keep 2 extra bits of precision
+ x0 += 512; x1 += 512; x2 += 512; x3 += 512;
+ v[ 0] = (x0+t3) >> 10;
+ v[56] = (x0-t3) >> 10;
+ v[ 8] = (x1+t2) >> 10;
+ v[48] = (x1-t2) >> 10;
+ v[16] = (x2+t1) >> 10;
+ v[40] = (x2-t1) >> 10;
+ v[24] = (x3+t0) >> 10;
+ v[32] = (x3-t0) >> 10;
+ }
+ }
+
+ for (i=0, v=val, o=out; i < 8; ++i,v+=8,o+=out_stride) {
+ // no fast case since the first 1D IDCT spread components out
+ STBI__IDCT_1D(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7])
+ // constants scaled things up by 1<<12, plus we had 1<<2 from first
+ // loop, plus horizontal and vertical each scale by sqrt(8) so together
+ // we've got an extra 1<<3, so 1<<17 total we need to remove.
+ // so we want to round that, which means adding 0.5 * 1<<17,
+ // aka 65536. Also, we'll end up with -128 to 127 that we want
+ // to encode as 0..255 by adding 128, so we'll add that before the shift
+ x0 += 65536 + (128<<17);
+ x1 += 65536 + (128<<17);
+ x2 += 65536 + (128<<17);
+ x3 += 65536 + (128<<17);
+ // tried computing the shifts into temps, or'ing the temps to see
+ // if any were out of range, but that was slower
+ o[0] = stbi__clamp((x0+t3) >> 17);
+ o[7] = stbi__clamp((x0-t3) >> 17);
+ o[1] = stbi__clamp((x1+t2) >> 17);
+ o[6] = stbi__clamp((x1-t2) >> 17);
+ o[2] = stbi__clamp((x2+t1) >> 17);
+ o[5] = stbi__clamp((x2-t1) >> 17);
+ o[3] = stbi__clamp((x3+t0) >> 17);
+ o[4] = stbi__clamp((x3-t0) >> 17);
+ }
+}
+
+#ifdef STBI_SSE2
+// sse2 integer IDCT. not the fastest possible implementation but it
+// produces bit-identical results to the generic C version so it's
+// fully "transparent".
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
+{
+ // This is constructed to match our regular (generic) integer IDCT exactly.
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7;
+ __m128i tmp;
+
+ // dot product constant: even elems=x, odd elems=y
+ #define dct_const(x,y) _mm_setr_epi16((x),(y),(x),(y),(x),(y),(x),(y))
+
+ // out(0) = c0[even]*x + c0[odd]*y (c0, x, y 16-bit, out 32-bit)
+ // out(1) = c1[even]*x + c1[odd]*y
+ #define dct_rot(out0,out1, x,y,c0,c1) \
+ __m128i c0##lo = _mm_unpacklo_epi16((x),(y)); \
+ __m128i c0##hi = _mm_unpackhi_epi16((x),(y)); \
+ __m128i out0##_l = _mm_madd_epi16(c0##lo, c0); \
+ __m128i out0##_h = _mm_madd_epi16(c0##hi, c0); \
+ __m128i out1##_l = _mm_madd_epi16(c0##lo, c1); \
+ __m128i out1##_h = _mm_madd_epi16(c0##hi, c1)
+
+ // out = in << 12 (in 16-bit, out 32-bit)
+ #define dct_widen(out, in) \
+ __m128i out##_l = _mm_srai_epi32(_mm_unpacklo_epi16(_mm_setzero_si128(), (in)), 4); \
+ __m128i out##_h = _mm_srai_epi32(_mm_unpackhi_epi16(_mm_setzero_si128(), (in)), 4)
+
+ // wide add
+ #define dct_wadd(out, a, b) \
+ __m128i out##_l = _mm_add_epi32(a##_l, b##_l); \
+ __m128i out##_h = _mm_add_epi32(a##_h, b##_h)
+
+ // wide sub
+ #define dct_wsub(out, a, b) \
+ __m128i out##_l = _mm_sub_epi32(a##_l, b##_l); \
+ __m128i out##_h = _mm_sub_epi32(a##_h, b##_h)
+
+ // butterfly a/b, add bias, then shift by "s" and pack
+ #define dct_bfly32o(out0, out1, a,b,bias,s) \
+ { \
+ __m128i abiased_l = _mm_add_epi32(a##_l, bias); \
+ __m128i abiased_h = _mm_add_epi32(a##_h, bias); \
+ dct_wadd(sum, abiased, b); \
+ dct_wsub(dif, abiased, b); \
+ out0 = _mm_packs_epi32(_mm_srai_epi32(sum_l, s), _mm_srai_epi32(sum_h, s)); \
+ out1 = _mm_packs_epi32(_mm_srai_epi32(dif_l, s), _mm_srai_epi32(dif_h, s)); \
+ }
+
+ // 8-bit interleave step (for transposes)
+ #define dct_interleave8(a, b) \
+ tmp = a; \
+ a = _mm_unpacklo_epi8(a, b); \
+ b = _mm_unpackhi_epi8(tmp, b)
+
+ // 16-bit interleave step (for transposes)
+ #define dct_interleave16(a, b) \
+ tmp = a; \
+ a = _mm_unpacklo_epi16(a, b); \
+ b = _mm_unpackhi_epi16(tmp, b)
+
+ #define dct_pass(bias,shift) \
+ { \
+ /* even part */ \
+ dct_rot(t2e,t3e, row2,row6, rot0_0,rot0_1); \
+ __m128i sum04 = _mm_add_epi16(row0, row4); \
+ __m128i dif04 = _mm_sub_epi16(row0, row4); \
+ dct_widen(t0e, sum04); \
+ dct_widen(t1e, dif04); \
+ dct_wadd(x0, t0e, t3e); \
+ dct_wsub(x3, t0e, t3e); \
+ dct_wadd(x1, t1e, t2e); \
+ dct_wsub(x2, t1e, t2e); \
+ /* odd part */ \
+ dct_rot(y0o,y2o, row7,row3, rot2_0,rot2_1); \
+ dct_rot(y1o,y3o, row5,row1, rot3_0,rot3_1); \
+ __m128i sum17 = _mm_add_epi16(row1, row7); \
+ __m128i sum35 = _mm_add_epi16(row3, row5); \
+ dct_rot(y4o,y5o, sum17,sum35, rot1_0,rot1_1); \
+ dct_wadd(x4, y0o, y4o); \
+ dct_wadd(x5, y1o, y5o); \
+ dct_wadd(x6, y2o, y5o); \
+ dct_wadd(x7, y3o, y4o); \
+ dct_bfly32o(row0,row7, x0,x7,bias,shift); \
+ dct_bfly32o(row1,row6, x1,x6,bias,shift); \
+ dct_bfly32o(row2,row5, x2,x5,bias,shift); \
+ dct_bfly32o(row3,row4, x3,x4,bias,shift); \
+ }
+
+ __m128i rot0_0 = dct_const(stbi__f2f(0.5411961f), stbi__f2f(0.5411961f) + stbi__f2f(-1.847759065f));
+ __m128i rot0_1 = dct_const(stbi__f2f(0.5411961f) + stbi__f2f( 0.765366865f), stbi__f2f(0.5411961f));
+ __m128i rot1_0 = dct_const(stbi__f2f(1.175875602f) + stbi__f2f(-0.899976223f), stbi__f2f(1.175875602f));
+ __m128i rot1_1 = dct_const(stbi__f2f(1.175875602f), stbi__f2f(1.175875602f) + stbi__f2f(-2.562915447f));
+ __m128i rot2_0 = dct_const(stbi__f2f(-1.961570560f) + stbi__f2f( 0.298631336f), stbi__f2f(-1.961570560f));
+ __m128i rot2_1 = dct_const(stbi__f2f(-1.961570560f), stbi__f2f(-1.961570560f) + stbi__f2f( 3.072711026f));
+ __m128i rot3_0 = dct_const(stbi__f2f(-0.390180644f) + stbi__f2f( 2.053119869f), stbi__f2f(-0.390180644f));
+ __m128i rot3_1 = dct_const(stbi__f2f(-0.390180644f), stbi__f2f(-0.390180644f) + stbi__f2f( 1.501321110f));
+
+ // rounding biases in column/row passes, see stbi__idct_block for explanation.
+ __m128i bias_0 = _mm_set1_epi32(512);
+ __m128i bias_1 = _mm_set1_epi32(65536 + (128<<17));
+
+ // load
+ row0 = _mm_load_si128((const __m128i *) (data + 0*8));
+ row1 = _mm_load_si128((const __m128i *) (data + 1*8));
+ row2 = _mm_load_si128((const __m128i *) (data + 2*8));
+ row3 = _mm_load_si128((const __m128i *) (data + 3*8));
+ row4 = _mm_load_si128((const __m128i *) (data + 4*8));
+ row5 = _mm_load_si128((const __m128i *) (data + 5*8));
+ row6 = _mm_load_si128((const __m128i *) (data + 6*8));
+ row7 = _mm_load_si128((const __m128i *) (data + 7*8));
+
+ // column pass
+ dct_pass(bias_0, 10);
+
+ {
+ // 16bit 8x8 transpose pass 1
+ dct_interleave16(row0, row4);
+ dct_interleave16(row1, row5);
+ dct_interleave16(row2, row6);
+ dct_interleave16(row3, row7);
+
+ // transpose pass 2
+ dct_interleave16(row0, row2);
+ dct_interleave16(row1, row3);
+ dct_interleave16(row4, row6);
+ dct_interleave16(row5, row7);
+
+ // transpose pass 3
+ dct_interleave16(row0, row1);
+ dct_interleave16(row2, row3);
+ dct_interleave16(row4, row5);
+ dct_interleave16(row6, row7);
+ }
+
+ // row pass
+ dct_pass(bias_1, 17);
+
+ {
+ // pack
+ __m128i p0 = _mm_packus_epi16(row0, row1); // a0a1a2a3...a7b0b1b2b3...b7
+ __m128i p1 = _mm_packus_epi16(row2, row3);
+ __m128i p2 = _mm_packus_epi16(row4, row5);
+ __m128i p3 = _mm_packus_epi16(row6, row7);
+
+ // 8bit 8x8 transpose pass 1
+ dct_interleave8(p0, p2); // a0e0a1e1...
+ dct_interleave8(p1, p3); // c0g0c1g1...
+
+ // transpose pass 2
+ dct_interleave8(p0, p1); // a0c0e0g0...
+ dct_interleave8(p2, p3); // b0d0f0h0...
+
+ // transpose pass 3
+ dct_interleave8(p0, p2); // a0b0c0d0...
+ dct_interleave8(p1, p3); // a4b4c4d4...
+
+ // store
+ _mm_storel_epi64((__m128i *) out, p0); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p0, 0x4e)); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, p2); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p2, 0x4e)); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, p1); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p1, 0x4e)); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, p3); out += out_stride;
+ _mm_storel_epi64((__m128i *) out, _mm_shuffle_epi32(p3, 0x4e));
+ }
+
+#undef dct_const
+#undef dct_rot
+#undef dct_widen
+#undef dct_wadd
+#undef dct_wsub
+#undef dct_bfly32o
+#undef dct_interleave8
+#undef dct_interleave16
+#undef dct_pass
+}
+
+#endif // STBI_SSE2
+
+#ifdef STBI_NEON
+
+// NEON integer IDCT. should produce bit-identical
+// results to the generic C version.
+static void stbi__idct_simd(stbi_uc *out, int out_stride, short data[64])
+{
+ int16x8_t row0, row1, row2, row3, row4, row5, row6, row7;
+
+ int16x4_t rot0_0 = vdup_n_s16(stbi__f2f(0.5411961f));
+ int16x4_t rot0_1 = vdup_n_s16(stbi__f2f(-1.847759065f));
+ int16x4_t rot0_2 = vdup_n_s16(stbi__f2f( 0.765366865f));
+ int16x4_t rot1_0 = vdup_n_s16(stbi__f2f( 1.175875602f));
+ int16x4_t rot1_1 = vdup_n_s16(stbi__f2f(-0.899976223f));
+ int16x4_t rot1_2 = vdup_n_s16(stbi__f2f(-2.562915447f));
+ int16x4_t rot2_0 = vdup_n_s16(stbi__f2f(-1.961570560f));
+ int16x4_t rot2_1 = vdup_n_s16(stbi__f2f(-0.390180644f));
+ int16x4_t rot3_0 = vdup_n_s16(stbi__f2f( 0.298631336f));
+ int16x4_t rot3_1 = vdup_n_s16(stbi__f2f( 2.053119869f));
+ int16x4_t rot3_2 = vdup_n_s16(stbi__f2f( 3.072711026f));
+ int16x4_t rot3_3 = vdup_n_s16(stbi__f2f( 1.501321110f));
+
+#define dct_long_mul(out, inq, coeff) \
+ int32x4_t out##_l = vmull_s16(vget_low_s16(inq), coeff); \
+ int32x4_t out##_h = vmull_s16(vget_high_s16(inq), coeff)
+
+#define dct_long_mac(out, acc, inq, coeff) \
+ int32x4_t out##_l = vmlal_s16(acc##_l, vget_low_s16(inq), coeff); \
+ int32x4_t out##_h = vmlal_s16(acc##_h, vget_high_s16(inq), coeff)
+
+#define dct_widen(out, inq) \
+ int32x4_t out##_l = vshll_n_s16(vget_low_s16(inq), 12); \
+ int32x4_t out##_h = vshll_n_s16(vget_high_s16(inq), 12)
+
+// wide add
+#define dct_wadd(out, a, b) \
+ int32x4_t out##_l = vaddq_s32(a##_l, b##_l); \
+ int32x4_t out##_h = vaddq_s32(a##_h, b##_h)
+
+// wide sub
+#define dct_wsub(out, a, b) \
+ int32x4_t out##_l = vsubq_s32(a##_l, b##_l); \
+ int32x4_t out##_h = vsubq_s32(a##_h, b##_h)
+
+// butterfly a/b, then shift using "shiftop" by "s" and pack
+#define dct_bfly32o(out0,out1, a,b,shiftop,s) \
+ { \
+ dct_wadd(sum, a, b); \
+ dct_wsub(dif, a, b); \
+ out0 = vcombine_s16(shiftop(sum_l, s), shiftop(sum_h, s)); \
+ out1 = vcombine_s16(shiftop(dif_l, s), shiftop(dif_h, s)); \
+ }
+
+#define dct_pass(shiftop, shift) \
+ { \
+ /* even part */ \
+ int16x8_t sum26 = vaddq_s16(row2, row6); \
+ dct_long_mul(p1e, sum26, rot0_0); \
+ dct_long_mac(t2e, p1e, row6, rot0_1); \
+ dct_long_mac(t3e, p1e, row2, rot0_2); \
+ int16x8_t sum04 = vaddq_s16(row0, row4); \
+ int16x8_t dif04 = vsubq_s16(row0, row4); \
+ dct_widen(t0e, sum04); \
+ dct_widen(t1e, dif04); \
+ dct_wadd(x0, t0e, t3e); \
+ dct_wsub(x3, t0e, t3e); \
+ dct_wadd(x1, t1e, t2e); \
+ dct_wsub(x2, t1e, t2e); \
+ /* odd part */ \
+ int16x8_t sum15 = vaddq_s16(row1, row5); \
+ int16x8_t sum17 = vaddq_s16(row1, row7); \
+ int16x8_t sum35 = vaddq_s16(row3, row5); \
+ int16x8_t sum37 = vaddq_s16(row3, row7); \
+ int16x8_t sumodd = vaddq_s16(sum17, sum35); \
+ dct_long_mul(p5o, sumodd, rot1_0); \
+ dct_long_mac(p1o, p5o, sum17, rot1_1); \
+ dct_long_mac(p2o, p5o, sum35, rot1_2); \
+ dct_long_mul(p3o, sum37, rot2_0); \
+ dct_long_mul(p4o, sum15, rot2_1); \
+ dct_wadd(sump13o, p1o, p3o); \
+ dct_wadd(sump24o, p2o, p4o); \
+ dct_wadd(sump23o, p2o, p3o); \
+ dct_wadd(sump14o, p1o, p4o); \
+ dct_long_mac(x4, sump13o, row7, rot3_0); \
+ dct_long_mac(x5, sump24o, row5, rot3_1); \
+ dct_long_mac(x6, sump23o, row3, rot3_2); \
+ dct_long_mac(x7, sump14o, row1, rot3_3); \
+ dct_bfly32o(row0,row7, x0,x7,shiftop,shift); \
+ dct_bfly32o(row1,row6, x1,x6,shiftop,shift); \
+ dct_bfly32o(row2,row5, x2,x5,shiftop,shift); \
+ dct_bfly32o(row3,row4, x3,x4,shiftop,shift); \
+ }
+
+ // load
+ row0 = vld1q_s16(data + 0*8);
+ row1 = vld1q_s16(data + 1*8);
+ row2 = vld1q_s16(data + 2*8);
+ row3 = vld1q_s16(data + 3*8);
+ row4 = vld1q_s16(data + 4*8);
+ row5 = vld1q_s16(data + 5*8);
+ row6 = vld1q_s16(data + 6*8);
+ row7 = vld1q_s16(data + 7*8);
+
+ // add DC bias
+ row0 = vaddq_s16(row0, vsetq_lane_s16(1024, vdupq_n_s16(0), 0));
+
+ // column pass
+ dct_pass(vrshrn_n_s32, 10);
+
+ // 16bit 8x8 transpose
+ {
+// these three map to a single VTRN.16, VTRN.32, and VSWP, respectively.
+// whether compilers actually get this is another story, sadly.
+#define dct_trn16(x, y) { int16x8x2_t t = vtrnq_s16(x, y); x = t.val[0]; y = t.val[1]; }
+#define dct_trn32(x, y) { int32x4x2_t t = vtrnq_s32(vreinterpretq_s32_s16(x), vreinterpretq_s32_s16(y)); x = vreinterpretq_s16_s32(t.val[0]); y = vreinterpretq_s16_s32(t.val[1]); }
+#define dct_trn64(x, y) { int16x8_t x0 = x; int16x8_t y0 = y; x = vcombine_s16(vget_low_s16(x0), vget_low_s16(y0)); y = vcombine_s16(vget_high_s16(x0), vget_high_s16(y0)); }
+
+ // pass 1
+ dct_trn16(row0, row1); // a0b0a2b2a4b4a6b6
+ dct_trn16(row2, row3);
+ dct_trn16(row4, row5);
+ dct_trn16(row6, row7);
+
+ // pass 2
+ dct_trn32(row0, row2); // a0b0c0d0a4b4c4d4
+ dct_trn32(row1, row3);
+ dct_trn32(row4, row6);
+ dct_trn32(row5, row7);
+
+ // pass 3
+ dct_trn64(row0, row4); // a0b0c0d0e0f0g0h0
+ dct_trn64(row1, row5);
+ dct_trn64(row2, row6);
+ dct_trn64(row3, row7);
+
+#undef dct_trn16
+#undef dct_trn32
+#undef dct_trn64
+ }
+
+ // row pass
+ // vrshrn_n_s32 only supports shifts up to 16, we need
+ // 17. so do a non-rounding shift of 16 first then follow
+ // up with a rounding shift by 1.
+ dct_pass(vshrn_n_s32, 16);
+
+ {
+ // pack and round
+ uint8x8_t p0 = vqrshrun_n_s16(row0, 1);
+ uint8x8_t p1 = vqrshrun_n_s16(row1, 1);
+ uint8x8_t p2 = vqrshrun_n_s16(row2, 1);
+ uint8x8_t p3 = vqrshrun_n_s16(row3, 1);
+ uint8x8_t p4 = vqrshrun_n_s16(row4, 1);
+ uint8x8_t p5 = vqrshrun_n_s16(row5, 1);
+ uint8x8_t p6 = vqrshrun_n_s16(row6, 1);
+ uint8x8_t p7 = vqrshrun_n_s16(row7, 1);
+
+ // again, these can translate into one instruction, but often don't.
+#define dct_trn8_8(x, y) { uint8x8x2_t t = vtrn_u8(x, y); x = t.val[0]; y = t.val[1]; }
+#define dct_trn8_16(x, y) { uint16x4x2_t t = vtrn_u16(vreinterpret_u16_u8(x), vreinterpret_u16_u8(y)); x = vreinterpret_u8_u16(t.val[0]); y = vreinterpret_u8_u16(t.val[1]); }
+#define dct_trn8_32(x, y) { uint32x2x2_t t = vtrn_u32(vreinterpret_u32_u8(x), vreinterpret_u32_u8(y)); x = vreinterpret_u8_u32(t.val[0]); y = vreinterpret_u8_u32(t.val[1]); }
+
+ // sadly can't use interleaved stores here since we only write
+ // 8 bytes to each scan line!
+
+ // 8x8 8-bit transpose pass 1
+ dct_trn8_8(p0, p1);
+ dct_trn8_8(p2, p3);
+ dct_trn8_8(p4, p5);
+ dct_trn8_8(p6, p7);
+
+ // pass 2
+ dct_trn8_16(p0, p2);
+ dct_trn8_16(p1, p3);
+ dct_trn8_16(p4, p6);
+ dct_trn8_16(p5, p7);
+
+ // pass 3
+ dct_trn8_32(p0, p4);
+ dct_trn8_32(p1, p5);
+ dct_trn8_32(p2, p6);
+ dct_trn8_32(p3, p7);
+
+ // store
+ vst1_u8(out, p0); out += out_stride;
+ vst1_u8(out, p1); out += out_stride;
+ vst1_u8(out, p2); out += out_stride;
+ vst1_u8(out, p3); out += out_stride;
+ vst1_u8(out, p4); out += out_stride;
+ vst1_u8(out, p5); out += out_stride;
+ vst1_u8(out, p6); out += out_stride;
+ vst1_u8(out, p7);
+
+#undef dct_trn8_8
+#undef dct_trn8_16
+#undef dct_trn8_32
+ }
+
+#undef dct_long_mul
+#undef dct_long_mac
+#undef dct_widen
+#undef dct_wadd
+#undef dct_wsub
+#undef dct_bfly32o
+#undef dct_pass
+}
+
+#endif // STBI_NEON
+
+#define STBI__MARKER_none 0xff
+// if there's a pending marker from the entropy stream, return that
+// otherwise, fetch from the stream and get a marker. if there's no
+// marker, return 0xff, which is never a valid marker value
+static stbi_uc stbi__get_marker(stbi__jpeg *j)
+{
+ stbi_uc x;
+ if (j->marker != STBI__MARKER_none) { x = j->marker; j->marker = STBI__MARKER_none; return x; }
+ x = stbi__get8(j->s);
+ if (x != 0xff) return STBI__MARKER_none;
+ while (x == 0xff)
+ x = stbi__get8(j->s); // consume repeated 0xff fill bytes
+ return x;
+}
+
+// in each scan, we'll have scan_n components, and the order
+// of the components is specified by order[]
+#define STBI__RESTART(x) ((x) >= 0xd0 && (x) <= 0xd7)
+
+// after a restart interval, stbi__jpeg_reset the entropy decoder and
+// the dc prediction
+static void stbi__jpeg_reset(stbi__jpeg *j)
+{
+ j->code_bits = 0;
+ j->code_buffer = 0;
+ j->nomore = 0;
+ j->img_comp[0].dc_pred = j->img_comp[1].dc_pred = j->img_comp[2].dc_pred = j->img_comp[3].dc_pred = 0;
+ j->marker = STBI__MARKER_none;
+ j->todo = j->restart_interval ? j->restart_interval : 0x7fffffff;
+ j->eob_run = 0;
+ // no more than 1<<31 MCUs if no restart_interval? that's plenty safe,
+ // since we don't even allow 1<<30 pixels
+}
+
+static int stbi__parse_entropy_coded_data(stbi__jpeg *z)
+{
+ stbi__jpeg_reset(z);
+ if (!z->progressive) {
+ if (z->scan_n == 1) {
+ int i,j;
+ STBI_SIMD_ALIGN(short, data[64]);
+ int n = z->order[0];
+ // non-interleaved data, we just need to process one block at a time,
+ // in trivial scanline order
+ // number of blocks to do just depends on how many actual "pixels" this
+ // component has, independent of interleaved MCU blocking and such
+ int w = (z->img_comp[n].x+7) >> 3;
+ int h = (z->img_comp[n].y+7) >> 3;
+ for (j=0; j < h; ++j) {
+ for (i=0; i < w; ++i) {
+ int ha = z->img_comp[n].ha;
+ if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
+ z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
+ // every data block is an MCU, so countdown the restart interval
+ if (--z->todo <= 0) {
+ if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+ // if it's NOT a restart, then just bail, so we get corrupt data
+ // rather than no data
+ if (!STBI__RESTART(z->marker)) return 1;
+ stbi__jpeg_reset(z);
+ }
+ }
+ }
+ return 1;
+ } else { // interleaved
+ int i,j,k,x,y;
+ STBI_SIMD_ALIGN(short, data[64]);
+ for (j=0; j < z->img_mcu_y; ++j) {
+ for (i=0; i < z->img_mcu_x; ++i) {
+ // scan an interleaved mcu... process scan_n components in order
+ for (k=0; k < z->scan_n; ++k) {
+ int n = z->order[k];
+ // scan out an mcu's worth of this component; that's just determined
+ // by the basic H and V specified for the component
+ for (y=0; y < z->img_comp[n].v; ++y) {
+ for (x=0; x < z->img_comp[n].h; ++x) {
+ int x2 = (i*z->img_comp[n].h + x)*8;
+ int y2 = (j*z->img_comp[n].v + y)*8;
+ int ha = z->img_comp[n].ha;
+ if (!stbi__jpeg_decode_block(z, data, z->huff_dc+z->img_comp[n].hd, z->huff_ac+ha, z->fast_ac[ha], n, z->dequant[z->img_comp[n].tq])) return 0;
+ z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*y2+x2, z->img_comp[n].w2, data);
+ }
+ }
+ }
+ // after all interleaved components, that's an interleaved MCU,
+ // so now count down the restart interval
+ if (--z->todo <= 0) {
+ if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+ if (!STBI__RESTART(z->marker)) return 1;
+ stbi__jpeg_reset(z);
+ }
+ }
+ }
+ return 1;
+ }
+ } else {
+ if (z->scan_n == 1) {
+ int i,j;
+ int n = z->order[0];
+ // non-interleaved data, we just need to process one block at a time,
+ // in trivial scanline order
+ // number of blocks to do just depends on how many actual "pixels" this
+ // component has, independent of interleaved MCU blocking and such
+ int w = (z->img_comp[n].x+7) >> 3;
+ int h = (z->img_comp[n].y+7) >> 3;
+ for (j=0; j < h; ++j) {
+ for (i=0; i < w; ++i) {
+ short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
+ if (z->spec_start == 0) {
+ if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
+ return 0;
+ } else {
+ int ha = z->img_comp[n].ha;
+ if (!stbi__jpeg_decode_block_prog_ac(z, data, &z->huff_ac[ha], z->fast_ac[ha]))
+ return 0;
+ }
+ // every data block is an MCU, so countdown the restart interval
+ if (--z->todo <= 0) {
+ if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+ if (!STBI__RESTART(z->marker)) return 1;
+ stbi__jpeg_reset(z);
+ }
+ }
+ }
+ return 1;
+ } else { // interleaved
+ int i,j,k,x,y;
+ for (j=0; j < z->img_mcu_y; ++j) {
+ for (i=0; i < z->img_mcu_x; ++i) {
+ // scan an interleaved mcu... process scan_n components in order
+ for (k=0; k < z->scan_n; ++k) {
+ int n = z->order[k];
+ // scan out an mcu's worth of this component; that's just determined
+ // by the basic H and V specified for the component
+ for (y=0; y < z->img_comp[n].v; ++y) {
+ for (x=0; x < z->img_comp[n].h; ++x) {
+ int x2 = (i*z->img_comp[n].h + x);
+ int y2 = (j*z->img_comp[n].v + y);
+ short *data = z->img_comp[n].coeff + 64 * (x2 + y2 * z->img_comp[n].coeff_w);
+ if (!stbi__jpeg_decode_block_prog_dc(z, data, &z->huff_dc[z->img_comp[n].hd], n))
+ return 0;
+ }
+ }
+ }
+ // after all interleaved components, that's an interleaved MCU,
+ // so now count down the restart interval
+ if (--z->todo <= 0) {
+ if (z->code_bits < 24) stbi__grow_buffer_unsafe(z);
+ if (!STBI__RESTART(z->marker)) return 1;
+ stbi__jpeg_reset(z);
+ }
+ }
+ }
+ return 1;
+ }
+ }
+}
+
+static void stbi__jpeg_dequantize(short *data, stbi__uint16 *dequant)
+{
+ int i;
+ for (i=0; i < 64; ++i)
+ data[i] *= dequant[i];
+}
+
+static void stbi__jpeg_finish(stbi__jpeg *z)
+{
+ if (z->progressive) {
+ // dequantize and idct the data
+ int i,j,n;
+ for (n=0; n < z->s->img_n; ++n) {
+ int w = (z->img_comp[n].x+7) >> 3;
+ int h = (z->img_comp[n].y+7) >> 3;
+ for (j=0; j < h; ++j) {
+ for (i=0; i < w; ++i) {
+ short *data = z->img_comp[n].coeff + 64 * (i + j * z->img_comp[n].coeff_w);
+ stbi__jpeg_dequantize(data, z->dequant[z->img_comp[n].tq]);
+ z->idct_block_kernel(z->img_comp[n].data+z->img_comp[n].w2*j*8+i*8, z->img_comp[n].w2, data);
+ }
+ }
+ }
+ }
+}
+
+static int stbi__process_marker(stbi__jpeg *z, int m)
+{
+ int L;
+ switch (m) {
+ case STBI__MARKER_none: // no marker found
+ return stbi__err("expected marker","Corrupt JPEG");
+
+ case 0xDD: // DRI - specify restart interval
+ if (stbi__get16be(z->s) != 4) return stbi__err("bad DRI len","Corrupt JPEG");
+ z->restart_interval = stbi__get16be(z->s);
+ return 1;
+
+ case 0xDB: // DQT - define quantization table
+ L = stbi__get16be(z->s)-2;
+ while (L > 0) {
+ int q = stbi__get8(z->s);
+ int p = q >> 4, sixteen = (p != 0);
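+ // upper nibble is the table precision (Pq): 0 => 8-bit entries, 1 => 16-bit entries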
+ int t = q & 15,i;
+ if (p != 0 && p != 1) return stbi__err("bad DQT type","Corrupt JPEG");
+ if (t > 3) return stbi__err("bad DQT table","Corrupt JPEG");
+
+ for (i=0; i < 64; ++i)
+ z->dequant[t][stbi__jpeg_dezigzag[i]] = (stbi__uint16)(sixteen ? stbi__get16be(z->s) : stbi__get8(z->s));
+ L -= (sixteen ? 129 : 65);
+ }
+ return L==0;
+
+ case 0xC4: // DHT - define huffman table
+ L = stbi__get16be(z->s)-2;
+ while (L > 0) {
+ stbi_uc *v;
+ int sizes[16],i,n=0;
+ int q = stbi__get8(z->s);
+ int tc = q >> 4;
+ int th = q & 15;
+ if (tc > 1 || th > 3) return stbi__err("bad DHT header","Corrupt JPEG");
+ for (i=0; i < 16; ++i) {
+ sizes[i] = stbi__get8(z->s);
+ n += sizes[i];
+ }
+ L -= 17;
+ if (tc == 0) {
+ if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0;
+ v = z->huff_dc[th].values;
+ } else {
+ if (!stbi__build_huffman(z->huff_ac+th, sizes)) return 0;
+ v = z->huff_ac[th].values;
+ }
+ for (i=0; i < n; ++i)
+ v[i] = stbi__get8(z->s);
+ if (tc != 0)
+ stbi__build_fast_ac(z->fast_ac[th], z->huff_ac + th);
+ L -= n;
+ }
+ return L==0;
+ }
+
+ // check for comment block or APP blocks
+ if ((m >= 0xE0 && m <= 0xEF) || m == 0xFE) {
+ L = stbi__get16be(z->s);
+ if (L < 2) {
+ if (m == 0xFE)
+ return stbi__err("bad COM len","Corrupt JPEG");
+ else
+ return stbi__err("bad APP len","Corrupt JPEG");
+ }
+ L -= 2;
+
+ if (m == 0xE0 && L >= 5) { // JFIF APP0 segment
+ static const unsigned char tag[5] = {'J','F','I','F','\0'};
+ int ok = 1;
+ int i;
+ for (i=0; i < 5; ++i)
+ if (stbi__get8(z->s) != tag[i])
+ ok = 0;
+ L -= 5;
+ if (ok)
+ z->jfif = 1;
+ } else if (m == 0xEE && L >= 12) { // Adobe APP14 segment
+ static const unsigned char tag[6] = {'A','d','o','b','e','\0'};
+ int ok = 1;
+ int i;
+ for (i=0; i < 6; ++i)
+ if (stbi__get8(z->s) != tag[i])
+ ok = 0;
+ L -= 6;
+ if (ok) {
+ stbi__get8(z->s); // version
+ stbi__get16be(z->s); // flags0
+ stbi__get16be(z->s); // flags1
+ z->app14_color_transform = stbi__get8(z->s); // color transform
+ L -= 6;
+ }
+ }
+
+ stbi__skip(z->s, L);
+ return 1;
+ }
+
+ return stbi__err("unknown marker","Corrupt JPEG");
+}
+
+// after we see SOS
+static int stbi__process_scan_header(stbi__jpeg *z)
+{
+ int i;
+ int Ls = stbi__get16be(z->s);
+ z->scan_n = stbi__get8(z->s);
+ if (z->scan_n < 1 || z->scan_n > 4 || z->scan_n > (int) z->s->img_n) return stbi__err("bad SOS component count","Corrupt JPEG");
+ if (Ls != 6+2*z->scan_n) return stbi__err("bad SOS len","Corrupt JPEG");
+ for (i=0; i < z->scan_n; ++i) {
+ int id = stbi__get8(z->s), which;
+ int q = stbi__get8(z->s);
+ for (which = 0; which < z->s->img_n; ++which)
+ if (z->img_comp[which].id == id)
+ break;
+ if (which == z->s->img_n) return 0; // no match
+ z->img_comp[which].hd = q >> 4; if (z->img_comp[which].hd > 3) return stbi__err("bad DC huff","Corrupt JPEG");
+ z->img_comp[which].ha = q & 15; if (z->img_comp[which].ha > 3) return stbi__err("bad AC huff","Corrupt JPEG");
+ z->order[i] = which;
+ }
+
+ {
+ int aa;
+ z->spec_start = stbi__get8(z->s);
+ z->spec_end = stbi__get8(z->s); // should be 63, but might be 0
+ aa = stbi__get8(z->s);
+ z->succ_high = (aa >> 4);
+ z->succ_low = (aa & 15);
+ if (z->progressive) {
+ if (z->spec_start > 63 || z->spec_end > 63 || z->spec_start > z->spec_end || z->succ_high > 13 || z->succ_low > 13)
+ return stbi__err("bad SOS", "Corrupt JPEG");
+ } else {
+ if (z->spec_start != 0) return stbi__err("bad SOS","Corrupt JPEG");
+ if (z->succ_high != 0 || z->succ_low != 0) return stbi__err("bad SOS","Corrupt JPEG");
+ z->spec_end = 63;
+ }
+ }
+
+ return 1;
+}
+
+static int stbi__free_jpeg_components(stbi__jpeg *z, int ncomp, int why)
+{
+ int i;
+ for (i=0; i < ncomp; ++i) {
+ if (z->img_comp[i].raw_data) {
+ STBI_FREE(z->img_comp[i].raw_data);
+ z->img_comp[i].raw_data = NULL;
+ z->img_comp[i].data = NULL;
+ }
+ if (z->img_comp[i].raw_coeff) {
+ STBI_FREE(z->img_comp[i].raw_coeff);
+ z->img_comp[i].raw_coeff = 0;
+ z->img_comp[i].coeff = 0;
+ }
+ if (z->img_comp[i].linebuf) {
+ STBI_FREE(z->img_comp[i].linebuf);
+ z->img_comp[i].linebuf = NULL;
+ }
+ }
+ return why;
+}
+
+static int stbi__process_frame_header(stbi__jpeg *z, int scan)
+{
+ stbi__context *s = z->s;
+ int Lf,p,i,q, h_max=1,v_max=1,c;
+ Lf = stbi__get16be(s); if (Lf < 11) return stbi__err("bad SOF len","Corrupt JPEG"); // JPEG
+ p = stbi__get8(s); if (p != 8) return stbi__err("only 8-bit","JPEG format not supported: 8-bit only"); // JPEG baseline
+ s->img_y = stbi__get16be(s); if (s->img_y == 0) return stbi__err("no header height", "JPEG format not supported: delayed height"); // Legal, but we don't handle it--but neither does IJG
+ s->img_x = stbi__get16be(s); if (s->img_x == 0) return stbi__err("0 width","Corrupt JPEG"); // JPEG requires
+ c = stbi__get8(s);
+ if (c != 3 && c != 1 && c != 4) return stbi__err("bad component count","Corrupt JPEG");
+ s->img_n = c;
+ for (i=0; i < c; ++i) {
+ z->img_comp[i].data = NULL;
+ z->img_comp[i].linebuf = NULL;
+ }
+
+ if (Lf != 8+3*s->img_n) return stbi__err("bad SOF len","Corrupt JPEG");
+
+ z->rgb = 0;
+ for (i=0; i < s->img_n; ++i) {
+ static const unsigned char rgb[3] = { 'R', 'G', 'B' };
+ z->img_comp[i].id = stbi__get8(s);
+ if (s->img_n == 3 && z->img_comp[i].id == rgb[i])
+ ++z->rgb;
+ q = stbi__get8(s);
+ z->img_comp[i].h = (q >> 4); if (!z->img_comp[i].h || z->img_comp[i].h > 4) return stbi__err("bad H","Corrupt JPEG");
+ z->img_comp[i].v = q & 15; if (!z->img_comp[i].v || z->img_comp[i].v > 4) return stbi__err("bad V","Corrupt JPEG");
+ z->img_comp[i].tq = stbi__get8(s); if (z->img_comp[i].tq > 3) return stbi__err("bad TQ","Corrupt JPEG");
+ }
+
+ if (scan != STBI__SCAN_load) return 1;
+
+ if (!stbi__mad3sizes_valid(s->img_x, s->img_y, s->img_n, 0)) return stbi__err("too large", "Image too large to decode");
+
+ for (i=0; i < s->img_n; ++i) {
+ if (z->img_comp[i].h > h_max) h_max = z->img_comp[i].h;
+ if (z->img_comp[i].v > v_max) v_max = z->img_comp[i].v;
+ }
+
+ // compute interleaved mcu info
+ z->img_h_max = h_max;
+ z->img_v_max = v_max;
+ z->img_mcu_w = h_max * 8;
+ z->img_mcu_h = v_max * 8;
+ // these sizes can't be more than 17 bits
+ z->img_mcu_x = (s->img_x + z->img_mcu_w-1) / z->img_mcu_w;
+ z->img_mcu_y = (s->img_y + z->img_mcu_h-1) / z->img_mcu_h;
+
+ for (i=0; i < s->img_n; ++i) {
+ // number of effective pixels (e.g. for non-interleaved MCU)
+ z->img_comp[i].x = (s->img_x * z->img_comp[i].h + h_max-1) / h_max;
+ z->img_comp[i].y = (s->img_y * z->img_comp[i].v + v_max-1) / v_max;
+ // to simplify generation, we'll allocate enough memory to decode
+ // the bogus oversized data from using interleaved MCUs and their
+ // big blocks (e.g. a 16x16 iMCU on an image of width 33); we won't
+ // discard the extra data until colorspace conversion
+ //
+ // img_mcu_x, img_mcu_y: <=17 bits; comp[i].h and .v are <=4 (checked earlier)
+ // so these muls can't overflow with 32-bit ints (which we require)
+ z->img_comp[i].w2 = z->img_mcu_x * z->img_comp[i].h * 8;
+ z->img_comp[i].h2 = z->img_mcu_y * z->img_comp[i].v * 8;
+ z->img_comp[i].coeff = 0;
+ z->img_comp[i].raw_coeff = 0;
+ z->img_comp[i].linebuf = NULL;
+ z->img_comp[i].raw_data = stbi__malloc_mad2(z->img_comp[i].w2, z->img_comp[i].h2, 15);
+ if (z->img_comp[i].raw_data == NULL)
+ return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
+ // align blocks for idct using mmx/sse
+ z->img_comp[i].data = (stbi_uc*) (((size_t) z->img_comp[i].raw_data + 15) & ~15);
+ if (z->progressive) {
+ // w2, h2 are multiples of 8 (see above)
+ z->img_comp[i].coeff_w = z->img_comp[i].w2 / 8;
+ z->img_comp[i].coeff_h = z->img_comp[i].h2 / 8;
+ z->img_comp[i].raw_coeff = stbi__malloc_mad3(z->img_comp[i].w2, z->img_comp[i].h2, sizeof(short), 15);
+ if (z->img_comp[i].raw_coeff == NULL)
+ return stbi__free_jpeg_components(z, i+1, stbi__err("outofmem", "Out of memory"));
+ z->img_comp[i].coeff = (short*) (((size_t) z->img_comp[i].raw_coeff + 15) & ~15);
+ }
+ }
+
+ return 1;
+}
+
+// use comparisons since in some cases we handle more than one case (e.g. SOF)
+#define stbi__DNL(x) ((x) == 0xdc)
+#define stbi__SOI(x) ((x) == 0xd8)
+#define stbi__EOI(x) ((x) == 0xd9)
+#define stbi__SOF(x) ((x) == 0xc0 || (x) == 0xc1 || (x) == 0xc2)
+#define stbi__SOS(x) ((x) == 0xda)
+
+#define stbi__SOF_progressive(x) ((x) == 0xc2)
+
+static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan)
+{
+ int m;
+ z->jfif = 0;
+ z->app14_color_transform = -1; // valid values are 0,1,2
+ z->marker = STBI__MARKER_none; // initialize cached marker to empty
+ m = stbi__get_marker(z);
+ if (!stbi__SOI(m)) return stbi__err("no SOI","Corrupt JPEG");
+ if (scan == STBI__SCAN_type) return 1;
+ m = stbi__get_marker(z);
+ while (!stbi__SOF(m)) {
+ if (!stbi__process_marker(z,m)) return 0;
+ m = stbi__get_marker(z);
+ while (m == STBI__MARKER_none) {
+ // some files have extra padding after their blocks, so ok, we'll scan
+ if (stbi__at_eof(z->s)) return stbi__err("no SOF", "Corrupt JPEG");
+ m = stbi__get_marker(z);
+ }
+ }
+ z->progressive = stbi__SOF_progressive(m);
+ if (!stbi__process_frame_header(z, scan)) return 0;
+ return 1;
+}
+
+// decode image to YCbCr format
+static int stbi__decode_jpeg_image(stbi__jpeg *j)
+{
+ int m;
+ for (m = 0; m < 4; m++) {
+ j->img_comp[m].raw_data = NULL;
+ j->img_comp[m].raw_coeff = NULL;
+ }
+ j->restart_interval = 0;
+ if (!stbi__decode_jpeg_header(j, STBI__SCAN_load)) return 0;
+ m = stbi__get_marker(j);
+ while (!stbi__EOI(m)) {
+ if (stbi__SOS(m)) {
+ if (!stbi__process_scan_header(j)) return 0;
+ if (!stbi__parse_entropy_coded_data(j)) return 0;
+ if (j->marker == STBI__MARKER_none ) {
+ // handle 0s at the end of image data from IP Kamera 9060
+ while (!stbi__at_eof(j->s)) {
+ int x = stbi__get8(j->s);
+ if (x == 255) {
+ j->marker = stbi__get8(j->s);
+ break;
+ }
+ }
+ // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0
+ }
+ } else if (stbi__DNL(m)) {
+ int Ld = stbi__get16be(j->s);
+ stbi__uint32 NL = stbi__get16be(j->s);
+ if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG");
+ if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG");
+ } else {
+ if (!stbi__process_marker(j, m)) return 0;
+ }
+ m = stbi__get_marker(j);
+ }
+ if (j->progressive)
+ stbi__jpeg_finish(j);
+ return 1;
+}
+
+// static jfif-centered resampling (across block boundaries)
+
+typedef stbi_uc *(*resample_row_func)(stbi_uc *out, stbi_uc *in0, stbi_uc *in1,
+ int w, int hs);
+
+#define stbi__div4(x) ((stbi_uc) ((x) >> 2))
+
+static stbi_uc *resample_row_1(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ STBI_NOTUSED(out);
+ STBI_NOTUSED(in_far);
+ STBI_NOTUSED(w);
+ STBI_NOTUSED(hs);
+ return in_near;
+}
+
+static stbi_uc* stbi__resample_row_v_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ // need to generate two samples vertically for every one in input
+ int i;
+ STBI_NOTUSED(hs);
+ for (i=0; i < w; ++i)
+ out[i] = stbi__div4(3*in_near[i] + in_far[i] + 2);
+ return out;
+}
+
+static stbi_uc* stbi__resample_row_h_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ // need to generate two samples horizontally for every one in input
+ int i;
+ stbi_uc *input = in_near;
+
+ if (w == 1) {
+ // if only one sample, can't do any interpolation
+ out[0] = out[1] = input[0];
+ return out;
+ }
+
+ out[0] = input[0];
+ out[1] = stbi__div4(input[0]*3 + input[1] + 2);
+ for (i=1; i < w-1; ++i) {
+ int n = 3*input[i]+2;
+ out[i*2+0] = stbi__div4(n+input[i-1]);
+ out[i*2+1] = stbi__div4(n+input[i+1]);
+ }
+ out[i*2+0] = stbi__div4(input[w-2]*3 + input[w-1] + 2);
+ out[i*2+1] = input[w-1];
+
+ STBI_NOTUSED(in_far);
+ STBI_NOTUSED(hs);
+
+ return out;
+}
+
+#define stbi__div16(x) ((stbi_uc) ((x) >> 4))
+
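+// 2x2 upsampling with the jfif-centered filter: each output sample blends the
+// nearer and farther input sample with weights 3:1 in each direction, so the
+// combined horizontal+vertical weights sum to 16 (hence stbi__div16)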
+static stbi_uc *stbi__resample_row_hv_2(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ // need to generate 2x2 samples for every one in input
+ int i,t0,t1;
+ if (w == 1) {
+ out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
+ return out;
+ }
+
+ t1 = 3*in_near[0] + in_far[0];
+ out[0] = stbi__div4(t1+2);
+ for (i=1; i < w; ++i) {
+ t0 = t1;
+ t1 = 3*in_near[i]+in_far[i];
+ out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
+ out[i*2 ] = stbi__div16(3*t1 + t0 + 8);
+ }
+ out[w*2-1] = stbi__div4(t1+2);
+
+ STBI_NOTUSED(hs);
+
+ return out;
+}
+
+#if defined(STBI_SSE2) || defined(STBI_NEON)
+static stbi_uc *stbi__resample_row_hv_2_simd(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ // need to generate 2x2 samples for every one in input
+ int i=0,t0,t1;
+
+ if (w == 1) {
+ out[0] = out[1] = stbi__div4(3*in_near[0] + in_far[0] + 2);
+ return out;
+ }
+
+ t1 = 3*in_near[0] + in_far[0];
+ // process groups of 8 pixels for as long as we can.
+ // note we can't handle the last pixel in a row in this loop
+ // because we need to handle the filter boundary conditions.
+ for (; i < ((w-1) & ~7); i += 8) {
+#if defined(STBI_SSE2)
+ // load and perform the vertical filtering pass
+ // this uses 3*x + y = 4*x + (y - x)
+ __m128i zero = _mm_setzero_si128();
+ __m128i farb = _mm_loadl_epi64((__m128i *) (in_far + i));
+ __m128i nearb = _mm_loadl_epi64((__m128i *) (in_near + i));
+ __m128i farw = _mm_unpacklo_epi8(farb, zero);
+ __m128i nearw = _mm_unpacklo_epi8(nearb, zero);
+ __m128i diff = _mm_sub_epi16(farw, nearw);
+ __m128i nears = _mm_slli_epi16(nearw, 2);
+ __m128i curr = _mm_add_epi16(nears, diff); // current row
+
+ // horizontal filter works the same based on shifted versions of the current
+ // row. "prev" is current row shifted right by 1 pixel; we need to
+ // insert the previous pixel value (from t1).
+ // "next" is current row shifted left by 1 pixel, with first pixel
+ // of next block of 8 pixels added in.
+ __m128i prv0 = _mm_slli_si128(curr, 2);
+ __m128i nxt0 = _mm_srli_si128(curr, 2);
+ __m128i prev = _mm_insert_epi16(prv0, t1, 0);
+ __m128i next = _mm_insert_epi16(nxt0, 3*in_near[i+8] + in_far[i+8], 7);
+
+ // horizontal filter, polyphase implementation since it's convenient:
+ // even pixels = 3*cur + prev = cur*4 + (prev - cur)
+ // odd pixels = 3*cur + next = cur*4 + (next - cur)
+ // note the shared term.
+ __m128i bias = _mm_set1_epi16(8);
+ __m128i curs = _mm_slli_epi16(curr, 2);
+ __m128i prvd = _mm_sub_epi16(prev, curr);
+ __m128i nxtd = _mm_sub_epi16(next, curr);
+ __m128i curb = _mm_add_epi16(curs, bias);
+ __m128i even = _mm_add_epi16(prvd, curb);
+ __m128i odd = _mm_add_epi16(nxtd, curb);
+
+ // interleave even and odd pixels, then undo scaling.
+ __m128i int0 = _mm_unpacklo_epi16(even, odd);
+ __m128i int1 = _mm_unpackhi_epi16(even, odd);
+ __m128i de0 = _mm_srli_epi16(int0, 4);
+ __m128i de1 = _mm_srli_epi16(int1, 4);
+
+ // pack and write output
+ __m128i outv = _mm_packus_epi16(de0, de1);
+ _mm_storeu_si128((__m128i *) (out + i*2), outv);
+#elif defined(STBI_NEON)
+ // load and perform the vertical filtering pass
+ // this uses 3*x + y = 4*x + (y - x)
+ uint8x8_t farb = vld1_u8(in_far + i);
+ uint8x8_t nearb = vld1_u8(in_near + i);
+ int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(farb, nearb));
+ int16x8_t nears = vreinterpretq_s16_u16(vshll_n_u8(nearb, 2));
+ int16x8_t curr = vaddq_s16(nears, diff); // current row
+
+ // horizontal filter works the same based on shifted versions of the current
+ // row. "prev" is current row shifted right by 1 pixel; we need to
+ // insert the previous pixel value (from t1).
+ // "next" is current row shifted left by 1 pixel, with first pixel
+ // of next block of 8 pixels added in.
+ int16x8_t prv0 = vextq_s16(curr, curr, 7);
+ int16x8_t nxt0 = vextq_s16(curr, curr, 1);
+ int16x8_t prev = vsetq_lane_s16(t1, prv0, 0);
+ int16x8_t next = vsetq_lane_s16(3*in_near[i+8] + in_far[i+8], nxt0, 7);
+
+ // horizontal filter, polyphase implementation since it's convenient:
+ // even pixels = 3*cur + prev = cur*4 + (prev - cur)
+ // odd pixels = 3*cur + next = cur*4 + (next - cur)
+ // note the shared term.
+ int16x8_t curs = vshlq_n_s16(curr, 2);
+ int16x8_t prvd = vsubq_s16(prev, curr);
+ int16x8_t nxtd = vsubq_s16(next, curr);
+ int16x8_t even = vaddq_s16(curs, prvd);
+ int16x8_t odd = vaddq_s16(curs, nxtd);
+
+ // undo scaling and round, then store with even/odd phases interleaved
+ uint8x8x2_t o;
+ o.val[0] = vqrshrun_n_s16(even, 4);
+ o.val[1] = vqrshrun_n_s16(odd, 4);
+ vst2_u8(out + i*2, o);
+#endif
+
+ // "previous" value for next iter
+ t1 = 3*in_near[i+7] + in_far[i+7];
+ }
+
+ t0 = t1;
+ t1 = 3*in_near[i] + in_far[i];
+ out[i*2] = stbi__div16(3*t1 + t0 + 8);
+
+ for (++i; i < w; ++i) {
+ t0 = t1;
+ t1 = 3*in_near[i]+in_far[i];
+ out[i*2-1] = stbi__div16(3*t0 + t1 + 8);
+ out[i*2 ] = stbi__div16(3*t1 + t0 + 8);
+ }
+ out[w*2-1] = stbi__div4(t1+2);
+
+ STBI_NOTUSED(hs);
+
+ return out;
+}
+#endif
+
+static stbi_uc *stbi__resample_row_generic(stbi_uc *out, stbi_uc *in_near, stbi_uc *in_far, int w, int hs)
+{
+ // resample with nearest-neighbor
+ int i,j;
+ STBI_NOTUSED(in_far);
+ for (i=0; i < w; ++i)
+ for (j=0; j < hs; ++j)
+ out[i*hs+j] = in_near[i];
+ return out;
+}
+
+// this is a reduced-precision calculation of YCbCr-to-RGB introduced
+// to make sure the code produces the same results in both SIMD and scalar
+#define stbi__float2fixed(x) (((int) ((x) * 4096.0f + 0.5f)) << 8)
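+// stbi__float2fixed(k) is round(k*4096) << 8, i.e. k scaled by 2^20; y gets the
+// same scale plus a 1<<19 bias so the final >>20 rounds to nearest. masking the
+// cb term of g with 0xffff0000 intentionally drops low-order bits so the scalar
+// path produces the same results as the 16-bit SIMD path.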
+static void stbi__YCbCr_to_RGB_row(stbi_uc *out, const stbi_uc *y, const stbi_uc *pcb, const stbi_uc *pcr, int count, int step)
+{
+ int i;
+ for (i=0; i < count; ++i) {
+ int y_fixed = (y[i] << 20) + (1<<19); // rounding
+ int r,g,b;
+ int cr = pcr[i] - 128;
+ int cb = pcb[i] - 128;
+ r = y_fixed + cr* stbi__float2fixed(1.40200f);
+ g = y_fixed + (cr*-stbi__float2fixed(0.71414f)) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
+ b = y_fixed + cb* stbi__float2fixed(1.77200f);
+ r >>= 20;
+ g >>= 20;
+ b >>= 20;
+ if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
+ if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
+ if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
+ out[0] = (stbi_uc)r;
+ out[1] = (stbi_uc)g;
+ out[2] = (stbi_uc)b;
+ out[3] = 255;
+ out += step;
+ }
+}
+
+#if defined(STBI_SSE2) || defined(STBI_NEON)
+static void stbi__YCbCr_to_RGB_simd(stbi_uc *out, stbi_uc const *y, stbi_uc const *pcb, stbi_uc const *pcr, int count, int step)
+{
+ int i = 0;
+
+#ifdef STBI_SSE2
+ // step == 3 is pretty ugly on the final interleave, and i'm not convinced
+ // it's useful in practice (you wouldn't use it for textures, for example).
+ // so just accelerate step == 4 case.
+ if (step == 4) {
+ // this is a fairly straightforward implementation and not super-optimized.
+ __m128i signflip = _mm_set1_epi8(-0x80);
+ __m128i cr_const0 = _mm_set1_epi16( (short) ( 1.40200f*4096.0f+0.5f));
+ __m128i cr_const1 = _mm_set1_epi16( - (short) ( 0.71414f*4096.0f+0.5f));
+ __m128i cb_const0 = _mm_set1_epi16( - (short) ( 0.34414f*4096.0f+0.5f));
+ __m128i cb_const1 = _mm_set1_epi16( (short) ( 1.77200f*4096.0f+0.5f));
+ __m128i y_bias = _mm_set1_epi8((char) (unsigned char) 128);
+ __m128i xw = _mm_set1_epi16(255); // alpha channel
+
+ for (; i+7 < count; i += 8) {
+ // load
+ __m128i y_bytes = _mm_loadl_epi64((__m128i *) (y+i));
+ __m128i cr_bytes = _mm_loadl_epi64((__m128i *) (pcr+i));
+ __m128i cb_bytes = _mm_loadl_epi64((__m128i *) (pcb+i));
+ __m128i cr_biased = _mm_xor_si128(cr_bytes, signflip); // -128
+ __m128i cb_biased = _mm_xor_si128(cb_bytes, signflip); // -128
+
+ // unpack to short (and left-shift cr, cb by 8)
+ __m128i yw = _mm_unpacklo_epi8(y_bias, y_bytes);
+ __m128i crw = _mm_unpacklo_epi8(_mm_setzero_si128(), cr_biased);
+ __m128i cbw = _mm_unpacklo_epi8(_mm_setzero_si128(), cb_biased);
+
+ // color transform
+ __m128i yws = _mm_srli_epi16(yw, 4);
+ __m128i cr0 = _mm_mulhi_epi16(cr_const0, crw);
+ __m128i cb0 = _mm_mulhi_epi16(cb_const0, cbw);
+ __m128i cb1 = _mm_mulhi_epi16(cbw, cb_const1);
+ __m128i cr1 = _mm_mulhi_epi16(crw, cr_const1);
+ __m128i rws = _mm_add_epi16(cr0, yws);
+ __m128i gwt = _mm_add_epi16(cb0, yws);
+ __m128i bws = _mm_add_epi16(yws, cb1);
+ __m128i gws = _mm_add_epi16(gwt, cr1);
+
+ // descale
+ __m128i rw = _mm_srai_epi16(rws, 4);
+ __m128i bw = _mm_srai_epi16(bws, 4);
+ __m128i gw = _mm_srai_epi16(gws, 4);
+
+ // back to byte, set up for transpose
+ __m128i brb = _mm_packus_epi16(rw, bw);
+ __m128i gxb = _mm_packus_epi16(gw, xw);
+
+ // transpose to interleave channels
+ __m128i t0 = _mm_unpacklo_epi8(brb, gxb);
+ __m128i t1 = _mm_unpackhi_epi8(brb, gxb);
+ __m128i o0 = _mm_unpacklo_epi16(t0, t1);
+ __m128i o1 = _mm_unpackhi_epi16(t0, t1);
+
+ // store
+ _mm_storeu_si128((__m128i *) (out + 0), o0);
+ _mm_storeu_si128((__m128i *) (out + 16), o1);
+ out += 32;
+ }
+ }
+#endif
+
+#ifdef STBI_NEON
+ // in this version, step=3 support would be easy to add. but is there demand?
+ if (step == 4) {
+ // this is a fairly straightforward implementation and not super-optimized.
+ uint8x8_t signflip = vdup_n_u8(0x80);
+ int16x8_t cr_const0 = vdupq_n_s16( (short) ( 1.40200f*4096.0f+0.5f));
+ int16x8_t cr_const1 = vdupq_n_s16( - (short) ( 0.71414f*4096.0f+0.5f));
+ int16x8_t cb_const0 = vdupq_n_s16( - (short) ( 0.34414f*4096.0f+0.5f));
+ int16x8_t cb_const1 = vdupq_n_s16( (short) ( 1.77200f*4096.0f+0.5f));
+
+ for (; i+7 < count; i += 8) {
+ // load
+ uint8x8_t y_bytes = vld1_u8(y + i);
+ uint8x8_t cr_bytes = vld1_u8(pcr + i);
+ uint8x8_t cb_bytes = vld1_u8(pcb + i);
+ int8x8_t cr_biased = vreinterpret_s8_u8(vsub_u8(cr_bytes, signflip));
+ int8x8_t cb_biased = vreinterpret_s8_u8(vsub_u8(cb_bytes, signflip));
+
+ // expand to s16
+ int16x8_t yws = vreinterpretq_s16_u16(vshll_n_u8(y_bytes, 4));
+ int16x8_t crw = vshll_n_s8(cr_biased, 7);
+ int16x8_t cbw = vshll_n_s8(cb_biased, 7);
+
+ // color transform
+ int16x8_t cr0 = vqdmulhq_s16(crw, cr_const0);
+ int16x8_t cb0 = vqdmulhq_s16(cbw, cb_const0);
+ int16x8_t cr1 = vqdmulhq_s16(crw, cr_const1);
+ int16x8_t cb1 = vqdmulhq_s16(cbw, cb_const1);
+ int16x8_t rws = vaddq_s16(yws, cr0);
+ int16x8_t gws = vaddq_s16(vaddq_s16(yws, cb0), cr1);
+ int16x8_t bws = vaddq_s16(yws, cb1);
+
+ // undo scaling, round, convert to byte
+ uint8x8x4_t o;
+ o.val[0] = vqrshrun_n_s16(rws, 4);
+ o.val[1] = vqrshrun_n_s16(gws, 4);
+ o.val[2] = vqrshrun_n_s16(bws, 4);
+ o.val[3] = vdup_n_u8(255);
+
+ // store, interleaving r/g/b/a
+ vst4_u8(out, o);
+ out += 8*4;
+ }
+ }
+#endif
+
+ for (; i < count; ++i) {
+ int y_fixed = (y[i] << 20) + (1<<19); // rounding
+ int r,g,b;
+ int cr = pcr[i] - 128;
+ int cb = pcb[i] - 128;
+ r = y_fixed + cr* stbi__float2fixed(1.40200f);
+ g = y_fixed + cr*-stbi__float2fixed(0.71414f) + ((cb*-stbi__float2fixed(0.34414f)) & 0xffff0000);
+ b = y_fixed + cb* stbi__float2fixed(1.77200f);
+ r >>= 20;
+ g >>= 20;
+ b >>= 20;
+ if ((unsigned) r > 255) { if (r < 0) r = 0; else r = 255; }
+ if ((unsigned) g > 255) { if (g < 0) g = 0; else g = 255; }
+ if ((unsigned) b > 255) { if (b < 0) b = 0; else b = 255; }
+ out[0] = (stbi_uc)r;
+ out[1] = (stbi_uc)g;
+ out[2] = (stbi_uc)b;
+ out[3] = 255;
+ out += step;
+ }
+}
+#endif
+
+// set up the kernels
+static void stbi__setup_jpeg(stbi__jpeg *j)
+{
+ j->idct_block_kernel = stbi__idct_block;
+ j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_row;
+ j->resample_row_hv_2_kernel = stbi__resample_row_hv_2;
+
+#ifdef STBI_SSE2
+ if (stbi__sse2_available()) {
+ j->idct_block_kernel = stbi__idct_simd;
+ j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
+ j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
+ }
+#endif
+
+#ifdef STBI_NEON
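+ // no runtime feature check here: when STBI_NEON is defined the target is
+ // assumed to always have NEON (unlike SSE2, which is detected above)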
+ j->idct_block_kernel = stbi__idct_simd;
+ j->YCbCr_to_RGB_kernel = stbi__YCbCr_to_RGB_simd;
+ j->resample_row_hv_2_kernel = stbi__resample_row_hv_2_simd;
+#endif
+}
+
+// clean up the temporary component buffers
+static void stbi__cleanup_jpeg(stbi__jpeg *j)
+{
+ stbi__free_jpeg_components(j, j->s->img_n, 0);
+}
+
+typedef struct
+{
+ resample_row_func resample;
+ stbi_uc *line0,*line1;
+ int hs,vs; // expansion factor in each axis
+ int w_lores; // horizontal pixels pre-expansion
+ int ystep; // how far through vertical expansion we are
+ int ypos; // which pre-expansion row we're on
+} stbi__resample;
+
+// fast 0..255 * 0..255 => 0..255 rounded multiplication
+static stbi_uc stbi__blinn_8x8(stbi_uc x, stbi_uc y)
+{
+ unsigned int t = x*y + 128;
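+ // (t + (t >> 8)) >> 8 approximates x*y/255 with rounding, avoiding a divide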
+ return (stbi_uc) ((t + (t >>8)) >> 8);
+}
+
+static stbi_uc *load_jpeg_image(stbi__jpeg *z, int *out_x, int *out_y, int *comp, int req_comp)
+{
+ int n, decode_n, is_rgb;
+ z->s->img_n = 0; // make stbi__cleanup_jpeg safe
+
+ // validate req_comp
+ if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
+
+ // load a jpeg image from whichever source, but leave in YCbCr format
+ if (!stbi__decode_jpeg_image(z)) { stbi__cleanup_jpeg(z); return NULL; }
+
+ // determine actual number of components to generate
+ n = req_comp ? req_comp : z->s->img_n >= 3 ? 3 : 1;
+
+ is_rgb = z->s->img_n == 3 && (z->rgb == 3 || (z->app14_color_transform == 0 && !z->jfif));
+
+ if (z->s->img_n == 3 && n < 3 && !is_rgb)
+ decode_n = 1;
+ else
+ decode_n = z->s->img_n;
+
+ // resample and color-convert
+ {
+ int k;
+ unsigned int i,j;
+ stbi_uc *output;
+ stbi_uc *coutput[4];
+
+ stbi__resample res_comp[4];
+
+ for (k=0; k < decode_n; ++k) {
+ stbi__resample *r = &res_comp[k];
+
+ // allocate line buffer big enough for upsampling off the edges
+ // with upsample factor of 4
+ z->img_comp[k].linebuf = (stbi_uc *) stbi__malloc(z->s->img_x + 3);
+ if (!z->img_comp[k].linebuf) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
+
+ r->hs = z->img_h_max / z->img_comp[k].h;
+ r->vs = z->img_v_max / z->img_comp[k].v;
+ r->ystep = r->vs >> 1;
+ r->w_lores = (z->s->img_x + r->hs-1) / r->hs;
+ r->ypos = 0;
+ r->line0 = r->line1 = z->img_comp[k].data;
+
+ if (r->hs == 1 && r->vs == 1) r->resample = resample_row_1;
+ else if (r->hs == 1 && r->vs == 2) r->resample = stbi__resample_row_v_2;
+ else if (r->hs == 2 && r->vs == 1) r->resample = stbi__resample_row_h_2;
+ else if (r->hs == 2 && r->vs == 2) r->resample = z->resample_row_hv_2_kernel;
+ else r->resample = stbi__resample_row_generic;
+ }
+
+ // can't error after this, so this is safe
+ output = (stbi_uc *) stbi__malloc_mad3(n, z->s->img_x, z->s->img_y, 1);
+ if (!output) { stbi__cleanup_jpeg(z); return stbi__errpuc("outofmem", "Out of memory"); }
+
+ // now go ahead and resample
+ for (j=0; j < z->s->img_y; ++j) {
+ stbi_uc *out = output + n * z->s->img_x * j;
+ for (k=0; k < decode_n; ++k) {
+ stbi__resample *r = &res_comp[k];
+ int y_bot = r->ystep >= (r->vs >> 1);
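+ // y_bot picks which of the two buffered component rows (line0/line1) is
+ // nearer to the output scanline for this vertical phase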
+ coutput[k] = r->resample(z->img_comp[k].linebuf,
+ y_bot ? r->line1 : r->line0,
+ y_bot ? r->line0 : r->line1,
+ r->w_lores, r->hs);
+ if (++r->ystep >= r->vs) {
+ r->ystep = 0;
+ r->line0 = r->line1;
+ if (++r->ypos < z->img_comp[k].y)
+ r->line1 += z->img_comp[k].w2;
+ }
+ }
+ if (n >= 3) {
+ stbi_uc *y = coutput[0];
+ if (z->s->img_n == 3) {
+ if (is_rgb) {
+ for (i=0; i < z->s->img_x; ++i) {
+ out[0] = y[i];
+ out[1] = coutput[1][i];
+ out[2] = coutput[2][i];
+ out[3] = 255;
+ out += n;
+ }
+ } else {
+ z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+ }
+ } else if (z->s->img_n == 4) {
+ if (z->app14_color_transform == 0) { // CMYK
+ for (i=0; i < z->s->img_x; ++i) {
+ stbi_uc m = coutput[3][i];
+ out[0] = stbi__blinn_8x8(coutput[0][i], m);
+ out[1] = stbi__blinn_8x8(coutput[1][i], m);
+ out[2] = stbi__blinn_8x8(coutput[2][i], m);
+ out[3] = 255;
+ out += n;
+ }
+ } else if (z->app14_color_transform == 2) { // YCCK
+ z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+ for (i=0; i < z->s->img_x; ++i) {
+ stbi_uc m = coutput[3][i];
+ out[0] = stbi__blinn_8x8(255 - out[0], m);
+ out[1] = stbi__blinn_8x8(255 - out[1], m);
+ out[2] = stbi__blinn_8x8(255 - out[2], m);
+ out += n;
+ }
+ } else { // YCbCr + alpha? Ignore the fourth channel for now
+ z->YCbCr_to_RGB_kernel(out, y, coutput[1], coutput[2], z->s->img_x, n);
+ }
+ } else
+ for (i=0; i < z->s->img_x; ++i) {
+ out[0] = out[1] = out[2] = y[i];
+ out[3] = 255; // not used if n==3
+ out += n;
+ }
+ } else {
+ if (is_rgb) {
+ if (n == 1)
+ for (i=0; i < z->s->img_x; ++i)
+ *out++ = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
+ else {
+ for (i=0; i < z->s->img_x; ++i, out += 2) {
+ out[0] = stbi__compute_y(coutput[0][i], coutput[1][i], coutput[2][i]);
+ out[1] = 255;
+ }
+ }
+ } else if (z->s->img_n == 4 && z->app14_color_transform == 0) {
+ for (i=0; i < z->s->img_x; ++i) {
+ stbi_uc m = coutput[3][i];
+ stbi_uc r = stbi__blinn_8x8(coutput[0][i], m);
+ stbi_uc g = stbi__blinn_8x8(coutput[1][i], m);
+ stbi_uc b = stbi__blinn_8x8(coutput[2][i], m);
+ out[0] = stbi__compute_y(r, g, b);
+ out[1] = 255;
+ out += n;
+ }
+ } else if (z->s->img_n == 4 && z->app14_color_transform == 2) {
+ for (i=0; i < z->s->img_x; ++i) {
+ out[0] = stbi__blinn_8x8(255 - coutput[0][i], coutput[3][i]);
+ out[1] = 255;
+ out += n;
+ }
+ } else {
+ stbi_uc *y = coutput[0];
+ if (n == 1)
+ for (i=0; i < z->s->img_x; ++i) out[i] = y[i];
+ else
+ for (i=0; i < z->s->img_x; ++i) *out++ = y[i], *out++ = 255;
+ }
+ }
+ }
+ stbi__cleanup_jpeg(z);
+ *out_x = z->s->img_x;
+ *out_y = z->s->img_y;
+ if (comp) *comp = z->s->img_n >= 3 ? 3 : 1; // report original components, not output
+ return output;
+ }
+}
+
+static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ unsigned char* result;
+ stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg));
+ STBI_NOTUSED(ri);
+ j->s = s;
+ stbi__setup_jpeg(j);
+ result = load_jpeg_image(j, x,y,comp,req_comp);
+ STBI_FREE(j);
+ return result;
+}
+
+static int stbi__jpeg_test(stbi__context *s)
+{
+ int r;
+ stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg));
+ j->s = s;
+ stbi__setup_jpeg(j);
+ r = stbi__decode_jpeg_header(j, STBI__SCAN_type);
+ stbi__rewind(s);
+ STBI_FREE(j);
+ return r;
+}
+
+static int stbi__jpeg_info_raw(stbi__jpeg *j, int *x, int *y, int *comp)
+{
+ if (!stbi__decode_jpeg_header(j, STBI__SCAN_header)) {
+ stbi__rewind( j->s );
+ return 0;
+ }
+ if (x) *x = j->s->img_x;
+ if (y) *y = j->s->img_y;
+ if (comp) *comp = j->s->img_n >= 3 ? 3 : 1;
+ return 1;
+}
+
+static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int result;
+ stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg)));
+ j->s = s;
+ result = stbi__jpeg_info_raw(j, x, y, comp);
+ STBI_FREE(j);
+ return result;
+}
+#endif
+
+// public domain zlib decode v0.2 Sean Barrett 2006-11-18
+// simple implementation
+// - all input must be provided in an upfront buffer
+// - all output is written to a single output buffer (can malloc/realloc)
+// performance
+// - fast huffman
+
+#ifndef STBI_NO_ZLIB
+
+// fast-way is faster to check than jpeg huffman, but slow way is slower
+#define STBI__ZFAST_BITS 9 // accelerate all cases in default tables
+#define STBI__ZFAST_MASK ((1 << STBI__ZFAST_BITS) - 1)
+
+// zlib-style huffman encoding
+// (jpegs packs from left, zlib from right, so can't share code)
+typedef struct
+{
+ stbi__uint16 fast[1 << STBI__ZFAST_BITS];
+ stbi__uint16 firstcode[16];
+ int maxcode[17];
+ stbi__uint16 firstsymbol[16];
+ stbi_uc size[288];
+ stbi__uint16 value[288];
+} stbi__zhuffman;
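+// each fast[] entry packs (code length << 9) | symbol for codes of at most
+// STBI__ZFAST_BITS bits, indexed directly by the next 9 input bits;
+// an entry of 0 means the code is longer and must take the slow path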
+
+stbi_inline static int stbi__bitreverse16(int n)
+{
+ n = ((n & 0xAAAA) >> 1) | ((n & 0x5555) << 1);
+ n = ((n & 0xCCCC) >> 2) | ((n & 0x3333) << 2);
+ n = ((n & 0xF0F0) >> 4) | ((n & 0x0F0F) << 4);
+ n = ((n & 0xFF00) >> 8) | ((n & 0x00FF) << 8);
+ return n;
+}
+
+stbi_inline static int stbi__bit_reverse(int v, int bits)
+{
+ STBI_ASSERT(bits <= 16);
+ // to bit reverse n bits, reverse 16 and shift
+ // e.g. 11 bits, bit reverse and shift away 5
+ return stbi__bitreverse16(v) >> (16-bits);
+}
+
+static int stbi__zbuild_huffman(stbi__zhuffman *z, const stbi_uc *sizelist, int num)
+{
+ int i,k=0;
+ int code, next_code[16], sizes[17];
+
+ // DEFLATE spec for generating codes
+ memset(sizes, 0, sizeof(sizes));
+ memset(z->fast, 0, sizeof(z->fast));
+ for (i=0; i < num; ++i)
+ ++sizes[sizelist[i]];
+ sizes[0] = 0;
+ for (i=1; i < 16; ++i)
+ if (sizes[i] > (1 << i))
+ return stbi__err("bad sizes", "Corrupt PNG");
+ code = 0;
+ for (i=1; i < 16; ++i) {
+ next_code[i] = code;
+ z->firstcode[i] = (stbi__uint16) code;
+ z->firstsymbol[i] = (stbi__uint16) k;
+ code = (code + sizes[i]);
+ if (sizes[i])
+ if (code-1 >= (1 << i)) return stbi__err("bad codelengths","Corrupt PNG");
+ z->maxcode[i] = code << (16-i); // preshift for inner loop
+ code <<= 1;
+ k += sizes[i];
+ }
+ z->maxcode[16] = 0x10000; // sentinel
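+ // second pass: hand each used symbol its canonical code, and for codes short
+ // enough for the fast table, fill every table slot whose low s bits equal the
+ // bit-reversed code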
+ for (i=0; i < num; ++i) {
+ int s = sizelist[i];
+ if (s) {
+ int c = next_code[s] - z->firstcode[s] + z->firstsymbol[s];
+ stbi__uint16 fastv = (stbi__uint16) ((s << 9) | i);
+ z->size [c] = (stbi_uc ) s;
+ z->value[c] = (stbi__uint16) i;
+ if (s <= STBI__ZFAST_BITS) {
+ int j = stbi__bit_reverse(next_code[s],s);
+ while (j < (1 << STBI__ZFAST_BITS)) {
+ z->fast[j] = fastv;
+ j += (1 << s);
+ }
+ }
+ ++next_code[s];
+ }
+ }
+ return 1;
+}
+
+// zlib-from-memory implementation for PNG reading
+// because PNG allows splitting the zlib stream arbitrarily,
+// and it's annoying structurally to have PNG call ZLIB call PNG,
+// we require PNG read all the IDATs and combine them into a single
+// memory buffer
+
+typedef struct
+{
+ stbi_uc *zbuffer, *zbuffer_end;
+ int num_bits;
+ stbi__uint32 code_buffer;
+
+ char *zout;
+ char *zout_start;
+ char *zout_end;
+ int z_expandable;
+
+ stbi__zhuffman z_length, z_distance;
+} stbi__zbuf;
+
+stbi_inline static stbi_uc stbi__zget8(stbi__zbuf *z)
+{
+ if (z->zbuffer >= z->zbuffer_end) return 0;
+ return *z->zbuffer++;
+}
+
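+// the bit buffer is filled LSB-first, as DEFLATE requires; top it up one byte
+// at a time until at least 25 bits are available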
+static void stbi__fill_bits(stbi__zbuf *z)
+{
+ do {
+ STBI_ASSERT(z->code_buffer < (1U << z->num_bits));
+ z->code_buffer |= (unsigned int) stbi__zget8(z) << z->num_bits;
+ z->num_bits += 8;
+ } while (z->num_bits <= 24);
+}
+
+stbi_inline static unsigned int stbi__zreceive(stbi__zbuf *z, int n)
+{
+ unsigned int k;
+ if (z->num_bits < n) stbi__fill_bits(z);
+ k = z->code_buffer & ((1 << n) - 1);
+ z->code_buffer >>= n;
+ z->num_bits -= n;
+ return k;
+}
+
+static int stbi__zhuffman_decode_slowpath(stbi__zbuf *a, stbi__zhuffman *z)
+{
+ int b,s,k;
+ // not resolved by fast table, so compute it the slow way
+ // use jpeg approach, which requires MSbits at top
+ k = stbi__bit_reverse(a->code_buffer, 16);
+ for (s=STBI__ZFAST_BITS+1; ; ++s)
+ if (k < z->maxcode[s])
+ break;
+ if (s == 16) return -1; // invalid code!
+ // code size is s, so:
+ b = (k >> (16-s)) - z->firstcode[s] + z->firstsymbol[s];
+ STBI_ASSERT(z->size[b] == s);
+ a->code_buffer >>= s;
+ a->num_bits -= s;
+ return z->value[b];
+}
+
+stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z)
+{
+ int b,s;
+ if (a->num_bits < 16) stbi__fill_bits(a);
+ b = z->fast[a->code_buffer & STBI__ZFAST_MASK];
+ if (b) {
+ s = b >> 9;
+ a->code_buffer >>= s;
+ a->num_bits -= s;
+ return b & 511;
+ }
+ return stbi__zhuffman_decode_slowpath(a, z);
+}
+
+static int stbi__zexpand(stbi__zbuf *z, char *zout, int n) // need to make room for n bytes
+{
+ char *q;
+ int cur, limit, old_limit;
+ z->zout = zout;
+ if (!z->z_expandable) return stbi__err("output buffer limit","Corrupt PNG");
+ cur = (int) (z->zout - z->zout_start);
+ limit = old_limit = (int) (z->zout_end - z->zout_start);
+ while (cur + n > limit)
+ limit *= 2;
+ q = (char *) STBI_REALLOC_SIZED(z->zout_start, old_limit, limit);
+ STBI_NOTUSED(old_limit);
+ if (q == NULL) return stbi__err("outofmem", "Out of memory");
+ z->zout_start = q;
+ z->zout = q + cur;
+ z->zout_end = q + limit;
+ return 1;
+}
+
+static const int stbi__zlength_base[31] = {
+ 3,4,5,6,7,8,9,10,11,13,
+ 15,17,19,23,27,31,35,43,51,59,
+ 67,83,99,115,131,163,195,227,258,0,0 };
+
+static const int stbi__zlength_extra[31]=
+{ 0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0,0,0 };
+
+static const int stbi__zdist_base[32] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,
+257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577,0,0};
+
+static const int stbi__zdist_extra[32] =
+{ 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13};
+
+static int stbi__parse_huffman_block(stbi__zbuf *a)
+{
+ char *zout = a->zout;
+ for(;;) {
+ int z = stbi__zhuffman_decode(a, &a->z_length);
+ if (z < 256) {
+ if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); // error in huffman codes
+ if (zout >= a->zout_end) {
+ if (!stbi__zexpand(a, zout, 1)) return 0;
+ zout = a->zout;
+ }
+ *zout++ = (char) z;
+ } else {
+ stbi_uc *p;
+ int len,dist;
+ if (z == 256) {
+ a->zout = zout;
+ return 1;
+ }
+ z -= 257;
+ len = stbi__zlength_base[z];
+ if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]);
+ z = stbi__zhuffman_decode(a, &a->z_distance);
+ if (z < 0) return stbi__err("bad huffman code","Corrupt PNG");
+ dist = stbi__zdist_base[z];
+ if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]);
+ if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG");
+ if (zout + len > a->zout_end) {
+ if (!stbi__zexpand(a, zout, len)) return 0;
+ zout = a->zout;
+ }
+ p = (stbi_uc *) (zout - dist);
+ if (dist == 1) { // run of one byte; common in images.
+ stbi_uc v = *p;
+ if (len) { do *zout++ = v; while (--len); }
+ } else {
+ if (len) { do *zout++ = *p++; while (--len); }
+ }
+ }
+ }
+}
+
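+// dynamic huffman block: read the HLIT/HDIST/HCLEN counts, build the
+// code-length code, then use it to decode the run-length-encoded code
+// lengths for the literal/length and distance tables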
+static int stbi__compute_huffman_codes(stbi__zbuf *a)
+{
+ static const stbi_uc length_dezigzag[19] = { 16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15 };
+ stbi__zhuffman z_codelength;
+ stbi_uc lencodes[286+32+137]; // padding for maximum single op
+ stbi_uc codelength_sizes[19];
+ int i,n;
+
+ int hlit = stbi__zreceive(a,5) + 257;
+ int hdist = stbi__zreceive(a,5) + 1;
+ int hclen = stbi__zreceive(a,4) + 4;
+ int ntot = hlit + hdist;
+
+ memset(codelength_sizes, 0, sizeof(codelength_sizes));
+ for (i=0; i < hclen; ++i) {
+ int s = stbi__zreceive(a,3);
+ codelength_sizes[length_dezigzag[i]] = (stbi_uc) s;
+ }
+ if (!stbi__zbuild_huffman(&z_codelength, codelength_sizes, 19)) return 0;
+
+ n = 0;
+ while (n < ntot) {
+ int c = stbi__zhuffman_decode(a, &z_codelength);
+ if (c < 0 || c >= 19) return stbi__err("bad codelengths", "Corrupt PNG");
+ if (c < 16)
+ lencodes[n++] = (stbi_uc) c;
+ else {
+ stbi_uc fill = 0;
+ if (c == 16) {
+ c = stbi__zreceive(a,2)+3;
+ if (n == 0) return stbi__err("bad codelengths", "Corrupt PNG");
+ fill = lencodes[n-1];
+ } else if (c == 17)
+ c = stbi__zreceive(a,3)+3;
+ else {
+ STBI_ASSERT(c == 18);
+ c = stbi__zreceive(a,7)+11;
+ }
+ if (ntot - n < c) return stbi__err("bad codelengths", "Corrupt PNG");
+ memset(lencodes+n, fill, c);
+ n += c;
+ }
+ }
+ if (n != ntot) return stbi__err("bad codelengths","Corrupt PNG");
+ if (!stbi__zbuild_huffman(&a->z_length, lencodes, hlit)) return 0;
+ if (!stbi__zbuild_huffman(&a->z_distance, lencodes+hlit, hdist)) return 0;
+ return 1;
+}
+
+static int stbi__parse_uncompressed_block(stbi__zbuf *a)
+{
+ stbi_uc header[4];
+ int len,nlen,k;
+ if (a->num_bits & 7)
+ stbi__zreceive(a, a->num_bits & 7); // discard
+ // drain the bit-packed data into header
+ k = 0;
+ while (a->num_bits > 0) {
+ header[k++] = (stbi_uc) (a->code_buffer & 255); // suppress MSVC run-time check
+ a->code_buffer >>= 8;
+ a->num_bits -= 8;
+ }
+ STBI_ASSERT(a->num_bits == 0);
+ // now fill header the normal way
+ while (k < 4)
+ header[k++] = stbi__zget8(a);
+ len = header[1] * 256 + header[0];
+ nlen = header[3] * 256 + header[2];
+ if (nlen != (len ^ 0xffff)) return stbi__err("zlib corrupt","Corrupt PNG");
+ if (a->zbuffer + len > a->zbuffer_end) return stbi__err("read past buffer","Corrupt PNG");
+ if (a->zout + len > a->zout_end)
+ if (!stbi__zexpand(a, a->zout, len)) return 0;
+ memcpy(a->zout, a->zbuffer, len);
+ a->zbuffer += len;
+ a->zout += len;
+ return 1;
+}
+
+static int stbi__parse_zlib_header(stbi__zbuf *a)
+{
+ int cmf = stbi__zget8(a);
+ int cm = cmf & 15;
+ /* int cinfo = cmf >> 4; */
+ int flg = stbi__zget8(a);
+ if ((cmf*256+flg) % 31 != 0) return stbi__err("bad zlib header","Corrupt PNG"); // zlib spec
+ if (flg & 32) return stbi__err("no preset dict","Corrupt PNG"); // preset dictionary not allowed in png
+ if (cm != 8) return stbi__err("bad compression","Corrupt PNG"); // DEFLATE required for png
+ // window = 1 << (8 + cinfo)... but who cares, we fully buffer output
+ return 1;
+}
+
+static const stbi_uc stbi__zdefault_length[288] =
+{
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
+ 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9, 9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,8,8,8,8,8,8,8,8
+};
+static const stbi_uc stbi__zdefault_distance[32] =
+{
+ 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
+};
+/*
+Init algorithm:
+{
+ int i; // use <= to match clearly with spec
+ for (i=0; i <= 143; ++i) stbi__zdefault_length[i] = 8;
+ for ( ; i <= 255; ++i) stbi__zdefault_length[i] = 9;
+ for ( ; i <= 279; ++i) stbi__zdefault_length[i] = 7;
+ for ( ; i <= 287; ++i) stbi__zdefault_length[i] = 8;
+
+ for (i=0; i <= 31; ++i) stbi__zdefault_distance[i] = 5;
+}
+*/
+
+static int stbi__parse_zlib(stbi__zbuf *a, int parse_header)
+{
+ int final, type;
+ if (parse_header)
+ if (!stbi__parse_zlib_header(a)) return 0;
+ a->num_bits = 0;
+ a->code_buffer = 0;
+ do {
+ final = stbi__zreceive(a,1);
+ type = stbi__zreceive(a,2);
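+ // block type: 0 = stored (uncompressed), 1 = fixed huffman, 2 = dynamic huffman, 3 = invalid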
+ if (type == 0) {
+ if (!stbi__parse_uncompressed_block(a)) return 0;
+ } else if (type == 3) {
+ return 0;
+ } else {
+ if (type == 1) {
+ // use fixed code lengths
+ if (!stbi__zbuild_huffman(&a->z_length , stbi__zdefault_length , 288)) return 0;
+ if (!stbi__zbuild_huffman(&a->z_distance, stbi__zdefault_distance, 32)) return 0;
+ } else {
+ if (!stbi__compute_huffman_codes(a)) return 0;
+ }
+ if (!stbi__parse_huffman_block(a)) return 0;
+ }
+ } while (!final);
+ return 1;
+}
+
+static int stbi__do_zlib(stbi__zbuf *a, char *obuf, int olen, int exp, int parse_header)
+{
+ a->zout_start = obuf;
+ a->zout = obuf;
+ a->zout_end = obuf + olen;
+ a->z_expandable = exp;
+
+ return stbi__parse_zlib(a, parse_header);
+}
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize(const char *buffer, int len, int initial_size, int *outlen)
+{
+ stbi__zbuf a;
+ char *p = (char *) stbi__malloc(initial_size);
+ if (p == NULL) return NULL;
+ a.zbuffer = (stbi_uc *) buffer;
+ a.zbuffer_end = (stbi_uc *) buffer + len;
+ if (stbi__do_zlib(&a, p, initial_size, 1, 1)) {
+ if (outlen) *outlen = (int) (a.zout - a.zout_start);
+ return a.zout_start;
+ } else {
+ STBI_FREE(a.zout_start);
+ return NULL;
+ }
+}
+
+STBIDEF char *stbi_zlib_decode_malloc(char const *buffer, int len, int *outlen)
+{
+ return stbi_zlib_decode_malloc_guesssize(buffer, len, 16384, outlen);
+}
+
+STBIDEF char *stbi_zlib_decode_malloc_guesssize_headerflag(const char *buffer, int len, int initial_size, int *outlen, int parse_header)
+{
+ stbi__zbuf a;
+ char *p = (char *) stbi__malloc(initial_size);
+ if (p == NULL) return NULL;
+ a.zbuffer = (stbi_uc *) buffer;
+ a.zbuffer_end = (stbi_uc *) buffer + len;
+ if (stbi__do_zlib(&a, p, initial_size, 1, parse_header)) {
+ if (outlen) *outlen = (int) (a.zout - a.zout_start);
+ return a.zout_start;
+ } else {
+ STBI_FREE(a.zout_start);
+ return NULL;
+ }
+}
+
+STBIDEF int stbi_zlib_decode_buffer(char *obuffer, int olen, char const *ibuffer, int ilen)
+{
+ stbi__zbuf a;
+ a.zbuffer = (stbi_uc *) ibuffer;
+ a.zbuffer_end = (stbi_uc *) ibuffer + ilen;
+ if (stbi__do_zlib(&a, obuffer, olen, 0, 1))
+ return (int) (a.zout - a.zout_start);
+ else
+ return -1;
+}
+
+STBIDEF char *stbi_zlib_decode_noheader_malloc(char const *buffer, int len, int *outlen)
+{
+ stbi__zbuf a;
+ char *p = (char *) stbi__malloc(16384);
+ if (p == NULL) return NULL;
+ a.zbuffer = (stbi_uc *) buffer;
+ a.zbuffer_end = (stbi_uc *) buffer+len;
+ if (stbi__do_zlib(&a, p, 16384, 1, 0)) {
+ if (outlen) *outlen = (int) (a.zout - a.zout_start);
+ return a.zout_start;
+ } else {
+ STBI_FREE(a.zout_start);
+ return NULL;
+ }
+}
+
+STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const char *ibuffer, int ilen)
+{
+ stbi__zbuf a;
+ a.zbuffer = (stbi_uc *) ibuffer;
+ a.zbuffer_end = (stbi_uc *) ibuffer + ilen;
+ if (stbi__do_zlib(&a, obuffer, olen, 0, 0))
+ return (int) (a.zout - a.zout_start);
+ else
+ return -1;
+}
+#endif
+
+// public domain "baseline" PNG decoder v0.10 Sean Barrett 2006-11-18
+// simple implementation
+// - only 8-bit samples
+// - no CRC checking
+// - allocates lots of intermediate memory
+// - avoids problem of streaming data between subsystems
+// - avoids explicit window management
+// performance
+// - uses stb_zlib, a PD zlib implementation with fast huffman decoding
+
+#ifndef STBI_NO_PNG
+typedef struct
+{
+ stbi__uint32 length;
+ stbi__uint32 type;
+} stbi__pngchunk;
+
+static stbi__pngchunk stbi__get_chunk_header(stbi__context *s)
+{
+ stbi__pngchunk c;
+ c.length = stbi__get32be(s);
+ c.type = stbi__get32be(s);
+ return c;
+}
+
+static int stbi__check_png_header(stbi__context *s)
+{
+ static const stbi_uc png_sig[8] = { 137,80,78,71,13,10,26,10 };
+ int i;
+ for (i=0; i < 8; ++i)
+ if (stbi__get8(s) != png_sig[i]) return stbi__err("bad png sig","Not a PNG");
+ return 1;
+}
+
+typedef struct
+{
+ stbi__context *s;
+ stbi_uc *idata, *expanded, *out;
+ int depth;
+} stbi__png;
+
+
+enum {
+ STBI__F_none=0,
+ STBI__F_sub=1,
+ STBI__F_up=2,
+ STBI__F_avg=3,
+ STBI__F_paeth=4,
+ // synthetic filters used for first scanline to avoid needing a dummy row of 0s
+ STBI__F_avg_first,
+ STBI__F_paeth_first
+};
+
+static stbi_uc first_row_filter[5] =
+{
+ STBI__F_none,
+ STBI__F_sub,
+ STBI__F_none,
+ STBI__F_avg_first,
+ STBI__F_paeth_first
+};
+
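+// Paeth predictor from the PNG spec: p = a + b - c is the ideal predictor;
+// return whichever of left (a), above (b) or upper-left (c) is closest to it,
+// breaking ties in that order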
+static int stbi__paeth(int a, int b, int c)
+{
+ int p = a + b - c;
+ int pa = abs(p-a);
+ int pb = abs(p-b);
+ int pc = abs(p-c);
+ if (pa <= pb && pa <= pc) return a;
+ if (pb <= pc) return b;
+ return c;
+}
+
+static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 };
+
+// create the png data from post-deflated data
+static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color)
+{
+ int bytes = (depth == 16? 2 : 1);
+ stbi__context *s = a->s;
+ stbi__uint32 i,j,stride = x*out_n*bytes;
+ stbi__uint32 img_len, img_width_bytes;
+ int k;
+ int img_n = s->img_n; // copy it into a local for later
+
+ int output_bytes = out_n*bytes;
+ int filter_bytes = img_n*bytes;
+ int width = x;
+
+ STBI_ASSERT(out_n == s->img_n || out_n == s->img_n+1);
+ a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into
+ if (!a->out) return stbi__err("outofmem", "Out of memory");
+
+ if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG");
+ img_width_bytes = (((img_n * x * depth) + 7) >> 3);
+ img_len = (img_width_bytes + 1) * y;
+
+ // we used to check for exact match between raw_len and img_len on non-interlaced PNGs,
+ // but issue #276 reported a PNG in the wild that had extra data at the end (all zeros),
+ // so just check for raw_len < img_len always.
+ if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG");
+
+ for (j=0; j < y; ++j) {
+ stbi_uc *cur = a->out + stride*j;
+ stbi_uc *prior;
+ int filter = *raw++;
+
+ if (filter > 4)
+ return stbi__err("invalid filter","Corrupt PNG");
+
+ if (depth < 8) {
+ STBI_ASSERT(img_width_bytes <= x);
+ cur += x*out_n - img_width_bytes; // store output to the rightmost img_width_bytes of the row, so we can decode in place
+ filter_bytes = 1;
+ width = img_width_bytes;
+ }
+ prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above
+
+ // if first row, use special filter that doesn't sample previous row
+ if (j == 0) filter = first_row_filter[filter];
+
+ // handle first byte explicitly
+ for (k=0; k < filter_bytes; ++k) {
+ switch (filter) {
+ case STBI__F_none : cur[k] = raw[k]; break;
+ case STBI__F_sub : cur[k] = raw[k]; break;
+ case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break;
+ case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break;
+ case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break;
+ case STBI__F_avg_first : cur[k] = raw[k]; break;
+ case STBI__F_paeth_first: cur[k] = raw[k]; break;
+ }
+ }
+
+ if (depth == 8) {
+ if (img_n != out_n)
+ cur[img_n] = 255; // first pixel
+ raw += img_n;
+ cur += out_n;
+ prior += out_n;
+ } else if (depth == 16) {
+ if (img_n != out_n) {
+ cur[filter_bytes] = 255; // first pixel top byte
+ cur[filter_bytes+1] = 255; // first pixel bottom byte
+ }
+ raw += filter_bytes;
+ cur += output_bytes;
+ prior += output_bytes;
+ } else {
+ raw += 1;
+ cur += 1;
+ prior += 1;
+ }
+
+ // this is a little gross, so that we don't switch per-pixel or per-component
+ if (depth < 8 || img_n == out_n) {
+ int nk = (width - 1)*filter_bytes;
+ #define STBI__CASE(f) \
+ case f: \
+ for (k=0; k < nk; ++k)
+ switch (filter) {
+ // "none" filter turns into a memcpy here; make that explicit.
+ case STBI__F_none: memcpy(cur, raw, nk); break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+ raw += nk;
+ } else {
+ STBI_ASSERT(img_n+1 == out_n);
+ #define STBI__CASE(f) \
+ case f: \
+ for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \
+ for (k=0; k < filter_bytes; ++k)
+ switch (filter) {
+ STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break;
+ STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break;
+ STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break;
+ STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break;
+ STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break;
+ STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break;
+ STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break;
+ }
+ #undef STBI__CASE
+
+ // the loop above sets the high byte of the pixels' alpha, but for
+ // 16 bit png files we also need the low byte set. we'll do that here.
+ if (depth == 16) {
+ cur = a->out + stride*j; // start at the beginning of the row again
+ for (i=0; i < x; ++i,cur+=output_bytes) {
+ cur[filter_bytes+1] = 255;
+ }
+ }
+ }
+ }
+
+ // we make a separate pass to expand bits to pixels; for performance,
+ // this could run two scanlines behind the above code, so it won't
+ // interfere with filtering but will still be in the cache.
+ if (depth < 8) {
+ for (j=0; j < y; ++j) {
+ stbi_uc *cur = a->out + stride*j;
+ stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes;
+ // unpack 1/2/4-bit into an 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit
+ // PNG guarantees byte alignment; if width is not a multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop
+ stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range
+
+ // note that the final byte might overshoot and write more data than desired.
+ // we can allocate enough data that this never writes out of memory, but it
+ // could also overwrite the next scanline. can it overwrite non-empty data
+ // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel.
+ // so we need to explicitly clamp the final ones
+
+ if (depth == 4) {
+ for (k=x*img_n; k >= 2; k-=2, ++in) {
+ *cur++ = scale * ((*in >> 4) );
+ *cur++ = scale * ((*in ) & 0x0f);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 4) );
+ } else if (depth == 2) {
+ for (k=x*img_n; k >= 4; k-=4, ++in) {
+ *cur++ = scale * ((*in >> 6) );
+ *cur++ = scale * ((*in >> 4) & 0x03);
+ *cur++ = scale * ((*in >> 2) & 0x03);
+ *cur++ = scale * ((*in ) & 0x03);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 6) );
+ if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03);
+ if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03);
+ } else if (depth == 1) {
+ for (k=x*img_n; k >= 8; k-=8, ++in) {
+ *cur++ = scale * ((*in >> 7) );
+ *cur++ = scale * ((*in >> 6) & 0x01);
+ *cur++ = scale * ((*in >> 5) & 0x01);
+ *cur++ = scale * ((*in >> 4) & 0x01);
+ *cur++ = scale * ((*in >> 3) & 0x01);
+ *cur++ = scale * ((*in >> 2) & 0x01);
+ *cur++ = scale * ((*in >> 1) & 0x01);
+ *cur++ = scale * ((*in ) & 0x01);
+ }
+ if (k > 0) *cur++ = scale * ((*in >> 7) );
+ if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01);
+ if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01);
+ if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01);
+ if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01);
+ if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01);
+ if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01);
+ }
+ if (img_n != out_n) {
+ int q;
+ // insert alpha = 255
+ cur = a->out + stride*j;
+ if (img_n == 1) {
+ for (q=x-1; q >= 0; --q) {
+ cur[q*2+1] = 255;
+ cur[q*2+0] = cur[q];
+ }
+ } else {
+ STBI_ASSERT(img_n == 3);
+ for (q=x-1; q >= 0; --q) {
+ cur[q*4+3] = 255;
+ cur[q*4+2] = cur[q*3+2];
+ cur[q*4+1] = cur[q*3+1];
+ cur[q*4+0] = cur[q*3+0];
+ }
+ }
+ }
+ }
+ } else if (depth == 16) {
+ // force the image data from big-endian to platform-native.
+ // this is done in a separate pass due to the decoding relying
+ // on the data being untouched, but could probably be done
+ // per-line during decode if care is taken.
+ stbi_uc *cur = a->out;
+ stbi__uint16 *cur16 = (stbi__uint16*)cur;
+
+ for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) {
+ *cur16 = (cur[0] << 8) | cur[1];
+ }
+ }
+
+ return 1;
+}
+
+static int stbi__create_png_image(stbi__png *a, stbi_uc *image_data, stbi__uint32 image_data_len, int out_n, int depth, int color, int interlaced)
+{
+ int bytes = (depth == 16 ? 2 : 1);
+ int out_bytes = out_n * bytes;
+ stbi_uc *final;
+ int p;
+ if (!interlaced)
+ return stbi__create_png_image_raw(a, image_data, image_data_len, out_n, a->s->img_x, a->s->img_y, depth, color);
+
+ // de-interlacing
+   final = (stbi_uc *) stbi__malloc_mad3(a->s->img_x, a->s->img_y, out_bytes, 0);
+   if (!final) return stbi__err("outofmem", "Out of memory");
+ for (p=0; p < 7; ++p) {
+ int xorig[] = { 0,4,0,2,0,1,0 };
+ int yorig[] = { 0,0,4,0,2,0,1 };
+ int xspc[] = { 8,8,4,4,2,2,1 };
+ int yspc[] = { 8,8,8,4,4,2,2 };
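+      // (Adam7 interlacing: per-pass x/y starting offsets and spacings within each 8x8 tile)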
+ int i,j,x,y;
+ // pass1_x[4] = 0, pass1_x[5] = 1, pass1_x[12] = 1
+ x = (a->s->img_x - xorig[p] + xspc[p]-1) / xspc[p];
+ y = (a->s->img_y - yorig[p] + yspc[p]-1) / yspc[p];
+ if (x && y) {
+ stbi__uint32 img_len = ((((a->s->img_n * x * depth) + 7) >> 3) + 1) * y;
+ if (!stbi__create_png_image_raw(a, image_data, image_data_len, out_n, x, y, depth, color)) {
+ STBI_FREE(final);
+ return 0;
+ }
+ for (j=0; j < y; ++j) {
+ for (i=0; i < x; ++i) {
+ int out_y = j*yspc[p]+yorig[p];
+ int out_x = i*xspc[p]+xorig[p];
+ memcpy(final + out_y*a->s->img_x*out_bytes + out_x*out_bytes,
+ a->out + (j*x+i)*out_bytes, out_bytes);
+ }
+ }
+ STBI_FREE(a->out);
+ image_data += img_len;
+ image_data_len -= img_len;
+ }
+ }
+ a->out = final;
+
+ return 1;
+}
+
+static int stbi__compute_transparency(stbi__png *z, stbi_uc tc[3], int out_n)
+{
+ stbi__context *s = z->s;
+ stbi__uint32 i, pixel_count = s->img_x * s->img_y;
+ stbi_uc *p = z->out;
+
+ // compute color-based transparency, assuming we've
+ // already got 255 as the alpha value in the output
+ STBI_ASSERT(out_n == 2 || out_n == 4);
+
+ if (out_n == 2) {
+ for (i=0; i < pixel_count; ++i) {
+ p[1] = (p[0] == tc[0] ? 0 : 255);
+ p += 2;
+ }
+ } else {
+ for (i=0; i < pixel_count; ++i) {
+ if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
+ p[3] = 0;
+ p += 4;
+ }
+ }
+ return 1;
+}
+
+static int stbi__compute_transparency16(stbi__png *z, stbi__uint16 tc[3], int out_n)
+{
+ stbi__context *s = z->s;
+ stbi__uint32 i, pixel_count = s->img_x * s->img_y;
+ stbi__uint16 *p = (stbi__uint16*) z->out;
+
+ // compute color-based transparency, assuming we've
+ // already got 65535 as the alpha value in the output
+ STBI_ASSERT(out_n == 2 || out_n == 4);
+
+ if (out_n == 2) {
+ for (i = 0; i < pixel_count; ++i) {
+ p[1] = (p[0] == tc[0] ? 0 : 65535);
+ p += 2;
+ }
+ } else {
+ for (i = 0; i < pixel_count; ++i) {
+ if (p[0] == tc[0] && p[1] == tc[1] && p[2] == tc[2])
+ p[3] = 0;
+ p += 4;
+ }
+ }
+ return 1;
+}
+
+static int stbi__expand_png_palette(stbi__png *a, stbi_uc *palette, int len, int pal_img_n)
+{
+ stbi__uint32 i, pixel_count = a->s->img_x * a->s->img_y;
+ stbi_uc *p, *temp_out, *orig = a->out;
+
+ p = (stbi_uc *) stbi__malloc_mad2(pixel_count, pal_img_n, 0);
+ if (p == NULL) return stbi__err("outofmem", "Out of memory");
+
+   // between here and free(out) below, exiting would leak
+ temp_out = p;
+
+ if (pal_img_n == 3) {
+ for (i=0; i < pixel_count; ++i) {
+ int n = orig[i]*4;
+ p[0] = palette[n ];
+ p[1] = palette[n+1];
+ p[2] = palette[n+2];
+ p += 3;
+ }
+ } else {
+ for (i=0; i < pixel_count; ++i) {
+ int n = orig[i]*4;
+ p[0] = palette[n ];
+ p[1] = palette[n+1];
+ p[2] = palette[n+2];
+ p[3] = palette[n+3];
+ p += 4;
+ }
+ }
+ STBI_FREE(a->out);
+ a->out = temp_out;
+
+ STBI_NOTUSED(len);
+
+ return 1;
+}
+
+static int stbi__unpremultiply_on_load = 0;
+static int stbi__de_iphone_flag = 0;
+
+STBIDEF void stbi_set_unpremultiply_on_load(int flag_true_if_should_unpremultiply)
+{
+ stbi__unpremultiply_on_load = flag_true_if_should_unpremultiply;
+}
+
+STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert)
+{
+ stbi__de_iphone_flag = flag_true_if_should_convert;
+}
+
+static void stbi__de_iphone(stbi__png *z)
+{
+ stbi__context *s = z->s;
+ stbi__uint32 i, pixel_count = s->img_x * s->img_y;
+ stbi_uc *p = z->out;
+
+ if (s->img_out_n == 3) { // convert bgr to rgb
+ for (i=0; i < pixel_count; ++i) {
+ stbi_uc t = p[0];
+ p[0] = p[2];
+ p[2] = t;
+ p += 3;
+ }
+ } else {
+ STBI_ASSERT(s->img_out_n == 4);
+ if (stbi__unpremultiply_on_load) {
+ // convert bgr to rgb and unpremultiply
+ for (i=0; i < pixel_count; ++i) {
+ stbi_uc a = p[3];
+ stbi_uc t = p[0];
+ if (a) {
+ stbi_uc half = a / 2;
+ p[0] = (p[2] * 255 + half) / a;
+ p[1] = (p[1] * 255 + half) / a;
+ p[2] = ( t * 255 + half) / a;
+ } else {
+ p[0] = p[2];
+ p[2] = t;
+ }
+ p += 4;
+ }
+ } else {
+ // convert bgr to rgb
+ for (i=0; i < pixel_count; ++i) {
+ stbi_uc t = p[0];
+ p[0] = p[2];
+ p[2] = t;
+ p += 4;
+ }
+ }
+ }
+}
+
+#define STBI__PNG_TYPE(a,b,c,d) (((unsigned) (a) << 24) + ((unsigned) (b) << 16) + ((unsigned) (c) << 8) + (unsigned) (d))
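+// (packs a chunk's four name characters into a big-endian 32-bit tag so chunk types can be
+// matched with a plain switch in stbi__parse_png_file below)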
+
+static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp)
+{
+ stbi_uc palette[1024], pal_img_n=0;
+ stbi_uc has_trans=0, tc[3];
+ stbi__uint16 tc16[3];
+ stbi__uint32 ioff=0, idata_limit=0, i, pal_len=0;
+ int first=1,k,interlace=0, color=0, is_iphone=0;
+ stbi__context *s = z->s;
+
+ z->expanded = NULL;
+ z->idata = NULL;
+ z->out = NULL;
+
+ if (!stbi__check_png_header(s)) return 0;
+
+ if (scan == STBI__SCAN_type) return 1;
+
+ for (;;) {
+ stbi__pngchunk c = stbi__get_chunk_header(s);
+ switch (c.type) {
+ case STBI__PNG_TYPE('C','g','B','I'):
+ is_iphone = 1;
+ stbi__skip(s, c.length);
+ break;
+ case STBI__PNG_TYPE('I','H','D','R'): {
+ int comp,filter;
+ if (!first) return stbi__err("multiple IHDR","Corrupt PNG");
+ first = 0;
+ if (c.length != 13) return stbi__err("bad IHDR len","Corrupt PNG");
+ s->img_x = stbi__get32be(s); if (s->img_x > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
+ s->img_y = stbi__get32be(s); if (s->img_y > (1 << 24)) return stbi__err("too large","Very large image (corrupt?)");
+ z->depth = stbi__get8(s); if (z->depth != 1 && z->depth != 2 && z->depth != 4 && z->depth != 8 && z->depth != 16) return stbi__err("1/2/4/8/16-bit only","PNG not supported: 1/2/4/8/16-bit only");
+ color = stbi__get8(s); if (color > 6) return stbi__err("bad ctype","Corrupt PNG");
+ if (color == 3 && z->depth == 16) return stbi__err("bad ctype","Corrupt PNG");
+ if (color == 3) pal_img_n = 3; else if (color & 1) return stbi__err("bad ctype","Corrupt PNG");
+ comp = stbi__get8(s); if (comp) return stbi__err("bad comp method","Corrupt PNG");
+ filter= stbi__get8(s); if (filter) return stbi__err("bad filter method","Corrupt PNG");
+ interlace = stbi__get8(s); if (interlace>1) return stbi__err("bad interlace method","Corrupt PNG");
+ if (!s->img_x || !s->img_y) return stbi__err("0-pixel image","Corrupt PNG");
+ if (!pal_img_n) {
+ s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0);
+ if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode");
+ if (scan == STBI__SCAN_header) return 1;
+ } else {
+ // if paletted, then pal_n is our final components, and
+ // img_n is # components to decompress/filter.
+ s->img_n = 1;
+ if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG");
+ // if SCAN_header, have to scan to see if we have a tRNS
+ }
+ break;
+ }
+
+ case STBI__PNG_TYPE('P','L','T','E'): {
+ if (first) return stbi__err("first not IHDR", "Corrupt PNG");
+ if (c.length > 256*3) return stbi__err("invalid PLTE","Corrupt PNG");
+ pal_len = c.length / 3;
+ if (pal_len * 3 != c.length) return stbi__err("invalid PLTE","Corrupt PNG");
+ for (i=0; i < pal_len; ++i) {
+ palette[i*4+0] = stbi__get8(s);
+ palette[i*4+1] = stbi__get8(s);
+ palette[i*4+2] = stbi__get8(s);
+ palette[i*4+3] = 255;
+ }
+ break;
+ }
+
+ case STBI__PNG_TYPE('t','R','N','S'): {
+ if (first) return stbi__err("first not IHDR", "Corrupt PNG");
+ if (z->idata) return stbi__err("tRNS after IDAT","Corrupt PNG");
+ if (pal_img_n) {
+ if (scan == STBI__SCAN_header) { s->img_n = 4; return 1; }
+ if (pal_len == 0) return stbi__err("tRNS before PLTE","Corrupt PNG");
+ if (c.length > pal_len) return stbi__err("bad tRNS len","Corrupt PNG");
+ pal_img_n = 4;
+ for (i=0; i < c.length; ++i)
+ palette[i*4+3] = stbi__get8(s);
+ } else {
+ if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG");
+ if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG");
+ has_trans = 1;
+ if (z->depth == 16) {
+ for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is
+ } else {
+ for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger
+ }
+ }
+ break;
+ }
+
+ case STBI__PNG_TYPE('I','D','A','T'): {
+ if (first) return stbi__err("first not IHDR", "Corrupt PNG");
+ if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG");
+ if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; }
+ if ((int)(ioff + c.length) < (int)ioff) return 0;
+ if (ioff + c.length > idata_limit) {
+ stbi__uint32 idata_limit_old = idata_limit;
+ stbi_uc *p;
+ if (idata_limit == 0) idata_limit = c.length > 4096 ? c.length : 4096;
+ while (ioff + c.length > idata_limit)
+ idata_limit *= 2;
+ STBI_NOTUSED(idata_limit_old);
+ p = (stbi_uc *) STBI_REALLOC_SIZED(z->idata, idata_limit_old, idata_limit); if (p == NULL) return stbi__err("outofmem", "Out of memory");
+ z->idata = p;
+ }
+ if (!stbi__getn(s, z->idata+ioff,c.length)) return stbi__err("outofdata","Corrupt PNG");
+ ioff += c.length;
+ break;
+ }
+
+ case STBI__PNG_TYPE('I','E','N','D'): {
+ stbi__uint32 raw_len, bpl;
+ if (first) return stbi__err("first not IHDR", "Corrupt PNG");
+ if (scan != STBI__SCAN_load) return 1;
+ if (z->idata == NULL) return stbi__err("no IDAT","Corrupt PNG");
+ // initial guess for decoded data size to avoid unnecessary reallocs
+ bpl = (s->img_x * z->depth + 7) / 8; // bytes per line, per component
+ raw_len = bpl * s->img_y * s->img_n /* pixels */ + s->img_y /* filter mode per row */;
+ z->expanded = (stbi_uc *) stbi_zlib_decode_malloc_guesssize_headerflag((char *) z->idata, ioff, raw_len, (int *) &raw_len, !is_iphone);
+ if (z->expanded == NULL) return 0; // zlib should set error
+ STBI_FREE(z->idata); z->idata = NULL;
+ if ((req_comp == s->img_n+1 && req_comp != 3 && !pal_img_n) || has_trans)
+ s->img_out_n = s->img_n+1;
+ else
+ s->img_out_n = s->img_n;
+ if (!stbi__create_png_image(z, z->expanded, raw_len, s->img_out_n, z->depth, color, interlace)) return 0;
+ if (has_trans) {
+ if (z->depth == 16) {
+ if (!stbi__compute_transparency16(z, tc16, s->img_out_n)) return 0;
+ } else {
+ if (!stbi__compute_transparency(z, tc, s->img_out_n)) return 0;
+ }
+ }
+ if (is_iphone && stbi__de_iphone_flag && s->img_out_n > 2)
+ stbi__de_iphone(z);
+ if (pal_img_n) {
+ // pal_img_n == 3 or 4
+ s->img_n = pal_img_n; // record the actual colors we had
+ s->img_out_n = pal_img_n;
+ if (req_comp >= 3) s->img_out_n = req_comp;
+ if (!stbi__expand_png_palette(z, palette, pal_len, s->img_out_n))
+ return 0;
+ } else if (has_trans) {
+ // non-paletted image with tRNS -> source image has (constant) alpha
+ ++s->img_n;
+ }
+ STBI_FREE(z->expanded); z->expanded = NULL;
+ return 1;
+ }
+
+ default:
+ // if critical, fail
+ if (first) return stbi__err("first not IHDR", "Corrupt PNG");
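+         // PNG chunk names carry the "ancillary" flag in bit 5 of the first byte (lowercase =
+         // ancillary); with the name packed big-endian that bit sits at bit 29, so a clear bit
+         // means a critical chunk we don't recognize and therefore cannot safely skip.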
+ if ((c.type & (1 << 29)) == 0) {
+ #ifndef STBI_NO_FAILURE_STRINGS
+ // not threadsafe
+ static char invalid_chunk[] = "XXXX PNG chunk not known";
+ invalid_chunk[0] = STBI__BYTECAST(c.type >> 24);
+ invalid_chunk[1] = STBI__BYTECAST(c.type >> 16);
+ invalid_chunk[2] = STBI__BYTECAST(c.type >> 8);
+ invalid_chunk[3] = STBI__BYTECAST(c.type >> 0);
+ #endif
+ return stbi__err(invalid_chunk, "PNG not supported: unknown PNG chunk type");
+ }
+ stbi__skip(s, c.length);
+ break;
+ }
+ // end of PNG chunk, read and skip CRC
+ stbi__get32be(s);
+ }
+}
+
+static void *stbi__do_png(stbi__png *p, int *x, int *y, int *n, int req_comp, stbi__result_info *ri)
+{
+ void *result=NULL;
+ if (req_comp < 0 || req_comp > 4) return stbi__errpuc("bad req_comp", "Internal error");
+ if (stbi__parse_png_file(p, STBI__SCAN_load, req_comp)) {
+ if (p->depth < 8)
+ ri->bits_per_channel = 8;
+ else
+ ri->bits_per_channel = p->depth;
+ result = p->out;
+ p->out = NULL;
+ if (req_comp && req_comp != p->s->img_out_n) {
+ if (ri->bits_per_channel == 8)
+ result = stbi__convert_format((unsigned char *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
+ else
+ result = stbi__convert_format16((stbi__uint16 *) result, p->s->img_out_n, req_comp, p->s->img_x, p->s->img_y);
+ p->s->img_out_n = req_comp;
+ if (result == NULL) return result;
+ }
+ *x = p->s->img_x;
+ *y = p->s->img_y;
+ if (n) *n = p->s->img_n;
+ }
+ STBI_FREE(p->out); p->out = NULL;
+ STBI_FREE(p->expanded); p->expanded = NULL;
+ STBI_FREE(p->idata); p->idata = NULL;
+
+ return result;
+}
+
+static void *stbi__png_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi__png p;
+ p.s = s;
+ return stbi__do_png(&p, x,y,comp,req_comp, ri);
+}
+
+static int stbi__png_test(stbi__context *s)
+{
+ int r;
+ r = stbi__check_png_header(s);
+ stbi__rewind(s);
+ return r;
+}
+
+static int stbi__png_info_raw(stbi__png *p, int *x, int *y, int *comp)
+{
+ if (!stbi__parse_png_file(p, STBI__SCAN_header, 0)) {
+ stbi__rewind( p->s );
+ return 0;
+ }
+ if (x) *x = p->s->img_x;
+ if (y) *y = p->s->img_y;
+ if (comp) *comp = p->s->img_n;
+ return 1;
+}
+
+static int stbi__png_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ stbi__png p;
+ p.s = s;
+ return stbi__png_info_raw(&p, x, y, comp);
+}
+
+static int stbi__png_is16(stbi__context *s)
+{
+ stbi__png p;
+ p.s = s;
+ if (!stbi__png_info_raw(&p, NULL, NULL, NULL))
+ return 0;
+ if (p.depth != 16) {
+ stbi__rewind(p.s);
+ return 0;
+ }
+ return 1;
+}
+#endif
+
+// Microsoft/Windows BMP image
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_test_raw(stbi__context *s)
+{
+ int r;
+ int sz;
+ if (stbi__get8(s) != 'B') return 0;
+ if (stbi__get8(s) != 'M') return 0;
+ stbi__get32le(s); // discard filesize
+ stbi__get16le(s); // discard reserved
+ stbi__get16le(s); // discard reserved
+ stbi__get32le(s); // discard data offset
+ sz = stbi__get32le(s);
+ r = (sz == 12 || sz == 40 || sz == 56 || sz == 108 || sz == 124);
+ return r;
+}
+
+static int stbi__bmp_test(stbi__context *s)
+{
+ int r = stbi__bmp_test_raw(s);
+ stbi__rewind(s);
+ return r;
+}
+
+
+// returns 0..31 for the highest set bit
+static int stbi__high_bit(unsigned int z)
+{
+ int n=0;
+ if (z == 0) return -1;
+ if (z >= 0x10000) n += 16, z >>= 16;
+ if (z >= 0x00100) n += 8, z >>= 8;
+ if (z >= 0x00010) n += 4, z >>= 4;
+ if (z >= 0x00004) n += 2, z >>= 2;
+ if (z >= 0x00002) n += 1, z >>= 1;
+ return n;
+}
+
+static int stbi__bitcount(unsigned int a)
+{
+ a = (a & 0x55555555) + ((a >> 1) & 0x55555555); // max 2
+ a = (a & 0x33333333) + ((a >> 2) & 0x33333333); // max 4
+ a = (a + (a >> 4)) & 0x0f0f0f0f; // max 8 per 4, now 8 bits
+ a = (a + (a >> 8)); // max 16 per 8 bits
+ a = (a + (a >> 16)); // max 32 per 8 bits
+ return a & 0xff;
+}
+
+// extract an arbitrarily-aligned N-bit value (N=bits)
+// from v, and then make it 8-bits long and fractionally
+// extend it to full range.
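+// the multiply/shift below replicates the value's bit pattern to fill 8 bits; e.g. a 5-bit
+// value v becomes (v*0x21)>>2 == (v<<3)|(v>>2), the usual 5-to-8 bit expansion.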
+static int stbi__shiftsigned(int v, int shift, int bits)
+{
+ static unsigned int mul_table[9] = {
+ 0,
+ 0xff/*0b11111111*/, 0x55/*0b01010101*/, 0x49/*0b01001001*/, 0x11/*0b00010001*/,
+ 0x21/*0b00100001*/, 0x41/*0b01000001*/, 0x81/*0b10000001*/, 0x01/*0b00000001*/,
+ };
+ static unsigned int shift_table[9] = {
+ 0, 0,0,1,0,2,4,6,0,
+ };
+ if (shift < 0)
+ v <<= -shift;
+ else
+ v >>= shift;
+ STBI_ASSERT(v >= 0 && v < 256);
+ v >>= (8-bits);
+ STBI_ASSERT(bits >= 0 && bits <= 8);
+ return (int) ((unsigned) v * mul_table[bits]) >> shift_table[bits];
+}
+
+typedef struct
+{
+ int bpp, offset, hsz;
+ unsigned int mr,mg,mb,ma, all_a;
+} stbi__bmp_data;
+
+static void *stbi__bmp_parse_header(stbi__context *s, stbi__bmp_data *info)
+{
+ int hsz;
+ if (stbi__get8(s) != 'B' || stbi__get8(s) != 'M') return stbi__errpuc("not BMP", "Corrupt BMP");
+ stbi__get32le(s); // discard filesize
+ stbi__get16le(s); // discard reserved
+ stbi__get16le(s); // discard reserved
+ info->offset = stbi__get32le(s);
+ info->hsz = hsz = stbi__get32le(s);
+ info->mr = info->mg = info->mb = info->ma = 0;
+
+ if (hsz != 12 && hsz != 40 && hsz != 56 && hsz != 108 && hsz != 124) return stbi__errpuc("unknown BMP", "BMP type not supported: unknown");
+ if (hsz == 12) {
+ s->img_x = stbi__get16le(s);
+ s->img_y = stbi__get16le(s);
+ } else {
+ s->img_x = stbi__get32le(s);
+ s->img_y = stbi__get32le(s);
+ }
+ if (stbi__get16le(s) != 1) return stbi__errpuc("bad BMP", "bad BMP");
+ info->bpp = stbi__get16le(s);
+ if (hsz != 12) {
+ int compress = stbi__get32le(s);
+ if (compress == 1 || compress == 2) return stbi__errpuc("BMP RLE", "BMP type not supported: RLE");
+ stbi__get32le(s); // discard sizeof
+ stbi__get32le(s); // discard hres
+ stbi__get32le(s); // discard vres
+ stbi__get32le(s); // discard colorsused
+ stbi__get32le(s); // discard max important
+ if (hsz == 40 || hsz == 56) {
+ if (hsz == 56) {
+ stbi__get32le(s);
+ stbi__get32le(s);
+ stbi__get32le(s);
+ stbi__get32le(s);
+ }
+ if (info->bpp == 16 || info->bpp == 32) {
+ if (compress == 0) {
+ if (info->bpp == 32) {
+ info->mr = 0xffu << 16;
+ info->mg = 0xffu << 8;
+ info->mb = 0xffu << 0;
+ info->ma = 0xffu << 24;
+ info->all_a = 0; // if all_a is 0 at end, then we loaded alpha channel but it was all 0
+ } else {
+ info->mr = 31u << 10;
+ info->mg = 31u << 5;
+ info->mb = 31u << 0;
+ }
+ } else if (compress == 3) {
+ info->mr = stbi__get32le(s);
+ info->mg = stbi__get32le(s);
+ info->mb = stbi__get32le(s);
+ // not documented, but generated by photoshop and handled by mspaint
+ if (info->mr == info->mg && info->mg == info->mb) {
+ // ?!?!?
+ return stbi__errpuc("bad BMP", "bad BMP");
+ }
+ } else
+ return stbi__errpuc("bad BMP", "bad BMP");
+ }
+ } else {
+ int i;
+ if (hsz != 108 && hsz != 124)
+ return stbi__errpuc("bad BMP", "bad BMP");
+ info->mr = stbi__get32le(s);
+ info->mg = stbi__get32le(s);
+ info->mb = stbi__get32le(s);
+ info->ma = stbi__get32le(s);
+ stbi__get32le(s); // discard color space
+ for (i=0; i < 12; ++i)
+ stbi__get32le(s); // discard color space parameters
+ if (hsz == 124) {
+ stbi__get32le(s); // discard rendering intent
+ stbi__get32le(s); // discard offset of profile data
+ stbi__get32le(s); // discard size of profile data
+ stbi__get32le(s); // discard reserved
+ }
+ }
+ }
+ return (void *) 1;
+}
+
+
+static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *out;
+ unsigned int mr=0,mg=0,mb=0,ma=0, all_a;
+ stbi_uc pal[256][4];
+ int psize=0,i,j,width;
+ int flip_vertically, pad, target;
+ stbi__bmp_data info;
+ STBI_NOTUSED(ri);
+
+ info.all_a = 255;
+ if (stbi__bmp_parse_header(s, &info) == NULL)
+ return NULL; // error code already set
+
+ flip_vertically = ((int) s->img_y) > 0;
+ s->img_y = abs((int) s->img_y);
+
+ mr = info.mr;
+ mg = info.mg;
+ mb = info.mb;
+ ma = info.ma;
+ all_a = info.all_a;
+
+ if (info.hsz == 12) {
+ if (info.bpp < 24)
+ psize = (info.offset - 14 - 24) / 3;
+ } else {
+ if (info.bpp < 16)
+ psize = (info.offset - 14 - info.hsz) >> 2;
+ }
+
+ s->img_n = ma ? 4 : 3;
+ if (req_comp && req_comp >= 3) // we can directly decode 3 or 4
+ target = req_comp;
+ else
+ target = s->img_n; // if they want monochrome, we'll post-convert
+
+ // sanity-check size
+ if (!stbi__mad3sizes_valid(target, s->img_x, s->img_y, 0))
+ return stbi__errpuc("too large", "Corrupt BMP");
+
+ out = (stbi_uc *) stbi__malloc_mad3(target, s->img_x, s->img_y, 0);
+ if (!out) return stbi__errpuc("outofmem", "Out of memory");
+ if (info.bpp < 16) {
+ int z=0;
+ if (psize == 0 || psize > 256) { STBI_FREE(out); return stbi__errpuc("invalid", "Corrupt BMP"); }
+ for (i=0; i < psize; ++i) {
+ pal[i][2] = stbi__get8(s);
+ pal[i][1] = stbi__get8(s);
+ pal[i][0] = stbi__get8(s);
+ if (info.hsz != 12) stbi__get8(s);
+ pal[i][3] = 255;
+ }
+ stbi__skip(s, info.offset - 14 - info.hsz - psize * (info.hsz == 12 ? 3 : 4));
+ if (info.bpp == 1) width = (s->img_x + 7) >> 3;
+ else if (info.bpp == 4) width = (s->img_x + 1) >> 1;
+ else if (info.bpp == 8) width = s->img_x;
+ else { STBI_FREE(out); return stbi__errpuc("bad bpp", "Corrupt BMP"); }
+ pad = (-width)&3;
+ if (info.bpp == 1) {
+ for (j=0; j < (int) s->img_y; ++j) {
+ int bit_offset = 7, v = stbi__get8(s);
+ for (i=0; i < (int) s->img_x; ++i) {
+ int color = (v>>bit_offset)&0x1;
+ out[z++] = pal[color][0];
+ out[z++] = pal[color][1];
+ out[z++] = pal[color][2];
+ if((--bit_offset) < 0) {
+ bit_offset = 7;
+ v = stbi__get8(s);
+ }
+ }
+ stbi__skip(s, pad);
+ }
+ } else {
+ for (j=0; j < (int) s->img_y; ++j) {
+ for (i=0; i < (int) s->img_x; i += 2) {
+ int v=stbi__get8(s),v2=0;
+ if (info.bpp == 4) {
+ v2 = v & 15;
+ v >>= 4;
+ }
+ out[z++] = pal[v][0];
+ out[z++] = pal[v][1];
+ out[z++] = pal[v][2];
+ if (target == 4) out[z++] = 255;
+ if (i+1 == (int) s->img_x) break;
+ v = (info.bpp == 8) ? stbi__get8(s) : v2;
+ out[z++] = pal[v][0];
+ out[z++] = pal[v][1];
+ out[z++] = pal[v][2];
+ if (target == 4) out[z++] = 255;
+ }
+ stbi__skip(s, pad);
+ }
+ }
+ } else {
+ int rshift=0,gshift=0,bshift=0,ashift=0,rcount=0,gcount=0,bcount=0,acount=0;
+ int z = 0;
+ int easy=0;
+ stbi__skip(s, info.offset - 14 - info.hsz);
+ if (info.bpp == 24) width = 3 * s->img_x;
+ else if (info.bpp == 16) width = 2*s->img_x;
+ else /* bpp = 32 and pad = 0 */ width=0;
+ pad = (-width) & 3;
+ if (info.bpp == 24) {
+ easy = 1;
+ } else if (info.bpp == 32) {
+ if (mb == 0xff && mg == 0xff00 && mr == 0x00ff0000 && ma == 0xff000000)
+ easy = 2;
+ }
+ if (!easy) {
+ if (!mr || !mg || !mb) { STBI_FREE(out); return stbi__errpuc("bad masks", "Corrupt BMP"); }
+ // right shift amt to put high bit in position #7
+ rshift = stbi__high_bit(mr)-7; rcount = stbi__bitcount(mr);
+ gshift = stbi__high_bit(mg)-7; gcount = stbi__bitcount(mg);
+ bshift = stbi__high_bit(mb)-7; bcount = stbi__bitcount(mb);
+ ashift = stbi__high_bit(ma)-7; acount = stbi__bitcount(ma);
+ }
+ for (j=0; j < (int) s->img_y; ++j) {
+ if (easy) {
+ for (i=0; i < (int) s->img_x; ++i) {
+ unsigned char a;
+ out[z+2] = stbi__get8(s);
+ out[z+1] = stbi__get8(s);
+ out[z+0] = stbi__get8(s);
+ z += 3;
+ a = (easy == 2 ? stbi__get8(s) : 255);
+ all_a |= a;
+ if (target == 4) out[z++] = a;
+ }
+ } else {
+ int bpp = info.bpp;
+ for (i=0; i < (int) s->img_x; ++i) {
+ stbi__uint32 v = (bpp == 16 ? (stbi__uint32) stbi__get16le(s) : stbi__get32le(s));
+ unsigned int a;
+ out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mr, rshift, rcount));
+ out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mg, gshift, gcount));
+ out[z++] = STBI__BYTECAST(stbi__shiftsigned(v & mb, bshift, bcount));
+ a = (ma ? stbi__shiftsigned(v & ma, ashift, acount) : 255);
+ all_a |= a;
+ if (target == 4) out[z++] = STBI__BYTECAST(a);
+ }
+ }
+ stbi__skip(s, pad);
+ }
+ }
+
+ // if alpha channel is all 0s, replace with all 255s
+ if (target == 4 && all_a == 0)
+ for (i=4*s->img_x*s->img_y-1; i >= 0; i -= 4)
+ out[i] = 255;
+
+ if (flip_vertically) {
+ stbi_uc t;
+ for (j=0; j < (int) s->img_y>>1; ++j) {
+ stbi_uc *p1 = out + j *s->img_x*target;
+ stbi_uc *p2 = out + (s->img_y-1-j)*s->img_x*target;
+ for (i=0; i < (int) s->img_x*target; ++i) {
+ t = p1[i], p1[i] = p2[i], p2[i] = t;
+ }
+ }
+ }
+
+ if (req_comp && req_comp != target) {
+ out = stbi__convert_format(out, target, req_comp, s->img_x, s->img_y);
+ if (out == NULL) return out; // stbi__convert_format frees input on failure
+ }
+
+ *x = s->img_x;
+ *y = s->img_y;
+ if (comp) *comp = s->img_n;
+ return out;
+}
+#endif
+
+// Targa Truevision - TGA
+// by Jonathan Dummer
+#ifndef STBI_NO_TGA
+// returns STBI_rgb or whatever, 0 on error
+static int stbi__tga_get_comp(int bits_per_pixel, int is_grey, int* is_rgb16)
+{
+ // only RGB or RGBA (incl. 16bit) or grey allowed
+ if (is_rgb16) *is_rgb16 = 0;
+ switch(bits_per_pixel) {
+ case 8: return STBI_grey;
+ case 16: if(is_grey) return STBI_grey_alpha;
+ // fallthrough
+ case 15: if(is_rgb16) *is_rgb16 = 1;
+ return STBI_rgb;
+ case 24: // fallthrough
+ case 32: return bits_per_pixel/8;
+ default: return 0;
+ }
+}
+
+static int stbi__tga_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int tga_w, tga_h, tga_comp, tga_image_type, tga_bits_per_pixel, tga_colormap_bpp;
+ int sz, tga_colormap_type;
+ stbi__get8(s); // discard Offset
+ tga_colormap_type = stbi__get8(s); // colormap type
+ if( tga_colormap_type > 1 ) {
+ stbi__rewind(s);
+ return 0; // only RGB or indexed allowed
+ }
+ tga_image_type = stbi__get8(s); // image type
+ if ( tga_colormap_type == 1 ) { // colormapped (paletted) image
+ if (tga_image_type != 1 && tga_image_type != 9) {
+ stbi__rewind(s);
+ return 0;
+ }
+ stbi__skip(s,4); // skip index of first colormap entry and number of entries
+ sz = stbi__get8(s); // check bits per palette color entry
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) {
+ stbi__rewind(s);
+ return 0;
+ }
+ stbi__skip(s,4); // skip image x and y origin
+ tga_colormap_bpp = sz;
+ } else { // "normal" image w/o colormap - only RGB or grey allowed, +/- RLE
+ if ( (tga_image_type != 2) && (tga_image_type != 3) && (tga_image_type != 10) && (tga_image_type != 11) ) {
+ stbi__rewind(s);
+ return 0; // only RGB or grey allowed, +/- RLE
+ }
+ stbi__skip(s,9); // skip colormap specification and image x/y origin
+ tga_colormap_bpp = 0;
+ }
+ tga_w = stbi__get16le(s);
+ if( tga_w < 1 ) {
+ stbi__rewind(s);
+ return 0; // test width
+ }
+ tga_h = stbi__get16le(s);
+ if( tga_h < 1 ) {
+ stbi__rewind(s);
+ return 0; // test height
+ }
+ tga_bits_per_pixel = stbi__get8(s); // bits per pixel
+ stbi__get8(s); // ignore alpha bits
+ if (tga_colormap_bpp != 0) {
+ if((tga_bits_per_pixel != 8) && (tga_bits_per_pixel != 16)) {
+ // when using a colormap, tga_bits_per_pixel is the size of the indexes
+ // I don't think anything but 8 or 16bit indexes makes sense
+ stbi__rewind(s);
+ return 0;
+ }
+ tga_comp = stbi__tga_get_comp(tga_colormap_bpp, 0, NULL);
+ } else {
+ tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3) || (tga_image_type == 11), NULL);
+ }
+ if(!tga_comp) {
+ stbi__rewind(s);
+ return 0;
+ }
+ if (x) *x = tga_w;
+ if (y) *y = tga_h;
+ if (comp) *comp = tga_comp;
+ return 1; // seems to have passed everything
+}
+
+static int stbi__tga_test(stbi__context *s)
+{
+ int res = 0;
+ int sz, tga_color_type;
+ stbi__get8(s); // discard Offset
+ tga_color_type = stbi__get8(s); // color type
+ if ( tga_color_type > 1 ) goto errorEnd; // only RGB or indexed allowed
+ sz = stbi__get8(s); // image type
+ if ( tga_color_type == 1 ) { // colormapped (paletted) image
+ if (sz != 1 && sz != 9) goto errorEnd; // colortype 1 demands image type 1 or 9
+ stbi__skip(s,4); // skip index of first colormap entry and number of entries
+ sz = stbi__get8(s); // check bits per palette color entry
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
+ stbi__skip(s,4); // skip image x and y origin
+ } else { // "normal" image w/o colormap
+ if ( (sz != 2) && (sz != 3) && (sz != 10) && (sz != 11) ) goto errorEnd; // only RGB or grey allowed, +/- RLE
+ stbi__skip(s,9); // skip colormap specification and image x/y origin
+ }
+ if ( stbi__get16le(s) < 1 ) goto errorEnd; // test width
+ if ( stbi__get16le(s) < 1 ) goto errorEnd; // test height
+ sz = stbi__get8(s); // bits per pixel
+ if ( (tga_color_type == 1) && (sz != 8) && (sz != 16) ) goto errorEnd; // for colormapped images, bpp is size of an index
+ if ( (sz != 8) && (sz != 15) && (sz != 16) && (sz != 24) && (sz != 32) ) goto errorEnd;
+
+ res = 1; // if we got this far, everything's good and we can return 1 instead of 0
+
+errorEnd:
+ stbi__rewind(s);
+ return res;
+}
+
+// read 16bit value and convert to 24bit RGB
+static void stbi__tga_read_rgb16(stbi__context *s, stbi_uc* out)
+{
+ stbi__uint16 px = (stbi__uint16)stbi__get16le(s);
+ stbi__uint16 fiveBitMask = 31;
+ // we have 3 channels with 5bits each
+ int r = (px >> 10) & fiveBitMask;
+ int g = (px >> 5) & fiveBitMask;
+ int b = px & fiveBitMask;
+ // Note that this saves the data in RGB(A) order, so it doesn't need to be swapped later
+ out[0] = (stbi_uc)((r * 255)/31);
+ out[1] = (stbi_uc)((g * 255)/31);
+ out[2] = (stbi_uc)((b * 255)/31);
+
+ // some people claim that the most significant bit might be used for alpha
+ // (possibly if an alpha-bit is set in the "image descriptor byte")
+ // but that only made 16bit test images completely translucent..
+ // so let's treat all 15 and 16bit TGAs as RGB with no alpha.
+}
+
+static void *stbi__tga_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ // read in the TGA header stuff
+ int tga_offset = stbi__get8(s);
+ int tga_indexed = stbi__get8(s);
+ int tga_image_type = stbi__get8(s);
+ int tga_is_RLE = 0;
+ int tga_palette_start = stbi__get16le(s);
+ int tga_palette_len = stbi__get16le(s);
+ int tga_palette_bits = stbi__get8(s);
+ int tga_x_origin = stbi__get16le(s);
+ int tga_y_origin = stbi__get16le(s);
+ int tga_width = stbi__get16le(s);
+ int tga_height = stbi__get16le(s);
+ int tga_bits_per_pixel = stbi__get8(s);
+ int tga_comp, tga_rgb16=0;
+ int tga_inverted = stbi__get8(s);
+ // int tga_alpha_bits = tga_inverted & 15; // the 4 lowest bits - unused (useless?)
+ // image data
+ unsigned char *tga_data;
+ unsigned char *tga_palette = NULL;
+ int i, j;
+ unsigned char raw_data[4] = {0};
+ int RLE_count = 0;
+ int RLE_repeating = 0;
+ int read_next_pixel = 1;
+ STBI_NOTUSED(ri);
+
+   // do a tiny bit of processing
+ if ( tga_image_type >= 8 )
+ {
+ tga_image_type -= 8;
+ tga_is_RLE = 1;
+ }
+ tga_inverted = 1 - ((tga_inverted >> 5) & 1);
+
+ // If I'm paletted, then I'll use the number of bits from the palette
+ if ( tga_indexed ) tga_comp = stbi__tga_get_comp(tga_palette_bits, 0, &tga_rgb16);
+ else tga_comp = stbi__tga_get_comp(tga_bits_per_pixel, (tga_image_type == 3), &tga_rgb16);
+
+ if(!tga_comp) // shouldn't really happen, stbi__tga_test() should have ensured basic consistency
+ return stbi__errpuc("bad format", "Can't find out TGA pixelformat");
+
+ // tga info
+ *x = tga_width;
+ *y = tga_height;
+ if (comp) *comp = tga_comp;
+
+ if (!stbi__mad3sizes_valid(tga_width, tga_height, tga_comp, 0))
+ return stbi__errpuc("too large", "Corrupt TGA");
+
+ tga_data = (unsigned char*)stbi__malloc_mad3(tga_width, tga_height, tga_comp, 0);
+ if (!tga_data) return stbi__errpuc("outofmem", "Out of memory");
+
+ // skip to the data's starting position (offset usually = 0)
+ stbi__skip(s, tga_offset );
+
+ if ( !tga_indexed && !tga_is_RLE && !tga_rgb16 ) {
+ for (i=0; i < tga_height; ++i) {
+ int row = tga_inverted ? tga_height -i - 1 : i;
+ stbi_uc *tga_row = tga_data + row*tga_width*tga_comp;
+ stbi__getn(s, tga_row, tga_width * tga_comp);
+ }
+ } else {
+ // do I need to load a palette?
+ if ( tga_indexed)
+ {
+ // any data to skip? (offset usually = 0)
+ stbi__skip(s, tga_palette_start );
+ // load the palette
+ tga_palette = (unsigned char*)stbi__malloc_mad2(tga_palette_len, tga_comp, 0);
+ if (!tga_palette) {
+ STBI_FREE(tga_data);
+ return stbi__errpuc("outofmem", "Out of memory");
+ }
+ if (tga_rgb16) {
+ stbi_uc *pal_entry = tga_palette;
+ STBI_ASSERT(tga_comp == STBI_rgb);
+ for (i=0; i < tga_palette_len; ++i) {
+ stbi__tga_read_rgb16(s, pal_entry);
+ pal_entry += tga_comp;
+ }
+ } else if (!stbi__getn(s, tga_palette, tga_palette_len * tga_comp)) {
+ STBI_FREE(tga_data);
+ STBI_FREE(tga_palette);
+ return stbi__errpuc("bad palette", "Corrupt TGA");
+ }
+ }
+ // load the data
+ for (i=0; i < tga_width * tga_height; ++i)
+ {
+ // if I'm in RLE mode, do I need to get a RLE stbi__pngchunk?
+ if ( tga_is_RLE )
+ {
+ if ( RLE_count == 0 )
+ {
+ // yep, get the next byte as a RLE command
+ int RLE_cmd = stbi__get8(s);
+ RLE_count = 1 + (RLE_cmd & 127);
+ RLE_repeating = RLE_cmd >> 7;
+ read_next_pixel = 1;
+ } else if ( !RLE_repeating )
+ {
+ read_next_pixel = 1;
+ }
+ } else
+ {
+ read_next_pixel = 1;
+ }
+ // OK, if I need to read a pixel, do it now
+ if ( read_next_pixel )
+ {
+ // load however much data we did have
+ if ( tga_indexed )
+ {
+ // read in index, then perform the lookup
+ int pal_idx = (tga_bits_per_pixel == 8) ? stbi__get8(s) : stbi__get16le(s);
+ if ( pal_idx >= tga_palette_len ) {
+ // invalid index
+ pal_idx = 0;
+ }
+ pal_idx *= tga_comp;
+ for (j = 0; j < tga_comp; ++j) {
+ raw_data[j] = tga_palette[pal_idx+j];
+ }
+ } else if(tga_rgb16) {
+ STBI_ASSERT(tga_comp == STBI_rgb);
+ stbi__tga_read_rgb16(s, raw_data);
+ } else {
+ // read in the data raw
+ for (j = 0; j < tga_comp; ++j) {
+ raw_data[j] = stbi__get8(s);
+ }
+ }
+ // clear the reading flag for the next pixel
+ read_next_pixel = 0;
+ } // end of reading a pixel
+
+ // copy data
+ for (j = 0; j < tga_comp; ++j)
+ tga_data[i*tga_comp+j] = raw_data[j];
+
+ // in case we're in RLE mode, keep counting down
+ --RLE_count;
+ }
+ // do I need to invert the image?
+ if ( tga_inverted )
+ {
+ for (j = 0; j*2 < tga_height; ++j)
+ {
+ int index1 = j * tga_width * tga_comp;
+ int index2 = (tga_height - 1 - j) * tga_width * tga_comp;
+ for (i = tga_width * tga_comp; i > 0; --i)
+ {
+ unsigned char temp = tga_data[index1];
+ tga_data[index1] = tga_data[index2];
+ tga_data[index2] = temp;
+ ++index1;
+ ++index2;
+ }
+ }
+ }
+ // clear my palette, if I had one
+ if ( tga_palette != NULL )
+ {
+ STBI_FREE( tga_palette );
+ }
+ }
+
+ // swap RGB - if the source data was RGB16, it already is in the right order
+ if (tga_comp >= 3 && !tga_rgb16)
+ {
+ unsigned char* tga_pixel = tga_data;
+ for (i=0; i < tga_width * tga_height; ++i)
+ {
+ unsigned char temp = tga_pixel[0];
+ tga_pixel[0] = tga_pixel[2];
+ tga_pixel[2] = temp;
+ tga_pixel += tga_comp;
+ }
+ }
+
+ // convert to target component count
+ if (req_comp && req_comp != tga_comp)
+ tga_data = stbi__convert_format(tga_data, tga_comp, req_comp, tga_width, tga_height);
+
+ // the things I do to get rid of an error message, and yet keep
+ // Microsoft's C compilers happy... [8^(
+ tga_palette_start = tga_palette_len = tga_palette_bits =
+ tga_x_origin = tga_y_origin = 0;
+ // OK, done
+ return tga_data;
+}
+#endif
+
+// *************************************************************************************************
+// Photoshop PSD loader -- PD by Thatcher Ulrich, integration by Nicolas Schulz, tweaked by STB
+
+#ifndef STBI_NO_PSD
+static int stbi__psd_test(stbi__context *s)
+{
+ int r = (stbi__get32be(s) == 0x38425053);
+ stbi__rewind(s);
+ return r;
+}
+
+static int stbi__psd_decode_rle(stbi__context *s, stbi_uc *p, int pixelCount)
+{
+ int count, nleft, len;
+
+ count = 0;
+ while ((nleft = pixelCount - count) > 0) {
+ len = stbi__get8(s);
+ if (len == 128) {
+ // No-op.
+ } else if (len < 128) {
+ // Copy next len+1 bytes literally.
+ len++;
+ if (len > nleft) return 0; // corrupt data
+ count += len;
+ while (len) {
+ *p = stbi__get8(s);
+ p += 4;
+ len--;
+ }
+ } else if (len > 128) {
+ stbi_uc val;
+ // Next -len+1 bytes in the dest are replicated from next source byte.
+ // (Interpret len as a negative 8-bit int.)
+ len = 257 - len;
+ if (len > nleft) return 0; // corrupt data
+ val = stbi__get8(s);
+ count += len;
+ while (len) {
+ *p = val;
+ p += 4;
+ len--;
+ }
+ }
+ }
+
+ return 1;
+}
+
+static void *stbi__psd_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri, int bpc)
+{
+ int pixelCount;
+ int channelCount, compression;
+ int channel, i;
+ int bitdepth;
+ int w,h;
+ stbi_uc *out;
+ STBI_NOTUSED(ri);
+
+ // Check identifier
+ if (stbi__get32be(s) != 0x38425053) // "8BPS"
+ return stbi__errpuc("not PSD", "Corrupt PSD image");
+
+ // Check file type version.
+ if (stbi__get16be(s) != 1)
+ return stbi__errpuc("wrong version", "Unsupported version of PSD image");
+
+ // Skip 6 reserved bytes.
+ stbi__skip(s, 6 );
+
+ // Read the number of channels (R, G, B, A, etc).
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16)
+ return stbi__errpuc("wrong channel count", "Unsupported number of channels in PSD image");
+
+ // Read the rows and columns of the image.
+ h = stbi__get32be(s);
+ w = stbi__get32be(s);
+
+ // Make sure the depth is 8 bits.
+ bitdepth = stbi__get16be(s);
+ if (bitdepth != 8 && bitdepth != 16)
+ return stbi__errpuc("unsupported bit depth", "PSD bit depth is not 8 or 16 bit");
+
+ // Make sure the color mode is RGB.
+ // Valid options are:
+ // 0: Bitmap
+ // 1: Grayscale
+ // 2: Indexed color
+ // 3: RGB color
+ // 4: CMYK color
+ // 7: Multichannel
+ // 8: Duotone
+ // 9: Lab color
+ if (stbi__get16be(s) != 3)
+ return stbi__errpuc("wrong color format", "PSD is not in RGB color format");
+
+ // Skip the Mode Data. (It's the palette for indexed color; other info for other modes.)
+ stbi__skip(s,stbi__get32be(s) );
+
+ // Skip the image resources. (resolution, pen tool paths, etc)
+ stbi__skip(s, stbi__get32be(s) );
+
+ // Skip the reserved data.
+ stbi__skip(s, stbi__get32be(s) );
+
+ // Find out if the data is compressed.
+ // Known values:
+ // 0: no compression
+ // 1: RLE compressed
+ compression = stbi__get16be(s);
+ if (compression > 1)
+ return stbi__errpuc("bad compression", "PSD has an unknown compression format");
+
+ // Check size
+ if (!stbi__mad3sizes_valid(4, w, h, 0))
+ return stbi__errpuc("too large", "Corrupt PSD");
+
+ // Create the destination image.
+
+ if (!compression && bitdepth == 16 && bpc == 16) {
+ out = (stbi_uc *) stbi__malloc_mad3(8, w, h, 0);
+ ri->bits_per_channel = 16;
+ } else
+ out = (stbi_uc *) stbi__malloc(4 * w*h);
+
+ if (!out) return stbi__errpuc("outofmem", "Out of memory");
+ pixelCount = w*h;
+
+ // Initialize the data to zero.
+ //memset( out, 0, pixelCount * 4 );
+
+ // Finally, the image data.
+ if (compression) {
+ // RLE as used by .PSD and .TIFF
+ // Loop until you get the number of unpacked bytes you are expecting:
+ // Read the next source byte into n.
+ // If n is between 0 and 127 inclusive, copy the next n+1 bytes literally.
+ // Else if n is between -127 and -1 inclusive, copy the next byte -n+1 times.
+ // Else if n is 128, noop.
+ // Endloop
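+      // (this is the same byte-oriented PackBits scheme used by Macintosh and TIFF files)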
+
+      // The RLE-compressed data is preceded by a 2-byte data count for each row in the data,
+ // which we're going to just skip.
+ stbi__skip(s, h * channelCount * 2 );
+
+ // Read the RLE data by channel.
+ for (channel = 0; channel < 4; channel++) {
+ stbi_uc *p;
+
+ p = out+channel;
+ if (channel >= channelCount) {
+ // Fill this channel with default data.
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = (channel == 3 ? 255 : 0);
+ } else {
+ // Read the RLE data.
+ if (!stbi__psd_decode_rle(s, p, pixelCount)) {
+ STBI_FREE(out);
+ return stbi__errpuc("corrupt", "bad RLE data");
+ }
+ }
+ }
+
+ } else {
+ // We're at the raw image data. It's each channel in order (Red, Green, Blue, Alpha, ...)
+ // where each channel consists of an 8-bit (or 16-bit) value for each pixel in the image.
+
+ // Read the data by channel.
+ for (channel = 0; channel < 4; channel++) {
+ if (channel >= channelCount) {
+ // Fill this channel with default data.
+ if (bitdepth == 16 && bpc == 16) {
+ stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
+ stbi__uint16 val = channel == 3 ? 65535 : 0;
+ for (i = 0; i < pixelCount; i++, q += 4)
+ *q = val;
+ } else {
+ stbi_uc *p = out+channel;
+ stbi_uc val = channel == 3 ? 255 : 0;
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = val;
+ }
+ } else {
+ if (ri->bits_per_channel == 16) { // output bpc
+ stbi__uint16 *q = ((stbi__uint16 *) out) + channel;
+ for (i = 0; i < pixelCount; i++, q += 4)
+ *q = (stbi__uint16) stbi__get16be(s);
+ } else {
+ stbi_uc *p = out+channel;
+ if (bitdepth == 16) { // input bpc
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = (stbi_uc) (stbi__get16be(s) >> 8);
+ } else {
+ for (i = 0; i < pixelCount; i++, p += 4)
+ *p = stbi__get8(s);
+ }
+ }
+ }
+ }
+ }
+
+ // remove weird white matte from PSD
+ if (channelCount >= 4) {
+ if (ri->bits_per_channel == 16) {
+ for (i=0; i < w*h; ++i) {
+ stbi__uint16 *pixel = (stbi__uint16 *) out + 4*i;
+ if (pixel[3] != 0 && pixel[3] != 65535) {
+ float a = pixel[3] / 65535.0f;
+ float ra = 1.0f / a;
+ float inv_a = 65535.0f * (1 - ra);
+ pixel[0] = (stbi__uint16) (pixel[0]*ra + inv_a);
+ pixel[1] = (stbi__uint16) (pixel[1]*ra + inv_a);
+ pixel[2] = (stbi__uint16) (pixel[2]*ra + inv_a);
+ }
+ }
+ } else {
+ for (i=0; i < w*h; ++i) {
+ unsigned char *pixel = out + 4*i;
+ if (pixel[3] != 0 && pixel[3] != 255) {
+ float a = pixel[3] / 255.0f;
+ float ra = 1.0f / a;
+ float inv_a = 255.0f * (1 - ra);
+ pixel[0] = (unsigned char) (pixel[0]*ra + inv_a);
+ pixel[1] = (unsigned char) (pixel[1]*ra + inv_a);
+ pixel[2] = (unsigned char) (pixel[2]*ra + inv_a);
+ }
+ }
+ }
+ }
+
+ // convert to desired output format
+ if (req_comp && req_comp != 4) {
+ if (ri->bits_per_channel == 16)
+ out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, 4, req_comp, w, h);
+ else
+ out = stbi__convert_format(out, 4, req_comp, w, h);
+ if (out == NULL) return out; // stbi__convert_format frees input on failure
+ }
+
+ if (comp) *comp = 4;
+ *y = h;
+ *x = w;
+
+ return out;
+}
+#endif
+
+// *************************************************************************************************
+// Softimage PIC loader
+// by Tom Seddon
+//
+// See http://softimage.wiki.softimage.com/index.php/INFO:_PIC_file_format
+// See http://ozviz.wasp.uwa.edu.au/~pbourke/dataformats/softimagepic/
+
+#ifndef STBI_NO_PIC
+static int stbi__pic_is4(stbi__context *s,const char *str)
+{
+ int i;
+ for (i=0; i<4; ++i)
+ if (stbi__get8(s) != (stbi_uc)str[i])
+ return 0;
+
+ return 1;
+}
+
+static int stbi__pic_test_core(stbi__context *s)
+{
+ int i;
+
+ if (!stbi__pic_is4(s,"\x53\x80\xF6\x34"))
+ return 0;
+
+ for(i=0;i<84;++i)
+ stbi__get8(s);
+
+ if (!stbi__pic_is4(s,"PICT"))
+ return 0;
+
+ return 1;
+}
+
+typedef struct
+{
+ stbi_uc size,type,channel;
+} stbi__pic_packet;
+
+static stbi_uc *stbi__readval(stbi__context *s, int channel, stbi_uc *dest)
+{
+ int mask=0x80, i;
+
+ for (i=0; i<4; ++i, mask>>=1) {
+ if (channel & mask) {
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","PIC file too short");
+ dest[i]=stbi__get8(s);
+ }
+ }
+
+ return dest;
+}
+
+static void stbi__copyval(int channel,stbi_uc *dest,const stbi_uc *src)
+{
+ int mask=0x80,i;
+
+ for (i=0;i<4; ++i, mask>>=1)
+ if (channel&mask)
+ dest[i]=src[i];
+}
+
+static stbi_uc *stbi__pic_load_core(stbi__context *s,int width,int height,int *comp, stbi_uc *result)
+{
+ int act_comp=0,num_packets=0,y,chained;
+ stbi__pic_packet packets[10];
+
+ // this will (should...) cater for even some bizarre stuff like having data
+ // for the same channel in multiple packets.
+ do {
+ stbi__pic_packet *packet;
+
+ if (num_packets==sizeof(packets)/sizeof(packets[0]))
+ return stbi__errpuc("bad format","too many packets");
+
+ packet = &packets[num_packets++];
+
+ chained = stbi__get8(s);
+ packet->size = stbi__get8(s);
+ packet->type = stbi__get8(s);
+ packet->channel = stbi__get8(s);
+
+ act_comp |= packet->channel;
+
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (reading packets)");
+ if (packet->size != 8) return stbi__errpuc("bad format","packet isn't 8bpp");
+ } while (chained);
+
+ *comp = (act_comp & 0x10 ? 4 : 3); // has alpha channel?
+
+   for(y=0; y<height; ++y) {
+      int packet_idx;
+
+      for(packet_idx=0; packet_idx < num_packets; ++packet_idx) {
+         stbi__pic_packet *packet = &packets[packet_idx];
+         stbi_uc *dest = result+y*width*4;
+
+         switch (packet->type) {
+ default:
+ return stbi__errpuc("bad format","packet has bad compression type");
+
+ case 0: {//uncompressed
+ int x;
+
+            for(x=0;x<width;++x, dest+=4)
+               if (!stbi__readval(s,packet->channel,dest))
+                  return 0;
+ break;
+ }
+
+ case 1://Pure RLE
+ {
+ int left=width, i;
+
+ while (left>0) {
+ stbi_uc count,value[4];
+
+ count=stbi__get8(s);
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pure read count)");
+
+ if (count > left)
+ count = (stbi_uc) left;
+
+ if (!stbi__readval(s,packet->channel,value)) return 0;
+
+               for(i=0; i<count; ++i,dest+=4)
+                  stbi__copyval(packet->channel,dest,value);
+ left -= count;
+ }
+ }
+ break;
+
+ case 2: {//Mixed RLE
+ int left=width;
+ while (left>0) {
+ int count = stbi__get8(s), i;
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (mixed read count)");
+
+ if (count >= 128) { // Repeated
+ stbi_uc value[4];
+
+ if (count==128)
+ count = stbi__get16be(s);
+ else
+ count -= 127;
+ if (count > left)
+ return stbi__errpuc("bad file","scanline overrun");
+
+ if (!stbi__readval(s,packet->channel,value))
+ return 0;
+
+               for(i=0;i<count;++i, dest += 4)
+                  stbi__copyval(packet->channel,dest,value);
+ } else { // Raw
+ ++count;
+ if (count>left) return stbi__errpuc("bad file","scanline overrun");
+
+               for(i=0;i<count;++i, dest+=4)
+                  if (!stbi__readval(s,packet->channel,dest))
+                     return 0;
+ }
+ left-=count;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return result;
+}
+
+static void *stbi__pic_load(stbi__context *s,int *px,int *py,int *comp,int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *result;
+ int i, x,y, internal_comp;
+ STBI_NOTUSED(ri);
+
+ if (!comp) comp = &internal_comp;
+
+ for (i=0; i<92; ++i)
+ stbi__get8(s);
+
+ x = stbi__get16be(s);
+ y = stbi__get16be(s);
+ if (stbi__at_eof(s)) return stbi__errpuc("bad file","file too short (pic header)");
+ if (!stbi__mad3sizes_valid(x, y, 4, 0)) return stbi__errpuc("too large", "PIC image too large to decode");
+
+ stbi__get32be(s); //skip `ratio'
+ stbi__get16be(s); //skip `fields'
+ stbi__get16be(s); //skip `pad'
+
+ // intermediate buffer is RGBA
+   result = (stbi_uc *) stbi__malloc_mad3(x, y, 4, 0);
+   if (!result) return stbi__errpuc("outofmem", "Out of memory");
+   memset(result, 0xff, x*y*4);
+
+ if (!stbi__pic_load_core(s,x,y,comp, result)) {
+ STBI_FREE(result);
+ result=0;
+ }
+ *px = x;
+ *py = y;
+ if (req_comp == 0) req_comp = *comp;
+ result=stbi__convert_format(result,4,req_comp,x,y);
+
+ return result;
+}
+
+static int stbi__pic_test(stbi__context *s)
+{
+ int r = stbi__pic_test_core(s);
+ stbi__rewind(s);
+ return r;
+}
+#endif
+
+// *************************************************************************************************
+// GIF loader -- public domain by Jean-Marc Lienher -- simplified/shrunk by stb
+
+#ifndef STBI_NO_GIF
+typedef struct
+{
+ stbi__int16 prefix;
+ stbi_uc first;
+ stbi_uc suffix;
+} stbi__gif_lzw;
+
+typedef struct
+{
+ int w,h;
+ stbi_uc *out; // output buffer (always 4 components)
+ stbi_uc *background; // The current "background" as far as a gif is concerned
+ stbi_uc *history;
+ int flags, bgindex, ratio, transparent, eflags;
+ stbi_uc pal[256][4];
+ stbi_uc lpal[256][4];
+ stbi__gif_lzw codes[8192];
+ stbi_uc *color_table;
+ int parse, step;
+ int lflags;
+ int start_x, start_y;
+ int max_x, max_y;
+ int cur_x, cur_y;
+ int line_size;
+ int delay;
+} stbi__gif;
+
+static int stbi__gif_test_raw(stbi__context *s)
+{
+ int sz;
+ if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8') return 0;
+ sz = stbi__get8(s);
+ if (sz != '9' && sz != '7') return 0;
+ if (stbi__get8(s) != 'a') return 0;
+ return 1;
+}
+
+static int stbi__gif_test(stbi__context *s)
+{
+ int r = stbi__gif_test_raw(s);
+ stbi__rewind(s);
+ return r;
+}
+
+static void stbi__gif_parse_colortable(stbi__context *s, stbi_uc pal[256][4], int num_entries, int transp)
+{
+ int i;
+ for (i=0; i < num_entries; ++i) {
+ pal[i][2] = stbi__get8(s);
+ pal[i][1] = stbi__get8(s);
+ pal[i][0] = stbi__get8(s);
+ pal[i][3] = transp == i ? 0 : 255;
+ }
+}
+
+static int stbi__gif_header(stbi__context *s, stbi__gif *g, int *comp, int is_info)
+{
+ stbi_uc version;
+ if (stbi__get8(s) != 'G' || stbi__get8(s) != 'I' || stbi__get8(s) != 'F' || stbi__get8(s) != '8')
+ return stbi__err("not GIF", "Corrupt GIF");
+
+ version = stbi__get8(s);
+ if (version != '7' && version != '9') return stbi__err("not GIF", "Corrupt GIF");
+ if (stbi__get8(s) != 'a') return stbi__err("not GIF", "Corrupt GIF");
+
+ stbi__g_failure_reason = "";
+ g->w = stbi__get16le(s);
+ g->h = stbi__get16le(s);
+ g->flags = stbi__get8(s);
+ g->bgindex = stbi__get8(s);
+ g->ratio = stbi__get8(s);
+ g->transparent = -1;
+
+ if (comp != 0) *comp = 4; // can't actually tell whether it's 3 or 4 until we parse the comments
+
+ if (is_info) return 1;
+
+ if (g->flags & 0x80)
+ stbi__gif_parse_colortable(s,g->pal, 2 << (g->flags & 7), -1);
+
+ return 1;
+}
+
+static int stbi__gif_info_raw(stbi__context *s, int *x, int *y, int *comp)
+{
+ stbi__gif* g = (stbi__gif*) stbi__malloc(sizeof(stbi__gif));
+ if (!stbi__gif_header(s, g, comp, 1)) {
+ STBI_FREE(g);
+ stbi__rewind( s );
+ return 0;
+ }
+ if (x) *x = g->w;
+ if (y) *y = g->h;
+ STBI_FREE(g);
+ return 1;
+}
+
+static void stbi__out_gif_code(stbi__gif *g, stbi__uint16 code)
+{
+ stbi_uc *p, *c;
+ int idx;
+
+ // recurse to decode the prefixes, since the linked-list is backwards,
+ // and working backwards through an interleaved image would be nasty
+ if (g->codes[code].prefix >= 0)
+ stbi__out_gif_code(g, g->codes[code].prefix);
+
+ if (g->cur_y >= g->max_y) return;
+
+ idx = g->cur_x + g->cur_y;
+ p = &g->out[idx];
+ g->history[idx / 4] = 1;
+
+ c = &g->color_table[g->codes[code].suffix * 4];
+ if (c[3] > 128) { // don't render transparent pixels;
+ p[0] = c[2];
+ p[1] = c[1];
+ p[2] = c[0];
+ p[3] = c[3];
+ }
+ g->cur_x += 4;
+
+ if (g->cur_x >= g->max_x) {
+ g->cur_x = g->start_x;
+ g->cur_y += g->step;
+
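+      // interlaced images: once this pass has run off the bottom, move to the next GIF
+      // interlace pass (start row and row step derived from the remaining parse count)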
+ while (g->cur_y >= g->max_y && g->parse > 0) {
+ g->step = (1 << g->parse) * g->line_size;
+ g->cur_y = g->start_y + (g->step >> 1);
+ --g->parse;
+ }
+ }
+}
+
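+// decode one LZW-compressed image block: codes start at lzw_cs+1 bits and grow up to 12 bits,
+// and the dictionary is rebuilt whenever the clear code appears.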
+static stbi_uc *stbi__process_gif_raster(stbi__context *s, stbi__gif *g)
+{
+ stbi_uc lzw_cs;
+ stbi__int32 len, init_code;
+ stbi__uint32 first;
+ stbi__int32 codesize, codemask, avail, oldcode, bits, valid_bits, clear;
+ stbi__gif_lzw *p;
+
+ lzw_cs = stbi__get8(s);
+ if (lzw_cs > 12) return NULL;
+ clear = 1 << lzw_cs;
+ first = 1;
+ codesize = lzw_cs + 1;
+ codemask = (1 << codesize) - 1;
+ bits = 0;
+ valid_bits = 0;
+ for (init_code = 0; init_code < clear; init_code++) {
+ g->codes[init_code].prefix = -1;
+ g->codes[init_code].first = (stbi_uc) init_code;
+ g->codes[init_code].suffix = (stbi_uc) init_code;
+ }
+
+ // support no starting clear code
+ avail = clear+2;
+ oldcode = -1;
+
+ len = 0;
+ for(;;) {
+ if (valid_bits < codesize) {
+ if (len == 0) {
+ len = stbi__get8(s); // start new block
+ if (len == 0)
+ return g->out;
+ }
+ --len;
+ bits |= (stbi__int32) stbi__get8(s) << valid_bits;
+ valid_bits += 8;
+ } else {
+ stbi__int32 code = bits & codemask;
+ bits >>= codesize;
+ valid_bits -= codesize;
+ // @OPTIMIZE: is there some way we can accelerate the non-clear path?
+ if (code == clear) { // clear code
+ codesize = lzw_cs + 1;
+ codemask = (1 << codesize) - 1;
+ avail = clear + 2;
+ oldcode = -1;
+ first = 0;
+ } else if (code == clear + 1) { // end of stream code
+ stbi__skip(s, len);
+ while ((len = stbi__get8(s)) > 0)
+ stbi__skip(s,len);
+ return g->out;
+ } else if (code <= avail) {
+ if (first) {
+ return stbi__errpuc("no clear code", "Corrupt GIF");
+ }
+
+ if (oldcode >= 0) {
+ p = &g->codes[avail++];
+ if (avail > 8192) {
+ return stbi__errpuc("too many codes", "Corrupt GIF");
+ }
+
+ p->prefix = (stbi__int16) oldcode;
+ p->first = g->codes[oldcode].first;
+ p->suffix = (code == avail) ? p->first : g->codes[code].first;
+ } else if (code == avail)
+ return stbi__errpuc("illegal code in raster", "Corrupt GIF");
+
+ stbi__out_gif_code(g, (stbi__uint16) code);
+
+ if ((avail & codemask) == 0 && avail <= 0x0FFF) {
+ codesize++;
+ codemask = (1 << codesize) - 1;
+ }
+
+ oldcode = code;
+ } else {
+ return stbi__errpuc("illegal code in raster", "Corrupt GIF");
+ }
+ }
+ }
+}
+
+// this function is designed to support animated gifs; stbi__load_gif_main below calls it once per frame
+// two back is the image from two frames ago, used for a very specific disposal format
+static stbi_uc *stbi__gif_load_next(stbi__context *s, stbi__gif *g, int *comp, int req_comp, stbi_uc *two_back)
+{
+ int dispose;
+ int first_frame;
+ int pi;
+ int pcount;
+
+ // on first frame, any non-written pixels get the background colour (non-transparent)
+ first_frame = 0;
+ if (g->out == 0) {
+ if (!stbi__gif_header(s, g, comp,0)) return 0; // stbi__g_failure_reason set by stbi__gif_header
+ g->out = (stbi_uc *) stbi__malloc(4 * g->w * g->h);
+ g->background = (stbi_uc *) stbi__malloc(4 * g->w * g->h);
+ g->history = (stbi_uc *) stbi__malloc(g->w * g->h);
+ if (g->out == 0) return stbi__errpuc("outofmem", "Out of memory");
+
+      // image is treated as "transparent" at the start - ie, nothing overwrites the current background;
+      // background colour is only used for pixels that are not rendered first frame, after that "background"
+      // color refers to the color that was there the previous frame.
+ memset( g->out, 0x00, 4 * g->w * g->h );
+ memset( g->background, 0x00, 4 * g->w * g->h ); // state of the background (starts transparent)
+ memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
+ first_frame = 1;
+ } else {
+      // second frame - how do we dispose of the previous one?
+ dispose = (g->eflags & 0x1C) >> 2;
+ pcount = g->w * g->h;
+
+ if ((dispose == 3) && (two_back == 0)) {
+ dispose = 2; // if I don't have an image to revert back to, default to the old background
+ }
+
+ if (dispose == 3) { // use previous graphic
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi]) {
+ memcpy( &g->out[pi * 4], &two_back[pi * 4], 4 );
+ }
+ }
+ } else if (dispose == 2) {
+ // restore what was changed last frame to background before that frame;
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi]) {
+ memcpy( &g->out[pi * 4], &g->background[pi * 4], 4 );
+ }
+ }
+ } else {
+         // This is a non-disposal case either way, so just
+ // leave the pixels as is, and they will become the new background
+ // 1: do not dispose
+ // 0: not specified.
+ }
+
+      // background is what out is after the undoing of the previous frame;
+ memcpy( g->background, g->out, 4 * g->w * g->h );
+ }
+
+ // clear my history;
+ memset( g->history, 0x00, g->w * g->h ); // pixels that were affected previous frame
+
+ for (;;) {
+ int tag = stbi__get8(s);
+ switch (tag) {
+ case 0x2C: /* Image Descriptor */
+ {
+ stbi__int32 x, y, w, h;
+ stbi_uc *o;
+
+ x = stbi__get16le(s);
+ y = stbi__get16le(s);
+ w = stbi__get16le(s);
+ h = stbi__get16le(s);
+ if (((x + w) > (g->w)) || ((y + h) > (g->h)))
+ return stbi__errpuc("bad Image Descriptor", "Corrupt GIF");
+
+ g->line_size = g->w * 4;
+ g->start_x = x * 4;
+ g->start_y = y * g->line_size;
+ g->max_x = g->start_x + w * 4;
+ g->max_y = g->start_y + h * g->line_size;
+ g->cur_x = g->start_x;
+ g->cur_y = g->start_y;
+
+ g->lflags = stbi__get8(s);
+
+ if (g->lflags & 0x40) {
+ g->step = 8 * g->line_size; // first interlaced spacing
+ g->parse = 3;
+ } else {
+ g->step = g->line_size;
+ g->parse = 0;
+ }
+
+ if (g->lflags & 0x80) {
+ stbi__gif_parse_colortable(s,g->lpal, 2 << (g->lflags & 7), g->eflags & 0x01 ? g->transparent : -1);
+ g->color_table = (stbi_uc *) g->lpal;
+ } else if (g->flags & 0x80) {
+ g->color_table = (stbi_uc *) g->pal;
+ } else
+ return stbi__errpuc("missing color table", "Corrupt GIF");
+
+ o = stbi__process_gif_raster(s, g);
+ if (o == NULL) return NULL;
+
+ // if this was the first frame, any pixel not drawn to gets the background color
+ pcount = g->w * g->h;
+ if (first_frame && (g->bgindex > 0)) {
+ for (pi = 0; pi < pcount; ++pi) {
+ if (g->history[pi] == 0) {
+ g->pal[g->bgindex][3] = 255; // just in case it was made transparent, undo that; It will be reset next frame if need be;
+ memcpy( &g->out[pi * 4], &g->pal[g->bgindex], 4 );
+ }
+ }
+ }
+
+ return o;
+ }
+
+ case 0x21: // Comment Extension.
+ {
+ int len;
+ int ext = stbi__get8(s);
+ if (ext == 0xF9) { // Graphic Control Extension.
+ len = stbi__get8(s);
+ if (len == 4) {
+ g->eflags = stbi__get8(s);
+ g->delay = 10 * stbi__get16le(s); // delay - 1/100th of a second, saving as 1/1000ths.
+
+ // unset old transparent
+ if (g->transparent >= 0) {
+ g->pal[g->transparent][3] = 255;
+ }
+ if (g->eflags & 0x01) {
+ g->transparent = stbi__get8(s);
+ if (g->transparent >= 0) {
+ g->pal[g->transparent][3] = 0;
+ }
+ } else {
+ // don't need transparent
+ stbi__skip(s, 1);
+ g->transparent = -1;
+ }
+ } else {
+ stbi__skip(s, len);
+ break;
+ }
+ }
+ while ((len = stbi__get8(s)) != 0) {
+ stbi__skip(s, len);
+ }
+ break;
+ }
+
+ case 0x3B: // gif stream termination code
+ return (stbi_uc *) s; // using '1' causes warning on some compilers
+
+ default:
+ return stbi__errpuc("unknown code", "Corrupt GIF");
+ }
+ }
+}
+
+static void *stbi__load_gif_main(stbi__context *s, int **delays, int *x, int *y, int *z, int *comp, int req_comp)
+{
+ if (stbi__gif_test(s)) {
+ int layers = 0;
+ stbi_uc *u = 0;
+ stbi_uc *out = 0;
+ stbi_uc *two_back = 0;
+ stbi__gif g;
+ int stride;
+ memset(&g, 0, sizeof(g));
+ if (delays) {
+ *delays = 0;
+ }
+
+ do {
+ u = stbi__gif_load_next(s, &g, comp, req_comp, two_back);
+ if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
+
+ if (u) {
+ *x = g.w;
+ *y = g.h;
+ ++layers;
+ stride = g.w * g.h * 4;
+
+ if (out) {
+ out = (stbi_uc*) STBI_REALLOC( out, layers * stride );
+ if (delays) {
+ *delays = (int*) STBI_REALLOC( *delays, sizeof(int) * layers );
+ }
+ } else {
+ out = (stbi_uc*)stbi__malloc( layers * stride );
+ if (delays) {
+ *delays = (int*) stbi__malloc( layers * sizeof(int) );
+ }
+ }
+ memcpy( out + ((layers - 1) * stride), u, stride );
+ if (layers >= 2) {
+ two_back = out + (layers - 2) * stride; // canvas state from two frames back (frames are stored contiguously in out)
+ }
+
+ if (delays) {
+ (*delays)[layers - 1U] = g.delay;
+ }
+ }
+ } while (u != 0);
+
+ // free temp buffer;
+ STBI_FREE(g.out);
+ STBI_FREE(g.history);
+ STBI_FREE(g.background);
+
+ // do the final conversion after loading everything;
+ if (req_comp && req_comp != 4)
+ out = stbi__convert_format(out, 4, req_comp, layers * g.w, g.h);
+
+ *z = layers;
+ return out;
+ } else {
+ return stbi__errpuc("not GIF", "Image was not as a gif type.");
+ }
+}
+
+static void *stbi__gif_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *u = 0;
+ stbi__gif g;
+ memset(&g, 0, sizeof(g));
+
+ u = stbi__gif_load_next(s, &g, comp, req_comp, 0);
+ if (u == (stbi_uc *) s) u = 0; // end of animated gif marker
+ if (u) {
+ *x = g.w;
+ *y = g.h;
+
+ // moved conversion to after successful load so that the same
+ // can be done for multiple frames.
+ if (req_comp && req_comp != 4)
+ u = stbi__convert_format(u, 4, req_comp, g.w, g.h);
+ }
+
+ // free buffers needed for multiple frame loading;
+ STBI_FREE(g.history);
+ STBI_FREE(g.background);
+
+ return u;
+}
+
+static int stbi__gif_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ return stbi__gif_info_raw(s,x,y,comp);
+}
+#endif
+
+// *************************************************************************************************
+// Radiance RGBE HDR loader
+// originally by Nicolas Schulz
+#ifndef STBI_NO_HDR
+static int stbi__hdr_test_core(stbi__context *s, const char *signature)
+{
+ int i;
+ for (i=0; signature[i]; ++i)
+ if (stbi__get8(s) != signature[i])
+ return 0;
+ stbi__rewind(s);
+ return 1;
+}
+
+static int stbi__hdr_test(stbi__context* s)
+{
+ int r = stbi__hdr_test_core(s, "#?RADIANCE\n");
+ stbi__rewind(s);
+ if(!r) {
+ r = stbi__hdr_test_core(s, "#?RGBE\n");
+ stbi__rewind(s);
+ }
+ return r;
+}
+
+#define STBI__HDR_BUFLEN 1024
+static char *stbi__hdr_gettoken(stbi__context *z, char *buffer)
+{
+ int len=0;
+ char c = '\0';
+
+ c = (char) stbi__get8(z);
+
+ while (!stbi__at_eof(z) && c != '\n') {
+ buffer[len++] = c;
+ if (len == STBI__HDR_BUFLEN-1) {
+ // flush to end of line
+ while (!stbi__at_eof(z) && stbi__get8(z) != '\n')
+ ;
+ break;
+ }
+ c = (char) stbi__get8(z);
+ }
+
+ buffer[len] = 0;
+ return buffer;
+}
+
+static void stbi__hdr_convert(float *output, stbi_uc *input, int req_comp)
+{
+ if ( input[3] != 0 ) {
+ float f1;
+ // Exponent
+ f1 = (float) ldexp(1.0f, input[3] - (int)(128 + 8));
+ if (req_comp <= 2)
+ output[0] = (input[0] + input[1] + input[2]) * f1 / 3;
+ else {
+ output[0] = input[0] * f1;
+ output[1] = input[1] * f1;
+ output[2] = input[2] * f1;
+ }
+ if (req_comp == 2) output[1] = 1;
+ if (req_comp == 4) output[3] = 1;
+ } else {
+ switch (req_comp) {
+ case 4: output[3] = 1; /* fallthrough */
+ case 3: output[0] = output[1] = output[2] = 0;
+ break;
+ case 2: output[1] = 1; /* fallthrough */
+ case 1: output[0] = 0;
+ break;
+ }
+ }
+}
+
+static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ char buffer[STBI__HDR_BUFLEN];
+ char *token;
+ int valid = 0;
+ int width, height;
+ stbi_uc *scanline;
+ float *hdr_data;
+ int len;
+ unsigned char count, value;
+ int i, j, k, c1,c2, z;
+ const char *headerToken;
+ STBI_NOTUSED(ri);
+
+ // Check identifier
+ headerToken = stbi__hdr_gettoken(s,buffer);
+ if (strcmp(headerToken, "#?RADIANCE") != 0 && strcmp(headerToken, "#?RGBE") != 0)
+ return stbi__errpf("not HDR", "Corrupt HDR image");
+
+ // Parse header
+ for(;;) {
+ token = stbi__hdr_gettoken(s,buffer);
+ if (token[0] == 0) break;
+ if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
+ }
+
+ if (!valid) return stbi__errpf("unsupported format", "Unsupported HDR format");
+
+ // Parse width and height
+ // can't use sscanf() if we're not using stdio!
+ token = stbi__hdr_gettoken(s,buffer);
+ if (strncmp(token, "-Y ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
+ token += 3;
+ height = (int) strtol(token, &token, 10);
+ while (*token == ' ') ++token;
+ if (strncmp(token, "+X ", 3)) return stbi__errpf("unsupported data layout", "Unsupported HDR format");
+ token += 3;
+ width = (int) strtol(token, NULL, 10);
+
+ *x = width;
+ *y = height;
+
+ if (comp) *comp = 3;
+ if (req_comp == 0) req_comp = 3;
+
+ if (!stbi__mad4sizes_valid(width, height, req_comp, sizeof(float), 0))
+ return stbi__errpf("too large", "HDR image is too large");
+
+ // Read data
+ hdr_data = (float *) stbi__malloc_mad4(width, height, req_comp, sizeof(float), 0);
+ if (!hdr_data)
+ return stbi__errpf("outofmem", "Out of memory");
+
+ // Load image data
+ // image data is stored as some number of scan lines
+ if ( width < 8 || width >= 32768) {
+ // Read flat data
+ for (j=0; j < height; ++j) {
+ for (i=0; i < width; ++i) {
+ stbi_uc rgbe[4];
+ main_decode_loop:
+ stbi__getn(s, rgbe, 4);
+ stbi__hdr_convert(hdr_data + j * width * req_comp + i * req_comp, rgbe, req_comp);
+ }
+ }
+ } else {
+ // Read RLE-encoded data
+ scanline = NULL;
+
+ for (j = 0; j < height; ++j) {
+ c1 = stbi__get8(s);
+ c2 = stbi__get8(s);
+ len = stbi__get8(s);
+ if (c1 != 2 || c2 != 2 || (len & 0x80)) {
+ // not run-length encoded, so we have to actually use THIS data as a decoded
+ // pixel (note this can't be a valid pixel--one of RGB must be >= 128)
+ stbi_uc rgbe[4];
+ rgbe[0] = (stbi_uc) c1;
+ rgbe[1] = (stbi_uc) c2;
+ rgbe[2] = (stbi_uc) len;
+ rgbe[3] = (stbi_uc) stbi__get8(s);
+ stbi__hdr_convert(hdr_data, rgbe, req_comp);
+ i = 1;
+ j = 0;
+ STBI_FREE(scanline);
+ goto main_decode_loop; // yes, this makes no sense
+ }
+ len <<= 8;
+ len |= stbi__get8(s);
+ if (len != width) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("invalid decoded scanline length", "corrupt HDR"); }
+ if (scanline == NULL) {
+ scanline = (stbi_uc *) stbi__malloc_mad2(width, 4, 0);
+ if (!scanline) {
+ STBI_FREE(hdr_data);
+ return stbi__errpf("outofmem", "Out of memory");
+ }
+ }
+
+ for (k = 0; k < 4; ++k) {
+ int nleft;
+ i = 0;
+ while ((nleft = width - i) > 0) {
+ count = stbi__get8(s);
+ if (count > 128) {
+ // Run
+ value = stbi__get8(s);
+ count -= 128;
+ if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
+ for (z = 0; z < count; ++z)
+ scanline[i++ * 4 + k] = value;
+ } else {
+ // Dump
+ if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); }
+ for (z = 0; z < count; ++z)
+ scanline[i++ * 4 + k] = stbi__get8(s);
+ }
+ }
+ }
+ for (i=0; i < width; ++i)
+ stbi__hdr_convert(hdr_data+(j*width + i)*req_comp, scanline + i*4, req_comp);
+ }
+ if (scanline)
+ STBI_FREE(scanline);
+ }
+
+ return hdr_data;
+}
+
+static int stbi__hdr_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ char buffer[STBI__HDR_BUFLEN];
+ char *token;
+ int valid = 0;
+ int dummy;
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ if (stbi__hdr_test(s) == 0) {
+ stbi__rewind( s );
+ return 0;
+ }
+
+ for(;;) {
+ token = stbi__hdr_gettoken(s,buffer);
+ if (token[0] == 0) break;
+ if (strcmp(token, "FORMAT=32-bit_rle_rgbe") == 0) valid = 1;
+ }
+
+ if (!valid) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token = stbi__hdr_gettoken(s,buffer);
+ if (strncmp(token, "-Y ", 3)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token += 3;
+ *y = (int) strtol(token, &token, 10);
+ while (*token == ' ') ++token;
+ if (strncmp(token, "+X ", 3)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ token += 3;
+ *x = (int) strtol(token, NULL, 10);
+ *comp = 3;
+ return 1;
+}
+#endif // STBI_NO_HDR
+
+#ifndef STBI_NO_BMP
+static int stbi__bmp_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ void *p;
+ stbi__bmp_data info;
+
+ info.all_a = 255;
+ p = stbi__bmp_parse_header(s, &info);
+ stbi__rewind( s );
+ if (p == NULL)
+ return 0;
+ if (x) *x = s->img_x;
+ if (y) *y = s->img_y;
+ if (comp) *comp = info.ma ? 4 : 3;
+ return 1;
+}
+#endif
+
+#ifndef STBI_NO_PSD
+static int stbi__psd_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int channelCount, dummy, depth;
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+ if (stbi__get32be(s) != 0x38425053) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 1) {
+ stbi__rewind( s );
+ return 0;
+ }
+ stbi__skip(s, 6);
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ *y = stbi__get32be(s);
+ *x = stbi__get32be(s);
+ depth = stbi__get16be(s);
+ if (depth != 8 && depth != 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 3) {
+ stbi__rewind( s );
+ return 0;
+ }
+ *comp = 4;
+ return 1;
+}
+
+static int stbi__psd_is16(stbi__context *s)
+{
+ int channelCount, depth;
+ if (stbi__get32be(s) != 0x38425053) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (stbi__get16be(s) != 1) {
+ stbi__rewind( s );
+ return 0;
+ }
+ stbi__skip(s, 6);
+ channelCount = stbi__get16be(s);
+ if (channelCount < 0 || channelCount > 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ (void) stbi__get32be(s);
+ (void) stbi__get32be(s);
+ depth = stbi__get16be(s);
+ if (depth != 16) {
+ stbi__rewind( s );
+ return 0;
+ }
+ return 1;
+}
+#endif
+
+#ifndef STBI_NO_PIC
+static int stbi__pic_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int act_comp=0,num_packets=0,chained,dummy;
+ stbi__pic_packet packets[10];
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ if (!stbi__pic_is4(s,"\x53\x80\xF6\x34")) {
+ stbi__rewind(s);
+ return 0;
+ }
+
+ stbi__skip(s, 88);
+
+ *x = stbi__get16be(s);
+ *y = stbi__get16be(s);
+ if (stbi__at_eof(s)) {
+ stbi__rewind( s);
+ return 0;
+ }
+ if ( (*x) != 0 && (1 << 28) / (*x) < (*y)) {
+ stbi__rewind( s );
+ return 0;
+ }
+
+ stbi__skip(s, 8);
+
+ do {
+ stbi__pic_packet *packet;
+
+ if (num_packets==sizeof(packets)/sizeof(packets[0]))
+ return 0;
+
+ packet = &packets[num_packets++];
+ chained = stbi__get8(s);
+ packet->size = stbi__get8(s);
+ packet->type = stbi__get8(s);
+ packet->channel = stbi__get8(s);
+ act_comp |= packet->channel;
+
+ if (stbi__at_eof(s)) {
+ stbi__rewind( s );
+ return 0;
+ }
+ if (packet->size != 8) {
+ stbi__rewind( s );
+ return 0;
+ }
+ } while (chained);
+
+ *comp = (act_comp & 0x10 ? 4 : 3);
+
+ return 1;
+}
+#endif
+
+// *************************************************************************************************
+// Portable Gray Map and Portable Pixel Map loader
+// by Ken Miller
+//
+// PGM: http://netpbm.sourceforge.net/doc/pgm.html
+// PPM: http://netpbm.sourceforge.net/doc/ppm.html
+//
+// Known limitations:
+// Does not support comments in the header section
+// Does not support ASCII image data (formats P2 and P3)
+// Does not support 16-bit-per-channel
+
+#ifndef STBI_NO_PNM
+
+static int stbi__pnm_test(stbi__context *s)
+{
+ char p, t;
+ p = (char) stbi__get8(s);
+ t = (char) stbi__get8(s);
+ if (p != 'P' || (t != '5' && t != '6')) {
+ stbi__rewind( s );
+ return 0;
+ }
+ return 1;
+}
+
+static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req_comp, stbi__result_info *ri)
+{
+ stbi_uc *out;
+ STBI_NOTUSED(ri);
+
+ if (!stbi__pnm_info(s, (int *)&s->img_x, (int *)&s->img_y, (int *)&s->img_n))
+ return 0;
+
+ *x = s->img_x;
+ *y = s->img_y;
+ if (comp) *comp = s->img_n;
+
+ if (!stbi__mad3sizes_valid(s->img_n, s->img_x, s->img_y, 0))
+ return stbi__errpuc("too large", "PNM too large");
+
+ out = (stbi_uc *) stbi__malloc_mad3(s->img_n, s->img_x, s->img_y, 0);
+ if (!out) return stbi__errpuc("outofmem", "Out of memory");
+ stbi__getn(s, out, s->img_n * s->img_x * s->img_y);
+
+ if (req_comp && req_comp != s->img_n) {
+ out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y);
+ if (out == NULL) return out; // stbi__convert_format frees input on failure
+ }
+ return out;
+}
+
+static int stbi__pnm_isspace(char c)
+{
+ return c == ' ' || c == '\t' || c == '\n' || c == '\v' || c == '\f' || c == '\r';
+}
+
+static void stbi__pnm_skip_whitespace(stbi__context *s, char *c)
+{
+ for (;;) {
+ while (!stbi__at_eof(s) && stbi__pnm_isspace(*c))
+ *c = (char) stbi__get8(s);
+
+ if (stbi__at_eof(s) || *c != '#')
+ break;
+
+ while (!stbi__at_eof(s) && *c != '\n' && *c != '\r' )
+ *c = (char) stbi__get8(s);
+ }
+}
+
+static int stbi__pnm_isdigit(char c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static int stbi__pnm_getinteger(stbi__context *s, char *c)
+{
+ int value = 0;
+
+ while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) {
+ value = value*10 + (*c - '0');
+ *c = (char) stbi__get8(s);
+ }
+
+ return value;
+}
+
+static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp)
+{
+ int maxv, dummy;
+ char c, p, t;
+
+ if (!x) x = &dummy;
+ if (!y) y = &dummy;
+ if (!comp) comp = &dummy;
+
+ stbi__rewind(s);
+
+ // Get identifier
+ p = (char) stbi__get8(s);
+ t = (char) stbi__get8(s);
+ if (p != 'P' || (t != '5' && t != '6')) {
+ stbi__rewind(s);
+ return 0;
+ }
+
+ *comp = (t == '6') ? 3 : 1; // '5' is 1-component .pgm; '6' is 3-component .ppm
+
+ c = (char) stbi__get8(s);
+ stbi__pnm_skip_whitespace(s, &c);
+
+ *x = stbi__pnm_getinteger(s, &c); // read width
+ stbi__pnm_skip_whitespace(s, &c);
+
+ *y = stbi__pnm_getinteger(s, &c); // read height
+ stbi__pnm_skip_whitespace(s, &c);
+
+ maxv = stbi__pnm_getinteger(s, &c); // read max value
+
+ if (maxv > 255)
+ return stbi__err("max value > 255", "PPM image not 8-bit");
+ else
+ return 1;
+}
+#endif
+
+static int stbi__info_main(stbi__context *s, int *x, int *y, int *comp)
+{
+ #ifndef STBI_NO_JPEG
+ if (stbi__jpeg_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PNG
+ if (stbi__png_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_GIF
+ if (stbi__gif_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_BMP
+ if (stbi__bmp_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PIC
+ if (stbi__pic_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PNM
+ if (stbi__pnm_info(s, x, y, comp)) return 1;
+ #endif
+
+ #ifndef STBI_NO_HDR
+ if (stbi__hdr_info(s, x, y, comp)) return 1;
+ #endif
+
+ // test tga last because it's a crappy test!
+ #ifndef STBI_NO_TGA
+ if (stbi__tga_info(s, x, y, comp))
+ return 1;
+ #endif
+ return stbi__err("unknown image type", "Image not of any known type, or corrupt");
+}
+
+static int stbi__is_16_main(stbi__context *s)
+{
+ #ifndef STBI_NO_PNG
+ if (stbi__png_is16(s)) return 1;
+ #endif
+
+ #ifndef STBI_NO_PSD
+ if (stbi__psd_is16(s)) return 1;
+ #endif
+
+ return 0;
+}
+
+#ifndef STBI_NO_STDIO
+STBIDEF int stbi_info(char const *filename, int *x, int *y, int *comp)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result;
+ if (!f) return stbi__err("can't fopen", "Unable to open file");
+ result = stbi_info_from_file(f, x, y, comp);
+ fclose(f);
+ return result;
+}
+
+STBIDEF int stbi_info_from_file(FILE *f, int *x, int *y, int *comp)
+{
+ int r;
+ stbi__context s;
+ long pos = ftell(f);
+ stbi__start_file(&s, f);
+ r = stbi__info_main(&s,x,y,comp);
+ fseek(f,pos,SEEK_SET);
+ return r;
+}
+
+STBIDEF int stbi_is_16_bit(char const *filename)
+{
+ FILE *f = stbi__fopen(filename, "rb");
+ int result;
+ if (!f) return stbi__err("can't fopen", "Unable to open file");
+ result = stbi_is_16_bit_from_file(f);
+ fclose(f);
+ return result;
+}
+
+STBIDEF int stbi_is_16_bit_from_file(FILE *f)
+{
+ int r;
+ stbi__context s;
+ long pos = ftell(f);
+ stbi__start_file(&s, f);
+ r = stbi__is_16_main(&s);
+ fseek(f,pos,SEEK_SET);
+ return r;
+}
+#endif // !STBI_NO_STDIO
+
+STBIDEF int stbi_info_from_memory(stbi_uc const *buffer, int len, int *x, int *y, int *comp)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__info_main(&s,x,y,comp);
+}
+
+STBIDEF int stbi_info_from_callbacks(stbi_io_callbacks const *c, void *user, int *x, int *y, int *comp)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
+ return stbi__info_main(&s,x,y,comp);
+}
+
+STBIDEF int stbi_is_16_bit_from_memory(stbi_uc const *buffer, int len)
+{
+ stbi__context s;
+ stbi__start_mem(&s,buffer,len);
+ return stbi__is_16_main(&s);
+}
+
+STBIDEF int stbi_is_16_bit_from_callbacks(stbi_io_callbacks const *c, void *user)
+{
+ stbi__context s;
+ stbi__start_callbacks(&s, (stbi_io_callbacks *) c, user);
+ return stbi__is_16_main(&s);
+}
+
+#endif // STB_IMAGE_IMPLEMENTATION
+
+/*
+ revision history:
+ 2.19 (2018-02-11) fix warning
+ 2.18 (2018-01-30) fix warnings
+ 2.17 (2018-01-29) change stbi__shiftsigned to avoid clang -O2 bug
+ 1-bit BMP
+ *_is_16_bit api
+ avoid warnings
+ 2.16 (2017-07-23) all functions have 16-bit variants;
+ STBI_NO_STDIO works again;
+ compilation fixes;
+ fix rounding in unpremultiply;
+ optimize vertical flip;
+ disable raw_len validation;
+ documentation fixes
+ 2.15 (2017-03-18) fix png-1,2,4 bug; now all Imagenet JPGs decode;
+ warning fixes; disable run-time SSE detection on gcc;
+ uniform handling of optional "return" values;
+ thread-safe initialization of zlib tables
+ 2.14 (2017-03-03) remove deprecated STBI_JPEG_OLD; fixes for Imagenet JPGs
+ 2.13 (2016-11-29) add 16-bit API, only supported for PNG right now
+ 2.12 (2016-04-02) fix typo in 2.11 PSD fix that caused crashes
+ 2.11 (2016-04-02) allocate large structures on the stack
+ remove white matting for transparent PSD
+ fix reported channel count for PNG & BMP
+ re-enable SSE2 in non-gcc 64-bit
+ support RGB-formatted JPEG
+ read 16-bit PNGs (only as 8-bit)
+ 2.10 (2016-01-22) avoid warning introduced in 2.09 by STBI_REALLOC_SIZED
+ 2.09 (2016-01-16) allow comments in PNM files
+ 16-bit-per-pixel TGA (not bit-per-component)
+ info() for TGA could break due to .hdr handling
+ info() for BMP now shares code instead of sloppy parse
+ can use STBI_REALLOC_SIZED if allocator doesn't support realloc
+ code cleanup
+ 2.08 (2015-09-13) fix to 2.07 cleanup, reading RGB PSD as RGBA
+ 2.07 (2015-09-13) fix compiler warnings
+ partial animated GIF support
+ limited 16-bpc PSD support
+ #ifdef unused functions
+ bug with < 92 byte PIC,PNM,HDR,TGA
+ 2.06 (2015-04-19) fix bug where PSD returns wrong '*comp' value
+ 2.05 (2015-04-19) fix bug in progressive JPEG handling, fix warning
+ 2.04 (2015-04-15) try to re-enable SIMD on MinGW 64-bit
+ 2.03 (2015-04-12) extra corruption checking (mmozeiko)
+ stbi_set_flip_vertically_on_load (nguillemot)
+ fix NEON support; fix mingw support
+ 2.02 (2015-01-19) fix incorrect assert, fix warning
+ 2.01 (2015-01-17) fix various warnings; suppress SIMD on gcc 32-bit without -msse2
+ 2.00b (2014-12-25) fix STBI_MALLOC in progressive JPEG
+ 2.00 (2014-12-25) optimize JPG, including x86 SSE2 & NEON SIMD (ryg)
+ progressive JPEG (stb)
+ PGM/PPM support (Ken Miller)
+ STBI_MALLOC,STBI_REALLOC,STBI_FREE
+ GIF bugfix -- seemingly never worked
+ STBI_NO_*, STBI_ONLY_*
+ 1.48 (2014-12-14) fix incorrectly-named assert()
+ 1.47 (2014-12-14) 1/2/4-bit PNG support, both direct and paletted (Omar Cornut & stb)
+ optimize PNG (ryg)
+ fix bug in interlaced PNG with user-specified channel count (stb)
+ 1.46 (2014-08-26)
+ fix broken tRNS chunk (colorkey-style transparency) in non-paletted PNG
+ 1.45 (2014-08-16)
+ fix MSVC-ARM internal compiler error by wrapping malloc
+ 1.44 (2014-08-07)
+ various warning fixes from Ronny Chevalier
+ 1.43 (2014-07-15)
+ fix MSVC-only compiler problem in code changed in 1.42
+ 1.42 (2014-07-09)
+ don't define _CRT_SECURE_NO_WARNINGS (affects user code)
+ fixes to stbi__cleanup_jpeg path
+ added STBI_ASSERT to avoid requiring assert.h
+ 1.41 (2014-06-25)
+ fix search&replace from 1.36 that messed up comments/error messages
+ 1.40 (2014-06-22)
+ fix gcc struct-initialization warning
+ 1.39 (2014-06-15)
+ fix to TGA optimization when req_comp != number of components in TGA;
+ fix to GIF loading because BMP wasn't rewinding (whoops, no GIFs in my test suite)
+ add support for BMP version 5 (more ignored fields)
+ 1.38 (2014-06-06)
+ suppress MSVC warnings on integer casts truncating values
+ fix accidental rename of 'skip' field of I/O
+ 1.37 (2014-06-04)
+ remove duplicate typedef
+ 1.36 (2014-06-03)
+ convert to header file single-file library
+ if de-iphone isn't set, load iphone images color-swapped instead of returning NULL
+ 1.35 (2014-05-27)
+ various warnings
+ fix broken STBI_SIMD path
+ fix bug where stbi_load_from_file no longer left file pointer in correct place
+ fix broken non-easy path for 32-bit BMP (possibly never used)
+ TGA optimization by Arseny Kapoulkine
+ 1.34 (unknown)
+ use STBI_NOTUSED in stbi__resample_row_generic(), fix one more leak in tga failure case
+ 1.33 (2011-07-14)
+ make stbi_is_hdr work in STBI_NO_HDR (as specified), minor compiler-friendly improvements
+ 1.32 (2011-07-13)
+ support for "info" function for all supported filetypes (SpartanJ)
+ 1.31 (2011-06-20)
+ a few more leak fixes, bug in PNG handling (SpartanJ)
+ 1.30 (2011-06-11)
+ added ability to load files via callbacks to accommodate custom input streams (Ben Wenger)
+ removed deprecated format-specific test/load functions
+ removed support for installable file formats (stbi_loader) -- would have been broken for IO callbacks anyway
+ error cases in bmp and tga give messages and don't leak (Raymond Barbiero, grisha)
+ fix inefficiency in decoding 32-bit BMP (David Woo)
+ 1.29 (2010-08-16)
+ various warning fixes from Aurelien Pocheville
+ 1.28 (2010-08-01)
+ fix bug in GIF palette transparency (SpartanJ)
+ 1.27 (2010-08-01)
+ cast-to-stbi_uc to fix warnings
+ 1.26 (2010-07-24)
+ fix bug in file buffering for PNG reported by SpartanJ
+ 1.25 (2010-07-17)
+ refix trans_data warning (Won Chun)
+ 1.24 (2010-07-12)
+ perf improvements reading from files on platforms with lock-heavy fgetc()
+ minor perf improvements for jpeg
+ deprecated type-specific functions so we'll get feedback if they're needed
+ attempt to fix trans_data warning (Won Chun)
+ 1.23 fixed bug in iPhone support
+ 1.22 (2010-07-10)
+ removed image *writing* support
+ stbi_info support from Jetro Lauha
+ GIF support from Jean-Marc Lienher
+ iPhone PNG-extensions from James Brown
+ warning-fixes from Nicolas Schulz and Janez Zemva (i.e. Janez Žemva)
+ 1.21 fix use of 'stbi_uc' in header (reported by jon blow)
+ 1.20 added support for Softimage PIC, by Tom Seddon
+ 1.19 bug in interlaced PNG corruption check (found by ryg)
+ 1.18 (2008-08-02)
+ fix a threading bug (local mutable static)
+ 1.17 support interlaced PNG
+ 1.16 major bugfix - stbi__convert_format converted one too many pixels
+ 1.15 initialize some fields for thread safety
+ 1.14 fix threadsafe conversion bug
+ header-file-only version (#define STBI_HEADER_FILE_ONLY before including)
+ 1.13 threadsafe
+ 1.12 const qualifiers in the API
+ 1.11 Support installable IDCT, colorspace conversion routines
+ 1.10 Fixes for 64-bit (don't use "unsigned long")
+ optimized upsampling by Fabian "ryg" Giesen
+ 1.09 Fix format-conversion for PSD code (bad global variables!)
+ 1.08 Thatcher Ulrich's PSD code integrated by Nicolas Schulz
+ 1.07 attempt to fix C++ warning/errors again
+ 1.06 attempt to fix C++ warning/errors again
+ 1.05 fix TGA loading to return correct *comp and use good luminance calc
+ 1.04 default float alpha is 1, not 255; use 'void *' for stbi_image_free
+ 1.03 bugfixes to STBI_NO_STDIO, STBI_NO_HDR
+ 1.02 support for (subset of) HDR files, float interface for preferred access to them
+ 1.01 fix bug: possible bug in handling right-side up bmps... not sure
+ fix bug: the stbi__bmp_load() and stbi__tga_load() functions didn't work at all
+ 1.00 interface to zlib that skips zlib header
+ 0.99 correct handling of alpha in palette
+ 0.98 TGA loader by lonesock; dynamically add loaders (untested)
+ 0.97 jpeg errors on too large a file; also catch another malloc failure
+ 0.96 fix detection of invalid v value - particleman@mollyrocket forum
+ 0.95 during header scan, seek to markers in case of padding
+ 0.94 STBI_NO_STDIO to disable stdio usage; rename all #defines the same
+ 0.93 handle jpegtran output; verbose errors
+ 0.92 read 4,8,16,24,32-bit BMP files of several formats
+ 0.91 output 24-bit Windows 3.0 BMP files
+ 0.90 fix a few more warnings; bump version number to approach 1.0
+ 0.61 bugfixes due to Marc LeBlanc, Christopher Lloyd
+ 0.60 fix compiling as c++
+ 0.59 fix warnings: merge Dave Moore's -Wall fixes
+ 0.58 fix bug: zlib uncompressed mode len/nlen was wrong endian
+ 0.57 fix bug: jpg last huffman symbol before marker was >9 bits but less than 16 available
+ 0.56 fix bug: zlib uncompressed mode len vs. nlen
+ 0.55 fix bug: restart_interval not initialized to 0
+ 0.54 allow NULL for 'int *comp'
+ 0.53 fix bug in png 3->4; speedup png decoding
+ 0.52 png handles req_comp=3,4 directly; minor cleanup; jpeg comments
+ 0.51 obey req_comp requests, 1-component jpegs return as 1-component,
+ on 'test' only check type, not whether we support this variant
+ 0.50 (2006-11-19)
+ first released version
+*/
+
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image_write.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image_write.h
new file mode 100644
index 00000000000..c05e95812b9
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/stb_image_write.h
@@ -0,0 +1,1568 @@
+/* stb_image_write - v1.09 - public domain - http://nothings.org/stb/stb_image_write.h
+ writes out PNG/BMP/TGA/JPEG/HDR images to C stdio - Sean Barrett 2010-2015
+ no warranty implied; use at your own risk
+
+ Before #including,
+
+ #define STB_IMAGE_WRITE_IMPLEMENTATION
+
+ in the file that you want to have the implementation.
+
+ Will probably not work correctly with strict-aliasing optimizations.
+
+ If using a modern Microsoft Compiler, non-safe versions of CRT calls may cause
+ compilation warnings or even errors. To avoid this, also before #including,
+
+ #define STBI_MSC_SECURE_CRT
+
+ABOUT:
+
+ This header file is a library for writing images to C stdio. It could be
+ adapted to write to memory or a general streaming interface; let me know.
+
+ The PNG output is not optimal; it is 20-50% larger than the file
+ written by a decent optimizing implementation; though providing a custom
+ zlib compress function (see STBIW_ZLIB_COMPRESS) can mitigate that.
+ This library is designed for source code compactness and simplicity,
+ not optimal image file size or run-time performance.
+
+BUILDING:
+
+ You can #define STBIW_ASSERT(x) before the #include to avoid using assert.h.
+ You can #define STBIW_MALLOC(), STBIW_REALLOC(), and STBIW_FREE() to replace
+ malloc,realloc,free.
+ You can #define STBIW_MEMMOVE() to replace memmove()
+ You can #define STBIW_ZLIB_COMPRESS to use a custom zlib-style compress function
+ for PNG compression (instead of the builtin one), it must have the following signature:
+ unsigned char * my_compress(unsigned char *data, int data_len, int *out_len, int quality);
+ The returned data will be freed with STBIW_FREE() (free() by default),
+ so it must be heap allocated with STBIW_MALLOC() (malloc() by default).
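+
+ As a rough sketch (my_compress is a hypothetical user-supplied function matching
+ the signature above, not something provided by this library), wiring in a custom
+ compressor looks like:
+
+    unsigned char *my_compress(unsigned char *data, int data_len, int *out_len, int quality);
+
+    #define STBIW_ZLIB_COMPRESS   my_compress
+    #define STB_IMAGE_WRITE_IMPLEMENTATION
+    #include "stb_image_write.h"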
+
+USAGE:
+
+ There are five functions, one for each image file format:
+
+ int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes);
+ int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data);
+ int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data);
+ int stbi_write_jpg(char const *filename, int w, int h, int comp, const void *data, int quality);
+ int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data);
+
+ void stbi_flip_vertically_on_write(int flag); // flag is non-zero to flip data vertically
+
+ There are also five equivalent functions that use an arbitrary write function. You are
+ expected to open/close your file-equivalent before and after calling these:
+
+ int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes);
+ int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+ int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+ int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data);
+ int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality);
+
+ where the callback is:
+ void stbi_write_func(void *context, void *data, int size);
+
+ You can configure it with these global variables:
+ int stbi_write_tga_with_rle; // defaults to true; set to 0 to disable RLE
+ int stbi_write_png_compression_level; // defaults to 8; set to higher for more compression
+ int stbi_write_force_png_filter; // defaults to -1; set to 0..5 to force a filter mode
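+
+ For example (the values are only illustrative), these globals can be set once
+ before any write call:
+
+    stbi_write_tga_with_rle = 0;            // write uncompressed TGA
+    stbi_write_png_compression_level = 9;   // trade speed for smaller PNG output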
+
+
+ You can define STBI_WRITE_NO_STDIO to disable the file variant of these
+ functions, so the library will not use stdio.h at all. However, this will
+ also disable HDR writing, because it requires stdio for formatted output.
+
+ Each function returns 0 on failure and non-0 on success.
+
+ The functions create an image file defined by the parameters. The image
+ is a rectangle of pixels stored from left-to-right, top-to-bottom.
+ Each pixel contains 'comp' channels of data stored interleaved with 8-bits
+ per channel, in the following order: 1=Y, 2=YA, 3=RGB, 4=RGBA. (Y is
+ monochrome color.) The rectangle is 'w' pixels wide and 'h' pixels tall.
+ The *data pointer points to the first byte of the top-left-most pixel.
+ For PNG, "stride_in_bytes" is the distance in bytes from the first byte of
+ a row of pixels to the first byte of the next row of pixels.
+
+ PNG creates output files with the same number of components as the input.
+ The BMP format expands Y to RGB in the file format and does not
+ output alpha.
+
+ PNG supports writing rectangles of data even when the bytes storing rows of
+ data are not consecutive in memory (e.g. sub-rectangles of a larger image),
+ by supplying the stride between the beginning of adjacent rows. The other
+ formats do not. (Thus you cannot write a native-format BMP through the BMP
+ writer, both because it is in BGR order and because it may have padding
+ at the end of the line.)
+
+ PNG allows you to set the deflate compression level by setting the global
+ variable 'stbi_write_png_compression_level' (it defaults to 8).
+
+ HDR expects linear float data. Since the format is always 32-bit rgb(e)
+ data, alpha (if provided) is discarded, and for monochrome data it is
+ replicated across all three channels.
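+
+ A hedged sketch of a monochrome HDR write (the 'gray' buffer is illustrative):
+
+    float gray[8*8];                           // 8x8 single-channel linear data
+    /* ... fill gray ... */
+    stbi_write_hdr("out.hdr", 8, 8, 1, gray);  // replicated to RGB(E) on write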
+
+ TGA supports RLE or non-RLE compressed data. To use non-RLE-compressed
+ data, set the global variable 'stbi_write_tga_with_rle' to 0.
+
+ JPEG ignores alpha channels in input data; quality is between 1 and 100.
+ Higher quality looks better but results in a bigger image.
+ JPEG baseline (no JPEG progressive).
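+
+ A minimal end-to-end sketch (the 2x2 RGBA buffer is purely illustrative):
+
+    unsigned char pixels[2*2*4] = {
+       255,0,0,255,     0,255,0,255,        // red,  green
+       0,0,255,255,     255,255,255,255     // blue, white
+    };
+    stbi_write_png("out.png", 2, 2, 4, pixels, 2*4);   // stride = width * comp bytes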
+
+CREDITS:
+
+
+ Sean Barrett - PNG/BMP/TGA
+ Baldur Karlsson - HDR
+ Jean-Sebastien Guay - TGA monochrome
+ Tim Kelsey - misc enhancements
+ Alan Hickman - TGA RLE
+ Emmanuel Julien - initial file IO callback implementation
+ Jon Olick - original jo_jpeg.cpp code
+ Daniel Gibson - integrate JPEG, allow external zlib
+ Aarni Koskela - allow choosing PNG filter
+
+ bugfixes:
+ github:Chribba
+ Guillaume Chereau
+ github:jry2
+ github:romigrou
+ Sergio Gonzalez
+ Jonas Karlsson
+ Filip Wasil
+ Thatcher Ulrich
+ github:poppolopoppo
+ Patrick Boettcher
+ github:xeekworx
+ Cap Petschulat
+ Simon Rodriguez
+ Ivan Tikhonov
+ github:ignotion
+ Adam Schackart
+
+LICENSE
+
+ See end of file for license information.
+
+*/
+
+#ifndef INCLUDE_STB_IMAGE_WRITE_H
+#define INCLUDE_STB_IMAGE_WRITE_H
+
+// if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline'
+#ifndef STBIWDEF
+#ifdef STB_IMAGE_WRITE_STATIC
+#define STBIWDEF static
+#else
+#ifdef __cplusplus
+#define STBIWDEF extern "C"
+#else
+#define STBIWDEF extern
+#endif
+#endif
+#endif
+
+#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations
+extern int stbi_write_tga_with_rle;
+extern int stbi_write_png_compression_level;
+extern int stbi_write_force_png_filter;
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality);
+#endif
+
+typedef void stbi_write_func(void *context, void *data, int size);
+
+STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes);
+STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data);
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data);
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality);
+
+STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean);
+
+#endif//INCLUDE_STB_IMAGE_WRITE_H
+
+#ifdef STB_IMAGE_WRITE_IMPLEMENTATION
+
+#ifdef _WIN32
+ #ifndef _CRT_SECURE_NO_WARNINGS
+ #define _CRT_SECURE_NO_WARNINGS
+ #endif
+ #ifndef _CRT_NONSTDC_NO_DEPRECATE
+ #define _CRT_NONSTDC_NO_DEPRECATE
+ #endif
+#endif
+
+#ifndef STBI_WRITE_NO_STDIO
+#include <stdio.h>
+#endif // STBI_WRITE_NO_STDIO
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED))
+// ok
+#elif !defined(STBIW_MALLOC) && !defined(STBIW_FREE) && !defined(STBIW_REALLOC) && !defined(STBIW_REALLOC_SIZED)
+// ok
+#else
+#error "Must define all or none of STBIW_MALLOC, STBIW_FREE, and STBIW_REALLOC (or STBIW_REALLOC_SIZED)."
+#endif
+
+#ifndef STBIW_MALLOC
+#define STBIW_MALLOC(sz) malloc(sz)
+#define STBIW_REALLOC(p,newsz) realloc(p,newsz)
+#define STBIW_FREE(p) free(p)
+#endif
+
+#ifndef STBIW_REALLOC_SIZED
+#define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz)
+#endif
+
+
+#ifndef STBIW_MEMMOVE
+#define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz)
+#endif
+
+
+#ifndef STBIW_ASSERT
+#include <assert.h>
+#define STBIW_ASSERT(x) assert(x)
+#endif
+
+#define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff)
+
+#ifdef STB_IMAGE_WRITE_STATIC
+static int stbi__flip_vertically_on_write=0;
+static int stbi_write_png_compression_level = 8;
+static int stbi_write_tga_with_rle = 1;
+static int stbi_write_force_png_filter = -1;
+#else
+int stbi_write_png_compression_level = 8;
+int stbi__flip_vertically_on_write=0;
+int stbi_write_tga_with_rle = 1;
+int stbi_write_force_png_filter = -1;
+#endif
+
+STBIWDEF void stbi_flip_vertically_on_write(int flag)
+{
+ stbi__flip_vertically_on_write = flag;
+}
+
+typedef struct
+{
+ stbi_write_func *func;
+ void *context;
+} stbi__write_context;
+
+// initialize a callback-based context
+static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context)
+{
+ s->func = c;
+ s->context = context;
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+
+static void stbi__stdio_write(void *context, void *data, int size)
+{
+ fwrite(data,1,size,(FILE*) context);
+}
+
+static int stbi__start_write_file(stbi__write_context *s, const char *filename)
+{
+ FILE *f;
+#ifdef STBI_MSC_SECURE_CRT
+ if (fopen_s(&f, filename, "wb"))
+ f = NULL;
+#else
+ f = fopen(filename, "wb");
+#endif
+ stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f);
+ return f != NULL;
+}
+
+static void stbi__end_write_file(stbi__write_context *s)
+{
+ fclose((FILE *)s->context);
+}
+
+#endif // !STBI_WRITE_NO_STDIO
+
+typedef unsigned int stbiw_uint32;
+typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1];
+
+static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v)
+{
+ while (*fmt) {
+ switch (*fmt++) {
+ case ' ': break;
+ case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int));
+ s->func(s->context,&x,1);
+ break; }
+ case '2': { int x = va_arg(v,int);
+ unsigned char b[2];
+ b[0] = STBIW_UCHAR(x);
+ b[1] = STBIW_UCHAR(x>>8);
+ s->func(s->context,b,2);
+ break; }
+ case '4': { stbiw_uint32 x = va_arg(v,int);
+ unsigned char b[4];
+ b[0]=STBIW_UCHAR(x);
+ b[1]=STBIW_UCHAR(x>>8);
+ b[2]=STBIW_UCHAR(x>>16);
+ b[3]=STBIW_UCHAR(x>>24);
+ s->func(s->context,b,4);
+ break; }
+ default:
+ STBIW_ASSERT(0);
+ return;
+ }
+ }
+}
+
+static void stbiw__writef(stbi__write_context *s, const char *fmt, ...)
+{
+ va_list v;
+ va_start(v, fmt);
+ stbiw__writefv(s, fmt, v);
+ va_end(v);
+}
+
+static void stbiw__putc(stbi__write_context *s, unsigned char c)
+{
+ s->func(s->context, &c, 1);
+}
+
+static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c)
+{
+ unsigned char arr[3];
+ arr[0] = a, arr[1] = b, arr[2] = c;
+ s->func(s->context, arr, 3);
+}
+
+static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d)
+{
+ unsigned char bg[3] = { 255, 0, 255}, px[3];
+ int k;
+
+ if (write_alpha < 0)
+ s->func(s->context, &d[comp - 1], 1);
+
+ switch (comp) {
+ case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case
+ case 1:
+ if (expand_mono)
+ stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp
+ else
+ s->func(s->context, d, 1); // monochrome TGA
+ break;
+ case 4:
+ if (!write_alpha) {
+ // composite against pink background
+ for (k = 0; k < 3; ++k)
+ px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255;
+ stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]);
+ break;
+ }
+ /* FALLTHROUGH */
+ case 3:
+ stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]);
+ break;
+ }
+ if (write_alpha > 0)
+ s->func(s->context, &d[comp - 1], 1);
+}
+
+static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono)
+{
+ stbiw_uint32 zero = 0;
+ int i,j, j_end;
+
+ if (y <= 0)
+ return;
+
+ if (stbi__flip_vertically_on_write)
+ vdir *= -1;
+
+ if (vdir < 0)
+ j_end = -1, j = y-1;
+ else
+ j_end = y, j = 0;
+
+ for (; j != j_end; j += vdir) {
+ for (i=0; i < x; ++i) {
+ unsigned char *d = (unsigned char *) data + (j*x+i)*comp;
+ stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d);
+ }
+ s->func(s->context, &zero, scanline_pad);
+ }
+}
+
+static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...)
+{
+ if (y < 0 || x < 0) {
+ return 0;
+ } else {
+ va_list v;
+ va_start(v, fmt);
+ stbiw__writefv(s, fmt, v);
+ va_end(v);
+ stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono);
+ return 1;
+ }
+}
+
+static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data)
+{
+ int pad = (-x*3) & 3;
+ return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad,
+ "11 4 22 4" "4 44 22 444444",
+ 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header
+ 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header
+}
+
+STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s;
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_bmp_core(&s, x, y, comp, data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s;
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_bmp_core(&s, x, y, comp, data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif //!STBI_WRITE_NO_STDIO
+
+static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data)
+{
+ int has_alpha = (comp == 2 || comp == 4);
+ int colorbytes = has_alpha ? comp-1 : comp;
+ int format = colorbytes < 2 ? 3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3
+
+ if (y < 0 || x < 0)
+ return 0;
+
+ if (!stbi_write_tga_with_rle) {
+ return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0,
+ "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8);
+ } else {
+ int i,j,k;
+ int jend, jdir;
+
+ stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8);
+
+ if (stbi__flip_vertically_on_write) {
+ j = 0;
+ jend = y;
+ jdir = 1;
+ } else {
+ j = y-1;
+ jend = -1;
+ jdir = -1;
+ }
+ for (; j != jend; j += jdir) {
+ unsigned char *row = (unsigned char *) data + j * x * comp;
+ int len;
+
+ for (i = 0; i < x; i += len) {
+ unsigned char *begin = row + i * comp;
+ int diff = 1;
+ len = 1;
+
+ if (i < x - 1) {
+ ++len;
+ diff = memcmp(begin, row + (i + 1) * comp, comp);
+ if (diff) {
+ const unsigned char *prev = begin;
+ for (k = i + 2; k < x && len < 128; ++k) {
+ if (memcmp(prev, row + k * comp, comp)) {
+ prev += comp;
+ ++len;
+ } else {
+ --len;
+ break;
+ }
+ }
+ } else {
+ for (k = i + 2; k < x && len < 128; ++k) {
+ if (!memcmp(begin, row + k * comp, comp)) {
+ ++len;
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ if (diff) {
+ unsigned char header = STBIW_UCHAR(len - 1);
+ s->func(s->context, &header, 1);
+ for (k = 0; k < len; ++k) {
+ stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp);
+ }
+ } else {
+ unsigned char header = STBIW_UCHAR(len - 129);
+ s->func(s->context, &header, 1);
+ stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin);
+ }
+ }
+ }
+ }
+ return 1;
+}
+
+STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s;
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_tga_core(&s, x, y, comp, (void *) data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data)
+{
+ stbi__write_context s;
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_tga_core(&s, x, y, comp, (void *) data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif
+
+// *************************************************************************************************
+// Radiance RGBE HDR writer
+// by Baldur Karlsson
+
+#define stbiw__max(a, b) ((a) > (b) ? (a) : (b))
+
+void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear)
+{
+ int exponent;
+ float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2]));
+
+ if (maxcomp < 1e-32f) {
+ rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0;
+ } else {
+ float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp;
+
+ rgbe[0] = (unsigned char)(linear[0] * normalize);
+ rgbe[1] = (unsigned char)(linear[1] * normalize);
+ rgbe[2] = (unsigned char)(linear[2] * normalize);
+ rgbe[3] = (unsigned char)(exponent + 128);
+ }
+}
+
+void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte)
+{
+ unsigned char lengthbyte = STBIW_UCHAR(length+128);
+ STBIW_ASSERT(length+128 <= 255);
+ s->func(s->context, &lengthbyte, 1);
+ s->func(s->context, &databyte, 1);
+}
+
+void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data)
+{
+ unsigned char lengthbyte = STBIW_UCHAR(length);
+ STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code
+ s->func(s->context, &lengthbyte, 1);
+ s->func(s->context, data, length);
+}
+
+void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline)
+{
+ unsigned char scanlineheader[4] = { 2, 2, 0, 0 };
+ unsigned char rgbe[4];
+ float linear[3];
+ int x;
+
+ scanlineheader[2] = (width&0xff00)>>8;
+ scanlineheader[3] = (width&0x00ff);
+
+ /* skip RLE for images too small or large */
+ if (width < 8 || width >= 32768) {
+ for (x=0; x < width; x++) {
+ switch (ncomp) {
+ case 4: /* fallthrough */
+ case 3: linear[2] = scanline[x*ncomp + 2];
+ linear[1] = scanline[x*ncomp + 1];
+ linear[0] = scanline[x*ncomp + 0];
+ break;
+ default:
+ linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0];
+ break;
+ }
+ stbiw__linear_to_rgbe(rgbe, linear);
+ s->func(s->context, rgbe, 4);
+ }
+ } else {
+ int c,r;
+ /* encode into scratch buffer */
+ for (x=0; x < width; x++) {
+ switch(ncomp) {
+ case 4: /* fallthrough */
+ case 3: linear[2] = scanline[x*ncomp + 2];
+ linear[1] = scanline[x*ncomp + 1];
+ linear[0] = scanline[x*ncomp + 0];
+ break;
+ default:
+ linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0];
+ break;
+ }
+ stbiw__linear_to_rgbe(rgbe, linear);
+ scratch[x + width*0] = rgbe[0];
+ scratch[x + width*1] = rgbe[1];
+ scratch[x + width*2] = rgbe[2];
+ scratch[x + width*3] = rgbe[3];
+ }
+
+ s->func(s->context, scanlineheader, 4);
+
+ /* RLE each component separately */
+ for (c=0; c < 4; c++) {
+ unsigned char *comp = &scratch[width*c];
+
+ x = 0;
+ while (x < width) {
+ // find first run
+ r = x;
+ while (r+2 < width) {
+ if (comp[r] == comp[r+1] && comp[r] == comp[r+2])
+ break;
+ ++r;
+ }
+ if (r+2 >= width)
+ r = width;
+ // dump up to first run
+ while (x < r) {
+ int len = r-x;
+ if (len > 128) len = 128;
+ stbiw__write_dump_data(s, len, &comp[x]);
+ x += len;
+ }
+ // if there's a run, output it
+ if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd
+ // find next byte after run
+ while (r < width && comp[r] == comp[x])
+ ++r;
+ // output run up to r
+ while (x < r) {
+ int len = r-x;
+ if (len > 127) len = 127;
+ stbiw__write_run_data(s, len, comp[x]);
+ x += len;
+ }
+ }
+ }
+ }
+ }
+}
+
+static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data)
+{
+ if (y <= 0 || x <= 0 || data == NULL)
+ return 0;
+ else {
+ // Each component is stored separately. Allocate scratch space for full output scanline.
+ unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4);
+ int i, len;
+ char buffer[128];
+ char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n";
+ s->func(s->context, header, sizeof(header)-1);
+
+#ifdef STBI_MSC_SECURE_CRT
+ len = sprintf_s(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#else
+ len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x);
+#endif
+ s->func(s->context, buffer, len);
+
+ for(i=0; i < y; i++)
+ stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? y-1-i : i));
+ STBIW_FREE(scratch);
+ return 1;
+ }
+}
+
+STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s;
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data)
+{
+ stbi__write_context s;
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif // STBI_WRITE_NO_STDIO
+
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// PNG writer
+//
+
+#ifndef STBIW_ZLIB_COMPRESS
+// stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size()
+#define stbiw__sbraw(a) ((int *) (a) - 2)
+#define stbiw__sbm(a) stbiw__sbraw(a)[0]
+#define stbiw__sbn(a) stbiw__sbraw(a)[1]
+
+#define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a))
+#define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? stbiw__sbgrow(a,n) : 0)
+#define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a)))
+
+#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v))
+#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0)
+#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0)
+
+static void *stbiw__sbgrowf(void **arr, int increment, int itemsize)
+{
+ int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1;
+ void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? (stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2);
+ STBIW_ASSERT(p);
+ if (p) {
+ if (!*arr) ((int *) p)[1] = 0;
+ *arr = (void *) ((int *) p + 2);
+ stbiw__sbm(*arr) = m;
+ }
+ return *arr;
+}
+
+static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount)
+{
+ while (*bitcount >= 8) {
+ stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer));
+ *bitbuffer >>= 8;
+ *bitcount -= 8;
+ }
+ return data;
+}
+
+static int stbiw__zlib_bitrev(int code, int codebits)
+{
+ int res=0;
+ while (codebits--) {
+ res = (res << 1) | (code & 1);
+ code >>= 1;
+ }
+ return res;
+}
+
+static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit)
+{
+ int i;
+ for (i=0; i < limit && i < 258; ++i)
+ if (a[i] != b[i]) break;
+ return i;
+}
+
+static unsigned int stbiw__zhash(unsigned char *data)
+{
+ stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16);
+ hash ^= hash << 3;
+ hash += hash >> 5;
+ hash ^= hash << 4;
+ hash += hash >> 17;
+ hash ^= hash << 25;
+ hash += hash >> 6;
+ return hash;
+}
+
+#define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount))
+#define stbiw__zlib_add(code,codebits) \
+ (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush())
+#define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c)
+// default huffman tables
+#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8)
+#define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9)
+#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7)
+#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8)
+#define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n))
+#define stbiw__zlib_huffb(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n))
+
+#define stbiw__ZHASH 16384
+
+#endif // STBIW_ZLIB_COMPRESS
+
+unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality)
+{
+#ifdef STBIW_ZLIB_COMPRESS
+ // user provided a zlib compress implementation, use that
+ return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality);
+#else // use builtin
+ static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 };
+ static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 };
+ static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 };
+ static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 };
+ unsigned int bitbuf=0;
+ int i,j, bitcount=0;
+ unsigned char *out = NULL;
+ unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(char**));
+ if (hash_table == NULL)
+ return NULL;
+ if (quality < 5) quality = 5;
+
+ stbiw__sbpush(out, 0x78); // DEFLATE 32K window
+ stbiw__sbpush(out, 0x5e); // FLEVEL = 1
+ stbiw__zlib_add(1,1); // BFINAL = 1
+ stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman
+
+ for (i=0; i < stbiw__ZHASH; ++i)
+ hash_table[i] = NULL;
+
+ i=0;
+ while (i < data_len-3) {
+ // hash next 3 bytes of data to be compressed
+ int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3;
+ unsigned char *bestloc = 0;
+ unsigned char **hlist = hash_table[h];
+ int n = stbiw__sbcount(hlist);
+ for (j=0; j < n; ++j) {
+ if (hlist[j]-data > i-32768) { // if entry lies within window
+ int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i);
+ if (d >= best) best=d,bestloc=hlist[j];
+ }
+ }
+ // when hash table entry is too long, delete half the entries
+ if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) {
+ STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality);
+ stbiw__sbn(hash_table[h]) = quality;
+ }
+ stbiw__sbpush(hash_table[h],data+i);
+
+ if (bestloc) {
+ // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal
+ h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1);
+ hlist = hash_table[h];
+ n = stbiw__sbcount(hlist);
+ for (j=0; j < n; ++j) {
+ if (hlist[j]-data > i-32767) {
+ int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1);
+ if (e > best) { // if next match is better, bail on current match
+ bestloc = NULL;
+ break;
+ }
+ }
+ }
+ }
+
+ if (bestloc) {
+ int d = (int) (data+i - bestloc); // distance back
+ STBIW_ASSERT(d <= 32767 && best <= 258);
+ for (j=0; best > lengthc[j+1]-1; ++j);
+ stbiw__zlib_huff(j+257);
+ if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]);
+ for (j=0; d > distc[j+1]-1; ++j);
+ stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5);
+ if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]);
+ i += best;
+ } else {
+ stbiw__zlib_huffb(data[i]);
+ ++i;
+ }
+ }
+ // write out final bytes
+ for (;i < data_len; ++i)
+ stbiw__zlib_huffb(data[i]);
+ stbiw__zlib_huff(256); // end of block
+ // pad with 0 bits to byte boundary
+ while (bitcount)
+ stbiw__zlib_add(0,1);
+
+ for (i=0; i < stbiw__ZHASH; ++i)
+ (void) stbiw__sbfree(hash_table[i]);
+ STBIW_FREE(hash_table);
+
+ {
+ // compute adler32 on input
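+      // 5552 is the largest number of bytes that can be accumulated before the
+      // running adler32 sums must be reduced mod 65521 to avoid 32-bit overflow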
+ unsigned int s1=1, s2=0;
+ int blocklen = (int) (data_len % 5552);
+ j=0;
+ while (j < data_len) {
+ for (i=0; i < blocklen; ++i) s1 += data[j+i], s2 += s1;
+ s1 %= 65521, s2 %= 65521;
+ j += blocklen;
+ blocklen = 5552;
+ }
+ stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8));
+ stbiw__sbpush(out, STBIW_UCHAR(s2));
+ stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8));
+ stbiw__sbpush(out, STBIW_UCHAR(s1));
+ }
+ *out_len = stbiw__sbn(out);
+ // make returned pointer freeable
+ STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len);
+ return (unsigned char *) stbiw__sbraw(out);
+#endif // STBIW_ZLIB_COMPRESS
+}
+
+static unsigned int stbiw__crc32(unsigned char *buffer, int len)
+{
+ static unsigned int crc_table[256] =
+ {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+ };
+
+ unsigned int crc = ~0u;
+ int i;
+ for (i=0; i < len; ++i)
+ crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)];
+ return ~crc;
+}
+
+#define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4)
+#define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v));
+#define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3])
+
+static void stbiw__wpcrc(unsigned char **data, int len)
+{
+ unsigned int crc = stbiw__crc32(*data - len - 4, len+4);
+ stbiw__wp32(*data, crc);
+}
+
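+// PNG Paeth predictor: returns whichever of left (a), up (b) or up-left (c)
+// is closest to the initial estimate p = a + b - c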
+static unsigned char stbiw__paeth(int a, int b, int c)
+{
+ int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c);
+ if (pa <= pb && pa <= pc) return STBIW_UCHAR(a);
+ if (pb <= pc) return STBIW_UCHAR(b);
+ return STBIW_UCHAR(c);
+}
+
+// @OPTIMIZE: provide an option that always forces left-predict or paeth predict
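+// filter_type indexes mapping[]/firstmap[]: 0=None, 1=Sub, 2=Up, 3=Average, 4=Paeth;
+// the first row uses firstmap[] so no filter ever references the (missing) previous row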
+static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer)
+{
+ static int mapping[] = { 0,1,2,3,4 };
+ static int firstmap[] = { 0,1,0,5,6 };
+ int *mymap = (y != 0) ? mapping : firstmap;
+ int i;
+ int type = mymap[filter_type];
+ unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y);
+ int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes;
+ for (i = 0; i < n; ++i) {
+ switch (type) {
+ case 0: line_buffer[i] = z[i]; break;
+ case 1: line_buffer[i] = z[i]; break;
+ case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break;
+ case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break;
+ case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break;
+ case 5: line_buffer[i] = z[i]; break;
+ case 6: line_buffer[i] = z[i]; break;
+ }
+ }
+ for (i=n; i < width*n; ++i) {
+ switch (type) {
+ case 0: line_buffer[i] = z[i]; break;
+ case 1: line_buffer[i] = z[i] - z[i-n]; break;
+ case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break;
+ case 3: line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break;
+ case 4: line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break;
+ case 5: line_buffer[i] = z[i] - (z[i-n]>>1); break;
+ case 6: line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break;
+ }
+ }
+}
+
+unsigned char *stbi_write_png_to_mem(unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len)
+{
+ int force_filter = stbi_write_force_png_filter;
+ int ctype[5] = { -1, 0, 4, 2, 6 };
+ unsigned char sig[8] = { 137,80,78,71,13,10,26,10 };
+ unsigned char *out,*o, *filt, *zlib;
+ signed char *line_buffer;
+ int j,zlen;
+
+ if (stride_bytes == 0)
+ stride_bytes = x * n;
+
+ if (force_filter >= 5) {
+ force_filter = -1;
+ }
+
+ filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0;
+ line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; }
+ for (j=0; j < y; ++j) {
+ int filter_type;
+ if (force_filter > -1) {
+ filter_type = force_filter;
+ stbiw__encode_png_line(pixels, stride_bytes, x, y, j, n, force_filter, line_buffer);
+ } else { // Estimate the best filter by running through all of them:
+ int best_filter = 0, best_filter_val = 0x7fffffff, est, i;
+ for (filter_type = 0; filter_type < 5; filter_type++) {
+ stbiw__encode_png_line(pixels, stride_bytes, x, y, j, n, filter_type, line_buffer);
+
+ // Estimate the entropy of the line using this filter; the less, the better.
+ est = 0;
+ for (i = 0; i < x*n; ++i) {
+ est += abs((signed char) line_buffer[i]);
+ }
+ if (est < best_filter_val) {
+ best_filter_val = est;
+ best_filter = filter_type;
+ }
+ }
+ if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it
+ stbiw__encode_png_line(pixels, stride_bytes, x, y, j, n, best_filter, line_buffer);
+ filter_type = best_filter;
+ }
+ }
+ // when we get here, filter_type contains the filter type, and line_buffer contains the data
+ filt[j*(x*n+1)] = (unsigned char) filter_type;
+ STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n);
+ }
+ STBIW_FREE(line_buffer);
+ zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level);
+ STBIW_FREE(filt);
+ if (!zlib) return 0;
+
+ // each tag requires 12 bytes of overhead
+ out = (unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12);
+ if (!out) return 0;
+ *out_len = 8 + 12+13 + 12+zlen + 12;
+
+ o=out;
+ STBIW_MEMMOVE(o,sig,8); o+= 8;
+ stbiw__wp32(o, 13); // header length
+ stbiw__wptag(o, "IHDR");
+ stbiw__wp32(o, x);
+ stbiw__wp32(o, y);
+ *o++ = 8;
+ *o++ = STBIW_UCHAR(ctype[n]);
+ *o++ = 0;
+ *o++ = 0;
+ *o++ = 0;
+ stbiw__wpcrc(&o,13);
+
+ stbiw__wp32(o, zlen);
+ stbiw__wptag(o, "IDAT");
+ STBIW_MEMMOVE(o, zlib, zlen);
+ o += zlen;
+ STBIW_FREE(zlib);
+ stbiw__wpcrc(&o, zlen);
+
+ stbiw__wp32(o,0);
+ stbiw__wptag(o, "IEND");
+ stbiw__wpcrc(&o,0);
+
+ STBIW_ASSERT(o == out + *out_len);
+
+ return out;
+}
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes)
+{
+ FILE *f;
+ int len;
+ unsigned char *png = stbi_write_png_to_mem((unsigned char *) data, stride_bytes, x, y, comp, &len);
+ if (png == NULL) return 0;
+#ifdef STBI_MSC_SECURE_CRT
+ if (fopen_s(&f, filename, "wb"))
+ f = NULL;
+#else
+ f = fopen(filename, "wb");
+#endif
+ if (!f) { STBIW_FREE(png); return 0; }
+ fwrite(png, 1, len, f);
+ fclose(f);
+ STBIW_FREE(png);
+ return 1;
+}
+#endif
+
+STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes)
+{
+ int len;
+ unsigned char *png = stbi_write_png_to_mem((unsigned char *) data, stride_bytes, x, y, comp, &len);
+ if (png == NULL) return 0;
+ func(context, png, len);
+ STBIW_FREE(png);
+ return 1;
+}
+
+
+/* ***************************************************************************
+ *
+ * JPEG writer
+ *
+ * This is based on Jon Olick's jo_jpeg.cpp:
+ * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html
+ */
+
+static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18,
+ 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 };
+
+static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) {
+ int bitBuf = *bitBufP, bitCnt = *bitCntP;
+ bitCnt += bs[1];
+ bitBuf |= bs[0] << (24 - bitCnt);
+ while(bitCnt >= 8) {
+ unsigned char c = (bitBuf >> 16) & 255;
+ stbiw__putc(s, c);
+ if(c == 255) {
+ stbiw__putc(s, 0);
+ }
+ bitBuf <<= 8;
+ bitCnt -= 8;
+ }
+ *bitBufP = bitBuf;
+ *bitCntP = bitCnt;
+}
+
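+// Forward 8-point DCT in the style of the AAN (Arai/Agui/Nakajima) float DCT;
+// the final descaling is folded into fdtbl_Y/fdtbl_UV via the aasf[] factors below.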
+static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) {
+ float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p;
+ float z1, z2, z3, z4, z5, z11, z13;
+
+ float tmp0 = d0 + d7;
+ float tmp7 = d0 - d7;
+ float tmp1 = d1 + d6;
+ float tmp6 = d1 - d6;
+ float tmp2 = d2 + d5;
+ float tmp5 = d2 - d5;
+ float tmp3 = d3 + d4;
+ float tmp4 = d3 - d4;
+
+ // Even part
+ float tmp10 = tmp0 + tmp3; // phase 2
+ float tmp13 = tmp0 - tmp3;
+ float tmp11 = tmp1 + tmp2;
+ float tmp12 = tmp1 - tmp2;
+
+ d0 = tmp10 + tmp11; // phase 3
+ d4 = tmp10 - tmp11;
+
+ z1 = (tmp12 + tmp13) * 0.707106781f; // c4
+ d2 = tmp13 + z1; // phase 5
+ d6 = tmp13 - z1;
+
+ // Odd part
+ tmp10 = tmp4 + tmp5; // phase 2
+ tmp11 = tmp5 + tmp6;
+ tmp12 = tmp6 + tmp7;
+
+ // The rotator is modified from fig 4-8 to avoid extra negations.
+ z5 = (tmp10 - tmp12) * 0.382683433f; // c6
+ z2 = tmp10 * 0.541196100f + z5; // c2-c6
+ z4 = tmp12 * 1.306562965f + z5; // c2+c6
+ z3 = tmp11 * 0.707106781f; // c4
+
+ z11 = tmp7 + z3; // phase 5
+ z13 = tmp7 - z3;
+
+ *d5p = z13 + z2; // phase 6
+ *d3p = z13 - z2;
+ *d1p = z11 + z4;
+ *d7p = z11 - z4;
+
+ *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6;
+}
+
+static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) {
+ int tmp1 = val < 0 ? -val : val;
+ val = val < 0 ? val-1 : val;
+ bits[1] = 1;
+ while(tmp1 >>= 1) {
+ ++bits[1];
+ }
+   bits[0] = val & ((1<<bits[1])-1);
+}
+
+static int stbiw__jpg_processDU(stbi__write_context *s, int *bitBuf, int *bitCnt, float *CDU, float *fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) {
+   const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] };
+   const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] };
+   int dataOff, i, diff, end0pos;
+   int DU[64];
+
+   // DCT rows
+   for(dataOff=0; dataOff<64; dataOff+=8) {
+      stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+1], &CDU[dataOff+2], &CDU[dataOff+3], &CDU[dataOff+4], &CDU[dataOff+5], &CDU[dataOff+6], &CDU[dataOff+7]);
+   }
+   // DCT columns
+   for(dataOff=0; dataOff<8; ++dataOff) {
+      stbiw__jpg_DCT(&CDU[dataOff], &CDU[dataOff+8], &CDU[dataOff+16], &CDU[dataOff+24], &CDU[dataOff+32], &CDU[dataOff+40], &CDU[dataOff+48], &CDU[dataOff+56]);
+   }
+   // Quantize/descale/zigzag the coefficients
+   for(i=0; i<64; ++i) {
+      float v = CDU[i]*fdtbl[i];
+      DU[stbiw__jpg_ZigZag[i]] = (int)(v < 0 ? v - 0.5f : v + 0.5f);
+   }
+
+   // Encode DC
+   diff = DU[0] - DC;
+   if (diff == 0) {
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[0]);
+   } else {
+      unsigned short bits[2];
+      stbiw__jpg_calcBits(diff, bits);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTDC[bits[1]]);
+      stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+   }
+   // Encode ACs
+   end0pos = 63;
+   for(; (end0pos>0)&&(DU[end0pos]==0); --end0pos) {
+ }
+ // end0pos = first element in reverse order !=0
+ if(end0pos == 0) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ return DU[0];
+ }
+ for(i = 1; i <= end0pos; ++i) {
+ int startpos = i;
+ int nrzeroes;
+ unsigned short bits[2];
+ for (; DU[i]==0 && i<=end0pos; ++i) {
+ }
+ nrzeroes = i-startpos;
+ if ( nrzeroes >= 16 ) {
+ int lng = nrzeroes>>4;
+ int nrmarker;
+ for (nrmarker=1; nrmarker <= lng; ++nrmarker)
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes);
+ nrzeroes &= 15;
+ }
+ stbiw__jpg_calcBits(DU[i], bits);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]);
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits);
+ }
+ if(end0pos != 63) {
+ stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB);
+ }
+ return DU[0];
+}
+
+static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) {
+ // Constants that don't pollute global namespace
+ static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0};
+ static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11};
+ static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d};
+ static const unsigned char std_ac_luminance_values[] = {
+ 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,
+ 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,
+ 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,
+ 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
+ 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,
+ 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,
+ 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa
+ };
+ static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0};
+ static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11};
+ static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77};
+ static const unsigned char std_ac_chrominance_values[] = {
+ 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,
+ 0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,
+ 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,
+ 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
+ 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,
+ 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,
+ 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa
+ };
+ // Huffman tables
+ static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}};
+ static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}};
+ static const unsigned short YAC_HT[256][2] = {
+ {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0}
+ };
+ static const unsigned short UVAC_HT[256][2] = {
+ {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0},
+ {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0}
+ };
+ static const int YQT[] = {16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22,
+ 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99};
+ static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99,
+ 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99};
+ static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f,
+ 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f };
+
+ int row, col, i, k;
+ float fdtbl_Y[64], fdtbl_UV[64];
+ unsigned char YTable[64], UVTable[64];
+
+ if(!data || !width || !height || comp > 4 || comp < 1) {
+ return 0;
+ }
+
+ quality = quality ? quality : 90;
+ quality = quality < 1 ? 1 : quality > 100 ? 100 : quality;
+ quality = quality < 50 ? 5000 / quality : 200 - quality * 2;
+
+ for(i = 0; i < 64; ++i) {
+ int uvti, yti = (YQT[i]*quality+50)/100;
+ YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti);
+ uvti = (UVQT[i]*quality+50)/100;
+ UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 255 : uvti);
+ }
+
+ for(row = 0, k = 0; row < 8; ++row) {
+ for(col = 0; col < 8; ++col, ++k) {
+ fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]);
+ fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]);
+ }
+ }
+
+ // Write Headers
+ {
+ static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 };
+ static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 };
+ const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width),
+ 3,1,0x11,0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 };
+ s->func(s->context, (void*)head0, sizeof(head0));
+ s->func(s->context, (void*)YTable, sizeof(YTable));
+ stbiw__putc(s, 1);
+ s->func(s->context, UVTable, sizeof(UVTable));
+ s->func(s->context, (void*)head1, sizeof(head1));
+ s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1);
+ s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values));
+ stbiw__putc(s, 0x10); // HTYACinfo
+ s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1);
+ s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values));
+ stbiw__putc(s, 1); // HTUDCinfo
+ s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1);
+ s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values));
+ stbiw__putc(s, 0x11); // HTUACinfo
+ s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1);
+ s->func(s->context, (void*)std_ac_chrominance_values, sizeof(std_ac_chrominance_values));
+ s->func(s->context, (void*)head2, sizeof(head2));
+ }
+
+ // Encode 8x8 macroblocks
+ {
+ static const unsigned short fillBits[] = {0x7F, 7};
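+      // fillBits = seven 1-bits used to pad the last partial byte before the EOI marker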
+ const unsigned char *imageData = (const unsigned char *)data;
+ int DCY=0, DCU=0, DCV=0;
+ int bitBuf=0, bitCnt=0;
+ // comp == 2 is grey+alpha (alpha is ignored)
+ int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0;
+ int x, y, pos;
+ for(y = 0; y < height; y += 8) {
+ for(x = 0; x < width; x += 8) {
+ float YDU[64], UDU[64], VDU[64];
+ for(row = y, pos = 0; row < y+8; ++row) {
+ for(col = x; col < x+8; ++col, ++pos) {
+ int p = (stbi__flip_vertically_on_write ? height-1-row : row)*width*comp + col*comp;
+ float r, g, b;
+ if(row >= height) {
+ p -= width*comp*(row+1 - height);
+ }
+ if(col >= width) {
+ p -= comp*(col+1 - width);
+ }
+
+ r = imageData[p+0];
+ g = imageData[p+ofsG];
+ b = imageData[p+ofsB];
+ YDU[pos]=+0.29900f*r+0.58700f*g+0.11400f*b-128;
+ UDU[pos]=-0.16874f*r-0.33126f*g+0.50000f*b;
+ VDU[pos]=+0.50000f*r-0.41869f*g-0.08131f*b;
+ }
+ }
+
+ DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, YDU, fdtbl_Y, DCY, YDC_HT, YAC_HT);
+ DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, UDU, fdtbl_UV, DCU, UVDC_HT, UVAC_HT);
+ DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, VDU, fdtbl_UV, DCV, UVDC_HT, UVAC_HT);
+ }
+ }
+
+ // Do the bit alignment of the EOI marker
+ stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits);
+ }
+
+ // EOI
+ stbiw__putc(s, 0xFF);
+ stbiw__putc(s, 0xD9);
+
+ return 1;
+}
+
+STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality)
+{
+ stbi__write_context s;
+ stbi__start_write_callbacks(&s, func, context);
+ return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality);
+}
+
+
+#ifndef STBI_WRITE_NO_STDIO
+STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality)
+{
+ stbi__write_context s;
+ if (stbi__start_write_file(&s,filename)) {
+ int r = stbi_write_jpg_core(&s, x, y, comp, data, quality);
+ stbi__end_write_file(&s);
+ return r;
+ } else
+ return 0;
+}
+#endif
+
+#endif // STB_IMAGE_WRITE_IMPLEMENTATION
+
+/* Revision history
+ 1.09 (2018-02-11)
+ fix typo in zlib quality API, improve STB_I_W_STATIC in C++
+ 1.08 (2018-01-29)
+ add stbi__flip_vertically_on_write, external zlib, zlib quality, choose PNG filter
+ 1.07 (2017-07-24)
+ doc fix
+ 1.06 (2017-07-23)
+ writing JPEG (using Jon Olick's code)
+ 1.05 ???
+ 1.04 (2017-03-03)
+ monochrome BMP expansion
+ 1.03 ???
+ 1.02 (2016-04-02)
+ avoid allocating large structures on the stack
+ 1.01 (2016-01-16)
+ STBIW_REALLOC_SIZED: support allocators with no realloc support
+ avoid race-condition in crc initialization
+ minor compile issues
+ 1.00 (2015-09-14)
+ installable file IO function
+ 0.99 (2015-09-13)
+ warning fixes; TGA rle support
+ 0.98 (2015-04-08)
+ added STBIW_MALLOC, STBIW_ASSERT etc
+ 0.97 (2015-01-18)
+ fixed HDR asserts, rewrote HDR rle logic
+ 0.96 (2015-01-17)
+ add HDR output
+ fix monochrome BMP
+ 0.95 (2014-08-17)
+ add monochrome TGA output
+ 0.94 (2014-05-31)
+ rename private functions to avoid conflicts with stb_image.h
+ 0.93 (2014-05-27)
+ warning fixes
+ 0.92 (2010-08-01)
+ casts to unsigned char to fix warnings
+ 0.91 (2010-07-17)
+ first public release
+ 0.90 first internal release
+*/
+
+/*
+------------------------------------------------------------------------------
+This software is available under 2 licenses -- choose whichever you prefer.
+------------------------------------------------------------------------------
+ALTERNATIVE A - MIT License
+Copyright (c) 2017 Sean Barrett
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+------------------------------------------------------------------------------
+ALTERNATIVE B - Public Domain (www.unlicense.org)
+This is free and unencumbered software released into the public domain.
+Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
+software, either in source code form or as a compiled binary, for any purpose,
+commercial or non-commercial, and by any means.
+In jurisdictions that recognize copyright laws, the author or authors of this
+software dedicate any and all copyright interest in the software to the public
+domain. We make this dedication for the benefit of the public at large and to
+the detriment of our heirs and successors. We intend this dedication to be an
+overt act of relinquishment in perpetuity of all present and future rights to
+this software under copyright law.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+------------------------------------------------------------------------------
+*/
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.c
new file mode 100644
index 00000000000..67b6d431f6f
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.c
@@ -0,0 +1,139 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "tree.h"
+#include "utils.h"
+#include "data.h"
+
+void change_leaves(tree *t, char *leaf_list)
+{
+ list *llist = get_paths(leaf_list);
+ char **leaves = (char **)list_to_array(llist);
+ int n = llist->size;
+ int i,j;
+ int found = 0;
+ for(i = 0; i < t->n; ++i){
+ t->leaf[i] = 0;
+ for(j = 0; j < n; ++j){
+ if (0==strcmp(t->name[i], leaves[j])){
+ t->leaf[i] = 1;
+ ++found;
+ break;
+ }
+ }
+ }
+ fprintf(stderr, "Found %d leaves.\n", found);
+}
+
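+// multiplies the conditional probabilities along the path from class c up to
+// the root of the softmax tree, giving the absolute probability of c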
+float get_hierarchy_probability(float *x, tree *hier, int c, int stride)
+{
+ float p = 1;
+ while(c >= 0){
+ p = p * x[c*stride];
+ c = hier->parent[c];
+ }
+ return p;
+}
+
+void hierarchy_predictions(float *predictions, int n, tree *hier, int only_leaves, int stride)
+{
+ int j;
+ for(j = 0; j < n; ++j){
+ int parent = hier->parent[j];
+ if(parent >= 0){
+ predictions[j*stride] *= predictions[parent*stride];
+ }
+ }
+ if(only_leaves){
+ for(j = 0; j < n; ++j){
+ if(!hier->leaf[j]) predictions[j*stride] = 0;
+ }
+ }
+}
+
+int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride)
+{
+ float p = 1;
+ int group = 0;
+ int i;
+ while(1){
+ float max = 0;
+ int max_i = 0;
+
+ for(i = 0; i < hier->group_size[group]; ++i){
+ int index = i + hier->group_offset[group];
+ float val = predictions[(i + hier->group_offset[group])*stride];
+ if(val > max){
+ max_i = index;
+ max = val;
+ }
+ }
+ if(p*max > thresh){
+ p = p*max;
+ group = hier->child[max_i];
+ if(hier->child[max_i] < 0) return max_i;
+ } else if (group == 0){
+ return max_i;
+ } else {
+ return hier->parent[hier->group_offset[group]];
+ }
+ }
+ return 0;
+}
+
+tree *read_tree(char *filename)
+{
+ tree t = {0};
+    FILE *fp = fopen(filename, "r");
+    if(!fp) file_error(filename);
+
+ char *line;
+ int last_parent = -1;
+ int group_size = 0;
+ int groups = 0;
+ int n = 0;
+ while((line=fgetl(fp)) != 0){
+ char *id = calloc(256, sizeof(char));
+ int parent = -1;
+ sscanf(line, "%s %d", id, &parent);
+ t.parent = realloc(t.parent, (n+1)*sizeof(int));
+ t.parent[n] = parent;
+
+ t.child = realloc(t.child, (n+1)*sizeof(int));
+ t.child[n] = -1;
+
+ t.name = realloc(t.name, (n+1)*sizeof(char *));
+ t.name[n] = id;
+ if(parent != last_parent){
+ ++groups;
+ t.group_offset = realloc(t.group_offset, groups * sizeof(int));
+ t.group_offset[groups - 1] = n - group_size;
+ t.group_size = realloc(t.group_size, groups * sizeof(int));
+ t.group_size[groups - 1] = group_size;
+ group_size = 0;
+ last_parent = parent;
+ }
+ t.group = realloc(t.group, (n+1)*sizeof(int));
+ t.group[n] = groups;
+ if (parent >= 0) {
+ t.child[parent] = groups;
+ }
+ ++n;
+ ++group_size;
+ }
+ ++groups;
+ t.group_offset = realloc(t.group_offset, groups * sizeof(int));
+ t.group_offset[groups - 1] = n - group_size;
+ t.group_size = realloc(t.group_size, groups * sizeof(int));
+ t.group_size[groups - 1] = group_size;
+ t.n = n;
+ t.groups = groups;
+ t.leaf = calloc(n, sizeof(int));
+ int i;
+ for(i = 0; i < n; ++i) t.leaf[i] = 1;
+ for(i = 0; i < n; ++i) if(t.parent[i] >= 0) t.leaf[t.parent[i]] = 0;
+
+ fclose(fp);
+ tree *tree_ptr = calloc(1, sizeof(tree));
+ *tree_ptr = t;
+ //error(0);
+ return tree_ptr;
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.h
new file mode 100644
index 00000000000..3802b8ead80
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/tree.h
@@ -0,0 +1,8 @@
+#ifndef TREE_H
+#define TREE_H
+#include "darknet.h"
+
+int hierarchy_top_prediction(float *predictions, tree *hier, float thresh, int stride);
+float get_hierarchy_probability(float *x, tree *hier, int c, int stride);
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.c
new file mode 100644
index 00000000000..605f21f8ebd
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.c
@@ -0,0 +1,106 @@
+#include "upsample_layer.h"
+#include "cuda.h"
+#include "blas.h"
+
+#include <stdio.h>
+
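+// A negative stride configures the layer in reverse mode, i.e. as a downsample
+// layer (out_w = w/stride, out_h = h/stride).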
+layer make_upsample_layer(int batch, int w, int h, int c, int stride)
+{
+ layer l = {0};
+ l.type = UPSAMPLE;
+ l.batch = batch;
+ l.w = w;
+ l.h = h;
+ l.c = c;
+ l.out_w = w*stride;
+ l.out_h = h*stride;
+ l.out_c = c;
+ if(stride < 0){
+ stride = -stride;
+ l.reverse=1;
+ l.out_w = w/stride;
+ l.out_h = h/stride;
+ }
+ l.stride = stride;
+ l.outputs = l.out_w*l.out_h*l.out_c;
+ l.inputs = l.w*l.h*l.c;
+ l.delta = calloc(l.outputs*batch, sizeof(float));
+    l.output = calloc(l.outputs*batch, sizeof(float));
+
+ l.forward = forward_upsample_layer;
+ l.backward = backward_upsample_layer;
+ #ifdef GPU
+ l.forward_gpu = forward_upsample_layer_gpu;
+ l.backward_gpu = backward_upsample_layer_gpu;
+
+ l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
+ l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
+ #endif
+ if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
+ else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
+ return l;
+}
+
+void resize_upsample_layer(layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+ l->out_w = w*l->stride;
+ l->out_h = h*l->stride;
+ if(l->reverse){
+ l->out_w = w/l->stride;
+ l->out_h = h/l->stride;
+ }
+ l->outputs = l->out_w*l->out_h*l->out_c;
+ l->inputs = l->h*l->w*l->c;
+ l->delta = realloc(l->delta, l->outputs*l->batch*sizeof(float));
+ l->output = realloc(l->output, l->outputs*l->batch*sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->output_gpu);
+ cuda_free(l->delta_gpu);
+ l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);
+ l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
+#endif
+
+}
+
+void forward_upsample_layer(const layer l, network net)
+{
+ fill_cpu(l.outputs*l.batch, 0, l.output, 1);
+ if(l.reverse){
+ upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input);
+ }else{
+ upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
+ }
+}
+
+void backward_upsample_layer(const layer l, network net)
+{
+ if(l.reverse){
+ upsample_cpu(l.delta, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta);
+ }else{
+ upsample_cpu(net.delta, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta);
+ }
+}
+
+#ifdef GPU
+void forward_upsample_layer_gpu(const layer l, network net)
+{
+ fill_gpu(l.outputs*l.batch, 0, l.output_gpu, 1);
+ if(l.reverse){
+ upsample_gpu(l.output_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input_gpu);
+ }else{
+ upsample_gpu(net.input_gpu, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output_gpu);
+ }
+}
+
+void backward_upsample_layer_gpu(const layer l, network net)
+{
+ if(l.reverse){
+ upsample_gpu(l.delta_gpu, l.out_w, l.out_h, l.c, l.batch, l.stride, 1, l.scale, net.delta_gpu);
+ }else{
+ upsample_gpu(net.delta_gpu, l.w, l.h, l.c, l.batch, l.stride, 0, l.scale, l.delta_gpu);
+ }
+}
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.h
new file mode 100644
index 00000000000..86790d10883
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/upsample_layer.h
@@ -0,0 +1,15 @@
+#ifndef UPSAMPLE_LAYER_H
+#define UPSAMPLE_LAYER_H
+#include "darknet.h"
+
+layer make_upsample_layer(int batch, int w, int h, int c, int stride);
+void forward_upsample_layer(const layer l, network net);
+void backward_upsample_layer(const layer l, network net);
+void resize_upsample_layer(layer *l, int w, int h);
+
+#ifdef GPU
+void forward_upsample_layer_gpu(const layer l, network net);
+void backward_upsample_layer_gpu(const layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.c
new file mode 100644
index 00000000000..4e4efc2c57e
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.c
@@ -0,0 +1,723 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <assert.h>
+#include <unistd.h>
+#include <float.h>
+#include <limits.h>
+#include <time.h>
+
+#include "utils.h"
+
+
+/*
+// old timing. is it better? who knows!!
+double get_wall_time()
+{
+ struct timeval time;
+ if (gettimeofday(&time,NULL)){
+ return 0;
+ }
+ return (double)time.tv_sec + (double)time.tv_usec * .000001;
+}
+*/
+
+double what_time_is_it_now()
+{
+ struct timespec now;
+ clock_gettime(CLOCK_REALTIME, &now);
+ return now.tv_sec + now.tv_nsec*1e-9;
+}
+
+int *read_intlist(char *gpu_list, int *ngpus, int d)
+{
+ int *gpus = 0;
+ if(gpu_list){
+ int len = strlen(gpu_list);
+ *ngpus = 1;
+ int i;
+ for(i = 0; i < len; ++i){
+ if (gpu_list[i] == ',') ++*ngpus;
+ }
+ gpus = calloc(*ngpus, sizeof(int));
+ for(i = 0; i < *ngpus; ++i){
+ gpus[i] = atoi(gpu_list);
+ gpu_list = strchr(gpu_list, ',')+1;
+ }
+ } else {
+        gpus = calloc(1, sizeof(int));
+ *gpus = d;
+ *ngpus = 1;
+ }
+ return gpus;
+}
+
+int *read_map(char *filename)
+{
+ int n = 0;
+ int *map = 0;
+ char *str;
+ FILE *file = fopen(filename, "r");
+ if(!file) file_error(filename);
+ while((str=fgetl(file))){
+ ++n;
+ map = realloc(map, n*sizeof(int));
+ map[n-1] = atoi(str);
+ }
+ return map;
+}
+
+void sorta_shuffle(void *arr, size_t n, size_t size, size_t sections)
+{
+ size_t i;
+ for(i = 0; i < sections; ++i){
+ size_t start = n*i/sections;
+ size_t end = n*(i+1)/sections;
+ size_t num = end-start;
+ shuffle(arr+(start*size), num, size);
+ }
+}
+
+void shuffle(void *arr, size_t n, size_t size)
+{
+ size_t i;
+ void *swp = calloc(1, size);
+ for(i = 0; i < n-1; ++i){
+ size_t j = i + rand()/(RAND_MAX / (n-i)+1);
+ memcpy(swp, arr+(j*size), size);
+ memcpy(arr+(j*size), arr+(i*size), size);
+ memcpy(arr+(i*size), swp, size);
+ }
+}
+
+int *random_index_order(int min, int max)
+{
+ int *inds = calloc(max-min, sizeof(int));
+ int i;
+ for(i = min; i < max; ++i){
+ inds[i] = i;
+ }
+ for(i = min; i < max-1; ++i){
+ int swap = inds[i];
+ int index = i + rand()%(max-i);
+ inds[i] = inds[index];
+ inds[index] = swap;
+ }
+ return inds;
+}
+
+void del_arg(int argc, char **argv, int index)
+{
+ int i;
+ for(i = index; i < argc-1; ++i) argv[i] = argv[i+1];
+ argv[i] = 0;
+}
+
+int find_arg(int argc, char* argv[], char *arg)
+{
+ int i;
+ for(i = 0; i < argc; ++i) {
+ if(!argv[i]) continue;
+ if(0==strcmp(argv[i], arg)) {
+ del_arg(argc, argv, i);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+int find_int_arg(int argc, char **argv, char *arg, int def)
+{
+ int i;
+ for(i = 0; i < argc-1; ++i){
+ if(!argv[i]) continue;
+ if(0==strcmp(argv[i], arg)){
+ def = atoi(argv[i+1]);
+ del_arg(argc, argv, i);
+ del_arg(argc, argv, i);
+ break;
+ }
+ }
+ return def;
+}
+
+float find_float_arg(int argc, char **argv, char *arg, float def)
+{
+ int i;
+ for(i = 0; i < argc-1; ++i){
+ if(!argv[i]) continue;
+ if(0==strcmp(argv[i], arg)){
+ def = atof(argv[i+1]);
+ del_arg(argc, argv, i);
+ del_arg(argc, argv, i);
+ break;
+ }
+ }
+ return def;
+}
+
+char *find_char_arg(int argc, char **argv, char *arg, char *def)
+{
+ int i;
+ for(i = 0; i < argc-1; ++i){
+ if(!argv[i]) continue;
+ if(0==strcmp(argv[i], arg)){
+ def = argv[i+1];
+ del_arg(argc, argv, i);
+ del_arg(argc, argv, i);
+ break;
+ }
+ }
+ return def;
+}
+
+
+char *basecfg(char *cfgfile)
+{
+ char *c = cfgfile;
+ char *next;
+ while((next = strchr(c, '/')))
+ {
+ c = next+1;
+ }
+ c = copy_string(c);
+ next = strchr(c, '.');
+ if (next) *next = 0;
+ return c;
+}
+
+int alphanum_to_int(char c)
+{
+ return (c < 58) ? c - 48 : c-87;
+}
+char int_to_alphanum(int i)
+{
+ if (i == 36) return '.';
+ return (i < 10) ? i + 48 : i + 87;
+}
+
+void pm(int M, int N, float *A)
+{
+ int i,j;
+ for(i =0 ; i < M; ++i){
+ printf("%d ", i+1);
+ for(j = 0; j < N; ++j){
+ printf("%2.4f, ", A[i*N+j]);
+ }
+ printf("\n");
+ }
+ printf("\n");
+}
+
+void find_replace(char *str, char *orig, char *rep, char *output)
+{
+ char buffer[4096] = {0};
+ char *p;
+
+ sprintf(buffer, "%s", str);
+ if(!(p = strstr(buffer, orig))){ // Is 'orig' even in 'str'?
+ sprintf(output, "%s", str);
+ return;
+ }
+
+ *p = '\0';
+
+ sprintf(output, "%s%s%s", buffer, rep, p+strlen(orig));
+}
+
+float sec(clock_t clocks)
+{
+ return (float)clocks/CLOCKS_PER_SEC;
+}
+
+void top_k(float *a, int n, int k, int *index)
+{
+ int i,j;
+ for(j = 0; j < k; ++j) index[j] = -1;
+ for(i = 0; i < n; ++i){
+ int curr = i;
+ for(j = 0; j < k; ++j){
+ if((index[j] < 0) || a[curr] > a[index[j]]){
+ int swap = curr;
+ curr = index[j];
+ index[j] = swap;
+ }
+ }
+ }
+}
+
+void error(const char *s)
+{
+ perror(s);
+ assert(0);
+ exit(-1);
+}
+
+unsigned char *read_file(char *filename)
+{
+ FILE *fp = fopen(filename, "rb");
+ size_t size;
+
+ fseek(fp, 0, SEEK_END);
+ size = ftell(fp);
+ fseek(fp, 0, SEEK_SET);
+
+ unsigned char *text = calloc(size+1, sizeof(char));
+ fread(text, 1, size, fp);
+ fclose(fp);
+ return text;
+}
+
+void malloc_error()
+{
+ fprintf(stderr, "Malloc error\n");
+ exit(-1);
+}
+
+void file_error(char *s)
+{
+ fprintf(stderr, "Couldn't open file: %s\n", s);
+ exit(0);
+}
+
+list *split_str(char *s, char delim)
+{
+ size_t i;
+ size_t len = strlen(s);
+ list *l = make_list();
+ list_insert(l, s);
+ for(i = 0; i < len; ++i){
+ if(s[i] == delim){
+ s[i] = '\0';
+ list_insert(l, &(s[i+1]));
+ }
+ }
+ return l;
+}
+
+void strip(char *s)
+{
+ size_t i;
+ size_t len = strlen(s);
+ size_t offset = 0;
+ for(i = 0; i < len; ++i){
+ char c = s[i];
+ if(c==' '||c=='\t'||c=='\n') ++offset;
+ else s[i-offset] = c;
+ }
+ s[len-offset] = '\0';
+}
+
+void strip_char(char *s, char bad)
+{
+ size_t i;
+ size_t len = strlen(s);
+ size_t offset = 0;
+ for(i = 0; i < len; ++i){
+ char c = s[i];
+ if(c==bad) ++offset;
+ else s[i-offset] = c;
+ }
+ s[len-offset] = '\0';
+}
+
+void free_ptrs(void **ptrs, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i) free(ptrs[i]);
+ free(ptrs);
+}
+
+char *fgetl(FILE *fp)
+{
+ if(feof(fp)) return 0;
+ size_t size = 512;
+ char *line = malloc(size*sizeof(char));
+ if(!fgets(line, size, fp)){
+ free(line);
+ return 0;
+ }
+
+ size_t curr = strlen(line);
+
+ while((line[curr-1] != '\n') && !feof(fp)){
+ if(curr == size-1){
+ size *= 2;
+ line = realloc(line, size*sizeof(char));
+ if(!line) {
+ printf("%ld\n", size);
+ malloc_error();
+ }
+ }
+ size_t readsize = size-curr;
+ if(readsize > INT_MAX) readsize = INT_MAX-1;
+ fgets(&line[curr], readsize, fp);
+ curr = strlen(line);
+ }
+ if(line[curr-1] == '\n') line[curr-1] = '\0';
+
+ return line;
+}
+
+int read_int(int fd)
+{
+ int n = 0;
+ int next = read(fd, &n, sizeof(int));
+ if(next <= 0) return -1;
+ return n;
+}
+
+void write_int(int fd, int n)
+{
+ int next = write(fd, &n, sizeof(int));
+ if(next <= 0) error("read failed");
+}
+
+int read_all_fail(int fd, char *buffer, size_t bytes)
+{
+ size_t n = 0;
+ while(n < bytes){
+ int next = read(fd, buffer + n, bytes-n);
+ if(next <= 0) return 1;
+ n += next;
+ }
+ return 0;
+}
+
+int write_all_fail(int fd, char *buffer, size_t bytes)
+{
+ size_t n = 0;
+ while(n < bytes){
+ size_t next = write(fd, buffer + n, bytes-n);
+ if(next <= 0) return 1;
+ n += next;
+ }
+ return 0;
+}
+
+void read_all(int fd, char *buffer, size_t bytes)
+{
+ size_t n = 0;
+ while(n < bytes){
+ int next = read(fd, buffer + n, bytes-n);
+ if(next <= 0) error("read failed");
+ n += next;
+ }
+}
+
+void write_all(int fd, char *buffer, size_t bytes)
+{
+ size_t n = 0;
+ while(n < bytes){
+ size_t next = write(fd, buffer + n, bytes-n);
+ if(next <= 0) error("write failed");
+ n += next;
+ }
+}
+
+
+char *copy_string(char *s)
+{
+ char *copy = malloc(strlen(s)+1);
+ strncpy(copy, s, strlen(s)+1);
+ return copy;
+}
+
+list *parse_csv_line(char *line)
+{
+ list *l = make_list();
+ char *c, *p;
+ int in = 0;
+ for(c = line, p = line; *c != '\0'; ++c){
+ if(*c == '"') in = !in;
+ else if(*c == ',' && !in){
+ *c = '\0';
+ list_insert(l, copy_string(p));
+ p = c+1;
+ }
+ }
+ list_insert(l, copy_string(p));
+ return l;
+}
+
+int count_fields(char *line)
+{
+ int count = 0;
+ int done = 0;
+ char *c;
+ for(c = line; !done; ++c){
+ done = (*c == '\0');
+ if(*c == ',' || done) ++count;
+ }
+ return count;
+}
+
+float *parse_fields(char *line, int n)
+{
+ float *field = calloc(n, sizeof(float));
+ char *c, *p, *end;
+ int count = 0;
+ int done = 0;
+ for(c = line, p = line; !done; ++c){
+ done = (*c == '\0');
+ if(*c == ',' || done){
+ *c = '\0';
+ field[count] = strtod(p, &end);
+ if(p == c) field[count] = nan("");
+ if(end != c && (end != c-1 || *end != '\r')) field[count] = nan(""); //DOS file formats!
+ p = c+1;
+ ++count;
+ }
+ }
+ return field;
+}
+
+float sum_array(float *a, int n)
+{
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; ++i) sum += a[i];
+ return sum;
+}
+
+float mean_array(float *a, int n)
+{
+ return sum_array(a,n)/n;
+}
+
+void mean_arrays(float **a, int n, int els, float *avg)
+{
+ int i;
+ int j;
+ memset(avg, 0, els*sizeof(float));
+ for(j = 0; j < n; ++j){
+ for(i = 0; i < els; ++i){
+ avg[i] += a[j][i];
+ }
+ }
+ for(i = 0; i < els; ++i){
+ avg[i] /= n;
+ }
+}
+
+void print_statistics(float *a, int n)
+{
+ float m = mean_array(a, n);
+ float v = variance_array(a, n);
+ printf("MSE: %.6f, Mean: %.6f, Variance: %.6f\n", mse_array(a, n), m, v);
+}
+
+float variance_array(float *a, int n)
+{
+ int i;
+ float sum = 0;
+ float mean = mean_array(a, n);
+ for(i = 0; i < n; ++i) sum += (a[i] - mean)*(a[i]-mean);
+ float variance = sum/n;
+ return variance;
+}
+
+int constrain_int(int a, int min, int max)
+{
+ if (a < min) return min;
+ if (a > max) return max;
+ return a;
+}
+
+float constrain(float min, float max, float a)
+{
+ if (a < min) return min;
+ if (a > max) return max;
+ return a;
+}
+
+float dist_array(float *a, float *b, int n, int sub)
+{
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; i += sub) sum += pow(a[i]-b[i], 2);
+ return sqrt(sum);
+}
+
+float mse_array(float *a, int n)
+{
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; ++i) sum += a[i]*a[i];
+ return sqrt(sum/n);
+}
+
+void normalize_array(float *a, int n)
+{
+ int i;
+ float mu = mean_array(a,n);
+ float sigma = sqrt(variance_array(a,n));
+ for(i = 0; i < n; ++i){
+ a[i] = (a[i] - mu)/sigma;
+ }
+ mu = mean_array(a,n);
+ sigma = sqrt(variance_array(a,n));
+}
+
+void translate_array(float *a, int n, float s)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ a[i] += s;
+ }
+}
+
+float mag_array(float *a, int n)
+{
+ int i;
+ float sum = 0;
+ for(i = 0; i < n; ++i){
+ sum += a[i]*a[i];
+ }
+ return sqrt(sum);
+}
+
+void scale_array(float *a, int n, float s)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ a[i] *= s;
+ }
+}
+
+int sample_array(float *a, int n)
+{
+ float sum = sum_array(a, n);
+ scale_array(a, n, 1./sum);
+ float r = rand_uniform(0, 1);
+ int i;
+ for(i = 0; i < n; ++i){
+ r = r - a[i];
+ if (r <= 0) return i;
+ }
+ return n-1;
+}
+
+int max_int_index(int *a, int n)
+{
+ if(n <= 0) return -1;
+ int i, max_i = 0;
+ int max = a[0];
+ for(i = 1; i < n; ++i){
+ if(a[i] > max){
+ max = a[i];
+ max_i = i;
+ }
+ }
+ return max_i;
+}
+
+int max_index(float *a, int n)
+{
+ if(n <= 0) return -1;
+ int i, max_i = 0;
+ float max = a[0];
+ for(i = 1; i < n; ++i){
+ if(a[i] > max){
+ max = a[i];
+ max_i = i;
+ }
+ }
+ return max_i;
+}
+
+int int_index(int *a, int val, int n)
+{
+ int i;
+ for(i = 0; i < n; ++i){
+ if(a[i] == val) return i;
+ }
+ return -1;
+}
+
+int rand_int(int min, int max)
+{
+ if (max < min){
+ int s = min;
+ min = max;
+ max = s;
+ }
+ int r = (rand()%(max - min + 1)) + min;
+ return r;
+}
+
+// From http://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
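+// generates normal deviates in pairs: the cosine value is returned immediately
+// and the matching sine value is cached (haveSpare) for the next call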
+float rand_normal()
+{
+ static int haveSpare = 0;
+ static double rand1, rand2;
+
+ if(haveSpare)
+ {
+ haveSpare = 0;
+ return sqrt(rand1) * sin(rand2);
+ }
+
+ haveSpare = 1;
+
+ rand1 = rand() / ((double) RAND_MAX);
+ if(rand1 < 1e-100) rand1 = 1e-100;
+ rand1 = -2 * log(rand1);
+ rand2 = (rand() / ((double) RAND_MAX)) * TWO_PI;
+
+ return sqrt(rand1) * cos(rand2);
+}
+
+/*
+ float rand_normal()
+ {
+ int n = 12;
+ int i;
+ float sum= 0;
+ for(i = 0; i < n; ++i) sum += (float)rand()/RAND_MAX;
+ return sum-n/2.;
+ }
+ */
+
+size_t rand_size_t()
+{
+ return ((size_t)(rand()&0xff) << 56) |
+ ((size_t)(rand()&0xff) << 48) |
+ ((size_t)(rand()&0xff) << 40) |
+ ((size_t)(rand()&0xff) << 32) |
+ ((size_t)(rand()&0xff) << 24) |
+ ((size_t)(rand()&0xff) << 16) |
+ ((size_t)(rand()&0xff) << 8) |
+ ((size_t)(rand()&0xff) << 0);
+}
+
+float rand_uniform(float min, float max)
+{
+ if(max < min){
+ float swap = min;
+ min = max;
+ max = swap;
+ }
+ return ((float)rand()/RAND_MAX * (max - min)) + min;
+}
+
+float rand_scale(float s)
+{
+ float scale = rand_uniform(1, s);
+ if(rand()%2) return scale;
+ return 1./scale;
+}
+
+float **one_hot_encode(float *a, int n, int k)
+{
+ int i;
+ float **t = calloc(n, sizeof(float*));
+ for(i = 0; i < n; ++i){
+ t[i] = calloc(k, sizeof(float));
+ int index = (int)a[i];
+ t[i][index] = 1;
+ }
+ return t;
+}
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.h
new file mode 100644
index 00000000000..ef24da79888
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/utils.h
@@ -0,0 +1,53 @@
+#ifndef UTILS_H
+#define UTILS_H
+#include <stdio.h>
+#include <time.h>
+#include "darknet.h"
+#include "list.h"
+
+#define TIME(a) \
+ do { \
+ double start = what_time_is_it_now(); \
+ a; \
+ printf("%s took: %f seconds\n", #a, what_time_is_it_now() - start); \
+ } while (0)
+
+#define TWO_PI 6.2831853071795864769252866f
+
+double what_time_is_it_now();
+void shuffle(void *arr, size_t n, size_t size);
+void sorta_shuffle(void *arr, size_t n, size_t size, size_t sections);
+void free_ptrs(void **ptrs, int n);
+int alphanum_to_int(char c);
+char int_to_alphanum(int i);
+int read_int(int fd);
+void write_int(int fd, int n);
+void read_all(int fd, char *buffer, size_t bytes);
+void write_all(int fd, char *buffer, size_t bytes);
+int read_all_fail(int fd, char *buffer, size_t bytes);
+int write_all_fail(int fd, char *buffer, size_t bytes);
+void find_replace(char *str, char *orig, char *rep, char *output);
+void malloc_error();
+void file_error(char *s);
+void strip(char *s);
+void strip_char(char *s, char bad);
+list *split_str(char *s, char delim);
+char *fgetl(FILE *fp);
+list *parse_csv_line(char *line);
+char *copy_string(char *s);
+int count_fields(char *line);
+float *parse_fields(char *line, int n);
+void translate_array(float *a, int n, float s);
+float constrain(float min, float max, float a);
+int constrain_int(int a, int min, int max);
+float rand_scale(float s);
+int rand_int(int min, int max);
+void mean_arrays(float **a, int n, int els, float *avg);
+float dist_array(float *a, float *b, int n, int sub);
+float **one_hot_encode(float *a, int n, int k);
+float sec(clock_t clocks);
+void print_statistics(float *a, int n);
+int int_index(int *a, int val, int n);
+
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.c b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.c
new file mode 100644
index 00000000000..af714d46a3d
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.c
@@ -0,0 +1,374 @@
+#include "yolo_layer.h"
+#include "activations.h"
+#include "blas.h"
+#include "box.h"
+#include "cuda.h"
+#include "utils.h"
+
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+
+layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
+{
+ int i;
+ layer l = {0};
+ l.type = YOLO;
+
+ l.n = n;
+ l.total = total;
+ l.batch = batch;
+ l.h = h;
+ l.w = w;
+ l.c = n*(classes + 4 + 1);
+ l.out_w = l.w;
+ l.out_h = l.h;
+ l.out_c = l.c;
+ l.classes = classes;
+ l.cost = calloc(1, sizeof(float));
+ l.biases = calloc(total*2, sizeof(float));
+ if(mask) l.mask = mask;
+ else{
+ l.mask = calloc(n, sizeof(int));
+ for(i = 0; i < n; ++i){
+ l.mask[i] = i;
+ }
+ }
+ l.bias_updates = calloc(n*2, sizeof(float));
+ l.outputs = h*w*n*(classes + 4 + 1);
+ l.inputs = l.outputs;
+ l.truths = 90*(4 + 1);
+ l.delta = calloc(batch*l.outputs, sizeof(float));
+ l.output = calloc(batch*l.outputs, sizeof(float));
+ for(i = 0; i < total*2; ++i){
+ l.biases[i] = .5;
+ }
+
+ l.forward = forward_yolo_layer;
+ l.backward = backward_yolo_layer;
+#ifdef GPU
+ l.forward_gpu = forward_yolo_layer_gpu;
+ l.backward_gpu = backward_yolo_layer_gpu;
+ l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
+ l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
+#endif
+
+ fprintf(stderr, "detection\n");
+ srand(0);
+
+ return l;
+}
+
+void resize_yolo_layer(layer *l, int w, int h)
+{
+ l->w = w;
+ l->h = h;
+
+ l->outputs = h*w*l->n*(l->classes + 4 + 1);
+ l->inputs = l->outputs;
+
+ l->output = realloc(l->output, l->batch*l->outputs*sizeof(float));
+ l->delta = realloc(l->delta, l->batch*l->outputs*sizeof(float));
+
+#ifdef GPU
+ cuda_free(l->delta_gpu);
+ cuda_free(l->output_gpu);
+
+ l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
+ l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
+#endif
+}
+
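+/* Decode a raw prediction into a box normalized to the network input:
+ * x/y are offset by the cell indices and divided by the grid size,
+ * w/h are exp()-scaled anchor (bias) dimensions divided by the input size. */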
+box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
+{
+ box b;
+ b.x = (i + x[index + 0*stride]) / lw;
+ b.y = (j + x[index + 1*stride]) / lh;
+ b.w = exp(x[index + 2*stride]) * biases[2*n] / w;
+ b.h = exp(x[index + 3*stride]) * biases[2*n+1] / h;
+ return b;
+}
+
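+/* Gradient of the box coordinates against a ground-truth box: the truth is
+ * converted to the same tx/ty/tw/th parametrization and the difference is
+ * written into delta. Returns the IoU between prediction and truth. */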
+float delta_yolo_box(box truth, float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, float *delta, float scale, int stride)
+{
+ box pred = get_yolo_box(x, biases, n, index, i, j, lw, lh, w, h, stride);
+ float iou = box_iou(pred, truth);
+
+ float tx = (truth.x*lw - i);
+ float ty = (truth.y*lh - j);
+ float tw = log(truth.w*w / biases[2*n]);
+ float th = log(truth.h*h / biases[2*n + 1]);
+
+ delta[index + 0*stride] = scale * (tx - x[index + 0*stride]);
+ delta[index + 1*stride] = scale * (ty - x[index + 1*stride]);
+ delta[index + 2*stride] = scale * (tw - x[index + 2*stride]);
+ delta[index + 3*stride] = scale * (th - x[index + 3*stride]);
+ return iou;
+}
+
+
+void delta_yolo_class(float *output, float *delta, int index, int class, int classes, int stride, float *avg_cat)
+{
+ int n;
+ if (delta[index]){
+ delta[index + stride*class] = 1 - output[index + stride*class];
+ if(avg_cat) *avg_cat += output[index + stride*class];
+ return;
+ }
+ for(n = 0; n < classes; ++n){
+ delta[index + stride*n] = ((n == class)?1 : 0) - output[index + stride*n];
+ if(n == class && avg_cat) *avg_cat += output[index + stride*n];
+ }
+}
+
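+/* Index into the flat output tensor: per anchor the channels are laid out as
+ * [tx, ty, tw, th, objectness, class_0 ... class_{classes-1}], each channel
+ * spanning a w*h plane. */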
+static int entry_index(layer l, int batch, int location, int entry)
+{
+ int n = location / (l.w*l.h);
+ int loc = location % (l.w*l.h);
+ return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
+}
+
+void forward_yolo_layer(const layer l, network net)
+{
+ int i,j,b,t,n;
+ memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
+
+#ifndef GPU
+ for (b = 0; b < l.batch; ++b){
+ for(n = 0; n < l.n; ++n){
+ int index = entry_index(l, b, n*l.w*l.h, 0);
+ activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
+ index = entry_index(l, b, n*l.w*l.h, 4);
+ activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
+ }
+ }
+#endif
+
+ memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
+ if(!net.train) return;
+ float avg_iou = 0;
+ float recall = 0;
+ float recall75 = 0;
+ float avg_cat = 0;
+ float avg_obj = 0;
+ float avg_anyobj = 0;
+ int count = 0;
+ int class_count = 0;
+ *(l.cost) = 0;
+ for (b = 0; b < l.batch; ++b) {
+ for (j = 0; j < l.h; ++j) {
+ for (i = 0; i < l.w; ++i) {
+ for (n = 0; n < l.n; ++n) {
+ int box_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 0);
+ box pred = get_yolo_box(l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, net.w, net.h, l.w*l.h);
+ float best_iou = 0;
+ int best_t = 0;
+ for(t = 0; t < l.max_boxes; ++t){
+ box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
+ if(!truth.x) break;
+ float iou = box_iou(pred, truth);
+ if (iou > best_iou) {
+ best_iou = iou;
+ best_t = t;
+ }
+ }
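+ // Push objectness toward 0 by default; drop the penalty when the predicted
+ // box already overlaps some ground truth above ignore_thresh.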
+ int obj_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4);
+ avg_anyobj += l.output[obj_index];
+ l.delta[obj_index] = 0 - l.output[obj_index];
+ if (best_iou > l.ignore_thresh) {
+ l.delta[obj_index] = 0;
+ }
+ if (best_iou > l.truth_thresh) {
+ l.delta[obj_index] = 1 - l.output[obj_index];
+
+ int class = net.truth[best_t*(4 + 1) + b*l.truths + 4];
+ if (l.map) class = l.map[class];
+ int class_index = entry_index(l, b, n*l.w*l.h + j*l.w + i, 4 + 1);
+ delta_yolo_class(l.output, l.delta, class_index, class, l.classes, l.w*l.h, 0);
+ box truth = float_to_box(net.truth + best_t*(4 + 1) + b*l.truths, 1);
+ delta_yolo_box(truth, l.output, l.biases, l.mask[n], box_index, i, j, l.w, l.h, net.w, net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
+ }
+ }
+ }
+ }
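+ // Second pass: for every ground-truth box, pick the anchor whose shape best
+ // matches it and make that prediction responsible for the box, its
+ // objectness and its class.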
+ for(t = 0; t < l.max_boxes; ++t){
+ box truth = float_to_box(net.truth + t*(4 + 1) + b*l.truths, 1);
+
+ if(!truth.x) break;
+ float best_iou = 0;
+ int best_n = 0;
+ i = (truth.x * l.w);
+ j = (truth.y * l.h);
+ box truth_shift = truth;
+ truth_shift.x = truth_shift.y = 0;
+ for(n = 0; n < l.total; ++n){
+ box pred = {0};
+ pred.w = l.biases[2*n]/net.w;
+ pred.h = l.biases[2*n+1]/net.h;
+ float iou = box_iou(pred, truth_shift);
+ if (iou > best_iou){
+ best_iou = iou;
+ best_n = n;
+ }
+ }
+
+ int mask_n = int_index(l.mask, best_n, l.n);
+ if(mask_n >= 0){
+ int box_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 0);
+ float iou = delta_yolo_box(truth, l.output, l.biases, best_n, box_index, i, j, l.w, l.h, net.w, net.h, l.delta, (2-truth.w*truth.h), l.w*l.h);
+
+ int obj_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4);
+ avg_obj += l.output[obj_index];
+ l.delta[obj_index] = 1 - l.output[obj_index];
+
+ int class = net.truth[t*(4 + 1) + b*l.truths + 4];
+ if (l.map) class = l.map[class];
+ int class_index = entry_index(l, b, mask_n*l.w*l.h + j*l.w + i, 4 + 1);
+ delta_yolo_class(l.output, l.delta, class_index, class, l.classes, l.w*l.h, &avg_cat);
+
+ ++count;
+ ++class_count;
+ if(iou > .5) recall += 1;
+ if(iou > .75) recall75 += 1;
+ avg_iou += iou;
+ }
+ }
+ }
+ *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
+ printf("Region %d Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, .5R: %f, .75R: %f, count: %d\n", net.index, avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, recall75/count, count);
+}
+
+void backward_yolo_layer(const layer l, network net)
+{
+ axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, net.delta, 1);
+}
+
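+/* Undo the letterbox resize: map boxes from network-input coordinates back to
+ * the original w x h image, optionally converting to absolute pixels. */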
+void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
+{
+ int i;
+ int new_w=0;
+ int new_h=0;
+ if (((float)netw/w) < ((float)neth/h)) {
+ new_w = netw;
+ new_h = (h * netw)/w;
+ } else {
+ new_h = neth;
+ new_w = (w * neth)/h;
+ }
+ for (i = 0; i < n; ++i){
+ box b = dets[i].bbox;
+ b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
+ b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
+ b.w *= (float)netw/new_w;
+ b.h *= (float)neth/new_h;
+ if(!relative){
+ b.x *= w;
+ b.w *= w;
+ b.y *= h;
+ b.h *= h;
+ }
+ dets[i].bbox = b;
+ }
+}
+
+int yolo_num_detections(layer l, float thresh)
+{
+ int i, n;
+ int count = 0;
+ for (i = 0; i < l.w*l.h; ++i){
+ for(n = 0; n < l.n; ++n){
+ int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
+ if(l.output[obj_index] > thresh){
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+void avg_flipped_yolo(layer l)
+{
+ int i,j,n,z;
+ float *flip = l.output + l.outputs;
+ for (j = 0; j < l.h; ++j) {
+ for (i = 0; i < l.w/2; ++i) {
+ for (n = 0; n < l.n; ++n) {
+ for(z = 0; z < l.classes + 4 + 1; ++z){
+ int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
+ int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
+ float swap = flip[i1];
+ flip[i1] = flip[i2];
+ flip[i2] = swap;
+ if(z == 0){
+ flip[i1] = -flip[i1];
+ flip[i2] = -flip[i2];
+ }
+ }
+ }
+ }
+ }
+ for(i = 0; i < l.outputs; ++i){
+ l.output[i] = (l.output[i] + flip[i])/2.;
+ }
+}
+
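+/* Fill dets with all predictions whose objectness exceeds thresh; per-class
+ * probabilities are objectness * class score, zeroed when below thresh. */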
+int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
+{
+ int i,j,n;
+ float *predictions = l.output;
+ if (l.batch == 2) avg_flipped_yolo(l);
+ int count = 0;
+ for (i = 0; i < l.w*l.h; ++i){
+ int row = i / l.w;
+ int col = i % l.w;
+ for(n = 0; n < l.n; ++n){
+ int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
+ float objectness = predictions[obj_index];
+ if(objectness <= thresh) continue;
+ int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
+ dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
+ dets[count].objectness = objectness;
+ dets[count].classes = l.classes;
+ for(j = 0; j < l.classes; ++j){
+ int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
+ float prob = objectness*predictions[class_index];
+ dets[count].prob[j] = (prob > thresh) ? prob : 0;
+ }
+ ++count;
+ }
+ }
+ correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
+ return count;
+}
+
+#ifdef GPU
+
+void forward_yolo_layer_gpu(const layer l, network net)
+{
+ copy_gpu(l.batch*l.inputs, net.input_gpu, 1, l.output_gpu, 1);
+ int b, n;
+ for (b = 0; b < l.batch; ++b){
+ for(n = 0; n < l.n; ++n){
+ int index = entry_index(l, b, n*l.w*l.h, 0);
+ activate_array_gpu(l.output_gpu + index, 2*l.w*l.h, LOGISTIC);
+ index = entry_index(l, b, n*l.w*l.h, 4);
+ activate_array_gpu(l.output_gpu + index, (1+l.classes)*l.w*l.h, LOGISTIC);
+ }
+ }
+ if(!net.train || l.onlyforward){
+ cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs);
+ return;
+ }
+
+ cuda_pull_array(l.output_gpu, net.input, l.batch*l.inputs);
+ forward_yolo_layer(l, net);
+ cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
+}
+
+void backward_yolo_layer_gpu(const layer l, network net)
+{
+ axpy_gpu(l.batch*l.inputs, 1, l.delta_gpu, 1, net.delta_gpu, 1);
+}
+#endif
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.h b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.h
new file mode 100644
index 00000000000..d2a02432681
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/darknet/src/yolo_layer.h
@@ -0,0 +1,19 @@
+#ifndef YOLO_LAYER_H
+#define YOLO_LAYER_H
+
+#include "darknet.h"
+#include "layer.h"
+#include "network.h"
+
+layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes);
+void forward_yolo_layer(const layer l, network net);
+void backward_yolo_layer(const layer l, network net);
+void resize_yolo_layer(layer *l, int w, int h);
+int yolo_num_detections(layer l, float thresh);
+
+#ifdef GPU
+void forward_yolo_layer_gpu(const layer l, network net);
+void backward_yolo_layer_gpu(layer l, network net);
+#endif
+
+#endif
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/launch/yolo3.launch b/ros/src/computing/perception/detection/packages/yolo3_detector/launch/yolo3.launch
new file mode 100644
index 00000000000..3d9ce713cc8
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/launch/yolo3.launch
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/package.xml b/ros/src/computing/perception/detection/packages/yolo3_detector/package.xml
new file mode 100644
index 00000000000..57917d9aa7f
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/package.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+<package>
+  <name>yolo3_detector</name>
+  <version>1.6.3</version>
+  <description>Yolo3 image detector</description>
+  <maintainer email="abrahammonrroy@yahoo.com">Abraham Monrroy</maintainer>
+  <license>BSD</license>
+  <buildtool_depend>catkin</buildtool_depend>
+
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_depend>autoware_msgs</build_depend>
+  <build_depend>cv_bridge</build_depend>
+  <build_depend>image_transport</build_depend>
+  <build_depend>roscpp</build_depend>
+
+  <run_depend>sensor_msgs</run_depend>
+  <run_depend>std_msgs</run_depend>
+  <run_depend>autoware_msgs</run_depend>
+  <run_depend>cv_bridge</run_depend>
+  <run_depend>image_transport</run_depend>
+  <run_depend>roscpp</run_depend>
+
+</package>
+
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/src/rect_class_score.h b/ros/src/computing/perception/detection/packages/yolo3_detector/src/rect_class_score.h
new file mode 100644
index 00000000000..f3cc2bdada1
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/src/rect_class_score.h
@@ -0,0 +1,24 @@
+#ifndef RECTCLASSSCORE_H_
+#define RECTCLASSSCORE_H_
+
+#include <sstream>
+#include <string>
+
+template<typename _Tp> class RectClassScore
+{
+public:
+ _Tp x, y, w, h;
+ _Tp score;
+ unsigned int class_type;
+ bool enabled;
+
+ inline std::string toString()
+ {
+ std::ostringstream out;
+ out << class_type << "(x:" << x << ", y:" << y << ", w:" << w << ", h:" << h << ") =" << score;
+ return out.str();
+ }
+};
+
+
+#endif /* RECTCLASSSCORE_H_ */
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.cpp b/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.cpp
new file mode 100644
index 00000000000..9d921506d38
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2018, Nagoya University
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Autoware nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ********************
+ * v1.0: amc-nu (abrahammonrroy@yahoo.com)
+ *
+ * yolo3_node.cpp
+ *
+ * Created on: April 4th, 2018
+ */
+#include "yolo3_detector.h"
+
+namespace darknet
+{
+ uint32_t Yolo3Detector::get_network_height()
+ {
+ return darknet_network_->h;
+ }
+ uint32_t Yolo3Detector::get_network_width()
+ {
+ return darknet_network_->w;
+ }
+ void Yolo3Detector::load(std::string& in_model_file, std::string& in_trained_file, double in_min_confidence, double in_nms_threshold)
+ {
+ min_confidence_ = in_min_confidence;
+ nms_threshold_ = in_nms_threshold;
+ darknet_network_ = parse_network_cfg(&in_model_file[0]);
+ load_weights(darknet_network_, &in_trained_file[0]);
+ set_batch_network(darknet_network_, 1);
+
+ layer output_layer = darknet_network_->layers[darknet_network_->n - 1];
+ darknet_boxes_.resize(output_layer.w * output_layer.h * output_layer.n);
+ }
+
+ Yolo3Detector::~Yolo3Detector()
+ {
+ free_network(darknet_network_);
+ }
+
+ std::vector< RectClassScore<float> > Yolo3Detector::detect(image& in_darknet_image)
+ {
+ return forward(in_darknet_image);
+ }
+
+ image Yolo3Detector::convert_image(const sensor_msgs::ImageConstPtr& msg)
+ {
+ if (msg->encoding != sensor_msgs::image_encodings::BGR8)
+ {
+ ROS_ERROR("Unsupported encoding");
+ exit(-1);
+ }
+
+ auto data = msg->data;
+ uint32_t height = msg->height, width = msg->width, offset = msg->step - 3 * width;
+ uint32_t i = 0, j = 0;
+ image im = make_image(width, height, 3);
+
+ for (uint32_t line = height; line; line--)
+ {
+ for (uint32_t column = width; column; column--)
+ {
+ for (uint32_t channel = 0; channel < 3; channel++)
+ im.data[i + width * height * channel] = data[j++] / 255.;
+ i++;
+ }
+ j += offset;
+ }
+
+ if (darknet_network_->w == (int) width && darknet_network_->h == (int) height)
+ {
+ return im;
+ }
+ image resized = resize_image(im, darknet_network_->w, darknet_network_->h);
+ free_image(im);
+ return resized;
+ }
+
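+ /* Runs the network on the pre-converted darknet image, extracts boxes above
+  * min_confidence_, applies NMS and keeps, for each box, the first class whose
+  * probability clears the threshold. */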
+ std::vector< RectClassScore<float> > Yolo3Detector::forward(image& in_darknet_image)
+ {
+ float * in_data = in_darknet_image.data;
+ float *prediction = network_predict(darknet_network_, in_data);
+ layer output_layer = darknet_network_->layers[darknet_network_->n - 1];
+
+ output_layer.output = prediction;
+ int nboxes = 0;
+ int num_classes = output_layer.classes;
+ detection *darknet_detections = get_network_boxes(darknet_network_, darknet_network_->w, darknet_network_->h, min_confidence_, .5, NULL, 0, &nboxes);
+
+ do_nms_sort(darknet_detections, nboxes, num_classes, nms_threshold_);
+
+ std::vector< RectClassScore<float> > detections;
+
+ for (int i = 0; i < nboxes; i++)
+ {
+ int class_id = -1;
+ float score = 0.f;
+ //find the class
+ for(int j = 0; j < num_classes; ++j){
+ if (darknet_detections[i].prob[j] >= min_confidence_){
+ if (class_id < 0) {
+ class_id = j;
+ score = darknet_detections[i].prob[j];
+ }
+ }
+ }
+ //if class found
+ if (class_id >= 0)
+ {
+ RectClassScore<float> detection;
+
+ detection.x = darknet_detections[i].bbox.x - darknet_detections[i].bbox.w/2;
+ detection.y = darknet_detections[i].bbox.y - darknet_detections[i].bbox.h/2;
+ detection.w = darknet_detections[i].bbox.w;
+ detection.h = darknet_detections[i].bbox.h;
+ detection.score = score;
+ detection.class_type = class_id;
+ //std::cout << detection.toString() << std::endl;
+
+ detections.push_back(detection);
+ }
+ }
+ //std::cout << std::endl;
+ return detections;
+ }
+} // namespace darknet
+
+///////////////////
+
+void Yolo3DetectorNode::convert_rect_to_image_obj(std::vector< RectClassScore<float> >& in_objects, autoware_msgs::image_obj& out_message, std::string in_class)
+{
+ for (unsigned int i = 0; i < in_objects.size(); ++i)
+ {
+ if ( (in_class == "car"
+ && (in_objects[i].class_type == Yolo3::CAR
+ || in_objects[i].class_type == Yolo3::BUS
+ || in_objects[i].class_type == Yolo3::TRUCK
+ || in_objects[i].class_type == Yolo3::MOTORBIKE
+ )
+ ) || (in_class == "person"
+ && (in_objects[i].class_type == Yolo3::PERSON
+ || in_objects[i].class_type == Yolo3::BICYCLE
+ || in_objects[i].class_type == Yolo3::DOG
+ || in_objects[i].class_type == Yolo3::CAT
+ || in_objects[i].class_type == Yolo3::HORSE
+ )
+ )
+ )
+ {
+ autoware_msgs::image_rect rect;
+
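+ // Detections are in network-input coordinates; remove the letterbox border
+ // and divide by the resize ratio to get back to the original image frame.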
+ rect.x = (in_objects[i].x /image_ratio_) - image_left_right_border_/image_ratio_;
+ rect.y = (in_objects[i].y /image_ratio_) - image_top_bottom_border_/image_ratio_;
+ rect.width = in_objects[i].w /image_ratio_;
+ rect.height = in_objects[i].h /image_ratio_;
+ if (in_objects[i].x < 0)
+ rect.x = 0;
+ if (in_objects[i].y < 0)
+ rect.y = 0;
+ if (in_objects[i].w < 0)
+ rect.width = 0;
+ if (in_objects[i].h < 0)
+ rect.height = 0;
+
+ rect.score = in_objects[i].score;
+
+ //std::cout << "x "<< rect.x<< " y " << rect.y << " w "<< rect.width << " h "<< rect.height<< " s " << rect.score << " c " << in_objects[i].class_type << std::endl;
+
+ out_message.obj.push_back(rect);
+
+ }
+ }
+}
+
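+// Swap the first and third channel planes (BGR <-> RGB) of a planar darknet image.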
+void Yolo3DetectorNode::rgbgr_image(image& im)
+{
+ int i;
+ for(i = 0; i < im.w*im.h; ++i)
+ {
+ float swap = im.data[i];
+ im.data[i] = im.data[i+im.w*im.h*2];
+ im.data[i+im.w*im.h*2] = swap;
+ }
+}
+
+image Yolo3DetectorNode::convert_ipl_to_image(const sensor_msgs::ImageConstPtr& msg)
+{
+ cv_bridge::CvImagePtr cv_image = cv_bridge::toCvCopy(msg, "bgr8");//toCvCopy(image_source, sensor_msgs::image_encodings::BGR8);
+ cv::Mat mat_image = cv_image->image;
+
+ uint32_t network_input_width = yolo_detector_.get_network_width();
+ uint32_t network_input_height = yolo_detector_.get_network_height();
+
+ uint32_t image_height = msg->height,
+ image_width = msg->width;
+
+ IplImage ipl_image;
+ cv::Mat final_mat;
+
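+ // If the camera frame size differs from the network input, scale it to fit
+ // (keeping the aspect ratio) and pad the remaining area with a black border.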
+ if (network_input_width!=image_width
+ || network_input_height != image_height)
+ {
+ //final_mat = cv::Mat(network_input_width, network_input_height, CV_8UC3, cv::Scalar(0,0,0));
+ image_ratio_ = (double ) network_input_width / (double)mat_image.cols;
+
+ cv::resize(mat_image, final_mat, cv::Size(), image_ratio_, image_ratio_);
+ image_top_bottom_border_ = abs(final_mat.rows-network_input_height)/2;
+ image_left_right_border_ = abs(final_mat.cols-network_input_width)/2;
+ cv::copyMakeBorder(final_mat, final_mat,
+ image_top_bottom_border_, image_top_bottom_border_,
+ image_left_right_border_, image_left_right_border_,
+ cv::BORDER_CONSTANT, cv::Scalar(0,0,0));
+
+ }
+ else
+ final_mat = mat_image;
+
+ ipl_image = final_mat;
+
+ unsigned char *data = (unsigned char *)ipl_image.imageData;
+ int h = ipl_image.height;
+ int w = ipl_image.width;
+ int c = ipl_image.nChannels;
+ int step = ipl_image.widthStep;
+ int i, j, k;
+
+ image darknet_image = make_image(w, h, c);
+
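+ // Convert interleaved 8-bit BGR rows into darknet's planar float layout,
+ // normalizing pixel values to [0, 1].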
+ for(i = 0; i < h; ++i){
+ for(k= 0; k < c; ++k){
+ for(j = 0; j < w; ++j){
+ darknet_image.data[k*w*h + i*w + j] = data[i*step + j*c + k]/255.;
+ }
+ }
+ }
+ rgbgr_image(darknet_image);
+ return darknet_image;
+}
+
+void Yolo3DetectorNode::image_callback(const sensor_msgs::ImageConstPtr& in_image_message)
+{
+ std::vector< RectClassScore<float> > detections;
+
+ darknet_image_ = convert_ipl_to_image(in_image_message);
+
+ detections = yolo_detector_.detect(darknet_image_);
+
+ //Prepare Output message
+ autoware_msgs::image_obj output_car_message;
+ autoware_msgs::image_obj output_person_message;
+ output_car_message.header = in_image_message->header;
+ output_car_message.type = "car";
+
+ output_person_message.header = in_image_message->header;
+ output_person_message.type = "person";
+
+ convert_rect_to_image_obj(detections, output_car_message, "car");
+ convert_rect_to_image_obj(detections, output_person_message, "person");
+
+ publisher_car_objects_.publish(output_car_message);
+ publisher_person_objects_.publish(output_person_message);
+
+ free(darknet_image_.data);
+}
+
+void Yolo3DetectorNode::config_cb(const autoware_msgs::ConfigSsd::ConstPtr& param)
+{
+ score_threshold_ = param->score_threshold;
+}
+
+
+void Yolo3DetectorNode::Run()
+{
+ //ROS STUFF
+ ros::NodeHandle private_node_handle("~");//to receive args
+
+ //RECEIVE IMAGE TOPIC NAME
+ std::string image_raw_topic_str;
+ if (private_node_handle.getParam("image_raw_node", image_raw_topic_str))
+ {
+ ROS_INFO("Setting image node to %s", image_raw_topic_str.c_str());
+ }
+ else
+ {
+ ROS_INFO("No image node received, defaulting to /image_raw, you can use _image_raw_node:=YOUR_TOPIC");
+ image_raw_topic_str = "/image_raw";
+ }
+
+ std::string network_definition_file;
+ std::string pretrained_model_file;
+ if (private_node_handle.getParam("network_definition_file", network_definition_file))
+ {
+ ROS_INFO("Network Definition File (Config): %s", network_definition_file.c_str());
+ }
+ else
+ {
+ ROS_INFO("No Network Definition File was received. Finishing execution.");
+ return;
+ }
+ if (private_node_handle.getParam("pretrained_model_file", pretrained_model_file))
+ {
+ ROS_INFO("Pretrained Model File (Weights): %s", pretrained_model_file.c_str());
+ }
+ else
+ {
+ ROS_INFO("No Pretrained Model File was received. Finishing execution.");
+ return;
+ }
+
+ private_node_handle.param("score_threshold", score_threshold_, 0.5);
+ ROS_INFO("[%s] score_threshold: %f",__APP_NAME__, score_threshold_);
+
+ private_node_handle.param("nms_threshold", nms_threshold_, 0.45);
+ ROS_INFO("[%s] nms_threshold: %f",__APP_NAME__, nms_threshold_);
+
+
+ ROS_INFO("Initializing Yolo3 on Darknet...");
+ yolo_detector_.load(network_definition_file, pretrained_model_file, score_threshold_, nms_threshold_);
+ ROS_INFO("Initialization complete.");
+
+ publisher_car_objects_ = node_handle_.advertise<autoware_msgs::image_obj>("/obj_car/image_obj", 1);
+ publisher_person_objects_ = node_handle_.advertise<autoware_msgs::image_obj>("/obj_person/image_obj", 1);
+
+ ROS_INFO("Subscribing to... %s", image_raw_topic_str.c_str());
+ subscriber_image_raw_ = node_handle_.subscribe(image_raw_topic_str, 1, &Yolo3DetectorNode::image_callback, this);
+
+ std::string config_topic("/config");
+ config_topic += "/Yolo3";
+ subscriber_yolo_config_ = node_handle_.subscribe(config_topic, 1, &Yolo3DetectorNode::config_cb, this);
+
+ ROS_INFO_STREAM( __APP_NAME__ << "" );
+
+ ros::spin();
+ ROS_INFO("END Yolo3");
+
+}
diff --git a/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.h b/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.h
new file mode 100644
index 00000000000..8420591819d
--- /dev/null
+++ b/ros/src/computing/perception/detection/packages/yolo3_detector/src/yolo3_detector.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018, Nagoya University
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Autoware nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ********************
+ * v1.0: amc-nu (abrahammonrroy@yahoo.com)
+ *
+ * yolo3_node.cpp
+ *
+ * Created on: April 4th, 2018
+ */
+#ifndef DARKNET_YOLO3_H
+#define DARKNET_YOLO3_H
+
+#define __APP_NAME__ "yolo3_detector"
+
+#include
+#include
+#include