Merge pull request #95 from sony/fix/20180914-misspelling
Fix misspell
TakuyaNarihira authored Sep 21, 2018
2 parents d8fc000 + a281f44 commit 7473704
Showing 38 changed files with 74 additions and 74 deletions.
4 changes: 2 additions & 2 deletions CMakeLists.txt
@@ -95,7 +95,7 @@ if(BUILD_CPP_LIB)

if (NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE "Release" CACHE STRING
"Build type release is default on single-configuration build system like GnuMake."
"Build type release is default on single-configuration build system like GNU make."
FORCE)
endif()

@@ -192,7 +192,7 @@ if(BUILD_CPP_LIB)
message(FATAL_ERROR
"Python build_ext compiler inference is only supported on Win, Unix or Apple.")
endif()
message("Python build_ext compiler is infered as '${NBLA_PYTHON_BUILD_EXT_COMPILER}'.")
message("Python build_ext compiler is inferred as '${NBLA_PYTHON_BUILD_EXT_COMPILER}'.")
message("You can specify a compiler manually setting a variable"
" NBLA_PYTHON_BUILD_EXT_COMPILER. You can see a list of supported"
" compiler by `python setup.py build_ext --help-compiler`.")
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -5,4 +5,4 @@ CUDA extension follows [the instruction found in NNabla](https://github.com/sony
## Development Guide

* [Adding a new function (layer implementation)](doc/contributing/add_function.md).
-* [Adding a new solver (gradient descent algorighm implemenation)](doc/contributing/add_solver.md).
+* [Adding a new solver (gradient descent algorithm implementation)](doc/contributing/add_solver.md).
10 changes: 5 additions & 5 deletions build-tools/code_generator/generate.py
@@ -78,7 +78,7 @@ def generate():
base, 'python/src/nnabla_ext/cudnn/_version.py.tmpl'),
rootdir=base)

-# Generate function skeltons
+# Generate function skeletons
func_src_template = join(
base,
'src/nbla/cuda/function/generic/function_impl.cu.tmpl')
@@ -91,16 +91,16 @@ def generate():
func_header_template_cudnn = join(
base,
'include/nbla/cuda/cudnn/function/function_impl.hpp.tmpl')
-utils.generate_skelton_function_impl(
+utils.generate_skeleton_function_impl(
function_info, function_types, ext_info={},
template=func_src_template, output_format='%s.cu')
-utils.generate_skelton_function_impl(
+utils.generate_skeleton_function_impl(
function_info, function_types, ext_info={},
template=func_header_template, output_format='%s.hpp')
-utils.generate_skelton_function_impl(
+utils.generate_skeleton_function_impl(
function_info, function_types_cudnn, ext_info={},
template=func_src_template_cudnn, output_format='%s.cu')
-utils.generate_skelton_function_impl(
+utils.generate_skeleton_function_impl(
function_info, function_types_cudnn, ext_info={},
template=func_header_template_cudnn, output_format='%s.hpp')

12 changes: 6 additions & 6 deletions build-tools/make/build.mk
@@ -40,7 +40,7 @@ nnabla-ext-cuda-clean-all:

########################################################################################################################
# cpplib
-.PHNOY: nnabla-ext-cuda-cpplib
+.PHONY: nnabla-ext-cuda-cpplib
nnabla-ext-cuda-cpplib:
mkdir -p $(BUILD_EXT_CUDA_DIRECTORY_CPPLIB)
cd $(BUILD_EXT_CUDA_DIRECTORY_CPPLIB) \
@@ -57,7 +57,7 @@ nnabla-ext-cuda-cpplib:
$(NNABLA_EXT_CUDA_DIRECTORY)
$(MAKE) -C $(BUILD_EXT_CUDA_DIRECTORY_CPPLIB) -j$(PARALLEL_BUILD_NUM)

-.PHNOY: nnabla-ext-cuda-cpplib-multi-gpu
+.PHONY: nnabla-ext-cuda-cpplib-multi-gpu
nnabla-ext-cuda-cpplib-multi-gpu:
mkdir -p $(BUILD_EXT_CUDA_DIRECTORY_CPPLIB_MULTI_GPU)
cd $(BUILD_EXT_CUDA_DIRECTORY_CPPLIB_MULTI_GPU) \
@@ -77,15 +77,15 @@ nnabla-ext-cuda-cpplib-multi-gpu:

########################################################################################################################
# wheel
-.PHNOY: nnabla-ext-cuda-wheel
+.PHONY: nnabla-ext-cuda-wheel
nnabla-ext-cuda-wheel:
$(call with-virtualenv, \
$(NNABLA_EXT_CUDA_DIRECTORY), \
$(BUILD_EXT_CUDA_DIRECTORY_WHEEL)/env, \
-f build-tools/make/build.mk, \
nnabla-ext-cuda-wheel-local)

-.PHNOY: nnabla-ext-cuda-wheel-local
+.PHONY: nnabla-ext-cuda-wheel-local
nnabla-ext-cuda-wheel-local: nnabla-install \
$(BUILD_DIRECTORY_CPPLIB)/lib/libnnabla.so \
$(BUILD_EXT_CUDA_DIRECTORY_CPPLIB)/lib/libnnabla_cuda.so
@@ -105,7 +105,7 @@ nnabla-ext-cuda-wheel-local: nnabla-install \
$(NNABLA_EXT_CUDA_DIRECTORY) \
&& $(MAKE) -C $(BUILD_EXT_CUDA_DIRECTORY_WHEEL) wheel

-.PHNOY: nnabla-ext-cuda-wheel-multi-gpu
+.PHONY: nnabla-ext-cuda-wheel-multi-gpu
nnabla-ext-cuda-wheel-multi-gpu: \
nnabla-cpplib \
nnabla-wheel \
@@ -161,7 +161,7 @@ nnabla-ext-cuda-test-local: nnabla-install nnabla-ext-cuda-install
&& PYTHONPATH=$(NNABLA_EXT_CUDA_DIRECTORY)/python/test \
python -m pytest $(NNABLA_DIRECTORY)/python/test

-.PHNOY: nnabla-ext-cuda-multi-gpu-test-local
+.PHONY: nnabla-ext-cuda-multi-gpu-test-local
nnabla-ext-cuda-multi-gpu-test-local: nnabla-ext-cuda-multi-gpu-install
cd $(BUILD_EXT_CUDA_DIRECTORY_WHEEL_MULTI_GPU) \
&& PYTHONPATH=$(NNABLA_EXT_CUDA_DIRECTORY)/python/test:$(NNABLA_DIRECTORY)/python/test \
4 changes: 2 additions & 2 deletions doc/build/build.md
@@ -7,9 +7,9 @@ This document shows how to install CUDA extension on Ubuntu 16.04 LTS. This proc

## Prerequisites

-In addition to NNabla's requirements, CUDA extension requires CUDA setup has done on your system. If you don't have CUDA on your system, follow the procedure desribed below.
+In addition to NNabla's requirements, CUDA extension requires CUDA setup has done on your system. If you don't have CUDA on your system, follow the procedure described below.

-Download and install CUDA and cuDNN library (both runtime library and developement library). Please follow the instruction in the document provided by NVIDIA. Do NOT see any instruction provided by any third party. They are often incorrect or based on old instructions, that could destroy your system.
+Download and install CUDA and cuDNN library (both runtime library and development library). Please follow the instruction in the document provided by NVIDIA. Do NOT see any instruction provided by any third party. They are often incorrect or based on old instructions, that could destroy your system.

* [CUDA toolkit](https://developer.nvidia.com/cuda-downloads)
* [cuDNN library](https://developer.nvidia.com/rdp/cudnn-download) (Registration required)
8 changes: 4 additions & 4 deletions doc/build/build_distributed.md
@@ -15,7 +15,7 @@ In addition to [requirements of NNabla without distributed execution](build.md),
In order to use the distributed training, the only difference, when building, is
the procedure described here.

-Download `nccl <https://developer.nvidia.com/nccl/nccl-download>`_ according to your environemnt,
+Download `nccl <https://developer.nvidia.com/nccl/nccl-download>`_ according to your environment,
then install it manually in case of ubuntu16.04,

```shell
@@ -25,13 +25,13 @@ sudo apt-get install libnccl2 libnccl-dev
```

For developer, if you want to use another nccl not publicly distributed,
-specify **NCCL_HOME** environment variable as the folloing.
+specify **NCCL_HOME** environment variable as the following.

```shell
export NCCL_HOME=${path}/build
```

-Here, we assume the directry structure,
+Here, we assume the directory structure,

* ${path}/build/include
* ${path}/build/lib
@@ -66,7 +66,7 @@ CUDA includes: /usr/local/cuda-8.0/include;/usr/lib/openmpi/include/openmpi/opal
## Unit test


-Follow the unit test section in [Build CUDA extension](build.md). Now you could see the communicater
+Follow the unit test section in [Build CUDA extension](build.md). Now you could see the communicator
test passed.

```
4 changes: 2 additions & 2 deletions doc/build/build_windows.md
@@ -2,10 +2,10 @@

## Prerequisites

-In addition to NNabla's requirements, CUDA extension requires CUDA setup has done on your system. If you don't have CUDA on your system, follow the procedure desribed below.
+In addition to NNabla's requirements, CUDA extension requires CUDA setup has done on your system. If you don't have CUDA on your system, follow the procedure described below.


-Download and install CUDA and cuDNN library (both runtime library and developement library). Please follow the instruction in the document provided by NVIDIA. Do NOT see any instruction provided by any third party. They are often incorrect or based on old instructions, that could destroy your system.
+Download and install CUDA and cuDNN library (both runtime library and development library). Please follow the instruction in the document provided by NVIDIA. Do NOT see any instruction provided by any third party. They are often incorrect or based on old instructions, that could destroy your system.

* [CUDA toolkit](https://developer.nvidia.com/cuda-downloads)
* [cuDNN library](https://developer.nvidia.com/rdp/cudnn-download) (Registration required)
2 changes: 1 addition & 1 deletion doc/build/quick_build_tools.md
@@ -47,7 +47,7 @@ Install CUDA8.0, CUDA9.0, CUDA9.1 from following site.
- https://developer.nvidia.com/cuda-toolkit-archive


-Get several versions of cuDNN from following site. (Registration requried)
+Get several versions of cuDNN from following site. (Registration required)
- cuDNN
- https://developer.nvidia.com/rdp/cudnn-download

2 changes: 1 addition & 1 deletion docker/README.md
@@ -38,6 +38,6 @@ nvidia-docker run -it --rm -p 8888:8888 nnabla/nnabla-ext-cuda:tutorial jupyter
You can connect the jupyter server with your browser by accessing
`http://<Host OS address>:8888`. The login password is `nnabla`.

-After logging in, the page lists a directory that contains jupyter `.ipynb` tutorials and the `nnabla-examples/` foler.
+After logging in, the page lists a directory that contains jupyter `.ipynb` tutorials and the `nnabla-examples/` folder.
You can open any tutorial by clicking a `.ipynb` file.
A DCGAN in `nnabla-examples` is demonstrated in `run-nnabla-examples.ipynb`.
2 changes: 1 addition & 1 deletion docker/tutorial/run-nnabla-examples.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"The following command executes training of Deep Convolutional GAN on MNIST dataset. It takes a couple of minites on an average GPU."
"The following command executes training of Deep Convolutional GAN on MNIST dataset. It takes a couple of minutes on an average GPU."
]
},
{
6 changes: 3 additions & 3 deletions examples/cpp/mnist_training/README.md
@@ -17,7 +17,7 @@ If you tried the CPU version of this script, you must have downloaded MNIST data

Please copy them to this directory.

-## Create NNP file of an initialized model for MNIST classifcation.
+## Create NNP file of an initialized model for MNIST classification.
You might also have an NNP file of the initialized model in `nnabla/examples/cpp/mnist_training`.
Please copy it to this directory.

@@ -31,9 +31,9 @@ export NNABLA_DIR='path to your nnabla directory'
make
```

-The above command generates an executable `mnist_training_cuda` at the current directry.
+The above command generates an executable `mnist_training_cuda` at the current directory.

-The build file `GNUMakefile` is simple.
+The build file `GNUmakefile` is simple.
It links `libnnabla.so`, `libnnabla_utils.so`, `libnnabla_utils.so` and `libz.so` with the executable generated from `main.cpp`, and compiles with C++11 option `-std=c++11`.
It also needs to include path to `mnist_training.hpp` which is located in `nnabla/examples/cpp/mnist_training` directory.

4 changes: 2 additions & 2 deletions include/nbla/cuda/array/cuda_array.hpp
@@ -31,7 +31,7 @@ using std::shared_ptr;
class CudaArray : public Array {
protected:
int device_;
-/* Holding CudaMemory until the instance is destoryed to prevent freeing.
+/* Holding CudaMemory until the instance is destroyed to prevent freeing.
*/
shared_ptr<CudaMemory> inuse_memory_;

@@ -54,7 +54,7 @@ NBLA_CUDA_API void synchronizer_cpu_array_cuda_array(Array *src, Array *dst);

/** Array allocated on CUDA device with Memory Pool
-This is a necessary ingredient for imperative programing interface of
+This is a necessary ingredient for imperative programming interface of
neural networks (aka define-by-run or dynamic). Memory allocation of
CUDA is not asynchronous. Hence, allocating memory region between each
function will lead thread synchronization that will block executions of
2 changes: 1 addition & 1 deletion include/nbla/cuda/common.hpp
@@ -162,7 +162,7 @@ CUBLAS_TYPE_T(HalfCuda, HALF);
/** Block size */
#define NBLA_CUDA_GET_BLOCKS(num) NBLA_CEIL_INT_DIV(num, NBLA_CUDA_NUM_THREADS)

-/** Get an appropreate block size given a size of elements.
+/** Get an appropriate block size given a size of elements.
The kernel is assumed to contain a grid-strided loop.
*/
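
For reference, a grid-strided loop lets a fixed-size grid cover any number of elements; a minimal sketch follows (hypothetical kernel, not part of this commit):

```cuda
// Hypothetical kernel illustrating a grid-strided loop: each thread starts
// at its global index and strides by the total number of launched threads,
// so any block count produced by a macro like NBLA_CUDA_GET_BLOCKS works.
__global__ void kernel_scale(const int n, const float alpha, float *x) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    x[i] *= alpha;
  }
}
```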
@@ -127,7 +127,7 @@ class NBLA_API DataParallelCommunicatorNccl
protected:
void wait_by_devices_synchronization();
void wait_by_streams_synchronization();
-void divide_by_num_divices(bool division);
+void divide_by_num_devices(bool division);

DISABLE_COPY_AND_ASSIGN(DataParallelCommunicatorNccl);
};
@@ -215,7 +215,7 @@ class NBLA_API MultiProcessDataParallelCommunicatorNccl

void wait_by_device_synchronization();
void wait_by_streams_synchronization();
-void divide_by_num_divices(bool division);
+void divide_by_num_devices(bool division);

DISABLE_COPY_AND_ASSIGN(MultiProcessDataParallelCommunicatorNccl);
};
8 changes: 4 additions & 4 deletions include/nbla/cuda/cudnn/cudnn.hpp
@@ -62,7 +62,7 @@ template <> class cudnn_data_type<HalfCuda> {
static cudnnDataType_t type() { return CUDNN_DATA_HALF; }
};

-/** Convret cuDNN enum dtype to NNabla enum dtype.
+/** Convert cuDNN enum dtype to NNabla enum dtype.
*/
inline dtypes get_dtype_by_cudnn_data_type(cudnnDataType_t dtype) {
switch (dtype) {
@@ -135,7 +135,7 @@ inline string cudnn_status_to_string(cudnnStatus_t status) {
http://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnSetTensorNdDescriptor
-According to the doc above, cudnnSetTensorNdDescriptor does not suport a tensor
+According to the doc above, cudnnSetTensorNdDescriptor does not support a tensor
less than 4 dimensions. This wrapper function adds unused dimensions with a
value of 1 at last.
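
The padding described above amounts to appending size-1 dimensions until the rank reaches the cuDNN minimum; a minimal sketch (the helper name and signature here are hypothetical, not the repo's wrapper):

```cuda
#include <vector>

// Hypothetical helper: pad a tensor shape to the 4-dimension minimum that
// cudnnSetTensorNdDescriptor requires by appending 1s at the end.
std::vector<int> pad_dims_to_4d(std::vector<int> dims) {
  while (dims.size() < 4) {
    dims.push_back(1); // Unused trailing dimensions of size 1.
  }
  return dims;
}
```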
@@ -203,7 +203,7 @@ struct NBLA_CUDA_API CudnnConvResource {
cudnnTensorDescriptor_t y_desc; ///< Output desc.
cudnnTensorDescriptor_t b_desc; ///< Bias desc.
cudnnTensorDescriptor_t b_desc_deconv; ///< Bias desc for deconvolution.
-cudnnFilterDescriptor_t w_desc; ///< Wegiht desc.
+cudnnFilterDescriptor_t w_desc; ///< Weight desc.
cudnnConvolutionDescriptor_t conv_desc; ///< Conv desc.
cudnnConvolutionFwdAlgo_t fwd_algo; ///< Best forward algorithm found.
cudnnConvolutionBwdFilterAlgo_t
@@ -236,7 +236,7 @@ class NBLA_CUDA_API CudnnHandleManager {
~CudnnHandleManager();

/**
-Get cuDNN handle for devive.
+Get cuDNN handle for device.
*/
cudnnHandle_t handle(int device = -1);

2 changes: 1 addition & 1 deletion include/nbla/cuda/cudnn/function/convolution.hpp
@@ -39,7 +39,7 @@ template <typename T> class ConvolutionCudaCudnn : public Convolution<T> {
// NOTE: dilation > 1 is not supported by cudnn. (2016.10.19)
for (int i = 0; i < dilation.size(); ++i) {
if (dilation[i] > 1) {
-// Fall back to origianl CUDA implementation if dilation > 1.
+// Fall back to original CUDA implementation if dilation > 1.
// Setting fall_back_func_ overwrites behaviors of setup, forward and
// backward functions by the specified function class instance.
std::cout << "Falling back to ConvolutionCuda since dilation > 1 is "
2 changes: 1 addition & 1 deletion include/nbla/cuda/cudnn/function/deconvolution.hpp
@@ -42,7 +42,7 @@ template <typename T> class DeconvolutionCudaCudnn : public Deconvolution<T> {
// NOTE: dilation > 1 is not supported by cudnn. (2016.10.19)
for (int i = 0; i < dilation.size(); ++i) {
if (dilation[i] > 1) {
-// Fall back to origianl CUDA implementation if dilation > 1.
+// Fall back to original CUDA implementation if dilation > 1.
// Setting fall_back_func_ overwrites behaviors of setup, forward and
// backward functions by the specified function class instance.
std::cout << "Falling back to DeconvolutionCuda since dilation > 1 is "
4 changes: 2 additions & 2 deletions include/nbla/cuda/cudnn/function/function_impl.hpp.tmpl
@@ -38,7 +38,7 @@ template <${dec_targs}> class ${name}CudaCudnn : public ${name}<${targs}> {
public:
/* TODO: remove this help message.
Typedef of CUDA scalar types used in source file.
-This template function class might be instanciated for each CPU scalar types
+This template function class might be instantiated for each CPU scalar types
(double, float, nbla::Half), however, for Half, CUDA kernel functions
must use nbla::HalfCuda in which a bunch of device operator functions are
overloaded. nbla::CudaType<T>::type will translate nbla::Half
@@ -67,7 +67,7 @@ public:
% for oname in outputs.keys():
NBLA_CUDNN_CHECK(cudnnDestroyTensorDescriptor(${oname}_desc_));
% endfor
-// TODO: Destoy other descriptors
+// TODO: Destroy other descriptors
}
virtual string name() { return "${name}CudaCudnn"; }
virtual vector<string> allowed_array_classes() {
2 changes: 1 addition & 1 deletion include/nbla/cuda/cudnn/function/softmax.hpp
@@ -24,7 +24,7 @@ namespace nbla {

/** @copydoc Softmax
-@note The default algrithm is set as ACCURATE. TODO: Set an algorithm by
+@note The default algorithm is set as ACCURATE. TODO: Set an algorithm by
context.
*/
template <typename T> class SoftmaxCudaCudnn : public Softmax<T> {
2 changes: 1 addition & 1 deletion include/nbla/cuda/function/function_impl.hpp.tmpl
@@ -32,7 +32,7 @@ template <${dec_targs}> class ${name}Cuda : public ${name}<${targs}> {
public:
/* TODO: remove this help message.
Typedef of CUDA scalar types used in source file.
-This template function class might be instanciated for each CPU scalar types
+This template function class might be instantiated for each CPU scalar types
(double, float, nbla::Half), however, for Half, CUDA kernel functions
must use nbla::HalfCuda in which a bunch of device operator functions are
overloaded. nbla::CudaType<T>::type will translate nbla::Half
2 changes: 1 addition & 1 deletion include/nbla/cuda/function/interpolate.hpp
@@ -24,7 +24,7 @@ template <typename T> class InterpolateCuda : public Interpolate<T> {
public:
/* TODO: remove this help message.
Typedef of CUDA scalar types used in source file.
-This template function class might be instanciated for each CPU scalar types
+This template function class might be instantiated for each CPU scalar types
(double, float, nbla::Half), however, for Half, CUDA kernel functions
must use nbla::HalfCuda in which a bunch of device operator functions are
overloaded. nbla::CudaType<T>::type will translate nbla::Half
4 changes: 2 additions & 2 deletions include/nbla/cuda/utils/device_reduce.cuh
@@ -21,10 +21,10 @@

namespace nbla {

-/** Geric block-wise reduction kernel.
+/** Generic block-wise reduction kernel.
@param[in] N Number of valid input items.
-@param[in,out] op Reduciton operator class. TODO: doc.
+@param[in,out] op Reduction operator class. TODO: doc.
*/
template <class ReduceOp>
__global__ void kernel_reduce_per_block(const int N, ReduceOp op,
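
For context, a block-wise reduction typically combines a grid-strided accumulation with a shared-memory tree reduction. Below is a sum-only sketch; it assumes a power-of-two block size and is not the generic ReduceOp kernel above:

```cuda
// Illustrative sum-only block reduction. Assumes blockDim.x is a power of
// two; launch with dynamic shared memory of blockDim.x * sizeof(float).
__global__ void kernel_block_sum(const int n, const float *in, float *out) {
  extern __shared__ float buf[];
  // Grid-strided accumulation into a per-thread partial sum.
  float v = 0.0f;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    v += in[i];
  }
  buf[threadIdx.x] = v;
  __syncthreads();
  // Tree reduction within the block in shared memory.
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (threadIdx.x < s) {
      buf[threadIdx.x] += buf[threadIdx.x + s];
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    out[blockIdx.x] = buf[0]; // One partial result per block.
  }
}
```

A caller would then reduce the per-block partials in a second pass or on the host.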
2 changes: 1 addition & 1 deletion python/setup.py
@@ -203,7 +203,7 @@ def get_setup_config(root_dir):
version=__version__,
author_email=__email__,
url="https://github.com/sony/nnabla-ext-cuda",
-license='Apache Licence 2.0',
+license='Apache License 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
4 changes: 2 additions & 2 deletions python/src/nnabla_ext/cuda/init.pyx
@@ -44,7 +44,7 @@ def clear_memory_cache():


###############################################################################
-# Array pereference API
+# Array preference API
# TODO: Move these to C++
###############################################################################
_original_array_classes = cuda_array_classes()
@@ -94,7 +94,7 @@ def device_synchronize(str device):
def get_device_count():
"""Call ``cudaGetDeviceCount`` in runtime API`.
-Retuns:
+Returns:
int: Number of devices available.
"""
Expand Down