raver119 7783012f39
cuDNN integration (#150)
* initial commit

Signed-off-by: raver119 <raver119@gmail.com>

* one file

Signed-off-by: raver119 <raver119@gmail.com>

* few more includes

Signed-off-by: raver119 <raver119@gmail.com>

* m?

Signed-off-by: raver119 <raver119@gmail.com>

* const

Signed-off-by: raver119 <raver119@gmail.com>

* cudnn linkage in tests

Signed-off-by: raver119 <raver119@gmail.com>

* culibos

Signed-off-by: raver119 <raver119@gmail.com>

* static reminder

Signed-off-by: raver119 <raver119@gmail.com>

* platform engine tag

Signed-off-by: raver119 <raver119@gmail.com>

* HAVE_CUDNN moved to config.h.in

Signed-off-by: raver119 <raver119@gmail.com>

* include

Signed-off-by: raver119 <raver119@gmail.com>

* include

Signed-off-by: raver119 <raver119@gmail.com>

* skip cudnn handle creation if there's no cudnn

Signed-off-by: raver119 <raver119@gmail.com>

* meh

Signed-off-by: raver119 <raver119@gmail.com>

* target device in context

Signed-off-by: raver119 <raver119@gmail.com>

* platform engines

Signed-off-by: raver119 <raver119@gmail.com>

* platform engines

Signed-off-by: raver119 <raver119@gmail.com>

* allow multiple -h args

Signed-off-by: raver119 <raver119@gmail.com>

* allow multiple -h args

Signed-off-by: raver119 <raver119@gmail.com>

* move mkldnn out of CPU block

Signed-off-by: raver119 <raver119@gmail.com>

* link to mkldnn on cuda

Signed-off-by: raver119 <raver119@gmail.com>

* less prints

Signed-off-by: raver119 <raver119@gmail.com>

* minor tweaks

Signed-off-by: raver119 <raver119@gmail.com>

* next step

Signed-off-by: raver119 <raver119@gmail.com>

* conv2d NCHW draft

Signed-off-by: raver119 <raver119@gmail.com>

* conv2d biasAdd

Signed-off-by: raver119 <raver119@gmail.com>

* test for MKL/CUDNN combined use

Signed-off-by: raver119 <raver119@gmail.com>

* - provide additional code for conv2d ff based on cudnn api, not tested yet

Signed-off-by: Yurii <iuriish@yahoo.com>

* - further work on conv2d helper based on using cudnn api

Signed-off-by: Yurii <iuriish@yahoo.com>

* - fixing several cuda bugs which appeared after cudnn lib had been started to use

Signed-off-by: Yurii <iuriish@yahoo.com>

* - implementation of conv2d backprop op based on cudnn api

Signed-off-by: Yurii <iuriish@yahoo.com>

* - implementation of conv3d and conv3d_bp ops based on cudnn api

Signed-off-by: Yurii <iuriish@yahoo.com>

* - bugs fixing in conv3d/conv3d_bp ops (cudnn in use)

Signed-off-by: Yurii <iuriish@yahoo.com>

* - implementation of depthwiseConv2d (ff/bp) op based on cudnn api

Signed-off-by: Yurii <iuriish@yahoo.com>

* - implementation of batchnorm ff op based on cudnn api

Signed-off-by: Yurii <iuriish@yahoo.com>

* - disable cudnn batchnorm temporary

Signed-off-by: Yurii <iuriish@yahoo.com>

* - add minor change in cmake

Signed-off-by: Yurii <iuriish@yahoo.com>

* engine for depthwise mkldnn

Signed-off-by: raver119 <raver119@gmail.com>

* couple of includes

Signed-off-by: raver119 <raver119@gmail.com>

* - provide permutation to cudnn batchnorm ff when format is NHWC

Signed-off-by: Yurii <iuriish@yahoo.com>

* lgamma fix

Signed-off-by: raver119 <raver119@gmail.com>

* - eliminate memory leak in two tests

Signed-off-by: Yurii <iuriish@yahoo.com>

Co-authored-by: Yurii Shyrma <iuriish@yahoo.com>
2020-01-20 21:32:46 +03:00

217 lines
7.5 KiB
C++

/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author raver119@gmail.com
//
#ifndef LIBND4J_CONTEXT_H
#define LIBND4J_CONTEXT_H
#include <vector>
#include <NDArray.h>
#include <graph/Variable.h>
#include <graph/VariableSpace.h>
#include <graph/ContextPrototype.h>
#include <memory/Workspace.h>
#include <execution/Engine.h>
// CUDA-specific includes
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <cuda_device_runtime_api.h>
#endif
namespace nd4j {
namespace graph {
/**
 * This class defines input desired for any given node/operation within graph
 *
 * Context extends ContextPrototype with runtime execution state: the
 * VariableSpace the node reads inputs from and writes outputs to, optional
 * workspaces for temporary allocations, RNG state, execution timing, and
 * "fastpath" input/output arrays used when ops are executed standalone
 * (out-of-graph, e.g. via the Java interop layer).
 */
class ND4J_EXPORT Context : public nd4j::graph::ContextPrototype {
protected:
// workspace used for temporary allocations; attach/forget API below suggests it is externally owned — TODO confirm
nd4j::memory::Workspace* _workspace = nullptr;
// variable space backing this node's inputs/outputs (set via setVariableSpace)
nd4j::graph::VariableSpace* _variableSpace = nullptr;
// (outer, inner) execution times recorded via setOuterTime/setInnerTime
std::pair<Nd4jLong, Nd4jLong> _executionTime;
// RNG state for this node, set via setRNG
nd4j::random::RandomBuffer* _rng = nullptr;
// default data type reported by dataType() when no per-index type applies — TODO confirm against Context.cpp
nd4j::DataType _dataType = nd4j::DataType::FLOAT32;
// branch for divergent_op
int _branch = 0;
// temporary context for standalone ops execution
LaunchContext* _context = nullptr;
// per-index data types, managed via setDataType(index, type)
std::vector<nd4j::DataType> _dataTypes;
// fields for fast execution (out-of-graph ops use)
std::vector<NDArray*> _fastpath_in;
std::vector<NDArray*> _fastpath_out;
// arrays registered through setInputArray/setOutputArray with removable=true;
// presumably released by the destructor — verify against Context.cpp
std::vector<NDArray*> _handles;
// when false, platform helpers (cuDNN/MKL-DNN) are skipped for this node
bool _helpersAllowed = true;
// in some cases we might be able to skip shape function for validation purposes
bool _shapeFunctionOverride = false;
public:
// builds a Context from a serialized/preconfigured prototype plus a variable space
Context(ContextPrototype* prototype, VariableSpace* variableSpace);
explicit Context(int nodeId, VariableSpace *variableSpace = nullptr);
Context(int nodeId, VariableSpace *variableSpace, bool isInplace);
// default destructor
~Context();
// these methods are for execution timing
void setOuterTime(Nd4jLong time);
void setInnerTime(Nd4jLong time);
Nd4jLong getOuterTime();
Nd4jLong getInnerTime();
// data type accessors; the int overload addresses a specific input index
nd4j::DataType dataType() override;
nd4j::DataType dataType(int index) override;
void setDataType(int index, nd4j::DataType type) override;
// these methods are related to Workspace abstraction
bool hasWorkspaceProvided();
void attachWorkspace(nd4j::memory::Workspace* workspace);
void forgetWorkspace();
// these methods return full-time workspace
nd4j::memory::Workspace* getWorkspace();
nd4j::memory::Workspace* workspace();
nd4j::memory::Workspace* fWorkspace();
// this method returns workspace for temporary allocations
nd4j::memory::Workspace* tWorkspace();
// this method returns workspace for object allocations
nd4j::memory::Workspace* oWorkspace();
void setVariableSpace(VariableSpace* variableSpace);
nd4j::random::RandomBuffer* getRNG();
void setRNG(nd4j::random::RandomBuffer* rng);
// selects the execution engine (e.g. CPU vs CUDA) this node should target
void setTargetEngine(samediff::Engine engine);
VariableSpace *getVariableSpace();
// launch context used for standalone op execution (see _context above)
LaunchContext* launchContext();
// these fields define, if we can execute specific node in-place, without generating new array
// these variables are only for Divergent Nodes
int getBranch();
void setBranch(int branch);
/**
 * This method returns the Stash of the underlying VariableSpace
 * @return
 */
Stash* getStash();
/**
 * This method registers an NDArrayList for lifecycle tracking
 */
void trackList(NDArrayList* list);
/**
 * This method returns variable for a given input index for this block
 * @param idx
 * @return
 */
Variable* getVariable(int idx);
Variable* variable(int idx);
/**
 * This method is shortcut to getVariable(int idx);
 *
 * + it check fastpath for array availability (preferred)
 * @return
 */
NDArray* getNDArray(int idx);
NDArray* array(int idx);
/**
 * This method fetches variable from VariableSpace DIRECTLY
 * @param p
 * @return
 */
Variable* variable(int node, int index);
Variable* variable(std::pair<int,int>& p);
Variable* variable(std::initializer_list<int> p);
// publish an array (or list) into the VariableSpace under (nodeId, index);
// removable presumably marks it for cleanup by the space — TODO confirm
void pushNDArrayToVariableSpace(int nodeId, int index, NDArray* array, bool removable = true);
void pushNDArrayToVariableSpace(std::pair<int, int>& pair, NDArray* array, bool removable = true);
void pushNDArrayListToVariableSpace(int nodeId, int index, NDArrayList* list, bool track = true);
void pushNDArrayListToVariableSpace(std::pair<int, int>& pair, NDArrayList* list, bool track = true);
bool isValueAvailable(int idx = 0);
Variable* ensureVariable(int idx = 0);
// number of inputs visible to this context
unsigned long width() override;
// methods used in java interop
/**
 * This method checks, if Context uses fastpath variable access
 * @return
 */
bool isFastPath();
#ifndef __JAVACPP_HACK__
std::vector<NDArray*>& fastpath_in();
std::vector<NDArray*>& fastpath_out();
#endif
// raw-pointer overloads exist for the Java interop layer, which passes
// buffers/shapeInfo as opaque pointers rather than NDArray objects
void setInputArray(int index, NDArray *array, bool removable = false);
void setInputArray(int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo);
void setInputArray(int index, void *databuffer, void *shapeInfo, void *specialShapeInfo);
void setOutputArray(int index, NDArray *array, bool removable = false);
void setOutputArray(int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo);
void setOutputArray(int index, void *databuffer, void *shapeInfo, void *specialShapeInfo);
// op argument setters: T = floating point, I = integer, B = boolean
void setTArguments(double *arguments, int numberOfArguments);
void setIArguments(Nd4jLong *arguments, int numberOfArguments);
void setBArguments(bool *arguments, int numberOfArguments);
void setTArguments(const std::vector<double> &tArgs);
void setIArguments(const std::vector<Nd4jLong> &tArgs);
void setBArguments(const std::vector<bool> &tArgs);
// wires CUDA stream/reduction/allocation pointers into this context's LaunchContext
void setCudaContext(Nd4jPointer cudaStream, Nd4jPointer reductionPointer, Nd4jPointer allocationPointer);
// enable/disable platform helpers (cuDNN/MKL-DNN) for this node
void allowHelpers(bool reallyAllow);
bool helpersAllowed();
// see _shapeFunctionOverride above
void setShapeFunctionOverride(bool reallyOverride);
bool shapeFunctionOverride();
};
}
}
#endif //LIBND4J_CONTEXT_H