diff --git a/include/caffe/layers/cudnn_lcn_layer.hpp b/include/caffe/layers/cudnn_lcn_layer.hpp
index 74cf4775e51..01051dbf654 100644
--- a/include/caffe/layers/cudnn_lcn_layer.hpp
+++ b/include/caffe/layers/cudnn_lcn_layer.hpp
@@ -36,9 +36,6 @@ class CuDNNLCNLayer : public LRNLayer<Dtype> {
   cudnnLRNDescriptor_t norm_desc_;
   cudnnTensorDescriptor_t bottom_desc_, top_desc_;
 
-  int size_, pre_pad_;
-  Dtype alpha_, beta_, k_;
-
   size_t tempDataSize;
   void *tempData1, *tempData2;
 };
diff --git a/src/caffe/layers/cudnn_lcn_layer.cpp b/src/caffe/layers/cudnn_lcn_layer.cpp
index 9c09bf26b4d..de744c6fb89 100644
--- a/src/caffe/layers/cudnn_lcn_layer.cpp
+++ b/src/caffe/layers/cudnn_lcn_layer.cpp
@@ -17,12 +17,6 @@ void CuDNNLCNLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
 
   // create a LRN handle
   handles_setup_ = true;
-
-  size_ = this->layer_param().lrn_param().local_size();
-  pre_pad_ = (size_ - 1) / 2;
-  alpha_ = this->layer_param().lrn_param().alpha();
-  beta_ = this->layer_param().lrn_param().beta();
-  k_ = this->layer_param().lrn_param().k();
 }
 
 template <typename Dtype>
@@ -33,7 +27,8 @@ void CuDNNLCNLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
       this->channels_, this->height_, this->width_);
   cudnn::setTensor4dDesc<Dtype>(&top_desc_, bottom[0]->num(),
       this->channels_, this->height_, this->width_);
-  CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, size_, alpha_, beta_, k_));
+  CUDNN_CHECK(cudnnSetLRNDescriptor(norm_desc_, this->size_, this->alpha_,
+      this->beta_, this->k_));
 
   // allocate / reallocate tempData buffers
   size_t totalSizeInBytes = sizeof(Dtype)*bottom[0]->num()* \