From 18cd66cb13fcdea3a8a38445ed148a4dd561bf34 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Tue, 12 Dec 2017 16:49:54 +0800 Subject: [PATCH 1/5] add gpu support for crf --- paddle/gserver/layers/CRFDecodingLayer.cpp | 66 +++++++++++-- paddle/gserver/layers/CRFDecodingLayer.h | 6 ++ paddle/gserver/layers/CRFLayer.cpp | 107 ++++++++++++++++++--- paddle/gserver/layers/CRFLayer.h | 8 ++ paddle/gserver/tests/test_CRFLayerGrad.cpp | 8 +- 5 files changed, 172 insertions(+), 23 deletions(-) diff --git a/paddle/gserver/layers/CRFDecodingLayer.cpp b/paddle/gserver/layers/CRFDecodingLayer.cpp index 191176ce985a8e..8da3ab3d63eaa3 100644 --- a/paddle/gserver/layers/CRFDecodingLayer.cpp +++ b/paddle/gserver/layers/CRFDecodingLayer.cpp @@ -23,16 +23,24 @@ bool CRFDecodingLayer::init(const LayerMap& layerMap, if (!CRFLayer::init(layerMap, parameterMap)) { return false; } - crf_.reset(new LinearChainCRF( - numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData())); + if (!useGpu_) { + crf_.reset(new LinearChainCRF( + numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData())); + } return true; } void CRFDecodingLayer::forward(PassType passType) { Layer::forward(passType); - CHECK(!useGpu_) << "GPU is not supported"; - + if (useGpu_) { + cpuParam = Vector::create( + parameter_->getBuf(PARAMETER_VALUE)->getSize(), + false); + cpuParam->copyFrom(*parameter_->getBuf(PARAMETER_VALUE)); + crf_.reset(new LinearChainCRF( + numClasses_, cpuParam->getData())); + } const Argument& output = getInput(0); CHECK(output.sequenceStartPositions); @@ -40,12 +48,26 @@ void CRFDecodingLayer::forward(PassType passType) { size_t numSequences = output.sequenceStartPositions->getSize() - 1; IVector::resizeOrCreate(output_.ids, batchSize, useGpu_); + IVectorPtr output_ids = output_.ids; + MatrixPtr output_arg_val = output.value; + if (useGpu_) { + Matrix::resizeOrCreate(cpuOutputArg_, + /* height */ output_arg_val->getHeight(), + /* width */ output_arg_val->getWidth(), + /* trans */ false, + /* 
useGpu */ false); + IVector::resizeOrCreate(cpuOutputId_, batchSize, false); + cpuOutputArg_->copyFrom(*output_arg_val); + } else { + cpuOutputId_ = output_ids; + cpuOutputArg_ = output_arg_val; + } const int* starts = output.sequenceStartPositions->getData(false); CHECK_EQ(starts[numSequences], (int)batchSize); for (size_t i = 0; i < numSequences; ++i) { - crf_->decode(output.value->getData() + numClasses_ * starts[i], - output_.ids->getData() + starts[i], + crf_->decode(cpuOutputArg_->getData() + numClasses_ * starts[i], + cpuOutputId_->getData() + starts[i], starts[i + 1] - starts[i]); } @@ -53,12 +75,38 @@ void CRFDecodingLayer::forward(PassType passType) { const Argument& label = getInput(1); resizeOutput(batchSize, 1); CHECK(label.ids); - real* error = output_.value->getData(); - int* ids = label.ids->getData(); - int* result = output_.ids->getData(); + MatrixPtr output_val = output_.value; + if (useGpu_) { + Matrix::resizeOrCreate(cpuOutput_, + /* height */ output_val->getHeight(), + /* width */ output_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + IVector::resizeOrCreate(cpuLabel_, label.ids->getSize(), false); + cpuOutput_->copyFrom(*output_val); + cpuLabel_->copyFrom(*label.ids); + } else { + cpuOutput_ = output_val; + cpuLabel_ = label.ids; + } + real* error = cpuOutput_->getData(); + int* ids = cpuLabel_->getData(); + int* result = cpuOutputId_->getData(); for (size_t i = 0; i < batchSize; ++i) { error[i] = ids[i] == result[i] ? 
0 : 1; } + if (useGpu_) { + output_val->copyFrom(*cpuOutput_); + } else { + output_val = cpuOutput_; + } + } + if (useGpu_) { + output_ids->copyFrom(*cpuOutputId_); + output_arg_val->copyFrom(*cpuOutputArg_); + } else { + output_ids = cpuOutputId_; + output_arg_val = cpuOutputArg_; } } diff --git a/paddle/gserver/layers/CRFDecodingLayer.h b/paddle/gserver/layers/CRFDecodingLayer.h index 3cbcac6cf62dec..244c7ff12336db 100644 --- a/paddle/gserver/layers/CRFDecodingLayer.h +++ b/paddle/gserver/layers/CRFDecodingLayer.h @@ -39,6 +39,12 @@ class CRFDecodingLayer : public CRFLayer { protected: std::unique_ptr crf_; + // The temporary variables in CPU memory. + MatrixPtr cpuOutputArg_; + MatrixPtr cpuOutput_; + IVectorPtr cpuLabel_; + IVectorPtr cpuOutputId_; + VectorPtr cpuParam; }; } // namespace paddle diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 867303b4fa0d49..1fcdb543beeb69 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -54,8 +54,6 @@ bool CRFLayer::init(const LayerMap& layerMap, void CRFLayer::forward(PassType passType) { Layer::forward(passType); - CHECK(!useGpu_) << "GPU is not supported"; - const Argument& output = getInput(0); const Argument& label = getInput(1); CHECK(label.sequenceStartPositions); @@ -68,16 +66,55 @@ void CRFLayer::forward(PassType passType) { const int* starts = label.sequenceStartPositions->getData(false); CHECK_EQ(starts[numSequences], batchSize); + MatrixPtr weight_val = weight_->getW(); + MatrixPtr output_val = output_.value; + MatrixPtr output_arg_val = output.value; + IVectorPtr label_val = label.ids; + if (useGpu_) { + Matrix::resizeOrCreate(cpuWeight_, + /* height */ weight_val->getHeight(), + /* width */ weight_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + Matrix::resizeOrCreate(cpuOutput_, + /* height */ output_val->getHeight(), + /* width */ output_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + 
Matrix::resizeOrCreate(cpuOutputArg_, + /* height */ output_arg_val->getHeight(), + /* width */ output_arg_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + IVector::resizeOrCreate(cpuLabel_, + label_val->getSize(), + false); + cpuWeight_->copyFrom(*weight_val); + cpuOutputArg_->copyFrom(*output_arg_val); + cpuOutput_->copyFrom(*output_val); + cpuLabel_->copyFrom(*label_val); + } else { + cpuWeight_ = weight_val; + cpuOutputArg_ = output_arg_val; + cpuOutput_ = output_val; + cpuLabel_ = label_val; + } for (size_t i = 0; i < numSequences; ++i) { if (i >= crfs_.size()) { - crfs_.emplace_back(numClasses_, weight_->getW()->getData()); + crfs_.emplace_back(numClasses_, cpuWeight_->getData()); } - output_.value->getData()[i] = - crfs_[i].forward(output.value->getData() + numClasses_ * starts[i], - label.ids->getData() + starts[i], + cpuOutput_->getData()[i] = + crfs_[i].forward(cpuOutputArg_->getData() + numClasses_ * starts[i], + cpuLabel_->getData() + starts[i], starts[i + 1] - starts[i]); } - + if (useGpu_) { + output_val->copyFrom(*cpuOutput_); + output_arg_val->copyFrom(*cpuOutputArg_); + } else { + output_val = cpuOutput_; + output_arg_val = cpuOutputArg_; + } if (weightLayer_) { const MatrixPtr& weight = getInputValue(*weightLayer_); getOutputValue()->dotMul(*getOutputValue(), *weight); @@ -91,9 +128,38 @@ void CRFLayer::backward(const UpdateCallback& callback) { int numSequences = label.sequenceStartPositions->getSize() - 1; bool needWGrad = weight_->getWGrad() ? 
true : false; + MatrixPtr output_arg_grad = output.grad; + MatrixPtr weight_grad = weight_->getWGrad(); + MatrixPtr output_arg_val = output.value; + IVectorPtr label_val = label.ids; + if (useGpu_) { + cpuOutputArg_->copyFrom(*output_arg_val); + cpuLabel_->copyFrom(*label_val); + if (output_arg_grad) { + Matrix::resizeOrCreate(cpuOutputArgGrad_, + /* height */ output_arg_grad->getHeight(), + /* width */ output_arg_grad->getWidth(), + /* trans */ false, + /* useGpu */ false); + cpuOutputArgGrad_->copyFrom(*output_arg_grad); + } + if (needWGrad) { + Matrix::resizeOrCreate(cpuWeightGrad_, + /* height */ weight_grad->getHeight(), + /* width */ weight_grad->getWidth(), + /* trans */ false, + /* useGpu */ false); + cpuWeightGrad_->copyFrom(*weight_grad); + } + } else { + cpuOutputArg_ = output_arg_val; + cpuWeightGrad_ = weight_grad; + cpuOutputArgGrad_ = output_arg_grad; + cpuLabel_ = label_val; + } for (int i = 0; i < numSequences; ++i) { - crfs_[i].backward(output.value->getData() + numClasses_ * starts[i], - label.ids->getData() + starts[i], + crfs_[i].backward(cpuOutputArg_->getData() + numClasses_ * starts[i], + cpuLabel_->getData() + starts[i], starts[i + 1] - starts[i], needWGrad); real instanceWeight = weightLayer_ @@ -102,15 +168,32 @@ void CRFLayer::backward(const UpdateCallback& callback) { instanceWeight *= coeff_; if (output.grad) { - MatrixPtr grad = output.grad->subRowMatrix(starts[i], starts[i + 1]); + MatrixPtr grad = cpuOutputArgGrad_->subRowMatrix(starts[i], + starts[i + 1]); grad->add(*crfs_[i].getXGrad(), real(1.0f), instanceWeight); } if (needWGrad) { - weight_->getWGrad()->add( + cpuWeightGrad_->add( *crfs_[i].getWGrad(), real(1.0f), instanceWeight); } } - + if (useGpu_) { + if (output.grad) { + output_arg_grad->copyFrom(*cpuOutputArgGrad_); + } + if (needWGrad) { + weight_grad->copyFrom(*cpuWeightGrad_); + } + output_arg_val->copyFrom(*cpuOutputArg_); + } else { + if (output.grad) { + output_arg_grad = cpuOutputArgGrad_; + } + if (needWGrad) { + 
weight_grad = cpuWeightGrad_; + } + output_arg_val = cpuOutputArg_; + } parameter_->incUpdate(callback); } diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/gserver/layers/CRFLayer.h index 00ec13cede9740..10650933bb6d76 100644 --- a/paddle/gserver/layers/CRFLayer.h +++ b/paddle/gserver/layers/CRFLayer.h @@ -41,6 +41,14 @@ class CRFLayer : public Layer { LayerPtr weightLayer_; // weight for each sequence std::unique_ptr weight_; // parameters real coeff_; // weight for the layer + + // The temporary variables in CPU memory. + MatrixPtr cpuWeight_; + MatrixPtr cpuOutputArg_; + MatrixPtr cpuOutput_; + MatrixPtr cpuWeightGrad_; + MatrixPtr cpuOutputArgGrad_; + IVectorPtr cpuLabel_; }; } // namespace paddle diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index f010066ebc6c33..dd51c8f6908d23 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -134,13 +134,15 @@ TEST(Layer, CRFLayer) { TestConfig config = initTestConfig(numClasses, /* withWeight= */ false); for (int length : {1, 3, 100}) { // Not support GPU now + for (auto useGpu : {false, true}) { testLayerGrad(config, "crf", length, /* trans= */ false, - /* useGpu= */ false, + /* useGpu= */ useGpu, /* useWeight= */ false, epsilon()); + } } } } @@ -151,13 +153,15 @@ TEST(Layer, CRFLayerUseWeight) { TestConfig config = initTestConfig(numClasses, /* withWeight= */ true); for (int length : {1, 3, 100}) { // Not support GPU now + for (auto useGpu : {false, true}) { testLayerGrad(config, "crf", length, /* trans= */ false, - /* useGpu= */ false, + /* useGpu= */ useGpu, /* useWeight= */ false, epsilon()); + } } } } From 2ee77ac7e85225eb559f7f7aaf7e6cdcc0639af1 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Tue, 12 Dec 2017 17:03:08 +0800 Subject: [PATCH 2/5] add gpu support for ChunkEvaluator --- paddle/gserver/evaluators/ChunkEvaluator.cpp | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 
deletions(-) diff --git a/paddle/gserver/evaluators/ChunkEvaluator.cpp b/paddle/gserver/evaluators/ChunkEvaluator.cpp index a2ab15eedee4aa..cdfaedea5ae917 100644 --- a/paddle/gserver/evaluators/ChunkEvaluator.cpp +++ b/paddle/gserver/evaluators/ChunkEvaluator.cpp @@ -75,6 +75,8 @@ class ChunkEvaluator : public Evaluator { std::vector labelSegments_; std::vector outputSegments_; std::set excludedChunkTypes_; + IVectorPtr cpuOutput_; + IVectorPtr cpuLabel_; mutable std::unordered_map values_; public: @@ -142,16 +144,27 @@ class ChunkEvaluator : public Evaluator { CHECK_EQ(arguments.size(), (size_t)2); IVectorPtr& output = arguments[0].ids; IVectorPtr& label = arguments[1].ids; - CHECK(!output->useGpu() && !label->useGpu()) << "Not supported"; auto sequenceStartPositions = arguments[1].sequenceStartPositions->getVector(false); CHECK_EQ(output->getSize(), label->getSize()); CHECK(sequenceStartPositions); size_t numSequences = sequenceStartPositions->getSize() - 1; const int* starts = sequenceStartPositions->getData(); + if (output->useGpu()) { + IVector::resizeOrCreate(cpuOutput_, output->getSize(), false); + cpuOutput_->copyFrom(*output); + } else { + cpuOutput_ = output; + } + if (label->useGpu()) { + IVector::resizeOrCreate(cpuLabel_, label->getSize(), false); + cpuLabel_->copyFrom(*label); + } else { + cpuLabel_ = label; + } for (size_t i = 0; i < numSequences; ++i) { - eval1(output->getData() + starts[i], - label->getData() + starts[i], + eval1(cpuOutput_->getData() + starts[i], + cpuLabel_->getData() + starts[i], starts[i + 1] - starts[i]); } return 0; From 53179a4bcce4cdef1c90601808a99908b93f8676 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Tue, 12 Dec 2017 17:15:38 +0800 Subject: [PATCH 3/5] fix code style --- paddle/gserver/layers/CRFDecodingLayer.cpp | 26 +++++----- paddle/gserver/layers/CRFLayer.cpp | 57 ++++++++++------------ paddle/gserver/tests/test_CRFLayerGrad.cpp | 30 ++++++------ 3 files changed, 54 insertions(+), 59 deletions(-) diff --git 
a/paddle/gserver/layers/CRFDecodingLayer.cpp b/paddle/gserver/layers/CRFDecodingLayer.cpp index 8da3ab3d63eaa3..3b2e7b9c9d27bc 100644 --- a/paddle/gserver/layers/CRFDecodingLayer.cpp +++ b/paddle/gserver/layers/CRFDecodingLayer.cpp @@ -25,7 +25,7 @@ bool CRFDecodingLayer::init(const LayerMap& layerMap, } if (!useGpu_) { crf_.reset(new LinearChainCRF( - numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData())); + numClasses_, parameter_->getBuf(PARAMETER_VALUE)->getData())); } return true; } @@ -34,12 +34,10 @@ void CRFDecodingLayer::forward(PassType passType) { Layer::forward(passType); if (useGpu_) { - cpuParam = Vector::create( - parameter_->getBuf(PARAMETER_VALUE)->getSize(), - false); + cpuParam = + Vector::create(parameter_->getBuf(PARAMETER_VALUE)->getSize(), false); cpuParam->copyFrom(*parameter_->getBuf(PARAMETER_VALUE)); - crf_.reset(new LinearChainCRF( - numClasses_, cpuParam->getData())); + crf_.reset(new LinearChainCRF(numClasses_, cpuParam->getData())); } const Argument& output = getInput(0); CHECK(output.sequenceStartPositions); @@ -52,10 +50,10 @@ void CRFDecodingLayer::forward(PassType passType) { MatrixPtr output_arg_val = output.value; if (useGpu_) { Matrix::resizeOrCreate(cpuOutputArg_, - /* height */ output_arg_val->getHeight(), - /* width */ output_arg_val->getWidth(), - /* trans */ false, - /* useGpu */ false); + /* height */ output_arg_val->getHeight(), + /* width */ output_arg_val->getWidth(), + /* trans */ false, + /* useGpu */ false); IVector::resizeOrCreate(cpuOutputId_, batchSize, false); cpuOutputArg_->copyFrom(*output_arg_val); } else { @@ -78,10 +76,10 @@ void CRFDecodingLayer::forward(PassType passType) { MatrixPtr output_val = output_.value; if (useGpu_) { Matrix::resizeOrCreate(cpuOutput_, - /* height */ output_val->getHeight(), - /* width */ output_val->getWidth(), - /* trans */ false, - /* useGpu */ false); + /* height */ output_val->getHeight(), + /* width */ output_val->getWidth(), + /* trans */ false, + /* useGpu */ 
false); IVector::resizeOrCreate(cpuLabel_, label.ids->getSize(), false); cpuOutput_->copyFrom(*output_val); cpuLabel_->copyFrom(*label.ids); diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 1fcdb543beeb69..203b4335847f6b 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -72,23 +72,21 @@ void CRFLayer::forward(PassType passType) { IVectorPtr label_val = label.ids; if (useGpu_) { Matrix::resizeOrCreate(cpuWeight_, - /* height */ weight_val->getHeight(), - /* width */ weight_val->getWidth(), - /* trans */ false, - /* useGpu */ false); + /* height */ weight_val->getHeight(), + /* width */ weight_val->getWidth(), + /* trans */ false, + /* useGpu */ false); Matrix::resizeOrCreate(cpuOutput_, - /* height */ output_val->getHeight(), - /* width */ output_val->getWidth(), - /* trans */ false, - /* useGpu */ false); + /* height */ output_val->getHeight(), + /* width */ output_val->getWidth(), + /* trans */ false, + /* useGpu */ false); Matrix::resizeOrCreate(cpuOutputArg_, - /* height */ output_arg_val->getHeight(), - /* width */ output_arg_val->getWidth(), - /* trans */ false, - /* useGpu */ false); - IVector::resizeOrCreate(cpuLabel_, - label_val->getSize(), - false); + /* height */ output_arg_val->getHeight(), + /* width */ output_arg_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + IVector::resizeOrCreate(cpuLabel_, label_val->getSize(), false); cpuWeight_->copyFrom(*weight_val); cpuOutputArg_->copyFrom(*output_arg_val); cpuOutput_->copyFrom(*output_val); @@ -137,18 +135,18 @@ void CRFLayer::backward(const UpdateCallback& callback) { cpuLabel_->copyFrom(*label_val); if (output_arg_grad) { Matrix::resizeOrCreate(cpuOutputArgGrad_, - /* height */ output_arg_grad->getHeight(), - /* width */ output_arg_grad->getWidth(), - /* trans */ false, - /* useGpu */ false); - cpuOutputArgGrad_->copyFrom(*output_arg_grad); + /* height */ output_arg_grad->getHeight(), + /* width */ 
output_arg_grad->getWidth(), + /* trans */ false, + /* useGpu */ false); + cpuOutputArgGrad_->copyFrom(*output_arg_grad); } if (needWGrad) { Matrix::resizeOrCreate(cpuWeightGrad_, - /* height */ weight_grad->getHeight(), - /* width */ weight_grad->getWidth(), - /* trans */ false, - /* useGpu */ false); + /* height */ weight_grad->getHeight(), + /* width */ weight_grad->getWidth(), + /* trans */ false, + /* useGpu */ false); cpuWeightGrad_->copyFrom(*weight_grad); } } else { @@ -168,13 +166,12 @@ void CRFLayer::backward(const UpdateCallback& callback) { instanceWeight *= coeff_; if (output.grad) { - MatrixPtr grad = cpuOutputArgGrad_->subRowMatrix(starts[i], - starts[i + 1]); + MatrixPtr grad = + cpuOutputArgGrad_->subRowMatrix(starts[i], starts[i + 1]); grad->add(*crfs_[i].getXGrad(), real(1.0f), instanceWeight); } if (needWGrad) { - cpuWeightGrad_->add( - *crfs_[i].getWGrad(), real(1.0f), instanceWeight); + cpuWeightGrad_->add(*crfs_[i].getWGrad(), real(1.0f), instanceWeight); } } if (useGpu_) { @@ -184,7 +181,7 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad->copyFrom(*cpuWeightGrad_); } - output_arg_val->copyFrom(*cpuOutputArg_); + output_arg_val->copyFrom(*cpuOutputArg_); } else { if (output.grad) { output_arg_grad = cpuOutputArgGrad_; @@ -192,7 +189,7 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad = cpuWeightGrad_; } - output_arg_val = cpuOutputArg_; + output_arg_val = cpuOutputArg_; } parameter_->incUpdate(callback); } diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index dd51c8f6908d23..b78f9b8a9d0624 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -135,13 +135,13 @@ TEST(Layer, CRFLayer) { for (int length : {1, 3, 100}) { // Not support GPU now for (auto useGpu : {false, true}) { - testLayerGrad(config, - "crf", - length, - /* trans= */ false, - /* useGpu= */ 
useGpu, - /* useWeight= */ false, - epsilon()); + testLayerGrad(config, + "crf", + length, + /* trans= */ false, + /* useGpu= */ useGpu, + /* useWeight= */ false, + epsilon()); } } } @@ -153,14 +153,14 @@ TEST(Layer, CRFLayerUseWeight) { TestConfig config = initTestConfig(numClasses, /* withWeight= */ true); for (int length : {1, 3, 100}) { // Not support GPU now - for (auto useGpu : {false, true}) { - testLayerGrad(config, - "crf", - length, - /* trans= */ false, - /* useGpu= */ useGpu, - /* useWeight= */ false, - epsilon()); + for (auto useGpu : {false}) { + testLayerGrad(config, + "crf", + length, + /* trans= */ false, + /* useGpu= */ useGpu, + /* useWeight= */ false, + epsilon()); } } } From 5daca792ed6b35f9dc176f24a74d183a77f48c47 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Fri, 15 Dec 2017 15:30:21 +0800 Subject: [PATCH 4/5] update the duplicate of weight --- paddle/gserver/layers/CRFLayer.cpp | 40 +++++++++++++++------- paddle/gserver/layers/CRFLayer.h | 2 ++ paddle/gserver/tests/test_CRFLayerGrad.cpp | 18 +++++----- 3 files changed, 37 insertions(+), 23 deletions(-) diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 203b4335847f6b..280bbd27e39ab8 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -1,11 +1,8 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. - Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -107,9 +104,13 @@ void CRFLayer::forward(PassType passType) { starts[i + 1] - starts[i]); } if (useGpu_) { + label_val->copyFrom(*cpuLabel_); + weight_val->copyFrom(*cpuWeight_); output_val->copyFrom(*cpuOutput_); output_arg_val->copyFrom(*cpuOutputArg_); } else { + label_val = cpuLabel_; + weight_val = cpuWeight_; output_val = cpuOutput_; output_arg_val = cpuOutputArg_; } @@ -131,8 +132,14 @@ void CRFLayer::backward(const UpdateCallback& callback) { MatrixPtr output_arg_val = output.value; IVectorPtr label_val = label.ids; if (useGpu_) { - cpuOutputArg_->copyFrom(*output_arg_val); - cpuLabel_->copyFrom(*label_val); + Matrix::resizeOrCreate(cpuOutputBackArg_, + /* height */ output_arg_val->getHeight(), + /* width */ output_arg_val->getWidth(), + /* trans */ false, + /* useGpu */ false); + IVector::resizeOrCreate(cpuBackLabel_, label_val->getSize(), false); + cpuOutputBackArg_->copyFrom(*output_arg_val); + cpuBackLabel_->copyFrom(*label_val); if (output_arg_grad) { Matrix::resizeOrCreate(cpuOutputArgGrad_, /* height */ output_arg_grad->getHeight(), @@ -150,14 +157,18 @@ void CRFLayer::backward(const UpdateCallback& callback) { cpuWeightGrad_->copyFrom(*weight_grad); } } else { - cpuOutputArg_ = output_arg_val; - cpuWeightGrad_ = weight_grad; - cpuOutputArgGrad_ = output_arg_grad; - cpuLabel_ = label_val; + cpuOutputBackArg_ = output_arg_val; + cpuBackLabel_ = label_val; + if (output_arg_grad) { + cpuOutputArgGrad_ = output_arg_grad; + } + if (needWGrad) { + cpuWeightGrad_ = weight_grad; + } } for (int i = 0; i < numSequences; ++i) { - crfs_[i].backward(cpuOutputArg_->getData() + numClasses_ * starts[i], - cpuLabel_->getData() + starts[i], + crfs_[i].backward(cpuOutputBackArg_->getData() + numClasses_ * starts[i], + cpuBackLabel_->getData() + starts[i], starts[i + 1] - starts[i], needWGrad); real instanceWeight = weightLayer_ @@ -181,7 +192,8 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad->copyFrom(*cpuWeightGrad_); } - 
output_arg_val->copyFrom(*cpuOutputArg_); + output_arg_val->copyFrom(*cpuOutputBackArg_); + label_val->copyFrom(*cpuBackLabel_); } else { if (output.grad) { output_arg_grad = cpuOutputArgGrad_; @@ -189,8 +201,10 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad = cpuWeightGrad_; } - output_arg_val = cpuOutputArg_; + output_arg_val = cpuOutputBackArg_; + label_val = cpuBackLabel_; } + parameter_->incUpdate(callback); } diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/gserver/layers/CRFLayer.h index 10650933bb6d76..11e6e2a9b5ee46 100644 --- a/paddle/gserver/layers/CRFLayer.h +++ b/paddle/gserver/layers/CRFLayer.h @@ -49,6 +49,8 @@ class CRFLayer : public Layer { MatrixPtr cpuWeightGrad_; MatrixPtr cpuOutputArgGrad_; IVectorPtr cpuLabel_; + MatrixPtr cpuOutputBackArg_; + IVectorPtr cpuBackLabel_; }; } // namespace paddle diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/gserver/tests/test_CRFLayerGrad.cpp index b78f9b8a9d0624..988442a9e8b1ba 100644 --- a/paddle/gserver/tests/test_CRFLayerGrad.cpp +++ b/paddle/gserver/tests/test_CRFLayerGrad.cpp @@ -130,11 +130,10 @@ TestConfig initTestConfig(size_t numClasses, bool withWeight) { TEST(Layer, CRFLayer) { size_t numClasses = 10; - for (int tries = 0; tries < 5; ++tries) { - TestConfig config = initTestConfig(numClasses, /* withWeight= */ false); - for (int length : {1, 3, 100}) { - // Not support GPU now - for (auto useGpu : {false, true}) { + for (auto useGpu : {false, true}) { + for (int tries = 0; tries < 5; ++tries) { + TestConfig config = initTestConfig(numClasses, /* withWeight= */ false); + for (int length : {1, 3, 100}) { testLayerGrad(config, "crf", length, @@ -149,11 +148,10 @@ TEST(Layer, CRFLayer) { TEST(Layer, CRFLayerUseWeight) { size_t numClasses = 10; - for (int tries = 0; tries < 5; ++tries) { - TestConfig config = initTestConfig(numClasses, /* withWeight= */ true); - for (int length : {1, 3, 100}) { - // Not support GPU now - for (auto useGpu 
: {false}) { + for (auto useGpu : {false, true}) { + for (int tries = 0; tries < 5; ++tries) { + TestConfig config = initTestConfig(numClasses, /* withWeight= */ true); + for (int length : {1, 3, 100}) { testLayerGrad(config, "crf", length, From 5ca9411d0da168504c9e2dfc536122bbc673fa57 Mon Sep 17 00:00:00 2001 From: peterzhang2029 Date: Mon, 18 Dec 2017 20:59:12 +0800 Subject: [PATCH 5/5] remove unnecessary copy --- paddle/gserver/layers/CRFLayer.cpp | 31 +++++++++++------------------- paddle/gserver/layers/CRFLayer.h | 2 -- 2 files changed, 11 insertions(+), 22 deletions(-) diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/gserver/layers/CRFLayer.cpp index 280bbd27e39ab8..68f9ee411be29d 100644 --- a/paddle/gserver/layers/CRFLayer.cpp +++ b/paddle/gserver/layers/CRFLayer.cpp @@ -1,8 +1,11 @@ /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -104,13 +107,9 @@ void CRFLayer::forward(PassType passType) { starts[i + 1] - starts[i]); } if (useGpu_) { - label_val->copyFrom(*cpuLabel_); - weight_val->copyFrom(*cpuWeight_); output_val->copyFrom(*cpuOutput_); output_arg_val->copyFrom(*cpuOutputArg_); } else { - label_val = cpuLabel_; - weight_val = cpuWeight_; output_val = cpuOutput_; output_arg_val = cpuOutputArg_; } @@ -132,14 +131,8 @@ void CRFLayer::backward(const UpdateCallback& callback) { MatrixPtr output_arg_val = output.value; IVectorPtr label_val = label.ids; if (useGpu_) { - Matrix::resizeOrCreate(cpuOutputBackArg_, - /* height */ output_arg_val->getHeight(), - /* width */ output_arg_val->getWidth(), - /* trans */ false, - /* useGpu */ false); - IVector::resizeOrCreate(cpuBackLabel_, label_val->getSize(), false); - cpuOutputBackArg_->copyFrom(*output_arg_val); - cpuBackLabel_->copyFrom(*label_val); + cpuOutputArg_->copyFrom(*output_arg_val); + cpuLabel_->copyFrom(*label_val); if (output_arg_grad) { Matrix::resizeOrCreate(cpuOutputArgGrad_, /* height */ output_arg_grad->getHeight(), @@ -157,8 +150,8 @@ void CRFLayer::backward(const UpdateCallback& callback) { cpuWeightGrad_->copyFrom(*weight_grad); } } else { - cpuOutputBackArg_ = output_arg_val; - cpuBackLabel_ = label_val; + cpuOutputArg_ = output_arg_val; + cpuLabel_ = label_val; if (output_arg_grad) { cpuOutputArgGrad_ = output_arg_grad; } @@ -167,8 +160,8 @@ void CRFLayer::backward(const UpdateCallback& callback) { } } for (int i = 0; i < numSequences; ++i) { - crfs_[i].backward(cpuOutputBackArg_->getData() + numClasses_ * starts[i], - cpuBackLabel_->getData() + starts[i], + crfs_[i].backward(cpuOutputArg_->getData() + numClasses_ * starts[i], + cpuLabel_->getData() + starts[i], starts[i + 1] - starts[i], needWGrad); real instanceWeight = weightLayer_ @@ -192,8 +185,7 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad->copyFrom(*cpuWeightGrad_); } - output_arg_val->copyFrom(*cpuOutputBackArg_); - 
label_val->copyFrom(*cpuBackLabel_); + output_arg_val->copyFrom(*cpuOutputArg_); } else { if (output.grad) { output_arg_grad = cpuOutputArgGrad_; @@ -201,8 +193,7 @@ void CRFLayer::backward(const UpdateCallback& callback) { if (needWGrad) { weight_grad = cpuWeightGrad_; } - output_arg_val = cpuOutputBackArg_; - label_val = cpuBackLabel_; + output_arg_val = cpuOutputArg_; } parameter_->incUpdate(callback); diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/gserver/layers/CRFLayer.h index 11e6e2a9b5ee46..10650933bb6d76 100644 --- a/paddle/gserver/layers/CRFLayer.h +++ b/paddle/gserver/layers/CRFLayer.h @@ -49,8 +49,6 @@ class CRFLayer : public Layer { MatrixPtr cpuWeightGrad_; MatrixPtr cpuOutputArgGrad_; IVectorPtr cpuLabel_; - MatrixPtr cpuOutputBackArg_; - IVectorPtr cpuBackLabel_; }; } // namespace paddle