From e38da5bddaa3be529b08574bd27ba34fbcab8251 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 29 Jan 2024 16:05:07 +0800 Subject: [PATCH 01/38] fix --- .../pir/dialect/op_generator/ops_api_gen.py | 1 + paddle/fluid/pir/dialect/operator/ir/ops.yaml | 11 +++++ paddle/phi/api/yaml/op_compat.yaml | 6 +++ paddle/phi/infermeta/multiary.cc | 45 +++++++++++++++++++ paddle/phi/infermeta/multiary.h | 41 +++++++++++++++++ 5 files changed, 104 insertions(+) diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 4e1f9414a77ae2..389460fcf5e08e 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -154,6 +154,7 @@ 'c_reduce_min_', 'push_sparse_v2', 'push_sparse_v2_', + 'distributed_fused_lamb', ] diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 4593a7e7d7c427..7d8b27bc152405 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -386,6 +386,17 @@ data_type : fpn_rois optional : rois_num, multi_level_rois_num +- op : distributed_fuse_lamb + args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) + output : Tensor fp32fusedparamout, Tensor fp16fusedparamout, Tensor fp32accfusedgrad, Tensor fp16accfusedgrad, Tensor moment1out, Tensor moment2out, Tensor beta1powout, Tensor beta2powout, Tensor[] (paramout){param.size()}, Tensor foundinf, Tensor accstep, Tensor stopupdate, Tensor step) + infer_meta : + func : DistributedFuseLambInferMeta + kernel : + func : distributed_fuse_lamb + data_type : param + optional : fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32fusedparamout, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate + inplace : (fp32fusedparam -> fp32fusedparamout), (fp16fusedparam -> fp16fusedparamout) + - op : distributed_lookup_table args : (Tensor[] ids, Tensor w, int table_id = 0, bool is_distributed = false, str lookup_table_version = "lookup_table", int64_t padding_idx = -1, DataType dtype = DataType::FLOAT32, bool is_test = false) output : Tensor[](outputs){ids.size()} diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 8fb731e4770d90..4c89d9f215f8df 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -3511,6 +3511,12 @@ multi_level_rois_num: MultiLevelRoIsNum restore_index: RestoreIndex +- op: distributed_fused_lamb + inputs: + {param: Param, grad: Grad, fp32fusedparam: FP32FusedParam, fp32fusedgrad: FP32FusedGrad, fp16fusedparam: FP16FusedParam, fp16fusedgrad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fusedparamoffsets: FusedParamOffsets, fp32shardfusedparamoffsets: FP32ShardFusedParamOffsets, fp16shardfusedparamoffsets: 
FP16ShardFusedParamOffsets, paraminfo: ParamInfo, paramorder: ParamOrder, learningrate: LearningRate, globalscale: GlobalCsale} + outputs: + {paramout : ParamOut, fp32fusedparamout: FP32FusedParamOut, fp16fusedparamout: FP16FusedParamOut, fp32accfusedgrad: FP32AccFusedGrad, fp16accfusedgrad: FP16AccFusedGrad, moment1out: Moment1Out, moment2out: Moment2Out, beta1powout: Beta1PowOut, beta2powout: Beta2PowOut, foundinf: FoundInf, accstep: AccStep, stopupdate: StopUpdate, step: Step} + - op: distributed_lookup_table inputs: {ids: Ids, w: W} diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index e3bb14ffd8210e..331bce1b2dfea1 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -1508,6 +1508,51 @@ void DGCMomentumInferMeta(const MetaTensor& param, } } +void DistributedFusedLambInferMeta( + const std::vector& param, + const std::vector& grad, + const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32fusedgrad, + const paddle::optional>& fp16fusedparam, + const paddle::optional>& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + int64_t nranks, + const std::vector& ring_ids, + bool use_hierarchical_allreduce, + MetaTensor* fp32fusedparamout, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, + MetaTensor* moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step) {} + void EditDistanceInferMeta(const MetaTensor& hyps, const MetaTensor& refs, const MetaTensor& hypslength, diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index 9fe4d962c1eb00..14a3b73bc64117 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -294,6 +294,47 @@ void DeformableConvInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void DistributedFusedLambInferMeta( + const std::vector& param, + const std::vector& grad, + const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32fusedgrad, + const paddle::optional>& fp16fusedparam, + const paddle::optional>& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + int64_t nranks, + const std::vector& ring_ids, + bool use_hierarchical_allreduce, + MetaTensor* 
moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step); + void DGCMomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, From 36c00639fe7f4d03dc691f711034c109f3509c73 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 1 Feb 2024 15:09:58 +0800 Subject: [PATCH 02/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 7d8b27bc152405..cec47004522b30 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -388,7 +388,7 @@ - op : distributed_fuse_lamb args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) - output : Tensor fp32fusedparamout, Tensor fp16fusedparamout, Tensor fp32accfusedgrad, Tensor fp16accfusedgrad, Tensor moment1out, Tensor moment2out, Tensor beta1powout, Tensor beta2powout, Tensor[] (paramout){param.size()}, Tensor foundinf, Tensor accstep, Tensor stopupdate, Tensor step) + output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[] (paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFuseLambInferMeta kernel : From 79b74c360c3a9477ff1809b36efb677635070edb Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 1 Feb 2024 17:13:24 +0800 Subject: [PATCH 03/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index cec47004522b30..19e6e5c895c71f 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -388,7 +388,7 @@ - op : distributed_fuse_lamb args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) - output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), 
Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[] (paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) + output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFuseLambInferMeta kernel : From 1cbd853ce600c5f5b7fdb3a33a28f239b4e75739 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 6 Mar 2024 09:29:07 +0800 Subject: [PATCH 04/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 11 +++++++++++ paddle/phi/api/yaml/op_compat.yaml | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 6a655d9851ec58..5cb939fd4512a9 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -431,6 +431,17 @@ data_type : fpn_rois optional : rois_num, multi_level_rois_num +- op : distributed_fuse_lamb + args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) + output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) + infer_meta : + func : DistributedFuseLambInferMeta + kernel : + func : distributed_fuse_lamb + data_type : param + optional : fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32fusedparamout, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate + inplace : (fp32fusedparam -> fp32fusedparamout), (fp16fusedparam -> fp16fusedparamout) + - op : distributed_fused_lamb_init args : (Tensor[] param, Tensor[] grad, float beta1, float beta2, int[] apply_weight_decay, int alignment, int rank, int nranks) output : Tensor(fp32_fused_param), Tensor(fp32_fused_grad), Tensor(fp16_fused_param), Tensor(fp16_fused_grad), Tensor(moment1), Tensor(moment2), Tensor(beta1_pow), Tensor(beta2_pow), Tensor(fused_param_offsets), Tensor(fp32_shard_fused_param_offsets), Tensor(fp16_shard_fused_param_offsets), Tensor(param_info), Tensor(param_order), Tensor[](param_out){param.size()}, Tensor[](master_param_out){param.size()}, Tensor[](grad_out){grad.size()}, Tensor(global_scale), Tensor(step) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 2c6129c30fb817..6c0118fd0728a1 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -3637,6 +3637,12 @@ multi_level_rois_num: MultiLevelRoIsNum restore_index: RestoreIndex +- op: 
distributed_fused_lamb + inputs: + {param: Param, grad: Grad, fp32fusedparam: FP32FusedParam, fp32fusedgrad: FP32FusedGrad, fp16fusedparam: FP16FusedParam, fp16fusedgrad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fusedparamoffsets: FusedParamOffsets, fp32shardfusedparamoffsets: FP32ShardFusedParamOffsets, fp16shardfusedparamoffsets: FP16ShardFusedParamOffsets, paraminfo: ParamInfo, paramorder: ParamOrder, learningrate: LearningRate, globalscale: GlobalCsale} + outputs: + {paramout : ParamOut, fp32fusedparamout: FP32FusedParamOut, fp16fusedparamout: FP16FusedParamOut, fp32accfusedgrad: FP32AccFusedGrad, fp16accfusedgrad: FP16AccFusedGrad, moment1out: Moment1Out, moment2out: Moment2Out, beta1powout: Beta1PowOut, beta2powout: Beta2PowOut, foundinf: FoundInf, accstep: AccStep, stopupdate: StopUpdate, step: Step} + - op: distributed_fused_lamb_init inputs: {param: Param, grad: Grad} From 8ff226d2d120349d57616ccde373dbb55b9a216d Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 6 Mar 2024 10:46:50 +0800 Subject: [PATCH 05/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 5cb939fd4512a9..14e90d2fd681b7 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -431,14 +431,13 @@ data_type : fpn_rois optional : rois_num, multi_level_rois_num -- op : distributed_fuse_lamb +- op : distributed_fused_lamb args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : - func : DistributedFuseLambInferMeta + func : DistributedFusedLambInferMeta kernel : - func : distributed_fuse_lamb - data_type : param + func : distributed_fused_lamb optional : fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32fusedparamout, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate inplace : (fp32fusedparam -> fp32fusedparamout), (fp16fusedparam -> fp16fusedparamout) From 584ee74051d40cc68b4b1dece2779751b7070d06 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 6 Mar 2024 16:17:53 +0800 Subject: [PATCH 06/38] fix --- .../optimizers/distributed_fused_lamb_op.cc | 61 ------------------- 1 file changed, 61 deletions(-) diff --git a/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc b/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc index 651c446db9007e..dbf335a88154cf 100644 --- a/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc +++ 
b/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc @@ -169,64 +169,3 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(distributed_fused_lamb, ops::DistributedFusedLambOp, ops::DistributedFusedLambOpMaker); - -namespace phi { -namespace fusion { - -template <typename T, typename Context> -void DistributedFusedLambKernel(const Context &dev_ctx, - const std::vector<const DenseTensor *> &param, - const std::vector<const DenseTensor *> &grad, - const paddle::optional<DenseTensor> &fp32_param, - const paddle::optional<DenseTensor> &fp32_grad, - const paddle::optional<DenseTensor> &fp16_param, - const paddle::optional<DenseTensor> &fp16_grad, - const DenseTensor &moment1, - const DenseTensor &moment2, - const DenseTensor &beta1_pow, - const DenseTensor &beta2_pow, - const DenseTensor &param_offsets, - const DenseTensor &fp32_partial_offsets, - const DenseTensor &fp16_partial_offsets, - const DenseTensor &param_info, - const DenseTensor &param_order, - const DenseTensor &learning_rate, - const DenseTensor &global_scale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - bool use_hierarchical_allreduce, - int64_t nranks, - const std::vector<int> &ring_ids, - DenseTensor *fp32_param_out, - DenseTensor *fp16_param_out, - DenseTensor *fp32_acc_grad, - DenseTensor *fp16_acc_grad, - DenseTensor *moment1_out, - DenseTensor *moment2_out, - DenseTensor *beta1_pow_out, - DenseTensor *beta2_pow_out, - DenseTensor *param_out, - DenseTensor *found_inf, - DenseTensor *acc_step, - DenseTensor *stop_update, - DenseTensor *step) { - PADDLE_THROW(phi::errors::Unimplemented( - "The distributed_fused_lamb operator does not support CPU yet.")); -} - -} // namespace fusion -} // namespace phi - -PD_REGISTER_KERNEL(distributed_fused_lamb, - CPU, - ALL_LAYOUT, - phi::fusion::DistributedFusedLambKernel, - float) {} From b3b2586e9f5dc2fb5c6ad6651d599adbb113563c Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 7 Mar 2024 16:58:06 +0800 Subject: [PATCH 07/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 14e90d2fd681b7..447d89fdf940c1 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -432,12 +432,14 @@ optional : rois_num, multi_level_rois_num - op : distributed_fused_lamb - args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, int acc_steps = 1, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false) + args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor 
globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false, int acc_steps = 1) output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta + param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] kernel : func : distributed_fused_lamb + param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] optional : fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32fusedparamout, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate inplace : (fp32fusedparam -> fp32fusedparamout), (fp16fusedparam -> fp16fusedparamout) From 51385d72705426c2dbfbe4e47b0b9d89b07d0a8f Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 8 Mar 2024 09:58:23 +0800 Subject: [PATCH 08/38] fix --- paddle/phi/infermeta/multiary.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index 33c5ed9370e99d..1529e5c493beba 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -325,6 +325,10 @@ void DistributedFusedLambInferMeta( int64_t nranks, const std::vector& ring_ids, bool use_hierarchical_allreduce, + MetaTensor* fp32fusedparamout, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, MetaTensor* moment1out, MetaTensor* moment2out, MetaTensor* beta1powout, From 59d0f3dfc63f7dfc4ba0c002d8a360aec4b8de1c Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 8 Mar 2024 15:20:25 +0800 Subject: [PATCH 09/38] fix --- paddle/phi/infermeta/fusion.cc | 45 ++++++++++++++++++++++++++++++++ paddle/phi/infermeta/fusion.h | 45 ++++++++++++++++++++++++++++++++ paddle/phi/infermeta/multiary.cc | 45 -------------------------------- paddle/phi/infermeta/multiary.h | 45 -------------------------------- 4 files changed, 90 insertions(+), 90 deletions(-) diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index b56e7fab0bfe67..4bd3fbacf5e8b4 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -568,6 +568,51 @@ void Conv2dXPUInferMeta(const MetaTensor& x, out->set_dtype(out_dtype); } +void DistributedFusedLambInferMeta( + const std::vector& param, + 
const std::vector& grad, + const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32fusedgrad, + const paddle::optional>& fp16fusedparam, + const paddle::optional>& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + int64_t nranks, + const std::vector& ring_ids, + bool use_hierarchical_allreduce, + MetaTensor* fp32fusedparamout, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, + MetaTensor* moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step) {} + void EmbeddingWithEltwiseAddXPUInferMeta( const std::vector& ids, const std::vector& tables, diff --git a/paddle/phi/infermeta/fusion.h b/paddle/phi/infermeta/fusion.h index 0a7224e39f73b3..ab44d4e40c8d65 100644 --- a/paddle/phi/infermeta/fusion.h +++ b/paddle/phi/infermeta/fusion.h @@ -145,6 +145,51 @@ void Conv2dXPUInferMeta(const MetaTensor& x, MetaTensor* out, MetaTensor* out_max); +void DistributedFusedLambInferMeta( + const std::vector& param, + const std::vector& grad, + const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32fusedgrad, + const paddle::optional>& fp16fusedparam, + const paddle::optional>& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + int64_t nranks, + const std::vector& ring_ids, + bool use_hierarchical_allreduce, + MetaTensor* fp32fusedparamout, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, + MetaTensor* moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step); + void EmbeddingWithEltwiseAddXPUInferMeta( const std::vector& ids, const std::vector& tables, diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index 26e339c9d653f2..7575cc3cf14344 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -1508,51 +1508,6 @@ void DGCMomentumInferMeta(const MetaTensor& param, } } -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const paddle::optional>& fp32fusedparam, - const paddle::optional>& fp32fusedgrad, - const paddle::optional>& 
fp16fusedparam, - const paddle::optional>& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - int64_t nranks, - const std::vector& ring_ids, - bool use_hierarchical_allreduce, - MetaTensor* fp32fusedparamout, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step) {} - void EditDistanceInferMeta(const MetaTensor& hyps, const MetaTensor& refs, const MetaTensor& hypslength, diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index 1529e5c493beba..e83ef2ed1825de 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -294,51 +294,6 @@ void DeformableConvInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const paddle::optional>& fp32fusedparam, - const paddle::optional>& fp32fusedgrad, - const paddle::optional>& fp16fusedparam, - const paddle::optional>& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - int64_t nranks, - const std::vector& ring_ids, - bool use_hierarchical_allreduce, - MetaTensor* fp32fusedparamout, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step); - void DGCMomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, From d62cd213b668333117515d2ca3544f32099d4c19 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 09:41:30 +0800 Subject: [PATCH 10/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 7 +++---- paddle/phi/infermeta/fusion.cc | 4 ++-- paddle/phi/infermeta/fusion.h | 4 ++-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 447d89fdf940c1..6ab446a51770d6 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ 
b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -432,16 +432,15 @@ optional : rois_num, multi_level_rois_num - op : distributed_fused_lamb - args : (Tensor[] param, Tensor[] grad, Tensor fp32fusedparam, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false, int acc_steps = 1) - output : Tensor(fp32fusedparamout), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) + args : (Tensor[] param, Tensor[] grad, Tensor fp32_fused_param, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false, int acc_steps = 1) + output : Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] kernel : func : distributed_fused_lamb param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] - optional : fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32fusedparamout, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate - inplace : (fp32fusedparam -> fp32fusedparamout), (fp16fusedparam -> fp16fusedparamout) + optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, 
fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate - op : distributed_fused_lamb_init args : (Tensor[] param, Tensor[] grad, float beta1, float beta2, int[] apply_weight_decay, int alignment, int rank, int nranks) diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index 4bd3fbacf5e8b4..6f8c9bfb80a917 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -571,7 +571,7 @@ void Conv2dXPUInferMeta(const MetaTensor& x, void DistributedFusedLambInferMeta( const std::vector& param, const std::vector& grad, - const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32_fused_param, const paddle::optional>& fp32fusedgrad, const paddle::optional>& fp16fusedparam, const paddle::optional>& fp16fusedgrad, @@ -599,7 +599,7 @@ void DistributedFusedLambInferMeta( int64_t nranks, const std::vector& ring_ids, bool use_hierarchical_allreduce, - MetaTensor* fp32fusedparamout, + MetaTensor* fp32_fused_param_out, MetaTensor* fp16fusedparamout, MetaTensor* fp32accfusedgrad, MetaTensor* fp16accfusedgrad, diff --git a/paddle/phi/infermeta/fusion.h b/paddle/phi/infermeta/fusion.h index ab44d4e40c8d65..7d53b93f3d4858 100644 --- a/paddle/phi/infermeta/fusion.h +++ b/paddle/phi/infermeta/fusion.h @@ -148,7 +148,7 @@ void Conv2dXPUInferMeta(const MetaTensor& x, void DistributedFusedLambInferMeta( const std::vector& param, const std::vector& grad, - const paddle::optional>& fp32fusedparam, + const paddle::optional>& fp32_fused_param, const paddle::optional>& fp32fusedgrad, const paddle::optional>& fp16fusedparam, const paddle::optional>& fp16fusedgrad, @@ -176,7 +176,7 @@ void DistributedFusedLambInferMeta( int64_t nranks, const std::vector& ring_ids, bool use_hierarchical_allreduce, - MetaTensor* fp32fusedparamout, + MetaTensor* fp32_fused_param_out, MetaTensor* fp16fusedparamout, MetaTensor* fp32accfusedgrad, MetaTensor* fp16accfusedgrad, From 06c367f6f7273fcbd47890e2b834fe6595ce31e7 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 09:41:53 +0800 Subject: [PATCH 11/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 6ab446a51770d6..3cc08a0d597211 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -436,10 +436,10 @@ output : Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta - param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, 
paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] kernel : func : distributed_fused_lamb - param : [param, grad, fp32fusedparam, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate - op : distributed_fused_lamb_init From 57a84b964385c4787cedfc2438f7c81a9aebe98a Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 10:51:07 +0800 Subject: [PATCH 12/38] fix --- paddle/phi/infermeta/fusion.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/phi/infermeta/fusion.h b/paddle/phi/infermeta/fusion.h index 7d53b93f3d4858..9e99d349ba12fb 100644 --- a/paddle/phi/infermeta/fusion.h +++ b/paddle/phi/infermeta/fusion.h @@ -148,10 +148,10 @@ void Conv2dXPUInferMeta(const MetaTensor& x, void DistributedFusedLambInferMeta( const std::vector& param, const std::vector& grad, - const paddle::optional>& fp32_fused_param, - const paddle::optional>& fp32fusedgrad, - const paddle::optional>& fp16fusedparam, - const paddle::optional>& fp16fusedgrad, + const MetaTensor& fp32_fused_param, + const MetaTensor& fp32fusedgrad, + const MetaTensor& fp16fusedparam, + const MetaTensor& fp16fusedgrad, const MetaTensor& moment1, const MetaTensor& moment2, const MetaTensor& beta1pow, From 6c0a155350e128bef116c91d1be9136814c51fab Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 10:51:37 +0800 Subject: [PATCH 13/38] fix --- paddle/phi/infermeta/fusion.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index 6f8c9bfb80a917..a93bb26f9e0de4 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -571,10 +571,10 @@ void Conv2dXPUInferMeta(const MetaTensor& x, void DistributedFusedLambInferMeta( const std::vector& param, const std::vector& grad, - const paddle::optional>& fp32_fused_param, - const paddle::optional>& fp32fusedgrad, - const paddle::optional>& fp16fusedparam, - const paddle::optional>& fp16fusedgrad, + const MetaTensor& fp32_fused_param, + const MetaTensor& fp32fusedgrad, + const MetaTensor& fp16fusedparam, + const MetaTensor& fp16fusedgrad, const MetaTensor& moment1, const MetaTensor& moment2, const MetaTensor& beta1pow, From ef6089922a57e6db704fda1dee8ce89dc512fb41 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 
10:56:39 +0800 Subject: [PATCH 14/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 3cc08a0d597211..66f00f1f93e6b1 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -441,6 +441,7 @@ func : distributed_fused_lamb param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate + inplace : (fp32_fused_param -> fp32_fused_param_out), (fp16fusedparam -> fp16fusedparamout), (moment1 -> moment1out), (moment2 -> moment2out), (beta1pow -> beta1powout), (beta2pow -> beta2powout), (param -> paramout) - op : distributed_fused_lamb_init args : (Tensor[] param, Tensor[] grad, float beta1, float beta2, int[] apply_weight_decay, int alignment, int rank, int nranks) From 2ada845cf4063aa6ef83acb98f4ae6d4394adb5b Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 11:39:36 +0800 Subject: [PATCH 15/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 2 +- paddle/phi/infermeta/fusion.cc | 2 +- paddle/phi/infermeta/fusion.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 66f00f1f93e6b1..e416b7a7c1de32 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -432,7 +432,7 @@ optional : rois_num, multi_level_rois_num - op : distributed_fused_lamb - args : (Tensor[] param, Tensor[] grad, Tensor fp32_fused_param, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, int[] ring_ids= {0}, bool use_hierarchical_allreduce = false, int acc_steps = 1) + args : (Tensor[] param, Tensor[] grad, Tensor fp32_fused_param, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, int[] ring_ids= {}, int acc_steps = 1, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, bool use_hierarchical_allreduce = false) output : 
Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index a93bb26f9e0de4..b54d6a598cda9f 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -596,9 +596,9 @@ void DistributedFusedLambInferMeta( bool use_master_param_norm, bool use_master_acc_grad, bool is_grad_scaled_by_nranks, + bool use_hierarchical_allreduce, int64_t nranks, const std::vector& ring_ids, - bool use_hierarchical_allreduce, MetaTensor* fp32_fused_param_out, MetaTensor* fp16fusedparamout, MetaTensor* fp32accfusedgrad, diff --git a/paddle/phi/infermeta/fusion.h b/paddle/phi/infermeta/fusion.h index 9e99d349ba12fb..e904ee866ed2f4 100644 --- a/paddle/phi/infermeta/fusion.h +++ b/paddle/phi/infermeta/fusion.h @@ -173,9 +173,9 @@ void DistributedFusedLambInferMeta( bool use_master_param_norm, bool use_master_acc_grad, bool is_grad_scaled_by_nranks, + bool use_hierarchical_allreduce, int64_t nranks, const std::vector& ring_ids, - bool use_hierarchical_allreduce, MetaTensor* fp32_fused_param_out, MetaTensor* fp16fusedparamout, MetaTensor* fp32accfusedgrad, From 6127fe5a92866d728fde1a0c37c8c0cda235ab41 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 11:41:11 +0800 Subject: [PATCH 16/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index e416b7a7c1de32..d7c4de44146032 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -436,10 +436,10 @@ output : Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] kernel : func : distributed_fused_lamb - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, 
fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, nranks, ring_ids, use_hierarchical_allreduce] + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate inplace : (fp32_fused_param -> fp32_fused_param_out), (fp16fusedparam -> fp16fusedparamout), (moment1 -> moment1out), (moment2 -> moment2out), (beta1pow -> beta1powout), (beta2pow -> beta2powout), (param -> paramout) From abe04466a5eb793af46fbbe0b06e703dc05e02e5 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 11 Mar 2024 14:44:36 +0800 Subject: [PATCH 17/38] fix --- .../pir/dialect/op_generator/ops_api_gen.py | 2 +- paddle/phi/api/yaml/op_compat.yaml | 4 +- paddle/phi/infermeta/fusion.cc | 45 ------------------- paddle/phi/infermeta/fusion.h | 45 ------------------- paddle/phi/infermeta/multiary.cc | 45 +++++++++++++++++++ paddle/phi/infermeta/multiary.h | 45 +++++++++++++++++++ 6 files changed, 93 insertions(+), 93 deletions(-) diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 1dea8ccae8105b..833f2211ccae5b 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -69,6 +69,7 @@ {{"{name}", (PyCFunction)(void (*)(void)){name}, METH_VARARGS | METH_KEYWORDS, "C++ interface function for {name}."}},""" NEED_GEN_STATIC_ONLY_APIS = [ + 'distributed_fused_lamb', 'distributed_fused_lamb_init', 'distributed_fused_lamb_init_', 'fetch', @@ -172,7 +173,6 @@ 'c_reduce_prod_', 'push_sparse_v2', 'push_sparse_v2_', - 'distributed_fused_lamb', 'partial_send', 'nop', 'nop_', diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 6c0118fd0728a1..a5cdcbcd85d60e 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -3639,9 +3639,9 @@ - op: distributed_fused_lamb inputs: - {param: Param, grad: Grad, fp32fusedparam: FP32FusedParam, fp32fusedgrad: FP32FusedGrad, fp16fusedparam: FP16FusedParam, fp16fusedgrad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fusedparamoffsets: FusedParamOffsets, fp32shardfusedparamoffsets: FP32ShardFusedParamOffsets, fp16shardfusedparamoffsets: FP16ShardFusedParamOffsets, paraminfo: ParamInfo, paramorder: ParamOrder, learningrate: LearningRate, globalscale: GlobalCsale} + {param: Param, grad: Grad, fp32_fused_param: FP32FusedParam, fp32fusedgrad: FP32FusedGrad, fp16fusedparam: FP16FusedParam, fp16fusedgrad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fusedparamoffsets: FusedParamOffsets, fp32shardfusedparamoffsets: FP32ShardFusedParamOffsets, fp16shardfusedparamoffsets: FP16ShardFusedParamOffsets, paraminfo: ParamInfo, paramorder: ParamOrder, 
learningrate: LearningRate, globalscale: GlobalScale} outputs: - {paramout : ParamOut, fp32fusedparamout: FP32FusedParamOut, fp16fusedparamout: FP16FusedParamOut, fp32accfusedgrad: FP32AccFusedGrad, fp16accfusedgrad: FP16AccFusedGrad, moment1out: Moment1Out, moment2out: Moment2Out, beta1powout: Beta1PowOut, beta2powout: Beta2PowOut, foundinf: FoundInf, accstep: AccStep, stopupdate: StopUpdate, step: Step} + {paramout : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16fusedparamout: FP16FusedParamOut, fp32accfusedgrad: FP32AccFusedGrad, fp16accfusedgrad: FP16AccFusedGrad, moment1out: Moment1Out, moment2out: Moment2Out, beta1powout: Beta1PowOut, beta2powout: Beta2PowOut, foundinf: FoundInf, accstep: AccStep, stopupdate: StopUpdate, step: Step} - op: distributed_fused_lamb_init inputs: diff --git a/paddle/phi/infermeta/fusion.cc b/paddle/phi/infermeta/fusion.cc index b54d6a598cda9f..b56e7fab0bfe67 100644 --- a/paddle/phi/infermeta/fusion.cc +++ b/paddle/phi/infermeta/fusion.cc @@ -568,51 +568,6 @@ void Conv2dXPUInferMeta(const MetaTensor& x, out->set_dtype(out_dtype); } -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const MetaTensor& fp32_fused_param, - const MetaTensor& fp32fusedgrad, - const MetaTensor& fp16fusedparam, - const MetaTensor& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - bool use_hierarchical_allreduce, - int64_t nranks, - const std::vector& ring_ids, - MetaTensor* fp32_fused_param_out, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step) {} - void EmbeddingWithEltwiseAddXPUInferMeta( const std::vector& ids, const std::vector& tables, diff --git a/paddle/phi/infermeta/fusion.h b/paddle/phi/infermeta/fusion.h index e904ee866ed2f4..0a7224e39f73b3 100644 --- a/paddle/phi/infermeta/fusion.h +++ b/paddle/phi/infermeta/fusion.h @@ -145,51 +145,6 @@ void Conv2dXPUInferMeta(const MetaTensor& x, MetaTensor* out, MetaTensor* out_max); -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const MetaTensor& fp32_fused_param, - const MetaTensor& fp32fusedgrad, - const MetaTensor& fp16fusedparam, - const MetaTensor& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool 
use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - bool use_hierarchical_allreduce, - int64_t nranks, - const std::vector& ring_ids, - MetaTensor* fp32_fused_param_out, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step); - void EmbeddingWithEltwiseAddXPUInferMeta( const std::vector& ids, const std::vector& tables, diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index 7575cc3cf14344..d293fc4ef7976c 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -1508,6 +1508,51 @@ void DGCMomentumInferMeta(const MetaTensor& param, } } +void DistributedFusedLambInferMeta( + const std::vector& param, + const std::vector& grad, + const MetaTensor& fp32_fused_param, + const MetaTensor& fp32fusedgrad, + const MetaTensor& fp16fusedparam, + const MetaTensor& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + bool use_hierarchical_allreduce, + int64_t nranks, + const std::vector& ring_ids, + MetaTensor* fp32_fused_param_out, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, + MetaTensor* moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step) {} + void EditDistanceInferMeta(const MetaTensor& hyps, const MetaTensor& refs, const MetaTensor& hypslength, diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index e83ef2ed1825de..f01dc7ce40d8a2 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -294,6 +294,51 @@ void DeformableConvInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void DistributedFusedLambInferMeta( + const std::vector& param, + const std::vector& grad, + const MetaTensor& fp32_fused_param, + const MetaTensor& fp32fusedgrad, + const MetaTensor& fp16fusedparam, + const MetaTensor& fp16fusedgrad, + const MetaTensor& moment1, + const MetaTensor& moment2, + const MetaTensor& beta1pow, + const MetaTensor& beta2pow, + const MetaTensor& fusedparamoffsets, + const MetaTensor& fp32shardfusedparamoffsets, + const MetaTensor& fp16shardfusedparamoffsets, + const MetaTensor& paraminfo, + const MetaTensor& paramorder, + const MetaTensor& learningrate, + const MetaTensor& globalscale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + bool use_hierarchical_allreduce, + int64_t nranks, + const 
std::vector<int>& ring_ids, + MetaTensor* fp32_fused_param_out, + MetaTensor* fp16fusedparamout, + MetaTensor* fp32accfusedgrad, + MetaTensor* fp16accfusedgrad, + MetaTensor* moment1out, + MetaTensor* moment2out, + MetaTensor* beta1powout, + MetaTensor* beta2powout, + std::vector<MetaTensor*> paramout, + MetaTensor* foundinf, + MetaTensor* accstep, + MetaTensor* stopupdate, + MetaTensor* step); + void DGCMomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, From fbc0884b102941afb8ab0f45e96f66f8c807bfe7 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 12 Mar 2024 10:37:40 +0800 Subject: [PATCH 18/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index d7c4de44146032..963389e2b4f02c 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -436,10 +436,11 @@ output : Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) infer_meta : func : DistributedFusedLambInferMeta - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] kernel : func : distributed_fused_lamb - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] + data_type : DataType::FLOAT32 + param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate inplace : (fp32_fused_param ->
fp32_fused_param_out), (fp16fusedparam -> fp16fusedparamout), (moment1 -> moment1out), (moment2 -> moment2out), (beta1pow -> beta1powout), (beta2pow -> beta2powout), (param -> paramout) From 76016805e0be83e4069f8a45b9b212dfb843a5f5 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 12 Mar 2024 11:47:39 +0800 Subject: [PATCH 19/38] fix --- .../optimizers/distributed_fused_lamb_op.cc | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc b/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc index dbf335a88154cf..651c446db9007e 100644 --- a/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc +++ b/paddle/fluid/operators/optimizers/distributed_fused_lamb_op.cc @@ -169,3 +169,64 @@ namespace ops = paddle::operators; REGISTER_OP_WITHOUT_GRADIENT(distributed_fused_lamb, ops::DistributedFusedLambOp, ops::DistributedFusedLambOpMaker); + +namespace phi { +namespace fusion { + +template <typename T, typename Context> +void DistributedFusedLambKernel(const Context &dev_ctx, + const std::vector<const DenseTensor *> &param, + const std::vector<const DenseTensor *> &grad, + const paddle::optional<DenseTensor> &fp32_param, + const paddle::optional<DenseTensor> &fp32_grad, + const paddle::optional<DenseTensor> &fp16_param, + const paddle::optional<DenseTensor> &fp16_grad, + const DenseTensor &moment1, + const DenseTensor &moment2, + const DenseTensor &beta1_pow, + const DenseTensor &beta2_pow, + const DenseTensor &param_offsets, + const DenseTensor &fp32_partial_offsets, + const DenseTensor &fp16_partial_offsets, + const DenseTensor &param_info, + const DenseTensor &param_order, + const DenseTensor &learning_rate, + const DenseTensor &global_scale, + int acc_steps, + float beta1, + float beta2, + float epsilon, + float max_global_grad_norm, + float weight_decay, + bool clip_after_allreduce, + bool use_master_param_norm, + bool use_master_acc_grad, + bool is_grad_scaled_by_nranks, + bool use_hierarchical_allreduce, + int64_t nranks, + const std::vector<int> &ring_ids, + DenseTensor *fp32_param_out, + DenseTensor *fp16_param_out, + DenseTensor *fp32_acc_grad, + DenseTensor *fp16_acc_grad, + DenseTensor *moment1_out, + DenseTensor *moment2_out, + DenseTensor *beta1_pow_out, + DenseTensor *beta2_pow_out, + DenseTensor *param_out, + DenseTensor *found_inf, + DenseTensor *acc_step, + DenseTensor *stop_update, + DenseTensor *step) { + PADDLE_THROW(phi::errors::Unimplemented( + "The distributed_fused_lamb operator does not support CPU yet.")); +} + +} // namespace fusion +} // namespace phi + +PD_REGISTER_KERNEL(distributed_fused_lamb, + CPU, + ALL_LAYOUT, + phi::fusion::DistributedFusedLambKernel, + float) {} From f9c8eb9767ab79e112b906b6d46e8c3960dcf59b Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 12 Mar 2024 15:02:50 +0800 Subject: [PATCH 20/38] fix --- paddle/fluid/pir/dialect/op_generator/ops_api_gen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 833f2211ccae5b..c2bcfcd43c58fe 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -70,6 +70,7 @@ NEED_GEN_STATIC_ONLY_APIS = [ 'distributed_fused_lamb', + 'distributed_fused_lamb_', 'distributed_fused_lamb_init', 'distributed_fused_lamb_init_', 'fetch', From 812afa7604fc95b29615342cad7ea5b926e69e31 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 13 Mar 2024 11:06:05 +0800 Subject: [PATCH 21/38] fix --- test/ir/pir/translator/CMakeLists.txt | 1 + .../translator/test_distributed_fused_lamb.py | 400
++++++++++++++++++ 2 files changed, 401 insertions(+) create mode 100644 test/ir/pir/translator/test_distributed_fused_lamb.py diff --git a/test/ir/pir/translator/CMakeLists.txt b/test/ir/pir/translator/CMakeLists.txt index b7fd892ea35a5c..8ef514aaf3825d 100644 --- a/test/ir/pir/translator/CMakeLists.txt +++ b/test/ir/pir/translator/CMakeLists.txt @@ -10,6 +10,7 @@ list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_allreduce_prod_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_lookup_table_translate) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb_init) +list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_send_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_reduce_max_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_reduce_prod_translator) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py new file mode 100644 index 00000000000000..fd06adbd53231f --- /dev/null +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -0,0 +1,400 @@ +# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import unittest + +import test_op_translator + +import paddle +from paddle.base import core, unique_name +from paddle.base.layer_helper import LayerHelper +from paddle.nn import ClipGradByGlobalNorm + + +def init_communicator(block, rank, ranks, ring_id): + eps = os.environ['PADDLE_TRAINER_ENDPOINTS'] + eps = [ep.strip() for ep in eps.split(",") if ep.strip()] + cur_ep = eps[rank] + other_eps = [eps[r] for r in ranks if r != rank] + + local_rank = ranks.index(rank) + comm_var_name = unique_name.generate('comm_id') + comm_id_var = block.create_var( + name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW + ) + if core.is_compiled_with_cuda(): + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + elif core.is_compiled_with_xpu(): + block.append_op( + type='c_gen_bkcl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + elif ( + paddle.distributed.ParallelEnv().device_type + in paddle.device.get_all_custom_device_type() + ): + block.append_op( + type='c_gen_xccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': comm_id_var}, + outputs={}, + attrs={ + 'nranks': len(ranks), + 'rank': local_rank, + 'ring_id': ring_id, + 'endpoints': ','.join(eps), + }, + ) + tmp_var = block.create_var(name=unique_name.generate('tmp')) + block.append_op( + type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1} + ) + block.append_op( + type='c_allreduce_sum', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + attrs={'ring_id': ring_id, 'use_calc_stream': True}, + ) + block.append_op( + type='c_sync_calc_stream', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + ) + return ring_id + + +class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): + def __init__( + self, + learning_rate=0.001, + lamb_weight_decay=0.01, + beta1=0.9, + beta2=0.999, + epsilon=1e-6, + parameters=None, + grad_clip=None, + exclude_from_weight_decay_fn=None, + clip_after_allreduce=True, + is_grad_scaled_by_nranks=True, + alignment=128, + use_master_param_norm=True, + gradient_accumulation_steps=1, + use_master_acc_grad=True, + nproc_per_node=None, + use_hierarchical_allreduce=False, + name=None, + ): + assert ( + not paddle.in_dynamic_mode() + ), "DistributedFusedLamb does not support dygraph mode" + super().__init__(learning_rate=learning_rate, grad_clip=None, name=name) + + self._beta1 = beta1 + self._beta2 = beta2 + self._epsilon = epsilon + self._weight_decay = ( + lamb_weight_decay if lamb_weight_decay is not None else 0.0 + ) + if grad_clip is not None: + assert isinstance( + grad_clip, ClipGradByGlobalNorm + ), "Only ClipGradByGlobalNorm is supported in DistributedFusedLamb" + max_global_grad_norm = grad_clip.clip_norm + else: + max_global_grad_norm = -1.0 + self._max_global_grad_norm = max_global_grad_norm + self._alignment = alignment if alignment is not None else -1 + self._clip_after_allreduce = clip_after_allreduce + self._is_grad_scaled_by_nranks = is_grad_scaled_by_nranks + self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn + self._scale = None + self._use_master_param_norm = use_master_param_norm + 
self._gradient_accumulation_steps = gradient_accumulation_steps + self._use_master_acc_grad = use_master_acc_grad + self._nproc_per_node = nproc_per_node + self._use_hierarchical_allreduce = use_hierarchical_allreduce + assert self._gradient_accumulation_steps >= 1 + + self.helper = LayerHelper('distributed_fused_lamb') + self._supports_check_nan_inf = True # very import flag for AMP + + main_block = self.helper.main_program.global_block() + self._found_inf = main_block.create_var( + name=unique_name.generate('found_inf'), + shape=[1], + dtype=core.VarDesc.VarType.BOOL, + ) + self._step = None + + if self._gradient_accumulation_steps > 1: + self._stop_update = main_block.create_var( + name=unique_name.generate('stop_update'), + shape=[1], + dtype=core.VarDesc.VarType.BOOL, + ) + else: + self._stop_update = None + + self._param_to_master_param = {} + + def _create_persistable_var(self, name=None, shape=[-1], dtype='float32'): + startup_block = self.helper.startup_program.global_block() + if name is not None: + name = unique_name.generate(name) + startup_var = startup_block.create_var( + name=name, + shape=shape, + dtype=dtype, + persistable=True, + stop_gradient=True, + ) + main_block = self.helper.main_program.global_block() + main_var = main_block.create_var( + name=startup_var.name, + shape=startup_var.shape, + dtype=startup_var.dtype, + persistable=True, + stop_gradient=True, + ) + return main_var + + def _create_scale_from_constant(self): + name = unique_name.generate('global_scale') + return paddle.static.create_global_var( + name=name, + shape=[1], + dtype='float32', + value=1.0, + persistable=True, + ) + + def _get_or_create_scale(self): + if self._scale is None: + self._scale = self._create_scale_from_constant(1.0) + return self._scale + + def append_op(self): + self.op_type = "distributed_fused_lamb" + self.helper = LayerHelper(self.op_type) + + params = [paddle.ones(shape=(1, 1), dtype='float32')] + grads = [paddle.ones(shape=(1, 1), dtype='float32')] + + rank = paddle.distributed.get_rank() + nranks = paddle.distributed.get_world_size() + + fp32_fused_param = self._create_persistable_var('fp32_fused_param') + fp32_fused_grad = self._create_persistable_var('fp32_fused_grad') + fp16_fused_param = self._create_persistable_var( + 'fp16_fused_param', dtype='float16' + ) + fp16_fused_grad = self._create_persistable_var( + 'fp16_fused_grad', dtype='float16' + ) + + master_params = [] + for p in params: + master_p = self._create_persistable_var('master_weight') + master_params.append(master_p) + + moment1 = self._create_persistable_var('moment1') + moment1.is_distributed = True + moment2 = self._create_persistable_var('moment2') + moment2.is_distributed = True + beta1pow = self._create_persistable_var('beta1pow') + beta2pow = self._create_persistable_var('beta2pow') + + param_info = self._create_persistable_var('param_info', dtype='int32') + param_info.is_distributed = True + + fused_offsets = self._create_persistable_var( + 'fused_offsets', dtype='int32' + ) + + fp32_partial_fused_offsets = self._create_persistable_var( + 'fp32_partial_fused_offsets', dtype='int32' + ) + fp32_partial_fused_offsets.is_distributed = True + + fp16_partial_fused_offsets = self._create_persistable_var( + 'fp16_partial_fused_offsets', dtype='int32' + ) + fp16_partial_fused_offsets.is_distributed = True + + param_order = self._create_persistable_var('param_order', dtype='int32') + param_order.is_distributed = True + + if self._gradient_accumulation_steps > 1: + fp32_acc_fused_grad = [ + 
self._create_persistable_var('fp32_acc_fused_grad') + ] + fp16_acc_fused_grad = [ + self._create_persistable_var( + 'fp16_acc_fused_grad', dtype='float16' + ) + ] + acc_step = [self._create_persistable_var('acc_step', dtype='int64')] + else: + fp32_acc_fused_grad = [] + fp16_acc_fused_grad = [] + acc_step = [] + + lr = None + for p_g in params: + if lr is None: + lr = self._create_param_lr(p_g) + else: + new_lr = self._create_param_lr(p_g) + assert id(lr) == id( + new_lr + ), "The learning rate for each parameter should be the same" + assert lr is not None + + scale = self._get_or_create_scale() + + step = self._get_or_create_step() + + ring_ids = [] + startup_block = self.helper.startup_program.global_block() + if nranks > 1: + ring_id = init_communicator( + startup_block, rank, list(range(nranks)), 0 + ) + ring_ids.append(ring_id) + + use_hierarchical_allreduce = False + if self._nproc_per_node is None: + nproc_per_node = nranks + else: + nproc_per_node = self._nproc_per_node + assert ( + nranks % nproc_per_node == 0 + ), "nranks should be exactly divided by nproc_per_node" + node_id = int(rank / nproc_per_node) + node_num = int(nranks / nproc_per_node) + shard_inside_node = nranks > nproc_per_node + + if node_num > 1 and len(ring_ids) <= 1 and shard_inside_node: + local_group_ranks = list( + range(node_id * nproc_per_node, (node_id + 1) * nproc_per_node) + ) + ring_id = init_communicator( + startup_block, rank, local_group_ranks, 1 + ) + ring_ids.append(ring_id) + + if self._use_hierarchical_allreduce and nranks > nproc_per_node: + use_hierarchical_allreduce = True + outer_group_ranks = list( + range(rank % nproc_per_node, nranks, nproc_per_node) + ) + ring_id = init_communicator( + startup_block, rank, outer_group_ranks, ring_ids[-1] + 1 + ) + ring_ids.append(ring_id) + + rank = paddle.distributed.get_rank() + nranks = paddle.distributed.get_world_size() + helper = LayerHelper(self.op_type) + helper.append_op( + type=self.op_type, + inputs={ + 'FP32FusedParam': [fp32_fused_param], + 'FP32FusedGrad': [fp32_fused_grad], + 'FP16FusedParam': [fp16_fused_param], + 'FP16FusedGrad': [fp16_fused_grad], + 'LearningRate': [lr], + 'Moment1': [moment1], + 'Moment2': [moment2], + 'Beta1Pow': [beta1pow], + 'Beta2Pow': [beta2pow], + 'GlobalScale': [scale], + 'ParamInfo': [param_info], + 'Param': params, + 'Grad': grads, + 'FusedParamOffsets': [fused_offsets], + 'FP32ShardFusedParamOffsets': [fp32_partial_fused_offsets], + 'FP16ShardFusedParamOffsets': [fp16_partial_fused_offsets], + 'ParamOrder': [param_order], + }, + outputs={ + 'FP32FusedParamOut': [fp32_fused_param], + 'FP16FusedParamOut': [fp16_fused_param], + 'Moment1Out': [moment1], + 'Moment2Out': [moment2], + 'Beta1PowOut': [beta1pow], + 'Beta2PowOut': [beta2pow], + 'ParamOut': params, + 'GradOut': grads, + 'FoundInf': [self._found_inf], + 'FP32AccFusedGrad': fp32_acc_fused_grad, + 'FP16AccFusedGrad': fp16_acc_fused_grad, + 'AccStep': acc_step, + 'StopUpdate': self._stop_update + if self._stop_update is not None + else [], + 'Step': [step], + }, + attrs={ + 'weight_decay': self._weight_decay, + 'beta1': self._beta1, + 'beta2': self._beta2, + 'epsilon': self._epsilon, + 'max_global_grad_norm': self._max_global_grad_norm, + 'clip_after_allreduce': self._clip_after_allreduce, + 'rank': rank, + 'nranks': nranks, + 'ring_ids': ring_ids, + 'use_master_param_norm': self._use_master_param_norm, + 'is_grad_scaled_by_nranks': self._is_grad_scaled_by_nranks, + 'acc_steps': self._gradient_accumulation_steps, + 'use_master_acc_grad': 
self._use_master_acc_grad, + 'use_hierarchical_allreduce': use_hierarchical_allreduce, + }, + ) + + def test_translator(self): + self.check() + + +if __name__ == "__main__": + unittest.main() From daa52115ce48b47c01193f40caea5ab5c5af8327 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 13 Mar 2024 13:05:59 +0800 Subject: [PATCH 22/38] fix --- test/ir/pir/translator/test_distributed_fused_lamb.py | 1 - 1 file changed, 1 deletion(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index fd06adbd53231f..7920393350e120 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -126,7 +126,6 @@ def __init__( assert ( not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" - super().__init__(learning_rate=learning_rate, grad_clip=None, name=name) self._beta1 = beta1 self._beta2 = beta2 From 782fe3f00cb48a0db7914ccf2b2136c5bbec0912 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 13 Mar 2024 16:37:24 +0800 Subject: [PATCH 23/38] fix --- test/ir/pir/translator/test_distributed_fused_lamb.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 7920393350e120..6d2062ea046eaa 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -213,14 +213,10 @@ def _get_or_create_scale(self): def append_op(self): self.op_type = "distributed_fused_lamb" - self.helper = LayerHelper(self.op_type) - params = [paddle.ones(shape=(1, 1), dtype='float32')] grads = [paddle.ones(shape=(1, 1), dtype='float32')] - rank = paddle.distributed.get_rank() nranks = paddle.distributed.get_world_size() - fp32_fused_param = self._create_persistable_var('fp32_fused_param') fp32_fused_grad = self._create_persistable_var('fp32_fused_grad') fp16_fused_param = self._create_persistable_var( @@ -331,8 +327,6 @@ def append_op(self): ) ring_ids.append(ring_id) - rank = paddle.distributed.get_rank() - nranks = paddle.distributed.get_world_size() helper = LayerHelper(self.op_type) helper.append_op( type=self.op_type, From 0935545bc55bac58760590498ca71187925ed382 Mon Sep 17 00:00:00 2001 From: enkilee Date: Thu, 14 Mar 2024 13:01:36 +0800 Subject: [PATCH 24/38] fix --- paddle/fluid/pir/dialect/op_generator/ops_api_gen.py | 3 +-- test/ir/pir/translator/CMakeLists.txt | 3 +-- test/ir/pir/translator/test_distributed_fused_lamb.py | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 81f2e444fc6c29..cb5bdf95e6ee0b 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -69,8 +69,6 @@ {{"{name}", (PyCFunction)(void (*)(void)){name}, METH_VARARGS | METH_KEYWORDS, "C++ interface function for {name}."}},""" NEED_GEN_STATIC_ONLY_APIS = [ - 'distributed_fused_lamb', - 'distributed_fused_lamb_', 'distributed_fused_lamb_init', 'distributed_fused_lamb_init_', 'fetch', @@ -138,6 +136,7 @@ 'c_softmax_with_cross_entropy', 'c_split', 'decayed_adagrad', + 'distributed_fused_lamb', 'distributed_lookup_table', 'dpsgd', 'embedding_grad_sparse', diff --git a/test/ir/pir/translator/CMakeLists.txt b/test/ir/pir/translator/CMakeLists.txt index 94f23cfb907b6d..1e5f8c922eba42 100644 --- 
a/test/ir/pir/translator/CMakeLists.txt +++ b/test/ir/pir/translator/CMakeLists.txt @@ -11,10 +11,9 @@ list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_reduce_max_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_reduce_prod_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_c_split_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb_init) +list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_lookup_table_translate) -list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb_init) -list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_nop_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_send_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_recv_translator) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 6d2062ea046eaa..aafe44b8184c65 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -126,7 +126,7 @@ def __init__( assert ( not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" - + super().__init__() self._beta1 = beta1 self._beta2 = beta2 self._epsilon = epsilon From 14d275db6cdd60580c343daf30a0f66c7cbd37aa Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 15 Mar 2024 13:05:43 +0800 Subject: [PATCH 25/38] fix --- paddle/fluid/pir/dialect/op_generator/ops_api_gen.py | 1 + 1 file changed, 1 insertion(+) diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index cb5bdf95e6ee0b..555746d4a12d3b 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -137,6 +137,7 @@ 'c_split', 'decayed_adagrad', 'distributed_fused_lamb', + 'distributed_fused_lamb_', 'distributed_lookup_table', 'dpsgd', 'embedding_grad_sparse', From af9be1cd9e490444683d8a53c9032e1bcc2215a5 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 18 Mar 2024 14:52:57 +0800 Subject: [PATCH 26/38] fix --- .../translator/test_distributed_fused_lamb.py | 271 ++++++++---------- 1 file changed, 121 insertions(+), 150 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index aafe44b8184c65..a14dc7ee7b0869 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -20,42 +20,41 @@ import paddle from paddle.base import core, unique_name from paddle.base.layer_helper import LayerHelper -from paddle.nn import ClipGradByGlobalNorm def init_communicator(block, rank, ranks, ring_id): - eps = os.environ['PADDLE_TRAINER_ENDPOINTS'] + eps = os.environ["PADDLE_TRAINER_ENDPOINTS"] eps = [ep.strip() for ep in eps.split(",") if ep.strip()] cur_ep = eps[rank] other_eps = [eps[r] for r in ranks if r != rank] local_rank = ranks.index(rank) - comm_var_name = unique_name.generate('comm_id') + comm_var_name = unique_name.generate("comm_id") comm_id_var = block.create_var( name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW ) if core.is_compiled_with_cuda(): block.append_op( - type='c_gen_nccl_id', + type="c_gen_nccl_id", inputs={}, - outputs={'Out': comm_id_var}, + outputs={"Out": comm_id_var}, attrs={ - 'rank': local_rank, - 
'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, + "rank": local_rank, + "endpoint": cur_ep, + "other_endpoints": other_eps, + "ring_id": ring_id, }, ) elif core.is_compiled_with_xpu(): block.append_op( - type='c_gen_bkcl_id', + type="c_gen_bkcl_id", inputs={}, - outputs={'Out': comm_id_var}, + outputs={"Out": comm_id_var}, attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, + "rank": local_rank, + "endpoint": cur_ep, + "other_endpoints": other_eps, + "ring_id": ring_id, }, ) elif ( @@ -63,102 +62,74 @@ def init_communicator(block, rank, ranks, ring_id): in paddle.device.get_all_custom_device_type() ): block.append_op( - type='c_gen_xccl_id', + type="c_gen_xccl_id", inputs={}, - outputs={'Out': comm_id_var}, + outputs={"Out": comm_id_var}, attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, + "rank": local_rank, + "endpoint": cur_ep, + "other_endpoints": other_eps, + "ring_id": ring_id, }, ) block.append_op( - type='c_comm_init', - inputs={'X': comm_id_var}, + type="c_comm_init", + inputs={"X": comm_id_var}, outputs={}, attrs={ - 'nranks': len(ranks), - 'rank': local_rank, - 'ring_id': ring_id, - 'endpoints': ','.join(eps), + "nranks": len(ranks), + "rank": local_rank, + "ring_id": ring_id, + "endpoints": ",".join(eps), }, ) - tmp_var = block.create_var(name=unique_name.generate('tmp')) + tmp_var = block.create_var(name=unique_name.generate("tmp")) block.append_op( - type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1} + type="fill_constant", outputs={"Out": tmp_var}, attrs={"value": 1} ) block.append_op( - type='c_allreduce_sum', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}, - attrs={'ring_id': ring_id, 'use_calc_stream': True}, + type="c_allreduce_sum", + inputs={"X": tmp_var}, + outputs={"Out": tmp_var}, + attrs={"ring_id": ring_id, "use_calc_stream": True}, ) block.append_op( - type='c_sync_calc_stream', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}, + type="c_sync_calc_stream", + inputs={"X": tmp_var}, + outputs={"Out": tmp_var}, ) return ring_id class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): - def __init__( - self, - learning_rate=0.001, - lamb_weight_decay=0.01, - beta1=0.9, - beta2=0.999, - epsilon=1e-6, - parameters=None, - grad_clip=None, - exclude_from_weight_decay_fn=None, - clip_after_allreduce=True, - is_grad_scaled_by_nranks=True, - alignment=128, - use_master_param_norm=True, - gradient_accumulation_steps=1, - use_master_acc_grad=True, - nproc_per_node=None, - use_hierarchical_allreduce=False, - name=None, - ): + def setUp(self): assert ( not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" - super().__init__() - self._beta1 = beta1 - self._beta2 = beta2 - self._epsilon = epsilon - self._weight_decay = ( - lamb_weight_decay if lamb_weight_decay is not None else 0.0 - ) - if grad_clip is not None: - assert isinstance( - grad_clip, ClipGradByGlobalNorm - ), "Only ClipGradByGlobalNorm is supported in DistributedFusedLamb" - max_global_grad_norm = grad_clip.clip_norm - else: - max_global_grad_norm = -1.0 - self._max_global_grad_norm = max_global_grad_norm - self._alignment = alignment if alignment is not None else -1 - self._clip_after_allreduce = clip_after_allreduce - self._is_grad_scaled_by_nranks = is_grad_scaled_by_nranks - self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn + self.name = None + self._beta1 = 0.9 + self._beta2 = 0.999 + 
self._epsilon = 1e-6 + self._weight_decay = 0.01 + self._max_global_grad_norm = -1.0 + self._alignment = 128 + self._clip_after_allreduce = True + self._is_grad_scaled_by_nranks = True + self._exclude_from_weight_decay_fn = None self._scale = None - self._use_master_param_norm = use_master_param_norm - self._gradient_accumulation_steps = gradient_accumulation_steps - self._use_master_acc_grad = use_master_acc_grad - self._nproc_per_node = nproc_per_node - self._use_hierarchical_allreduce = use_hierarchical_allreduce + self._use_master_param_norm = True + self._gradient_accumulation_steps = 1 + self._use_master_acc_grad = True + self._nproc_per_node = None + self._use_hierarchical_allreduce = False assert self._gradient_accumulation_steps >= 1 - self.helper = LayerHelper('distributed_fused_lamb') + self.helper = LayerHelper("distributed_fused_lamb") self._supports_check_nan_inf = True # very import flag for AMP main_block = self.helper.main_program.global_block() self._found_inf = main_block.create_var( - name=unique_name.generate('found_inf'), + name=unique_name.generate("found_inf"), shape=[1], dtype=core.VarDesc.VarType.BOOL, ) @@ -166,7 +137,7 @@ def __init__( if self._gradient_accumulation_steps > 1: self._stop_update = main_block.create_var( - name=unique_name.generate('stop_update'), + name=unique_name.generate("stop_update"), shape=[1], dtype=core.VarDesc.VarType.BOOL, ) @@ -175,7 +146,7 @@ def __init__( self._param_to_master_param = {} - def _create_persistable_var(self, name=None, shape=[-1], dtype='float32'): + def _create_persistable_var(self, name=None, shape=[-1], dtype="float32"): startup_block = self.helper.startup_program.global_block() if name is not None: name = unique_name.generate(name) @@ -197,11 +168,11 @@ def _create_persistable_var(self, name=None, shape=[-1], dtype='float32'): return main_var def _create_scale_from_constant(self): - name = unique_name.generate('global_scale') + name = unique_name.generate("global_scale") return paddle.static.create_global_var( name=name, shape=[1], - dtype='float32', + dtype="float32", value=1.0, persistable=True, ) @@ -213,61 +184,61 @@ def _get_or_create_scale(self): def append_op(self): self.op_type = "distributed_fused_lamb" - params = [paddle.ones(shape=(1, 1), dtype='float32')] - grads = [paddle.ones(shape=(1, 1), dtype='float32')] + params = [paddle.ones(shape=(1, 1), dtype="float32")] + grads = [paddle.ones(shape=(1, 1), dtype="float32")] rank = paddle.distributed.get_rank() nranks = paddle.distributed.get_world_size() - fp32_fused_param = self._create_persistable_var('fp32_fused_param') - fp32_fused_grad = self._create_persistable_var('fp32_fused_grad') + fp32_fused_param = self._create_persistable_var("fp32_fused_param") + fp32_fused_grad = self._create_persistable_var("fp32_fused_grad") fp16_fused_param = self._create_persistable_var( - 'fp16_fused_param', dtype='float16' + "fp16_fused_param", dtype="float16" ) fp16_fused_grad = self._create_persistable_var( - 'fp16_fused_grad', dtype='float16' + "fp16_fused_grad", dtype="float16" ) master_params = [] for p in params: - master_p = self._create_persistable_var('master_weight') + master_p = self._create_persistable_var("master_weight") master_params.append(master_p) - moment1 = self._create_persistable_var('moment1') + moment1 = self._create_persistable_var("moment1") moment1.is_distributed = True - moment2 = self._create_persistable_var('moment2') + moment2 = self._create_persistable_var("moment2") moment2.is_distributed = True - beta1pow = 
self._create_persistable_var('beta1pow') - beta2pow = self._create_persistable_var('beta2pow') + beta1pow = self._create_persistable_var("beta1pow") + beta2pow = self._create_persistable_var("beta2pow") - param_info = self._create_persistable_var('param_info', dtype='int32') + param_info = self._create_persistable_var("param_info", dtype="int32") param_info.is_distributed = True fused_offsets = self._create_persistable_var( - 'fused_offsets', dtype='int32' + "fused_offsets", dtype="int32" ) fp32_partial_fused_offsets = self._create_persistable_var( - 'fp32_partial_fused_offsets', dtype='int32' + "fp32_partial_fused_offsets", dtype="int32" ) fp32_partial_fused_offsets.is_distributed = True fp16_partial_fused_offsets = self._create_persistable_var( - 'fp16_partial_fused_offsets', dtype='int32' + "fp16_partial_fused_offsets", dtype="int32" ) fp16_partial_fused_offsets.is_distributed = True - param_order = self._create_persistable_var('param_order', dtype='int32') + param_order = self._create_persistable_var("param_order", dtype="int32") param_order.is_distributed = True if self._gradient_accumulation_steps > 1: fp32_acc_fused_grad = [ - self._create_persistable_var('fp32_acc_fused_grad') + self._create_persistable_var("fp32_acc_fused_grad") ] fp16_acc_fused_grad = [ self._create_persistable_var( - 'fp16_acc_fused_grad', dtype='float16' + "fp16_acc_fused_grad", dtype="float16" ) ] - acc_step = [self._create_persistable_var('acc_step', dtype='int64')] + acc_step = [self._create_persistable_var("acc_step", dtype="int64")] else: fp32_acc_fused_grad = [] fp16_acc_fused_grad = [] @@ -331,57 +302,57 @@ def append_op(self): helper.append_op( type=self.op_type, inputs={ - 'FP32FusedParam': [fp32_fused_param], - 'FP32FusedGrad': [fp32_fused_grad], - 'FP16FusedParam': [fp16_fused_param], - 'FP16FusedGrad': [fp16_fused_grad], - 'LearningRate': [lr], - 'Moment1': [moment1], - 'Moment2': [moment2], - 'Beta1Pow': [beta1pow], - 'Beta2Pow': [beta2pow], - 'GlobalScale': [scale], - 'ParamInfo': [param_info], - 'Param': params, - 'Grad': grads, - 'FusedParamOffsets': [fused_offsets], - 'FP32ShardFusedParamOffsets': [fp32_partial_fused_offsets], - 'FP16ShardFusedParamOffsets': [fp16_partial_fused_offsets], - 'ParamOrder': [param_order], + "FP32FusedParam": [fp32_fused_param], + "FP32FusedGrad": [fp32_fused_grad], + "FP16FusedParam": [fp16_fused_param], + "FP16FusedGrad": [fp16_fused_grad], + "LearningRate": [lr], + "Moment1": [moment1], + "Moment2": [moment2], + "Beta1Pow": [beta1pow], + "Beta2Pow": [beta2pow], + "GlobalScale": [scale], + "ParamInfo": [param_info], + "Param": params, + "Grad": grads, + "FusedParamOffsets": [fused_offsets], + "FP32ShardFusedParamOffsets": [fp32_partial_fused_offsets], + "FP16ShardFusedParamOffsets": [fp16_partial_fused_offsets], + "ParamOrder": [param_order], }, outputs={ - 'FP32FusedParamOut': [fp32_fused_param], - 'FP16FusedParamOut': [fp16_fused_param], - 'Moment1Out': [moment1], - 'Moment2Out': [moment2], - 'Beta1PowOut': [beta1pow], - 'Beta2PowOut': [beta2pow], - 'ParamOut': params, - 'GradOut': grads, - 'FoundInf': [self._found_inf], - 'FP32AccFusedGrad': fp32_acc_fused_grad, - 'FP16AccFusedGrad': fp16_acc_fused_grad, - 'AccStep': acc_step, - 'StopUpdate': self._stop_update + "FP32FusedParamOut": [fp32_fused_param], + "FP16FusedParamOut": [fp16_fused_param], + "Moment1Out": [moment1], + "Moment2Out": [moment2], + "Beta1PowOut": [beta1pow], + "Beta2PowOut": [beta2pow], + "ParamOut": params, + "GradOut": grads, + "FoundInf": [self._found_inf], + "FP32AccFusedGrad": 
fp32_acc_fused_grad, + "FP16AccFusedGrad": fp16_acc_fused_grad, + "AccStep": acc_step, + "StopUpdate": self._stop_update if self._stop_update is not None else [], - 'Step': [step], + "Step": [step], }, attrs={ - 'weight_decay': self._weight_decay, - 'beta1': self._beta1, - 'beta2': self._beta2, - 'epsilon': self._epsilon, - 'max_global_grad_norm': self._max_global_grad_norm, - 'clip_after_allreduce': self._clip_after_allreduce, - 'rank': rank, - 'nranks': nranks, - 'ring_ids': ring_ids, - 'use_master_param_norm': self._use_master_param_norm, - 'is_grad_scaled_by_nranks': self._is_grad_scaled_by_nranks, - 'acc_steps': self._gradient_accumulation_steps, - 'use_master_acc_grad': self._use_master_acc_grad, - 'use_hierarchical_allreduce': use_hierarchical_allreduce, + "weight_decay": self._weight_decay, + "beta1": self._beta1, + "beta2": self._beta2, + "epsilon": self._epsilon, + "max_global_grad_norm": self._max_global_grad_norm, + "clip_after_allreduce": self._clip_after_allreduce, + "rank": rank, + "nranks": nranks, + "ring_ids": ring_ids, + "use_master_param_norm": self._use_master_param_norm, + "is_grad_scaled_by_nranks": self._is_grad_scaled_by_nranks, + "acc_steps": self._gradient_accumulation_steps, + "use_master_acc_grad": self._use_master_acc_grad, + "use_hierarchical_allreduce": use_hierarchical_allreduce, }, ) From 4428c2fef0044d16ce9602e2c887c1cbfe4d34cd Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 18 Mar 2024 14:53:24 +0800 Subject: [PATCH 27/38] fix --- test/ir/pir/translator/CMakeLists.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/test/ir/pir/translator/CMakeLists.txt b/test/ir/pir/translator/CMakeLists.txt index 71634bc6f49f30..27fbc6046bf7a9 100644 --- a/test/ir/pir/translator/CMakeLists.txt +++ b/test/ir/pir/translator/CMakeLists.txt @@ -18,7 +18,6 @@ list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_lookup_table_translate) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_push_sparse_translator) -list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb_init) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_nop_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_send_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_partial_recv_translator) From d66d37582a711e24f16c46145026c0d6484d5478 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 18 Mar 2024 16:54:29 +0800 Subject: [PATCH 28/38] fix --- .../translator/test_distributed_fused_lamb.py | 153 +++--------------- 1 file changed, 18 insertions(+), 135 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index a14dc7ee7b0869..2c3ac5263867bc 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os import unittest import test_op_translator @@ -22,85 +21,6 @@ from paddle.base.layer_helper import LayerHelper -def init_communicator(block, rank, ranks, ring_id): - eps = os.environ["PADDLE_TRAINER_ENDPOINTS"] - eps = [ep.strip() for ep in eps.split(",") if ep.strip()] - cur_ep = eps[rank] - other_eps = [eps[r] for r in ranks if r != rank] - - local_rank = ranks.index(rank) - comm_var_name = unique_name.generate("comm_id") - comm_id_var = block.create_var( - name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW - ) - if core.is_compiled_with_cuda(): - block.append_op( - type="c_gen_nccl_id", - inputs={}, - outputs={"Out": comm_id_var}, - attrs={ - "rank": local_rank, - "endpoint": cur_ep, - "other_endpoints": other_eps, - "ring_id": ring_id, - }, - ) - elif core.is_compiled_with_xpu(): - block.append_op( - type="c_gen_bkcl_id", - inputs={}, - outputs={"Out": comm_id_var}, - attrs={ - "rank": local_rank, - "endpoint": cur_ep, - "other_endpoints": other_eps, - "ring_id": ring_id, - }, - ) - elif ( - paddle.distributed.ParallelEnv().device_type - in paddle.device.get_all_custom_device_type() - ): - block.append_op( - type="c_gen_xccl_id", - inputs={}, - outputs={"Out": comm_id_var}, - attrs={ - "rank": local_rank, - "endpoint": cur_ep, - "other_endpoints": other_eps, - "ring_id": ring_id, - }, - ) - block.append_op( - type="c_comm_init", - inputs={"X": comm_id_var}, - outputs={}, - attrs={ - "nranks": len(ranks), - "rank": local_rank, - "ring_id": ring_id, - "endpoints": ",".join(eps), - }, - ) - tmp_var = block.create_var(name=unique_name.generate("tmp")) - block.append_op( - type="fill_constant", outputs={"Out": tmp_var}, attrs={"value": 1} - ) - block.append_op( - type="c_allreduce_sum", - inputs={"X": tmp_var}, - outputs={"Out": tmp_var}, - attrs={"ring_id": ring_id, "use_calc_stream": True}, - ) - block.append_op( - type="c_sync_calc_stream", - inputs={"X": tmp_var}, - outputs={"Out": tmp_var}, - ) - return ring_id - - class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): def setUp(self): assert ( @@ -260,43 +180,23 @@ def append_op(self): step = self._get_or_create_step() ring_ids = [] - startup_block = self.helper.startup_program.global_block() - if nranks > 1: - ring_id = init_communicator( - startup_block, rank, list(range(nranks)), 0 - ) - ring_ids.append(ring_id) - - use_hierarchical_allreduce = False - if self._nproc_per_node is None: - nproc_per_node = nranks - else: - nproc_per_node = self._nproc_per_node - assert ( - nranks % nproc_per_node == 0 - ), "nranks should be exactly divided by nproc_per_node" - node_id = int(rank / nproc_per_node) - node_num = int(nranks / nproc_per_node) - shard_inside_node = nranks > nproc_per_node - - if node_num > 1 and len(ring_ids) <= 1 and shard_inside_node: - local_group_ranks = list( - range(node_id * nproc_per_node, (node_id + 1) * nproc_per_node) - ) - ring_id = init_communicator( - startup_block, rank, local_group_ranks, 1 - ) - ring_ids.append(ring_id) - if self._use_hierarchical_allreduce and nranks > nproc_per_node: - use_hierarchical_allreduce = True - outer_group_ranks = list( - range(rank % nproc_per_node, nranks, nproc_per_node) - ) - ring_id = init_communicator( - startup_block, rank, outer_group_ranks, ring_ids[-1] + 1 - ) - ring_ids.append(ring_id) + attrs = { + "weight_decay": self._weight_decay, + "beta1": self._beta1, + "beta2": self._beta2, + "epsilon": self._epsilon, + "max_global_grad_norm": self._max_global_grad_norm, + "clip_after_allreduce": self._clip_after_allreduce, 
+ "rank": rank, + "nranks": nranks, + "ring_ids": ring_ids, + "use_master_param_norm": self._use_master_param_norm, + "is_grad_scaled_by_nranks": self._is_grad_scaled_by_nranks, + "acc_steps": self._gradient_accumulation_steps, + "use_master_acc_grad": self._use_master_acc_grad, + "use_hierarchical_allreduce": self._use_hierarchical_allreduce, + } helper = LayerHelper(self.op_type) helper.append_op( @@ -333,27 +233,10 @@ def append_op(self): "FP32AccFusedGrad": fp32_acc_fused_grad, "FP16AccFusedGrad": fp16_acc_fused_grad, "AccStep": acc_step, - "StopUpdate": self._stop_update - if self._stop_update is not None - else [], + "StopUpdate": self._stop_update, "Step": [step], }, - attrs={ - "weight_decay": self._weight_decay, - "beta1": self._beta1, - "beta2": self._beta2, - "epsilon": self._epsilon, - "max_global_grad_norm": self._max_global_grad_norm, - "clip_after_allreduce": self._clip_after_allreduce, - "rank": rank, - "nranks": nranks, - "ring_ids": ring_ids, - "use_master_param_norm": self._use_master_param_norm, - "is_grad_scaled_by_nranks": self._is_grad_scaled_by_nranks, - "acc_steps": self._gradient_accumulation_steps, - "use_master_acc_grad": self._use_master_acc_grad, - "use_hierarchical_allreduce": use_hierarchical_allreduce, - }, + attrs=attrs, ) def test_translator(self): From 11807425bd0130703d0f53d48202ce61f8199c95 Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 20 Mar 2024 10:52:40 +0800 Subject: [PATCH 29/38] fix --- test/ir/pir/translator/test_distributed_fused_lamb.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 2c3ac5263867bc..4768aa9e9c3c19 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -23,6 +23,7 @@ class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): def setUp(self): + super().setUp() assert ( not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" From 00f941c26890838bac78b1d2d4a112e4b2a4b924 Mon Sep 17 00:00:00 2001 From: enkilee Date: Fri, 22 Mar 2024 16:56:02 +0800 Subject: [PATCH 30/38] fix --- .../translator/test_distributed_fused_lamb.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 4768aa9e9c3c19..610e2b306bb12c 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -103,10 +103,16 @@ def _get_or_create_scale(self): self._scale = self._create_scale_from_constant(1.0) return self._scale + def _get_or_create_step(self): + if self._step is None: + self._step = self._create_persistable_var('step', dtype='int64') + return self._step + def append_op(self): self.op_type = "distributed_fused_lamb" params = [paddle.ones(shape=(1, 1), dtype="float32")] grads = [paddle.ones(shape=(1, 1), dtype="float32")] + lr = paddle.to_tensor(0.001, dtype="float32") rank = paddle.distributed.get_rank() nranks = paddle.distributed.get_world_size() fp32_fused_param = self._create_persistable_var("fp32_fused_param") @@ -165,17 +171,6 @@ def append_op(self): fp16_acc_fused_grad = [] acc_step = [] - lr = None - for p_g in params: - if lr is None: - lr = self._create_param_lr(p_g) - else: - new_lr = self._create_param_lr(p_g) - assert id(lr) == id( - new_lr - ), "The learning rate for each parameter should 
be the same" - assert lr is not None - scale = self._get_or_create_scale() step = self._get_or_create_step() From 0a167bcd4a73b4553f732f5cd7b87a112005dce4 Mon Sep 17 00:00:00 2001 From: enkilee Date: Sat, 23 Mar 2024 19:18:37 +0800 Subject: [PATCH 31/38] fix --- test/ir/pir/translator/test_distributed_fused_lamb.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 610e2b306bb12c..c16b723beed920 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -88,13 +88,13 @@ def _create_persistable_var(self, name=None, shape=[-1], dtype="float32"): ) return main_var - def _create_scale_from_constant(self): - name = unique_name.generate("global_scale") + def _create_scale_from_constant(self, value): + name = unique_name.generate('global_scale') return paddle.static.create_global_var( name=name, shape=[1], - dtype="float32", - value=1.0, + dtype='float32', + value=float(value), persistable=True, ) From f082a2379db812c604bf529b95298434149996a9 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 26 Mar 2024 17:17:42 +0800 Subject: [PATCH 32/38] fix --- .../translator/test_distributed_fused_lamb.py | 43 +++++++------------ 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index c16b723beed920..6bb6859db01158 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -43,7 +43,6 @@ def setUp(self): self._use_master_acc_grad = True self._nproc_per_node = None self._use_hierarchical_allreduce = False - assert self._gradient_accumulation_steps >= 1 self.helper = LayerHelper("distributed_fused_lamb") self._supports_check_nan_inf = True # very import flag for AMP @@ -56,14 +55,11 @@ def setUp(self): ) self._step = None - if self._gradient_accumulation_steps > 1: - self._stop_update = main_block.create_var( - name=unique_name.generate("stop_update"), - shape=[1], - dtype=core.VarDesc.VarType.BOOL, - ) - else: - self._stop_update = None + self._stop_update = main_block.create_var( + name=unique_name.generate("stop_update"), + shape=[1], + dtype=core.VarDesc.VarType.BOOL, + ) self._param_to_master_param = {} @@ -156,24 +152,17 @@ def append_op(self): param_order = self._create_persistable_var("param_order", dtype="int32") param_order.is_distributed = True - if self._gradient_accumulation_steps > 1: - fp32_acc_fused_grad = [ - self._create_persistable_var("fp32_acc_fused_grad") - ] - fp16_acc_fused_grad = [ - self._create_persistable_var( - "fp16_acc_fused_grad", dtype="float16" - ) - ] - acc_step = [self._create_persistable_var("acc_step", dtype="int64")] - else: - fp32_acc_fused_grad = [] - fp16_acc_fused_grad = [] - acc_step = [] - - scale = self._get_or_create_scale() - - step = self._get_or_create_step() + fp32_acc_fused_grad = [ + self._create_persistable_var("fp32_acc_fused_grad") + ] + fp16_acc_fused_grad = [ + self._create_persistable_var("fp16_acc_fused_grad", dtype="float16") + ] + acc_step = [self._create_persistable_var("acc_step", dtype="int64")] + + scale = self._create_scale_from_constant() + + step = self._create_persistable_var('step', dtype='int64') ring_ids = [] From 36dde2aeeabd6581436a26f5b84c26bd2db16ded Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 27 Mar 2024 09:57:38 +0800 Subject: 
[PATCH 33/38] fix --- .../translator/test_distributed_fused_lamb.py | 127 +++++++++++++----- 1 file changed, 95 insertions(+), 32 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 6bb6859db01158..1cda7449525c02 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import unittest import test_op_translator @@ -21,13 +22,91 @@ from paddle.base.layer_helper import LayerHelper +def init_communicator(block, rank, ranks, ring_id): + eps = os.environ['PADDLE_TRAINER_ENDPOINTS'] + eps = [ep.strip() for ep in eps.split(",") if ep.strip()] + cur_ep = eps[rank] + other_eps = [eps[r] for r in ranks if r != rank] + + local_rank = ranks.index(rank) + comm_var_name = unique_name.generate('comm_id') + comm_id_var = block.create_var( + name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW + ) + if core.is_compiled_with_cuda(): + block.append_op( + type='c_gen_nccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + elif core.is_compiled_with_xpu(): + block.append_op( + type='c_gen_bkcl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + elif ( + paddle.distributed.ParallelEnv().device_type + in paddle.device.get_all_custom_device_type() + ): + block.append_op( + type='c_gen_xccl_id', + inputs={}, + outputs={'Out': comm_id_var}, + attrs={ + 'rank': local_rank, + 'endpoint': cur_ep, + 'other_endpoints': other_eps, + 'ring_id': ring_id, + }, + ) + block.append_op( + type='c_comm_init', + inputs={'X': comm_id_var}, + outputs={}, + attrs={ + 'nranks': len(ranks), + 'rank': local_rank, + 'ring_id': ring_id, + 'endpoints': ','.join(eps), + }, + ) + tmp_var = block.create_var(name=unique_name.generate('tmp')) + block.append_op( + type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1} + ) + block.append_op( + type='c_allreduce_sum', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + attrs={'ring_id': ring_id, 'use_calc_stream': True}, + ) + block.append_op( + type='c_sync_calc_stream', + inputs={'X': tmp_var}, + outputs={'Out': tmp_var}, + ) + return ring_id + + class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): def setUp(self): super().setUp() assert ( not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" - self.name = None self._beta1 = 0.9 self._beta2 = 0.999 self._epsilon = 1e-6 @@ -36,16 +115,12 @@ def setUp(self): self._alignment = 128 self._clip_after_allreduce = True self._is_grad_scaled_by_nranks = True - self._exclude_from_weight_decay_fn = None self._scale = None self._use_master_param_norm = True self._gradient_accumulation_steps = 1 self._use_master_acc_grad = True - self._nproc_per_node = None self._use_hierarchical_allreduce = False - self.helper = LayerHelper("distributed_fused_lamb") - self._supports_check_nan_inf = True # very import flag for AMP main_block = self.helper.main_program.global_block() self._found_inf = main_block.create_var( @@ -94,16 +169,6 @@ def _create_scale_from_constant(self, value): persistable=True, ) - def _get_or_create_scale(self): - if self._scale is None: - 
self._scale = self._create_scale_from_constant(1.0) - return self._scale - - def _get_or_create_step(self): - if self._step is None: - self._step = self._create_persistable_var('step', dtype='int64') - return self._step - def append_op(self): self.op_type = "distributed_fused_lamb" params = [paddle.ones(shape=(1, 1), dtype="float32")] @@ -120,11 +185,6 @@ def append_op(self): "fp16_fused_grad", dtype="float16" ) - master_params = [] - for p in params: - master_p = self._create_persistable_var("master_weight") - master_params.append(master_p) - moment1 = self._create_persistable_var("moment1") moment1.is_distributed = True moment2 = self._create_persistable_var("moment2") @@ -160,27 +220,30 @@ def append_op(self): ] acc_step = [self._create_persistable_var("acc_step", dtype="int64")] - scale = self._create_scale_from_constant() + scale = self._create_scale_from_constant(1.0) step = self._create_persistable_var('step', dtype='int64') ring_ids = [] + startup_block = self.helper.startup_program.global_block() + ring_id = init_communicator(startup_block, rank, list(range(nranks)), 0) + ring_ids.append(ring_id) attrs = { - "weight_decay": self._weight_decay, - "beta1": self._beta1, - "beta2": self._beta2, - "epsilon": self._epsilon, - "max_global_grad_norm": self._max_global_grad_norm, - "clip_after_allreduce": self._clip_after_allreduce, + "weight_decay": 0.01, + "beta1": 0.9, + "beta2": 0.999, + "epsilon": 1e-6, + "max_global_grad_norm": -1.0, + "clip_after_allreduce": True, "rank": rank, "nranks": nranks, "ring_ids": ring_ids, - "use_master_param_norm": self._use_master_param_norm, - "is_grad_scaled_by_nranks": self._is_grad_scaled_by_nranks, - "acc_steps": self._gradient_accumulation_steps, - "use_master_acc_grad": self._use_master_acc_grad, - "use_hierarchical_allreduce": self._use_hierarchical_allreduce, + "use_master_param_norm": True, + "is_grad_scaled_by_nranks": True, + "acc_steps": 1, + "use_master_acc_grad": True, + "use_hierarchical_allreduce": False, } helper = LayerHelper(self.op_type) From 33a33c19d76558749ea5040f54e38535cc5e35bd Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 27 Mar 2024 11:04:09 +0800 Subject: [PATCH 34/38] fix --- .../translator/test_distributed_fused_lamb.py | 83 +------------------ 1 file changed, 1 insertion(+), 82 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 1cda7449525c02..88e54432f01ae8 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os import unittest import test_op_translator @@ -22,85 +21,6 @@ from paddle.base.layer_helper import LayerHelper -def init_communicator(block, rank, ranks, ring_id): - eps = os.environ['PADDLE_TRAINER_ENDPOINTS'] - eps = [ep.strip() for ep in eps.split(",") if ep.strip()] - cur_ep = eps[rank] - other_eps = [eps[r] for r in ranks if r != rank] - - local_rank = ranks.index(rank) - comm_var_name = unique_name.generate('comm_id') - comm_id_var = block.create_var( - name=comm_var_name, persistable=True, type=core.VarDesc.VarType.RAW - ) - if core.is_compiled_with_cuda(): - block.append_op( - type='c_gen_nccl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, - }, - ) - elif core.is_compiled_with_xpu(): - block.append_op( - type='c_gen_bkcl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, - }, - ) - elif ( - paddle.distributed.ParallelEnv().device_type - in paddle.device.get_all_custom_device_type() - ): - block.append_op( - type='c_gen_xccl_id', - inputs={}, - outputs={'Out': comm_id_var}, - attrs={ - 'rank': local_rank, - 'endpoint': cur_ep, - 'other_endpoints': other_eps, - 'ring_id': ring_id, - }, - ) - block.append_op( - type='c_comm_init', - inputs={'X': comm_id_var}, - outputs={}, - attrs={ - 'nranks': len(ranks), - 'rank': local_rank, - 'ring_id': ring_id, - 'endpoints': ','.join(eps), - }, - ) - tmp_var = block.create_var(name=unique_name.generate('tmp')) - block.append_op( - type='fill_constant', outputs={'Out': tmp_var}, attrs={'value': 1} - ) - block.append_op( - type='c_allreduce_sum', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}, - attrs={'ring_id': ring_id, 'use_calc_stream': True}, - ) - block.append_op( - type='c_sync_calc_stream', - inputs={'X': tmp_var}, - outputs={'Out': tmp_var}, - ) - return ring_id - - class TestDistributedFusedLambOpTranslator(test_op_translator.TestOpTranslator): def setUp(self): super().setUp() @@ -225,8 +145,7 @@ def append_op(self): step = self._create_persistable_var('step', dtype='int64') ring_ids = [] - startup_block = self.helper.startup_program.global_block() - ring_id = init_communicator(startup_block, rank, list(range(nranks)), 0) + ring_id = 0 ring_ids.append(ring_id) attrs = { From 45bebe0e51c8f7feb52ddffcbf37669292886c0f Mon Sep 17 00:00:00 2001 From: enkilee Date: Wed, 27 Mar 2024 12:46:25 +0800 Subject: [PATCH 35/38] fix --- .../translator/test_distributed_fused_lamb.py | 22 +++++++++++-------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/test/ir/pir/translator/test_distributed_fused_lamb.py b/test/ir/pir/translator/test_distributed_fused_lamb.py index 88e54432f01ae8..9493772d637992 100644 --- a/test/ir/pir/translator/test_distributed_fused_lamb.py +++ b/test/ir/pir/translator/test_distributed_fused_lamb.py @@ -50,12 +50,6 @@ def setUp(self): ) self._step = None - self._stop_update = main_block.create_var( - name=unique_name.generate("stop_update"), - shape=[1], - dtype=core.VarDesc.VarType.BOOL, - ) - self._param_to_master_param = {} def _create_persistable_var(self, name=None, shape=[-1], dtype="float32"): @@ -147,7 +141,17 @@ def append_op(self): ring_ids = [] ring_id = 0 ring_ids.append(ring_id) - + main_block = self.helper.main_program.global_block() + _found_inf = main_block.create_var( + name=unique_name.generate("found_inf"), + shape=[1], + dtype=core.VarDesc.VarType.BOOL, + ) + 
_stop_update = main_block.create_var( + name=unique_name.generate("stop_update"), + shape=[1], + dtype=core.VarDesc.VarType.BOOL, + ) attrs = { "weight_decay": 0.01, "beta1": 0.9, @@ -196,11 +200,11 @@ def append_op(self): "Beta2PowOut": [beta2pow], "ParamOut": params, "GradOut": grads, - "FoundInf": [self._found_inf], + "FoundInf": [_found_inf], "FP32AccFusedGrad": fp32_acc_fused_grad, "FP16AccFusedGrad": fp16_acc_fused_grad, "AccStep": acc_step, - "StopUpdate": self._stop_update, + "StopUpdate": _stop_update, "Step": [step], }, attrs=attrs, From 8afa4f2427250757c2440df988fc8afc84c3b542 Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 2 Apr 2024 08:58:49 +0800 Subject: [PATCH 36/38] fix --- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 13 +++--- paddle/phi/api/yaml/op_compat.yaml | 4 +- paddle/phi/infermeta/multiary.cc | 45 ------------------- paddle/phi/infermeta/multiary.h | 45 ------------------- 4 files changed, 7 insertions(+), 100 deletions(-) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index 35b7915bf8d56e..b18fa7d3dd94cc 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -476,17 +476,14 @@ optional : rois_num, multi_level_rois_num - op : distributed_fused_lamb - args : (Tensor[] param, Tensor[] grad, Tensor fp32_fused_param, Tensor fp32fusedgrad, Tensor fp16fusedparam, Tensor fp16fusedgrad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fusedparamoffsets, Tensor fp32shardfusedparamoffsets, Tensor fp16shardfusedparamoffsets, Tensor paraminfo, Tensor paramorder, Tensor learningrate, Tensor globalscale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, int[] ring_ids= {}, int acc_steps = 1, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, bool use_hierarchical_allreduce = false) - output : Tensor(fp32_fused_param_out), Tensor(fp16fusedparamout), Tensor(fp32accfusedgrad), Tensor(fp16accfusedgrad), Tensor(moment1out), Tensor(moment2out), Tensor(beta1powout), Tensor(beta2powout), Tensor[](paramout){param.size()}, Tensor(foundinf), Tensor(accstep), Tensor(stopupdate), Tensor(step) - infer_meta : - func : DistributedFusedLambInferMeta - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] + args : (Tensor[] param, Tensor[] grad, Tensor fp32_fused_param, Tensor fp32_fused_grad, Tensor fp16_fused_param, Tensor fp16_fused_grad, Tensor moment1, Tensor moment2, Tensor beta1pow, Tensor beta2pow, Tensor fused_param_offsets, Tensor fp32_shard_fused_param_offsets, Tensor fp16_shard_fused_param_offsets, Tensor param_info, Tensor param_order, Tensor learning_rate, Tensor global_scale, float beta1, float beta2, float epsilon, float max_global_grad_norm, float weight_decay, bool clip_after_allreduce, int[] ring_ids= {}, int acc_steps = 1, bool use_master_param_norm = true, bool use_master_acc_grad = true, bool is_grad_scaled_by_nranks = true, int64_t nranks = 1, bool use_hierarchical_allreduce = false) + output : 
Tensor(fp32_fused_param_out), Tensor(fp16_fused_param_out), Tensor(fp32_acc_fused_grad), Tensor(fp16_acc_fused_grad), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1pow_out), Tensor(beta2pow_out), Tensor[](param_out){param.size()}, Tensor(found_inf), Tensor(acc_step), Tensor(stop_update), Tensor(step) kernel : func : distributed_fused_lamb data_type : DataType::FLOAT32 - param : [param, grad, fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, moment1, moment2, beta1pow, beta2pow, fusedparamoffsets, fp32shardfusedparamoffsets, fp16shardfusedparamoffsets, paraminfo, paramorder, learningrate, globalscale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] - optional : fp32_fused_param, fp32fusedgrad, fp16fusedparam, fp16fusedgrad, fp32_fused_param_out, fp16fusedparamout, fp32accfusedgrad, fp16accfusedgrad, accstep, stopupdate - inplace : (fp32_fused_param -> fp32_fused_param_out), (fp16fusedparam -> fp16fusedparamout), (moment1 -> moment1out), (moment2 -> moment2out), (beta1pow -> beta1powout), (beta2pow -> beta2powout), (param -> paramout) + param : [param, grad, fp32_fused_param, fp32_fused_grad, fp16_fused_param, fp16_fused_grad, moment1, moment2, beta1pow, beta2pow, fused_param_offsets, fp32_shard_fused_param_offsets, fp16_shard_fused_param_offsets, param_info, param_order, learning_rate, global_scale, acc_steps, beta1, beta2, epsilon, max_global_grad_norm, weight_decay, clip_after_allreduce, use_master_param_norm, use_master_acc_grad, is_grad_scaled_by_nranks, use_hierarchical_allreduce, nranks, ring_ids] + optional : fp32_fused_param, fp32_fused_grad, fp16_fused_param, fp16_fused_grad, fp32_fused_param_out, fp16_fused_param_out, fp32_acc_fused_grad, fp16_acc_fused_grad, acc_step, stop_update + inplace : (fp32_fused_param -> fp32_fused_param_out), (fp16_fused_param -> fp16_fused_param_out), (moment1 -> moment1_out), (moment2 -> moment2_out), (beta1pow -> beta1pow_out), (beta2pow -> beta2pow_out), (param -> param_out) - op : distributed_fused_lamb_init args : (Tensor[] param, Tensor[] grad, float beta1, float beta2, int[] apply_weight_decay, int alignment, int rank, int nranks) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 17d4679607a601..7ad3e62a7582d1 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -3659,9 +3659,9 @@ - op: distributed_fused_lamb inputs: - {param: Param, grad: Grad, fp32_fused_param: FP32FusedParam, fp32fusedgrad: FP32FusedGrad, fp16fusedparam: FP16FusedParam, fp16fusedgrad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fusedparamoffsets: FusedParamOffsets, fp32shardfusedparamoffsets: FP32ShardFusedParamOffsets, fp16shardfusedparamoffsets: FP16ShardFusedParamOffsets, paraminfo: ParamInfo, paramorder: ParamOrder, learningrate: LearningRate, globalscale: GlobalScale} + {param: Param, grad: Grad, fp32_fused_param: FP32FusedParam, fp32_fused_grad: FP32FusedGrad, fp16_fused_param: FP16FusedParam, fp16_fused_grad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fused_param_offsets: FusedParamOffsets, fp32_shard_fused_param_offsets: FP32ShardFusedParamOffsets, fp16_shard_fused_param_offsets: FP16ShardFusedParamOffsets, param_info: ParamInfo, param_order: ParamOrder, learning_rate: LearningRate, global_scale: GlobalScale} outputs: -
{paramout : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16fusedparamout: FP16FusedParamOut, fp32accfusedgrad: FP32AccFusedGrad, fp16accfusedgrad: FP16AccFusedGrad, moment1out: Moment1Out, moment2out: Moment2Out, beta1powout: Beta1PowOut, beta2powout: Beta2PowOut, foundinf: FoundInf, accstep: AccStep, stopupdate: StopUpdate, step: Step} + {paramout : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16_fused_param_out: FP16FusedParamOut, fp32_acc_fused_grad: FP32AccFusedGrad, fp16_acc_fused_grad: FP16AccFusedGrad, moment1_out: Moment1Out, moment2_out: Moment2Out, beta1pow_out: Beta1PowOut, beta2pow_out: Beta2PowOut, found_inf: FoundInf, acc_step: AccStep, stop_update: StopUpdate, step: Step} - op: distributed_fused_lamb_init inputs: diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc index d293fc4ef7976c..7575cc3cf14344 100644 --- a/paddle/phi/infermeta/multiary.cc +++ b/paddle/phi/infermeta/multiary.cc @@ -1508,51 +1508,6 @@ void DGCMomentumInferMeta(const MetaTensor& param, } } -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const MetaTensor& fp32_fused_param, - const MetaTensor& fp32fusedgrad, - const MetaTensor& fp16fusedparam, - const MetaTensor& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - bool use_hierarchical_allreduce, - int64_t nranks, - const std::vector& ring_ids, - MetaTensor* fp32_fused_param_out, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step) {} - void EditDistanceInferMeta(const MetaTensor& hyps, const MetaTensor& refs, const MetaTensor& hypslength, diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h index f01dc7ce40d8a2..e83ef2ed1825de 100644 --- a/paddle/phi/infermeta/multiary.h +++ b/paddle/phi/infermeta/multiary.h @@ -294,51 +294,6 @@ void DeformableConvInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); -void DistributedFusedLambInferMeta( - const std::vector& param, - const std::vector& grad, - const MetaTensor& fp32_fused_param, - const MetaTensor& fp32fusedgrad, - const MetaTensor& fp16fusedparam, - const MetaTensor& fp16fusedgrad, - const MetaTensor& moment1, - const MetaTensor& moment2, - const MetaTensor& beta1pow, - const MetaTensor& beta2pow, - const MetaTensor& fusedparamoffsets, - const MetaTensor& fp32shardfusedparamoffsets, - const MetaTensor& fp16shardfusedparamoffsets, - const MetaTensor& paraminfo, - const MetaTensor& paramorder, - const MetaTensor& learningrate, - const MetaTensor& globalscale, - int acc_steps, - float beta1, - float beta2, - float epsilon, - float max_global_grad_norm, - float weight_decay, - bool clip_after_allreduce, - bool 
use_master_param_norm, - bool use_master_acc_grad, - bool is_grad_scaled_by_nranks, - bool use_hierarchical_allreduce, - int64_t nranks, - const std::vector& ring_ids, - MetaTensor* fp32_fused_param_out, - MetaTensor* fp16fusedparamout, - MetaTensor* fp32accfusedgrad, - MetaTensor* fp16accfusedgrad, - MetaTensor* moment1out, - MetaTensor* moment2out, - MetaTensor* beta1powout, - MetaTensor* beta2powout, - std::vector paramout, - MetaTensor* foundinf, - MetaTensor* accstep, - MetaTensor* stopupdate, - MetaTensor* step); - void DGCMomentumInferMeta(const MetaTensor& param, const MetaTensor& grad, const MetaTensor& velocity, From d3c0cae72e71504debbf52f112c0464b50289efa Mon Sep 17 00:00:00 2001 From: enkilee Date: Tue, 2 Apr 2024 15:02:45 +0800 Subject: [PATCH 37/38] fix --- paddle/phi/api/yaml/op_compat.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index 7ad3e62a7582d1..64e0b7f65adefd 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -3661,7 +3661,7 @@ inputs: {param: Param, grad: Grad, fp32_fused_param: FP32FusedParam, fp32_fused_grad: FP32FusedGrad, fp16_fused_param: FP16FusedParam, fp16_fused_grad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fused_param_offsets: FusedParamOffsets, fp32_shard_fused_param_offsets: FP32ShardFusedParamOffsets, fp16_shard_fused_param_offsets: FP16ShardFusedParamOffsets, param_info: ParamInfo, param_order: ParamOrder, learning_rate: LearningRate, global_scale: GlobalScale} outputs: - {paramout : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16_fused_param_out: FP16FusedParamOut, fp32_acc_fused_grad: FP32AccFusedGrad, fp16_acc_fused_grad: FP16AccFusedGrad, moment1_out: Moment1Out, moment2_out: Moment2Out, beta1pow_out: Beta1PowOut, beta2pow_out: Beta2PowOut, found_inf: FoundInf, acc_step: AccStep, stop_update: StopUpdate, step: Step} + {param_out : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16_fused_param_out: FP16FusedParamOut, fp32_acc_fused_grad: FP32AccFusedGrad, fp16_acc_fused_grad: FP16AccFusedGrad, moment1_out: Moment1Out, moment2_out: Moment2Out, beta1pow_out: Beta1PowOut, beta2pow_out: Beta2PowOut, found_inf: FoundInf, acc_step: AccStep, stop_update: StopUpdate, step: Step} - op: distributed_fused_lamb_init inputs: From d59ebfbecbfd1941a1800e7b55a7eedbcb678e85 Mon Sep 17 00:00:00 2001 From: enkilee Date: Mon, 8 Apr 2024 08:38:50 +0800 Subject: [PATCH 38/38] fix --- test/ir/pir/translator/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/ir/pir/translator/CMakeLists.txt b/test/ir/pir/translator/CMakeLists.txt index c8d5d892533bf3..d293fad021c6bb 100644 --- a/test/ir/pir/translator/CMakeLists.txt +++ b/test/ir/pir/translator/CMakeLists.txt @@ -18,7 +18,7 @@ list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_lookup_table_translate) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST - test_distributed_push_sparse_translator + test_distributed_push_sparse_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_distributed_fused_lamb_init) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_dgc_momentum_translator) list(APPEND DISTRIBUTED_OP_TRANSLATOR_TEST test_nop_translator)
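
For reference, after the renames in patches 36 and 37 the net op_compat.yaml entry for distributed_fused_lamb should read roughly as follows (a consolidated sketch assembled from the hunks above, not an additional patch in this series):

- op: distributed_fused_lamb
  inputs:
    {param: Param, grad: Grad, fp32_fused_param: FP32FusedParam, fp32_fused_grad: FP32FusedGrad, fp16_fused_param: FP16FusedParam, fp16_fused_grad: FP16FusedGrad, moment1: Moment1, moment2: Moment2, beta1pow: Beta1Pow, beta2pow: Beta2Pow, fused_param_offsets: FusedParamOffsets, fp32_shard_fused_param_offsets: FP32ShardFusedParamOffsets, fp16_shard_fused_param_offsets: FP16ShardFusedParamOffsets, param_info: ParamInfo, param_order: ParamOrder, learning_rate: LearningRate, global_scale: GlobalScale}
  outputs:
    {param_out : ParamOut, fp32_fused_param_out: FP32FusedParamOut, fp16_fused_param_out: FP16FusedParamOut, fp32_acc_fused_grad: FP32AccFusedGrad, fp16_acc_fused_grad: FP16AccFusedGrad, moment1_out: Moment1Out, moment2_out: Moment2Out, beta1pow_out: Beta1PowOut, beta2pow_out: Beta2PowOut, found_inf: FoundInf, acc_step: AccStep, stop_update: StopUpdate, step: Step}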