Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 10 additions & 3 deletions paddle/fluid/pir/dialect/op_generator/api_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@

PD_MANUAL_API_LIST = {
'embedding_grad',
'assign',
}

H_FILE_TEMPLATE = """
Expand Down Expand Up @@ -241,7 +242,7 @@ def _parse_yaml(self, op_yaml_files, op_compat_yaml_file):
def _need_skip(self, op_info, op_name):
return (
op_info.infer_meta_func is None and op_name not in PD_MANUAL_OP_LIST
) or op_name in PD_MANUAL_API_LIST
)

def _is_optional_input(self, op_info, input_name):
name_list = op_info.input_name_list
Expand Down Expand Up @@ -377,7 +378,10 @@ def _gen_h_file(self, op_info_items, namespaces, h_file_path):
for op_name in op_info.op_phi_name:
# NOTE:When infer_meta_func is None, the Build() function generated in pd_op
# is wrong, so temporarily skip the automatic generation of these APIs
if self._need_skip(op_info, op_name):
if (
self._need_skip(op_info, op_name)
or op_name in PD_MANUAL_API_LIST
):
continue
declare_str += self._gen_one_declare(
op_info, op_name, False, False
Expand Down Expand Up @@ -828,7 +832,10 @@ def _gen_cpp_file(self, op_info_items, namespaces, cpp_file_path):
for op_name in op_info.op_phi_name:
# NOTE:When infer_meta_func is None, the Build() function generated in pd_op
# is wrong, so temporarily skip the automatic generation of these APIs
if self._need_skip(op_info, op_name):
if (
self._need_skip(op_info, op_name)
or op_name in PD_MANUAL_API_LIST
):
continue
impl_str += self._gen_one_impl(op_info, op_name, False, False)
if len(op_info.mutable_attribute_name_list) > 0:
Expand Down
1 change: 1 addition & 0 deletions paddle/fluid/pir/dialect/op_generator/python_c_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,6 +441,7 @@ def _gen_one_impl(self, op_info, op_name):
def _need_skip(self, op_info, op_name):
    """Return True when code generation for this op must be skipped.

    Skips ops the base generator already skips, grad/xpu variants, and
    ops whose Python C functions are written by hand.
    """
    if super()._need_skip(op_info, op_name):
        return True
    if op_name.endswith(('_grad', '_grad_', 'xpu')):
        return True
    return op_name in MANUAL_STATIC_OP_FUNCTION_LIST

Expand Down
20 changes: 20 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/manual_api.cc
Original file line number Diff line number Diff line change
Expand Up @@ -200,5 +200,25 @@ pir::OpResult slice_array_dense(pir::Value input, pir::Value starts) {
return op.result(0);
}

// Builds an assign op for `x`, dispatching on its IR type:
// DenseTensorType -> AssignOp, DenseTensorArrayType -> AssignArrayOp.
// Throws Unimplemented for any other input type.
pir::OpResult assign(const pir::Value& x) {
  CheckValueDataType(x, "x", "assign");
  auto builder = ApiBuilder::Instance().GetBuilder();
  const pir::Type x_type = x.type();
  if (x_type.isa<paddle::dialect::DenseTensorType>()) {
    return builder->Build<paddle::dialect::AssignOp>(x).result(0);
  }
  if (x_type.isa<paddle::dialect::DenseTensorArrayType>()) {
    return builder->Build<paddle::dialect::AssignArrayOp>(x).result(0);
  }
  PADDLE_THROW(phi::errors::Unimplemented(
      "Currently, assign only supports DenseTensorType and "
      "DenseTensorArrayType."));
}

} // namespace dialect
} // namespace paddle
2 changes: 2 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/manual_api.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,5 +83,7 @@ pir::OpResult add_n_array(const std::vector<pir::Value>& inputs);

pir::OpResult slice_array_dense(pir::Value input, pir::Value starts);

// Builds an assign op for `x`: AssignOp for a dense tensor input,
// AssignArrayOp for a tensor-array input; returns the op's single result.
pir::OpResult assign(const pir::Value& x);

} // namespace dialect
} // namespace paddle
175 changes: 170 additions & 5 deletions paddle/fluid/pir/dialect/operator/ir/manual_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,11 @@ paddle::dialect::AddNOp, paddle::dialect::AddN_Op,
paddle::dialect::CreateArrayLikeOp, paddle::dialect::ArrayLengthOp,
paddle::dialect::ArrayReadOp, paddle::dialect::ArrayWrite_Op,
paddle::dialect::SliceArrayOp, paddle::dialect::SliceArrayDenseOp,
paddle::dialect::AssignArray_Op, paddle::dialect::ArrayToTensorOp,
paddle::dialect::TensorToArrayOp, paddle::dialect::SelectInputOp,
paddle::dialect::IncrementOp, paddle::dialect::Increment_Op,
paddle::dialect::ShapeBroadcastOp, paddle::dialect::MemcpyD2hMultiIoOp
paddle::dialect::AssignArrayOp, paddle::dialect::AssignArray_Op,
paddle::dialect::ArrayToTensorOp, paddle::dialect::TensorToArrayOp,
paddle::dialect::SelectInputOp, paddle::dialect::IncrementOp,
paddle::dialect::Increment_Op, paddle::dialect::ShapeBroadcastOp,
paddle::dialect::MemcpyD2hMultiIoOp
#else

#include "paddle/fluid/pir/dialect/operator/ir/manual_op.h"
Expand Down Expand Up @@ -3687,6 +3688,170 @@ phi::DataType SliceArrayDenseOp::GetKernelTypeForVar(
return expected_kernel_dtype;
}

// Yaml-style op info for assign_array: one tensor-array input `x`,
// no attributes, one tensor-array output `out`, backed by the
// UnchangedArrayInferMeta function and the assign_array kernel.
OpInfoTuple AssignArrayOp::GetOpInfo() {
  // Input `x`: DenseTensorArrayType, not optional, not intermediate.
  const std::vector<paddle::dialect::OpInputInfo> input_infos = {
      paddle::dialect::OpInputInfo("x",
                                   "paddle::dialect::DenseTensorArrayType",
                                   false,
                                   false,
                                   false,
                                   true)};
  // assign_array carries no attributes.
  const std::vector<paddle::dialect::OpAttributeInfo> attr_infos;
  // Output `out`: also a DenseTensorArrayType.
  const std::vector<paddle::dialect::OpOutputInfo> output_infos = {
      paddle::dialect::OpOutputInfo(
          "out", "paddle::dialect::DenseTensorArrayType", false, false)};
  paddle::dialect::OpRunTimeInfo runtime_info(
      "UnchangedArrayInferMeta", {"x"}, "assign_array", {"x"}, {}, {}, {}, {});
  return std::make_tuple(
      input_infos, attr_infos, output_infos, runtime_info, "assign_array");
}

// Builds an assign_array operation from input `x_`: registers the input,
// runs UnchangedArrayInferMeta at build time to derive the output type,
// and appends the single tensor-array output to `argument`.
void AssignArrayOp::Build(pir::Builder &builder,
                          pir::OperationArgument &argument,
                          pir::Value x_) {
  VLOG(4) << "Start build AssignArrayOp";

  VLOG(4) << "Builder construction inputs";
  std::vector<pir::Value> argument_inputs = {x_};
  argument.AddInputs(argument_inputs);

  VLOG(4) << "Builder construction attributes";

  VLOG(4) << "Builder construction outputs";
  // Normalize the input type to DenseTensorArrayType: an
  // AllocatedDenseTensorArrayType is converted back to the un-allocated
  // form (same dtype/layout) so infer-meta sees a uniform type.
  paddle::dialect::DenseTensorArrayType x_type;
  if (x_.type().isa<paddle::dialect::DenseTensorArrayType>()) {
    x_type = x_.type().dyn_cast<paddle::dialect::DenseTensorArrayType>();
  } else if (x_.type().isa<paddle::dialect::AllocatedDenseTensorArrayType>()) {
    paddle::dialect::AllocatedDenseTensorArrayType allocated_input =
        x_.type().dyn_cast<paddle::dialect::AllocatedDenseTensorArrayType>();
    x_type = paddle::dialect::DenseTensorArrayType::get(
        pir::IrContext::Instance(),
        allocated_input.dtype(),
        allocated_input.data_layout());
  } else {
    PADDLE_THROW(phi::errors::Unimplemented(
        "Only support paddle::dialect::DenseTensorArrayType or "
        "paddle::dialect::AllocatedDenseTensorArrayType"));
  }

  VLOG(4) << "Builder construction dense_tensor_array_x";
  // Wrap the IR-level type info in IrTensor/IrMetaTensor so the phi
  // infer-meta function can run here, at graph-construction time.
  paddle::dialect::IrTensor ir_tensor_x(
      paddle::dialect::TransToPhiDataType(x_type.dtype()),
      {},
      x_type.data_layout(),
      {});
  VLOG(4) << "Builder construction meta_x";
  paddle::dialect::IrMetaTensor meta_x(&ir_tensor_x);
  paddle::dialect::IrTensor dense_out;
  paddle::dialect::IrMetaTensor meta_out(&dense_out);

  // assign_array leaves the input meta (dtype/layout) unchanged.
  phi::UnchangedArrayInferMeta(meta_x, &meta_out);

  std::vector<pir::Type> argument_outputs;
  pir::Type out_dense_tensor_array_type =
      paddle::dialect::DenseTensorArrayType::get(
          pir::IrContext::Instance(),
          TransToIrDataType(dense_out.dtype()),
          dense_out.layout());
  argument_outputs.push_back(out_dense_tensor_array_type);
  argument.AddOutputs(argument_outputs.begin(), argument_outputs.end());
  // Default stop-gradient flags for the freshly created outputs.
  ::pir::PassStopGradientsDefaultly(argument);
}

// Verifies the op's signature invariants: exactly one input of
// DenseTensorArrayType, no attributes, and exactly one output of
// DenseTensorArrayType. IR_ENFORCE aborts verification on violation.
void AssignArrayOp::VerifySig() {
  VLOG(4)
      << "Start Verifying inputs, outputs and attributes for: AssignArrayOp.";
  VLOG(4) << "Verifying inputs:";
  {
    auto input_size = num_operands();
    IR_ENFORCE(input_size == 1u,
               "The size %d of inputs must be equal to 1.",
               input_size);
    IR_ENFORCE((*this)
                   ->operand_source(0)
                   .type()
                   .isa<paddle::dialect::DenseTensorArrayType>(),
               "Type validation failed for the 0th input, got %s.",
               (*this)->operand_source(0).type());
  }
  VLOG(4) << "Verifying attributes:";
  {
    // Attributes num is 0, not need to check attributes type.
  }
  VLOG(4) << "Verifying outputs:";
  {
    auto output_size = num_results();
    IR_ENFORCE(output_size == 1u,
               "The size %d of outputs must be equal to 1.",
               output_size);
    IR_ENFORCE(
        (*this)->result(0).type().isa<paddle::dialect::DenseTensorArrayType>(),
        "Type validation failed for the 0th output.");
  }
  VLOG(4) << "End Verifying for: AssignArrayOp.";
}

// Runtime infer-meta hook: delegates to phi::UnchangedArrayInferMeta,
// i.e. assign_array leaves the input meta unchanged.
void AssignArrayOp::InferMeta(phi::InferMetaContext *infer_meta) {
  auto infer_fn = PD_INFER_META(phi::UnchangedArrayInferMeta);
  infer_fn(infer_meta);
}

// Kernel dtype selection hook: assign_array never overrides the kernel's
// dtype for any variable, so the expected dtype is returned unchanged.
phi::DataType AssignArrayOp::GetKernelTypeForVar(
    const std::string &var_name,
    const phi::DataType &tensor_dtype,
    const phi::DataType &expected_kernel_dtype) {
  VLOG(4) << "Get KernelType for Var of op: AssignArrayOp";

  return expected_kernel_dtype;
}

// Static infer-meta overload used when output types must be computed from
// input values alone (no operation yet). Mirrors the logic in Build():
// normalizes the input type, runs UnchangedArrayInferMeta, and returns the
// single output type. `attributes` is unused — assign_array has none.
std::vector<pir::Type> AssignArrayOp::InferMeta(
    const std::vector<pir::Value> &input_values,
    const pir::AttributeMap &attributes) {
  VLOG(4) << "Start infermeta AssignArrayOp";
  IR_ENFORCE(input_values.size() == 1,
             "Num of inputs is expected to be 1 but got %d.",
             input_values.size());
  pir::Value x_ = input_values[0];

  VLOG(4) << "Builder construction outputs";
  // Normalize to DenseTensorArrayType, accepting the allocated variant too
  // (same dtype/layout, placement stripped).
  paddle::dialect::DenseTensorArrayType x_type;
  if (x_.type().isa<paddle::dialect::DenseTensorArrayType>()) {
    x_type = x_.type().dyn_cast<paddle::dialect::DenseTensorArrayType>();
  } else if (x_.type().isa<paddle::dialect::AllocatedDenseTensorArrayType>()) {
    paddle::dialect::AllocatedDenseTensorArrayType allocated_input =
        x_.type().dyn_cast<paddle::dialect::AllocatedDenseTensorArrayType>();
    x_type = paddle::dialect::DenseTensorArrayType::get(
        pir::IrContext::Instance(),
        allocated_input.dtype(),
        allocated_input.data_layout());
  } else {
    PADDLE_THROW(phi::errors::Unimplemented(
        "Only support paddle::dialect::DenseTensorArrayType or "
        "paddle::dialect::AllocatedDenseTensorArrayType"));
  }
  // Bridge into phi meta tensors so the shared infer-meta function can run.
  paddle::dialect::IrTensor dense_input(
      paddle::dialect::TransToPhiDataType(x_type.dtype()),
      {},
      x_type.data_layout(),
      {});
  paddle::dialect::IrMetaTensor meta_input(&dense_input);

  paddle::dialect::IrTensor dense_out;
  paddle::dialect::IrMetaTensor meta_out(&dense_out);

  phi::UnchangedArrayInferMeta(meta_input, &meta_out);

  std::vector<pir::Type> argument_outputs;
  pir::Type out_dense_tensor_array_type =
      paddle::dialect::DenseTensorArrayType::get(
          pir::IrContext::Instance(),
          TransToIrDataType(dense_out.dtype()),
          dense_out.layout());
  argument_outputs.push_back(out_dense_tensor_array_type);
  return argument_outputs;
}

OpInfoTuple AssignArray_Op::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
paddle::dialect::OpInputInfo("x",
Expand Down Expand Up @@ -4953,7 +5118,6 @@ void MemcpyD2hMultiIoOp::VerifySig() {
IR_ENFORCE(input_size == 1u,
"The size %d of inputs must be equal to 1.",
input_size);

IR_ENFORCE((*this)
->operand_source(0)
.type()
Expand Down Expand Up @@ -5065,6 +5229,7 @@ IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayReadOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayWrite_Op)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SliceArrayOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::SliceArrayDenseOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AssignArrayOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::AssignArray_Op)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayToTensorOp)
IR_DEFINE_EXPLICIT_TYPE_ID(paddle::dialect::TensorToArrayOp)
Expand Down
32 changes: 32 additions & 0 deletions paddle/fluid/pir/dialect/operator/ir/manual_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,37 @@ class SliceArrayDenseOp
const pir::AttributeMap &attributes);
};

// pd_op.assign_array: copies tensor array `x` to output `out`, with the
// output meta inferred as unchanged from the input. Implements the yaml-info,
// infer-meta, and kernel-type-for-var interfaces; has no attributes.
class AssignArrayOp
    : public pir::Op<AssignArrayOp,
                     paddle::dialect::OpYamlInfoInterface,
                     paddle::dialect::InferMetaInterface,
                     paddle::dialect::GetKernelTypeForVarInterface> {
 public:
  using Op::Op;
  static const char *name() { return "pd_op.assign_array"; }
  // No attributes on this op.
  static constexpr const char **attributes_name = nullptr;
  static constexpr uint32_t attributes_num = 0;
  static OpInfoTuple GetOpInfo();
  static void Build(pir::Builder &builder,             // NOLINT
                    pir::OperationArgument &argument,  // NOLINT
                    pir::Value x_);

  void VerifySig();

  static phi::DataType GetKernelTypeForVar(
      const std::string &var_name,
      const phi::DataType &tensor_dtype,
      const phi::DataType &expected_kernel_dtype);

  // Accessors for the single input and the single output.
  pir::Value x() { return operand_source(0); }
  pir::OpResult out() { return result(0); }

  static void InferMeta(phi::InferMetaContext *infer_meta);
  static std::vector<pir::Type> InferMeta(
      const std::vector<pir::Value> &input_values,
      const pir::AttributeMap &attributes);
};

class AssignArray_Op
: public pir::Op<AssignArray_Op,
paddle::dialect::OpYamlInfoInterface,
Expand Down Expand Up @@ -727,6 +758,7 @@ IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayReadOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayWrite_Op)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SliceArrayOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::SliceArrayDenseOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AssignArrayOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::AssignArray_Op)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::ArrayToTensorOp)
IR_DECLARE_EXPLICIT_TYPE_ID(paddle::dialect::TensorToArrayOp)
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/convert_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -858,7 +858,7 @@ def true_fn():
return null_array

def false_fn(array, start, end):
new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
new_array = array[start:end]
return new_array

new_array = paddle.static.nn.cond(
Expand Down
2 changes: 1 addition & 1 deletion python/paddle/pir/math_op_patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -499,7 +499,7 @@ def clear_gradient(self):
def append(self, var):
"""
**Notes**:
**The type Value must be LoD Tensor Array.
**The type Value must be Tensor Array.

"""
if not self.is_dense_tensor_array_type():
Expand Down
Loading