Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
1962a9a
Merge some of the previous interfaces into a branch
Le-soleile Feb 23, 2026
74f284f
Some interfaces that were not committed before
Le-soleile Feb 23, 2026
21d33fc
merge
Le-soleile Feb 23, 2026
1629589
merge
Le-soleile Feb 23, 2026
7d264c0
Merge branch '217' of https://github.com/Le-soleile/Paddle into 217
Le-soleile Feb 23, 2026
0c79448
Add include for squeeze operation
Le-soleile Feb 23, 2026
948edb7
fix
Le-soleile Feb 24, 2026
7679832
fix and add test
Le-soleile Feb 24, 2026
4ba11d2
solve conflicts
Le-soleile Feb 24, 2026
f0d5ab1
fix
Le-soleile Feb 24, 2026
867baff
fix
Le-soleile Feb 24, 2026
a82e739
fix
Le-soleile Feb 24, 2026
27068e2
improve coverage
Le-soleile Feb 25, 2026
ba924ad
fix test
Le-soleile Feb 25, 2026
bf84ee3
fix test
Le-soleile Feb 25, 2026
dbe28a6
fix
Le-soleile Feb 25, 2026
ec0e598
fix index
Le-soleile Feb 25, 2026
ea804dd
fix test
Le-soleile Feb 26, 2026
13a3bf7
fix test
Le-soleile Feb 26, 2026
51f60e2
fix test
Le-soleile Feb 26, 2026
fb366ca
Update paddle/phi/api/include/compat/c10/core/List.h
Le-soleile Feb 27, 2026
4dddbf7
fix
Le-soleile Feb 27, 2026
b5def57
Merge branch '217' of https://github.com/Le-soleile/Paddle into 217
Le-soleile Feb 27, 2026
675e095
fix
Le-soleile Feb 27, 2026
81d1eba
fix error
Le-soleile Feb 28, 2026
75c049f
Add and fix as_strided test
Le-soleile Mar 1, 2026
af2f91f
fix
Le-soleile Mar 1, 2026
92151c3
fix
Le-soleile Mar 1, 2026
7a0665f
fix
Le-soleile Mar 2, 2026
834ef07
Merge branch 'develop' into 217
Le-soleile Mar 2, 2026
fedd418
codestyle after conflict
Le-soleile Mar 2, 2026
3bfcd87
codestyle after conflict
Le-soleile Mar 2, 2026
4a7ed29
fix
Le-soleile Mar 2, 2026
1713cf3
fix
Le-soleile Mar 2, 2026
eec6fc2
Merge branch 'PaddlePaddle:develop' into 217
Le-soleile Mar 2, 2026
6af70c7
fix
Le-soleile Mar 2, 2026
f93f1c5
Merge branch '217' of https://github.com/Le-soleile/Paddle into 217
Le-soleile Mar 2, 2026
7d30522
Merge branch 'develop' into 217
Le-soleile Mar 3, 2026
f6f463e
Improve test coverage
Le-soleile Mar 3, 2026
e7de3b0
Merge branch '217' of https://github.com/Le-soleile/Paddle into 217
Le-soleile Mar 3, 2026
1a3217c
Resolve conflict
Le-soleile Mar 3, 2026
0f33719
Merge branch 'develop' into 217
Le-soleile Mar 3, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions paddle/phi/api/include/compat/ATen/Functions.h
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,10 @@
#include <ATen/ops/abs.h>
#include <ATen/ops/any.h>
#include <ATen/ops/arange.h>
#include <ATen/ops/as_strided.h>
#include <ATen/ops/cat.h>
#include <ATen/ops/chunk.h>
#include <ATen/ops/clamp.h>
#include <ATen/ops/coalesce.h>
#include <ATen/ops/detach.h>
#include <ATen/ops/dsplit.h>
Expand All @@ -36,6 +38,7 @@
#include <ATen/ops/full.h>
#include <ATen/ops/hsplit.h>
#include <ATen/ops/index.h>
#include <ATen/ops/index_put.h>
#include <ATen/ops/is_coalesced.h>
#include <ATen/ops/item.h>
#include <ATen/ops/masked_select.h>
Expand All @@ -53,6 +56,7 @@
#include <ATen/ops/split.h>
#include <ATen/ops/split_with_sizes.h>
#include <ATen/ops/squeeze.h>
#include <ATen/ops/std.h>
#include <ATen/ops/sum.h>
#include <ATen/ops/t.h>
#include <ATen/ops/tensor_split.h>
Expand Down
131 changes: 131 additions & 0 deletions paddle/phi/api/include/compat/ATen/core/TensorBody.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,28 @@
#endif

#include <c10/core/Device.h>
#include <c10/core/List.h>
#include <c10/core/ScalarType.h>
#include <c10/core/SymIntArrayRef.h>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "paddle/common/ddim.h"
#include "paddle/phi/common/place.h"

namespace at {
class Tensor;

// Type aliases for ATen compatibility
using Scalar = c10::Scalar;
using TensorOptions = c10::TensorOptions;
using MemoryFormat = c10::MemoryFormat;
using IntArrayRef = c10::IntArrayRef;
using OptionalIntArrayRef = c10::OptionalIntArrayRef;
using ScalarType = c10::ScalarType;
} // namespace at

namespace at { // NOLINT(build/namespaces)
using PaddleTensor = paddle::Tensor;
using PaddlePlace = phi::Place;
Expand Down Expand Up @@ -209,6 +227,119 @@ class Tensor : public TensorBase {

bool equal(const at::Tensor& other) const;

// Clamp functions
at::Tensor clamp(
const ::std::optional<at::Scalar>& min,
const ::std::optional<at::Scalar>& max = ::std::nullopt) const;

at::Tensor clamp(const ::std::optional<at::Tensor>& min = {},
const ::std::optional<at::Tensor>& max = {}) const;

at::Tensor& clamp_(
const ::std::optional<at::Scalar>& min,
const ::std::optional<at::Scalar>& max = ::std::nullopt) const;

at::Tensor& clamp_(const ::std::optional<at::Tensor>& min = {},
const ::std::optional<at::Tensor>& max = {}) const;

at::Tensor clamp_max(const at::Scalar& max) const;
at::Tensor clamp_max(const at::Tensor& max) const;
at::Tensor& clamp_max_(const at::Scalar& max) const;
at::Tensor& clamp_max_(const at::Tensor& max) const;

at::Tensor clamp_min(const at::Scalar& min) const;
at::Tensor clamp_min(const at::Tensor& min) const;
at::Tensor& clamp_min_(const at::Scalar& min) const;
at::Tensor& clamp_min_(const at::Tensor& min) const;

// as_strided: Create a tensor view with custom size, stride, and
// storage_offset
at::Tensor as_strided(
at::IntArrayRef size,
at::IntArrayRef stride,
::std::optional<int64_t> storage_offset = ::std::nullopt) const;

// as_strided_: Inplace version
const at::Tensor& as_strided_(
at::IntArrayRef size,
at::IntArrayRef stride,
::std::optional<int64_t> storage_offset = ::std::nullopt) const;

// as_strided_scatter: Scatter src into a strided view
at::Tensor as_strided_scatter(
const at::Tensor& src,
at::IntArrayRef size,
at::IntArrayRef stride,
::std::optional<int64_t> storage_offset = ::std::nullopt) const;

// Standard deviation functions
Tensor std(int dim) const;
Tensor std(bool unbiased = true) const;
Tensor std(at::OptionalIntArrayRef dim,
bool unbiased = true,
bool keepdim = false) const;
Tensor std(at::OptionalIntArrayRef dim,
const ::std::optional<at::Scalar>& correction,
bool keepdim = false) const;

// Returns a Tensor sharing this tensor's storage when it is a contiguous
// DenseTensor (cheap alias), otherwise a materialized copy via assign().
// An uninitialized tensor yields an uninitialized result.
Tensor tensor_data() const {
  PaddleTensor result;
  if (tensor_.initialized()) {
    auto src_impl = tensor_.impl();
    auto* src_tensor =
        std::dynamic_pointer_cast<phi::DenseTensor>(src_impl).get();
    if (src_tensor && src_tensor->meta().is_contiguous()) {
      // Build the destination locally and install it once; avoids the
      // set_impl -> dynamic_pointer_cast round-trip of fetching it back out.
      auto dst_tensor = std::make_shared<phi::DenseTensor>();
      dst_tensor->ShareDataWith(*src_tensor);
      result.set_impl(dst_tensor);
    } else {
      // Non-dense or non-contiguous: fall back to a full copy.
      result = paddle::experimental::assign(tensor_);
    }
  }
  // For uninitialized tensor, return an uninitialized tensor (no assign
  // needed)
  return Tensor(result);
}

// Returns a Tensor sharing this tensor's storage when it is a contiguous
// DenseTensor, otherwise a materialized copy. In this compat layer the
// implementation is identical to tensor_data() (no autograd distinction is
// modeled here), so delegate to it instead of duplicating the body.
// NOTE(review): if autograd metadata handling is added later, these two
// methods must diverge again — confirm against PyTorch semantics.
Tensor variable_data() const { return tensor_data(); }

// index: Get values at specified tensor indices
at::Tensor index(const c10::List<::std::optional<at::Tensor>>& indices) const;

// index_put_: Set values at specified indices in-place
at::Tensor& index_put_(const c10::List<::std::optional<at::Tensor>>& indices,
const at::Tensor& values,
bool accumulate = false) const;

// index_put_: Set scalar value at specified indices in-place
at::Tensor& index_put_(const c10::List<::std::optional<at::Tensor>>& indices,
const at::Scalar& v,
bool accumulate = false) const;

// index_put: Non-inplace version of index_put_
at::Tensor index_put(const c10::List<::std::optional<at::Tensor>>& indices,
const at::Tensor& values,
bool accumulate = false) const;

Tensor toType(ScalarType t) const {
return Tensor(paddle::experimental::cast(
tensor_, compat::_PD_AtenScalarTypeToPhiDataType(t)));
Expand Down
100 changes: 100 additions & 0 deletions paddle/phi/api/include/compat/ATen/ops/as_strided.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
// Copyright (c) 2026 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <ATen/core/Tensor.h>
#include <c10/util/ArrayRef.h>
#include <optional>
#include <vector>

#include "paddle/common/ddim.h"
#include "paddle/phi/core/dense_tensor.h"

namespace at {

// as_strided: Create a tensor view with custom size, stride, and storage_offset
// as_strided: Create a tensor view with custom size, stride, and storage_offset.
// Shares storage with *this; only the metadata (dims/strides/offset) differs.
// @param size            target dimensions of the view
// @param stride          target strides (in elements) of the view
// @param storage_offset  offset in elements from the start of storage; when
//                        absent, the source tensor's current offset is kept
// @throws if *this is not backed by a phi::DenseTensor
inline at::Tensor Tensor::as_strided(
    at::IntArrayRef size,
    at::IntArrayRef stride,
    ::std::optional<int64_t> storage_offset) const {
  auto src_impl = tensor_.impl();
  auto* src_tensor =
      std::dynamic_pointer_cast<phi::DenseTensor>(src_impl).get();
  if (!src_tensor) {
    PD_THROW("as_strided: tensor must be a DenseTensor");
  }
  auto new_tensor = std::make_shared<phi::DenseTensor>();
  new_tensor->ShareDataWith(*src_tensor);
  std::vector<int64_t> size_vec(size.begin(), size.end());
  std::vector<int64_t> stride_vec(stride.begin(), stride.end());
  new_tensor->Resize(common::make_ddim(size_vec));
  new_tensor->set_strides(common::make_ddim(stride_vec));
  // Apply the offset whenever the caller supplied one: an explicit 0 must
  // reset a non-zero offset inherited from the source via ShareDataWith
  // (the old `offset != 0` check silently dropped that case).
  if (storage_offset.has_value()) {
    auto meta = phi::DenseTensorMeta(new_tensor->meta());
    // meta.offset is in bytes; storage_offset is in elements
    meta.offset = static_cast<size_t>(storage_offset.value()) *
                  phi::SizeOf(new_tensor->dtype());
    new_tensor->set_meta(meta);
  }
  PaddleTensor result;
  result.set_impl(new_tensor);
  return Tensor(result);
}

// as_strided_: Inplace version
// as_strided_: In-place version — rewrites this tensor's dims/strides/offset
// metadata directly; the underlying storage is untouched.
// @param size            new dimensions
// @param stride          new strides (in elements)
// @param storage_offset  offset in elements; when absent the current offset
//                        is preserved
// @return *this, for chaining
// @throws if *this is not backed by a phi::DenseTensor
inline const at::Tensor& Tensor::as_strided_(
    at::IntArrayRef size,
    at::IntArrayRef stride,
    ::std::optional<int64_t> storage_offset) const {
  auto src_impl = tensor_.impl();
  auto* src_tensor =
      std::dynamic_pointer_cast<phi::DenseTensor>(src_impl).get();
  if (!src_tensor) {
    PD_THROW("as_strided_: tensor must be a DenseTensor");
  }
  std::vector<int64_t> size_vec(size.begin(), size.end());
  std::vector<int64_t> stride_vec(stride.begin(), stride.end());
  src_tensor->Resize(common::make_ddim(size_vec));
  src_tensor->set_strides(common::make_ddim(stride_vec));
  // Apply the offset whenever the caller supplied one: an explicit 0 must
  // clear a pre-existing non-zero offset (the old `offset != 0` check
  // silently dropped that case).
  if (storage_offset.has_value()) {
    auto meta = phi::DenseTensorMeta(src_tensor->meta());
    // meta.offset is in bytes; storage_offset is in elements
    meta.offset = static_cast<size_t>(storage_offset.value()) *
                  phi::SizeOf(src_tensor->dtype());
    src_tensor->set_meta(meta);
  }
  return *this;
}

// as_strided_scatter: Scatter src into a strided view
// Returns a new tensor (copy of self) with the strided window filled by src.
// The original tensor is NOT modified.
// as_strided_scatter: Scatter src into a strided view.
// Returns a new tensor (copy of self, same shape as self) with the strided
// window filled by src. The original tensor is NOT modified.
// @param src             values to write into the strided window
// @param size            window dimensions (must match src's shape)
// @param stride          window strides (in elements)
// @param storage_offset  window offset in elements
inline at::Tensor Tensor::as_strided_scatter(
    const at::Tensor& src,
    at::IntArrayRef size,
    at::IntArrayRef stride,
    ::std::optional<int64_t> storage_offset) const {
  // Clone self to an independent copy so the original tensor is left unchanged
  PaddleTensor self_copy = tensor_.copy_to(tensor_.place(), /*blocking=*/true);
  at::Tensor copy_tensor(self_copy);
  // Writing through the view mutates copy_tensor's storage in place.
  at::Tensor strided_view =
      copy_tensor.as_strided(size, stride, storage_offset);
  strided_view.copy_(src);
  // Return the full-shaped copy, not the strided window: per the contract
  // above (and torch.as_strided_scatter), the result has self's shape.
  // The previous `return strided_view;` returned the window instead.
  return copy_tensor;
}

} // namespace at
Loading
Loading