Commit 3765c70

Author: Feiyu Chan

Add numpy-based implementation of spectral ops (PaddlePaddle#33)

* add numpy reference implementation of spectral ops

1 parent 1cf4587, commit 3765c70

3 files changed: +114, -12 lines

paddle/fluid/operators/spectral_op.cc

Lines changed: 2 additions & 2 deletions
@@ -578,7 +578,7 @@ void exec_fft(const DeviceContext& ctx, const Tensor* x, Tensor* out,
   TransCompute<platform::CPUDeviceContext, To>(ndim, ctx, transposed_output,
                                                out, reverse_dim_permute);
 }
-}  // namespace anonymous
+}  // anonymous namespace
 
 template <typename Ti, typename To>
 struct FFTC2CFunctor<platform::CPUDeviceContext, Ti, To> {
@@ -640,7 +640,7 @@ T compute_factor(int64_t size, FFTNormMode normalization) {
   PADDLE_THROW(
       platform::errors::InvalidArgument("Unsupported normalization type"));
 }
-}  //namespace anonymous
+}  // anonymous namespace
 
 template <typename Ti, typename To>
 struct FFTC2CFunctor<platform::CPUDeviceContext, Ti, To> {
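
For reference, compute_factor in the second hunk maps an FFTNormMode value to the scale applied to the transform output. The enum's actual values are not visible in this diff, so the Python sketch below uses placeholder mode names purely to illustrate the idea; it is not part of the commit.

import math

# Hypothetical Python mirror of compute_factor. Mode names are placeholders;
# the real FFTNormMode enumerators live in the C++ sources, not shown here.
def compute_factor(size, normalization):
    if normalization == "none":       # unnormalized transform
        return 1.0
    if normalization == "by_n":       # scale by 1/n (classic inverse-FFT scaling)
        return 1.0 / size
    if normalization == "by_sqrt_n":  # orthonormal scaling
        return 1.0 / math.sqrt(size)
    raise ValueError("Unsupported normalization type")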

paddle/fluid/operators/spectral_op.cu

Lines changed: 11 additions & 10 deletions
@@ -520,9 +520,10 @@ static inline PlanLRUCache& cufft_get_plan_cache(int64_t device_index) {
   return *plan_caches[device_index];
 }
 
-// Execute a general unnormalized fft operation (can be c2c, onesided r2c or onesided c2r)
+// Execute a general unnormalized fft operation (can be c2c, onesided r2c or
+// onesided c2r)
 template <typename DeviceContext, typename Ti, typename To>
-void exec_fft(const DeviceContext& ctx, const Tensor* X, Tensor* out,
+void exec_fft(const DeviceContext& ctx, const Tensor* X, Tensor* out,
               const std::vector<int64_t>& dim, bool forward) {
   const auto x_dims = framework::vectorize(X->dims());
   const auto out_dims = framework::vectorize(out->dims());
@@ -753,8 +754,8 @@ struct FFTC2CFunctor<platform::CUDADeviceContext, Ti, To> {
         std::min(static_cast<size_t>(kMaxCUFFTNdim), working_axes.size());
     first_dims.assign(working_axes.end() - max_dims, working_axes.end());
 
-    exec_fft<platform::CUDADeviceContext, Ti, To>(
-        ctx, p_working_tensor, p_out, first_dims, forward);
+    exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, p_working_tensor,
+                                                  p_out, first_dims, forward);
     working_axes.resize(working_axes.size() - max_dims);
     first_dims.clear();
 
@@ -781,8 +782,8 @@ struct FFTC2RFunctor<platform::CUDADeviceContext, Ti, To> {
       framework::Tensor x_copy(X->type());
       x_copy.mutable_data<Ti>(X->dims(), ctx.GetPlace());
       framework::TensorCopy(*X, ctx.GetPlace(), &x_copy);
-      exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, &x_copy, out,
-                                                     axes, forward);
+      exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, &x_copy, out, axes,
+                                                     forward);
     } else {
       framework::Tensor temp_tensor;
       temp_tensor.mutable_data<Ti>(X->dims(), ctx.GetPlace());
@@ -791,8 +792,8 @@ struct FFTC2RFunctor<platform::CUDADeviceContext, Ti, To> {
      FFTC2CFunctor<platform::CUDADeviceContext, Ti, Ti> c2c_functor;
      c2c_functor(ctx, X, &temp_tensor, dims, FFTNormMode::none, forward);
 
-      exec_fft<platform::CUDADeviceContext, Ti, To>(
-          ctx, &temp_tensor, out, {axes.back()}, forward);
+      exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, &temp_tensor, out,
+                                                     {axes.back()}, forward);
     }
     exec_normalization<platform::CUDADeviceContext, To>(
         ctx, out, out, normalization, out_dims, axes);
@@ -809,8 +810,8 @@ struct FFTR2CFunctor<platform::CUDADeviceContext, Ti, To> {
    framework::Tensor* r2c_out = out;
    const std::vector<int64_t> last_dim{axes.back()};
    std::vector<int64_t> out_dims = framework::vectorize(out->dims());
-    exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, X, r2c_out,
-                                                   last_dim, forward);
+    exec_fft<platform::CUDADeviceContext, Ti, To>(ctx, X, r2c_out, last_dim,
+                                                   forward);
 
    // Step2: C2C transform on the remaining dimension
    framework::Tensor c2c_out;
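
Aside: the FFTC2CFunctor hunk above shows the loop that feeds exec_fft at most kMaxCUFFTNdim trailing axes per call, which is presumably there because a single cuFFT plan handles at most 3 transform dimensions. A rough Python sketch of that chunking, with hypothetical stand-ins for the C++ names, is below; it is illustrative only, not part of the commit.

# Illustrative sketch of the axis-chunking loop in FFTC2CFunctor above.
# `exec_fft` and `max_cufft_ndim` are stand-ins for the C++ entities.
def c2c_over_many_axes(exec_fft, x, axes, max_cufft_ndim=3):
    working_axes = list(axes)
    while working_axes:
        max_dims = min(max_cufft_ndim, len(working_axes))
        first_dims = working_axes[-max_dims:]    # trailing axes go first
        x = exec_fft(x, first_dims)              # one cuFFT execution per chunk
        working_axes = working_axes[:-max_dims]  # drop the transformed axes
    return x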
Lines changed: 101 additions & 0 deletions
@@ -0,0 +1,101 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from functools import partial
from numpy import asarray
from numpy.fft._pocketfft import _raw_fft, _raw_fftnd, _get_forward_norm, _get_backward_norm, _cook_nd_args


def _fftc2c(a, n=None, axis=-1, norm=None, forward=None):
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if forward:
        inv_norm = _get_forward_norm(n, norm)
    else:
        inv_norm = _get_backward_norm(n, norm)
    output = _raw_fft(a, n, axis, False, forward, inv_norm)
    return output


def _fftr2c(a, n=None, axis=-1, norm=None, forward=None):
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    if forward:
        inv_norm = _get_forward_norm(n, norm)
    else:
        inv_norm = _get_backward_norm(n, norm)
    output = _raw_fft(a, n, axis, True, True, inv_norm)
    if not forward:
        output = output.conj()
    return output


def _fftc2r(a, n=None, axis=-1, norm=None, forward=None):
    a = asarray(a)
    if n is None:
        n = (a.shape[axis] - 1) * 2
    if forward:
        inv_norm = _get_forward_norm(n, norm)
    else:
        inv_norm = _get_backward_norm(n, norm)
    output = _raw_fft(a.conj() if forward else a, n, axis, True, False, inv_norm)
    return output


def fft_c2c(x, axes, normalization, forward):
    f = partial(_fftc2c, forward=forward)
    y = _raw_fftnd(x, s=None, axes=axes, function=f, norm=normalization)
    return y


def fft_c2c_backward(dy, axes, normalization, forward):
    f = partial(_fftc2c, forward=forward)
    dx = _raw_fftnd(dy, s=None, axes=axes, function=f, norm=normalization)
    return dx


def fft_r2c(x, axes, normalization, forward, onesided):
    a = asarray(x)
    s, axes = _cook_nd_args(a, axes=axes)
    if onesided:
        a = _fftr2c(a, s[-1], axes[-1], normalization, forward)
        for ii in range(len(axes) - 1):
            a = _fftc2c(a, s[ii], axes[ii], normalization, forward)
    else:
        a = fft_c2c(x, axes, normalization, forward)
    return a


def fft_r2c_backward(dy, x, axes, normalization, forward, onesided):
    a = dy
    if not onesided:
        a = fft_c2c_backward(a, axes, normalization, forward).real
    else:
        pad_widths = [(0, 0)] * a.ndim
        last_axis = axes[-1]
        if last_axis < 0:
            last_axis += a.ndim
        last_dim_size = a.shape[last_axis]
        pad_widths[last_axis] = (0, x.shape[last_axis] - last_dim_size)
        a = np.pad(a, pad_width=pad_widths)
        a = fft_c2c_backward(a, axes, normalization, forward).real
    return a


def fft_c2r(x, axes, normalization, forward, last_dim_size):
    pass
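
The helpers above are thin wrappers over NumPy's private pocketfft entry points, so they can be sanity-checked against the public numpy.fft API. Below is a minimal sketch of such a check; it is not part of the commit, it assumes a NumPy version (roughly 1.20-1.24) that still exposes _raw_fft, _raw_fftnd, _get_forward_norm, _get_backward_norm and _cook_nd_args with these signatures, and it assumes the file above is importable as spectral_op_np (its actual path is not shown in this view).

# Minimal sanity check (not part of the commit). `spectral_op_np` is an assumed
# module name for the new file above; adjust the import to its real location.
import numpy as np
from spectral_op_np import fft_c2c, fft_r2c

rng = np.random.default_rng(0)

# c2c over both axes should agree with numpy.fft.fftn.
x = rng.standard_normal((4, 6)) + 1j * rng.standard_normal((4, 6))
y = fft_c2c(x, axes=(0, 1), normalization="backward", forward=True)
np.testing.assert_allclose(y, np.fft.fftn(x, axes=(0, 1), norm="backward"), atol=1e-12)

# onesided r2c on a real input should agree with numpy.fft.rfftn.
r = rng.standard_normal((4, 6))
z = fft_r2c(r, axes=(0, 1), normalization="backward", forward=True, onesided=True)
np.testing.assert_allclose(z, np.fft.rfftn(r, axes=(0, 1), norm="backward"), atol=1e-12)

fft_c2r itself is left as a stub (pass) in this commit. For illustration only, a counterpart mirroring fft_r2c above and numpy.fft.irfftn could look like the following sketch; this is an assumption about where the code is headed, not what the commit contains.

# Hypothetical completion of the fft_c2r stub (illustrative only): c2c transforms
# on the leading axes, then a c2r transform on the last axis, as in numpy's irfftn.
def fft_c2r(x, axes, normalization, forward, last_dim_size):
    a = asarray(x)
    s, axes = _cook_nd_args(a, axes=axes, invreal=1)
    if last_dim_size is not None:
        s[-1] = last_dim_size
    for ii in range(len(axes) - 1):
        a = _fftc2c(a, s[ii], axes[ii], normalization, forward)
    a = _fftc2r(a, s[-1], axes[-1], normalization, forward)
    return a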
