@@ -47,33 +47,33 @@ inline void CopyFrom(const Tensor& src, const platform::Place& dst_place,
 #ifdef PADDLE_WITH_CUDA
   else if (platform::is_gpu_place(src_place) &&  // NOLINT
            platform::is_cpu_place(dst_place)) {
-    auto src_gpu_place = boost::get<platform::GPUPlace>(src_place);
+    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
     auto dst_cpu_place = boost::get<platform::CPUPlace>(dst_place);
     auto ctx_place = ctx.GetPlace();
     PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
+    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
     PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
     memory::Copy(
         dst_cpu_place, dst_ptr, src_gpu_place, src_ptr, size,
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
   } else if (platform::is_cpu_place(src_place) &&
              platform::is_gpu_place(dst_place)) {
     auto src_cpu_place = boost::get<platform::CPUPlace>(src_place);
-    auto dst_gpu_place = boost::get<platform::GPUPlace>(dst_place);
+    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
     auto ctx_place = ctx.GetPlace();
     PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
+    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
     PADDLE_ENFORCE_EQ(dst_gpu_place, ctx_gpu_place);
     memory::Copy(
         dst_gpu_place, dst_ptr, src_cpu_place, src_ptr, size,
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
   } else if (platform::is_gpu_place(src_place) &&
              platform::is_gpu_place(dst_place)) {
-    auto src_gpu_place = boost::get<platform::GPUPlace>(src_place);
-    auto dst_gpu_place = boost::get<platform::GPUPlace>(dst_place);
+    auto src_gpu_place = boost::get<platform::CUDAPlace>(src_place);
+    auto dst_gpu_place = boost::get<platform::CUDAPlace>(dst_place);
     auto ctx_place = ctx.GetPlace();
     PADDLE_ENFORCE(platform::is_gpu_place(ctx_place));
-    auto ctx_gpu_place = boost::get<platform::GPUPlace>(ctx_place);
+    auto ctx_gpu_place = boost::get<platform::CUDAPlace>(ctx_place);
     PADDLE_ENFORCE_EQ(src_gpu_place, ctx_gpu_place);
     memory::Copy(
         dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size,
@@ -108,7 +108,7 @@ inline void CopyFromVector(const std::vector<T>& src,
 #ifdef PADDLE_WITH_CUDA
   else if (platform::is_gpu_place(dst_place)) {  // NOLINT
     memory::Copy(
-        boost::get<platform::GPUPlace>(dst_place), dst_ptr, src_place, src_ptr,
+        boost::get<platform::CUDAPlace>(dst_place), dst_ptr, src_place, src_ptr,
         size,
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
   }
@@ -141,7 +141,7 @@ inline void CopyToVector(const Tensor& src, const platform::DeviceContext& ctx,
 #ifdef PADDLE_WITH_CUDA
   else if (platform::is_gpu_place(src.place())) {  // NOLINT
     memory::Copy(
-        dst_place, dst_ptr, boost::get<platform::GPUPlace>(src.place()),
+        dst_place, dst_ptr, boost::get<platform::CUDAPlace>(src.place()),
         src_ptr, size,
         reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream());
   }
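
For orientation, here is a minimal round-trip sketch of how these helpers are called after the GPUPlace → CUDAPlace rename. It is not part of the commit; the include paths and the CUDADeviceContext-takes-a-CUDAPlace constructor are assumptions based on the framework of that era, while the CopyFromVector/CopyToVector signatures follow the hunk headers above.

```cpp
// Hypothetical usage sketch, not part of this commit. Assumes the
// paddle/framework and paddle/platform headers of the same era and the
// CopyFromVector/CopyToVector signatures shown in the hunks above.
#include <vector>

#include "paddle/framework/tensor_util.h"
#include "paddle/platform/device_context.h"
#include "paddle/platform/place.h"

void RoundTrip() {
  paddle::platform::CUDAPlace gpu(0);            // GPU device 0
  paddle::platform::CUDADeviceContext ctx(gpu);  // supplies the stream used by memory::Copy

  std::vector<float> src = {1.f, 2.f, 3.f};
  paddle::framework::Tensor gpu_tensor;
  // Host -> device copy, enqueued on ctx's CUDA stream.
  paddle::framework::CopyFromVector(src, ctx, &gpu_tensor);

  std::vector<float> dst;
  // Device -> host copy on the same stream.
  paddle::framework::CopyToVector(gpu_tensor, ctx, &dst);
  ctx.Wait();  // the copies are asynchronous; wait before reading dst
}
```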