86 changes: 86 additions & 0 deletions paddle/fluid/pir/serialize_deserialize/include/interface.h
@@ -13,6 +13,8 @@
// limitations under the License.
#pragma once

#include <string>
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/pir/include/core/program.h"
namespace pir {
/**
@@ -60,4 +62,88 @@ void WriteModule(const pir::Program& program,
void ReadModule(const std::string& file_path,
pir::Program* program,
const uint64_t& pir_version);

/**
* @brief Save the given tensor into a single file at the specified file path
* with its name.
*
* @param[in] x The tensor to be saved.
* @param[in] name The name of the tensor.
* @param[in] file_path The path of the file to be written.
* @param[in] overwrite If the file already exists, this flag determines
* whether to overwrite the existing file.
* @param[in] save_as_fp16 If the flag is true, the tensor will be saved as
* fp16 type.
*
* @return void.
*
*/
void SaveFunction(const phi::DenseTensor& x,
const std::string& name,
const std::string& file_path,
bool overwrite,
bool save_as_fp16);

/**
* @brief Save the given tensor list into a combined file at the specified file
* path with the given names.
*
* @param[in] x The tensor list to be saved.
* @param[in] names The names of the tensors.
* @param[in] file_path The path of the file to be written.
* @param[in] overwrite If the file already exists, this flag determines
* whether to overwrite the existing file.
* @param[in] save_as_fp16 If the flag is true, the tensors will be saved as
* fp16 type.
*
* @param[in] save_to_memory If the flag is true, the tensors will be saved to
* memory.
*
* @return void.
*
*/
void SaveCombineFunction(const std::vector<const phi::DenseTensor*>& x,
const std::vector<std::string>& names,
const std::string& file_path,
bool overwrite,
bool save_as_fp16,
bool save_to_memory);

/**
* @brief Load a tensor from a single file at the specified file path.
*
* @param[in] file_path The path of the file to be read.
* @param[in] seek The position of the file to be read.
* @param[in] shape The shape of the tensor to be loaded.
* @param[in] load_as_fp16 If the flag is true, the tensor will be loaded
* as fp16 type.
* @param[out] out The tensor that receives the loaded data.
*
* @return void.
*
*/
void LoadFunction(const std::string& file_path,
int64_t seek,
const std::vector<int64_t>& shape,
bool load_as_fp16,
phi::DenseTensor* out);

/**
* @brief Load multiple tensors from a combined file at the specified file
* path by their names.
*
* @param[in] file_path The path of the file to be read.
* @param[in] names The names of the tensors.
* @param[out] out The tensors that receive the loaded data.
* @param[in] load_as_fp16 If the flag is true, the tensor will be loaded
* as fp16 type.
*
* @return void.
*
*/
void LoadCombineFunction(const std::string& file_path,
const std::vector<std::string>& names,
std::vector<phi::DenseTensor*>* out,
bool load_as_fp16);
} // namespace pir
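
Usage sketch (editorial note, not part of the diff): the snippet below shows one way the four entry points declared above could be called together. The tensor initialization, the file names under /tmp, and the use of -1 to mean "no seek" are illustrative assumptions, not details taken from the PR.

#include <string>
#include <vector>

#include "paddle/fluid/pir/serialize_deserialize/include/interface.h"

// Hedged sketch: assumes `weight` is an already initialized phi::DenseTensor,
// that the /tmp paths are writable, and that a negative seek with an empty
// shape means "read the whole tensor from the beginning".
void SaveAndReloadExample(const phi::DenseTensor& weight) {
  // Single-tensor round trip: write one tensor to its own file, then read it
  // back into a fresh DenseTensor.
  pir::SaveFunction(weight, "fc_w", "/tmp/fc_w.pdparams",
                    /*overwrite=*/true, /*save_as_fp16=*/false);

  phi::DenseTensor reloaded;
  pir::LoadFunction("/tmp/fc_w.pdparams", /*seek=*/-1, /*shape=*/{},
                    /*load_as_fp16=*/false, &reloaded);

  // Combined round trip: several tensors share one file and are addressed by
  // name on the way back in.
  std::vector<const phi::DenseTensor*> tensors = {&weight};
  std::vector<std::string> names = {"fc_w"};
  pir::SaveCombineFunction(tensors, names, "/tmp/model.pdiparams",
                           /*overwrite=*/true, /*save_as_fp16=*/false,
                           /*save_to_memory=*/false);

  phi::DenseTensor combined_out;
  std::vector<phi::DenseTensor*> outs = {&combined_out};
  pir::LoadCombineFunction("/tmp/model.pdiparams", names, &outs,
                           /*load_as_fp16=*/false);
}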

This file was deleted.

@@ -9,14 +9,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/pir/serialize_deserialize/include/save_load_parameters.h"

#include <cstdint>
#include <fstream>
#include <numeric>

#include "glog/logging.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/pir/serialize_deserialize/include/interface.h"
#include "paddle/phi/common/port.h"
#include "paddle/phi/kernels/funcs/data_type_transform.h"

@@ -157,7 +156,7 @@ void LoadFunction(const std::string& file_path,
"seek with tensor must great than or equal to 0"));
paddle::framework::DeserializeFromStream(fin, out, *dev_ctx, seek, shape);
} else {
paddle::framework::DeserializeFromStream(fin, out);
paddle::framework::DeserializeFromStream(fin, out, *dev_ctx);
}

auto in_dtype = out->dtype();
@@ -189,7 +188,7 @@ void LoadCombineFunction(const std::string& file_path,
const phi::DeviceContext* dev_ctx = GetDeviceContext(*(out->at(0)));
for (size_t i = 0; i < names.size(); i++) {
auto tensor = out->at(i);
paddle::framework::DeserializeFromStream(fin, tensor);
paddle::framework::DeserializeFromStream(fin, tensor, *dev_ctx);

auto in_dtype = tensor->dtype();
auto out_dtype = load_as_fp16 ? phi::DataType::FLOAT16 : in_dtype;
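
The functional change in this file is that the load paths now pass the device context resolved via GetDeviceContext(...) to DeserializeFromStream, as the seek branch already did. A condensed before/after view of the affected call (variable names taken from the hunks above; the stated rationale is an assumption, not spelled out in the PR):

// Before: the no-seek and combined paths deserialized without a context.
paddle::framework::DeserializeFromStream(fin, out);
// After: the resolved device context is supplied as well, presumably so the
// loaded tensor is allocated through the same context on every path.
paddle::framework::DeserializeFromStream(fin, out, *dev_ctx);
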
1 change: 0 additions & 1 deletion paddle/fluid/pybind/io.cc
@@ -18,7 +18,6 @@ limitations under the License. */
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/pir/serialize_deserialize/include/interface.h"
#include "paddle/fluid/pir/serialize_deserialize/include/save_load_parameters.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/pybind/pybind_variant_caster.h"
#include "paddle/utils/pybind.h"