Merged
Commits (33)
225d331
refine profiler && add runtime tracer
cjld Jan 12, 2019
fc6dc7d
test=develop
cjld Jan 12, 2019
41c82ca
test=develop
cjld Jan 12, 2019
e9a625f
test=develop
cjld Jan 12, 2019
98a7543
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cjld Jan 12, 2019
a84497c
test=develop
cjld Jan 12, 2019
c431b6a
test=develop
cjld Jan 13, 2019
e069fab
test=develop
cjld Jan 13, 2019
31be7bf
test=develop
cjld Jan 14, 2019
d17f79e
test=develop
cjld Jan 15, 2019
bb49a5b
fix bug && test=develop
cjld Jan 18, 2019
c2cb082
add thread id map && test=develop
cjld Jan 20, 2019
18f9158
test=develop
cjld Jan 20, 2019
eeb2aa7
testing
cjld Feb 3, 2019
5510f31
bug fix
cjld Feb 3, 2019
b59cd77
Merge branch 'profiler_refine_tmp' into profiler_refine
cjld Feb 3, 2019
fbd3122
remove cuda event && refine code && test=develop
cjld Feb 3, 2019
29c461f
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cjld Feb 3, 2019
dda480c
test=develop
cjld Feb 3, 2019
fa15b9f
test=develop
cjld Feb 3, 2019
6188131
test=develop
cjld Feb 3, 2019
02158db
fix windows temp file && test=develop
cjld Feb 15, 2019
a1ca223
test=develop
cjld Feb 17, 2019
58dea48
fix windows bug && test=develop
cjld Feb 17, 2019
e46c6b9
fix start up issue && test=develop
cjld Feb 17, 2019
b00bee2
code polish && test=develop
cjld Feb 18, 2019
058bcd8
remove unused code && test=develop
cjld Feb 18, 2019
f0c9f10
add some cupti cbid && test=develop
cjld Feb 18, 2019
f878edf
add FLAGS_multiple_of_cupti_buffer_size && test=develop
cjld Feb 19, 2019
3f29bb8
fix compile error && test=develop
cjld Feb 20, 2019
12ecf7d
add keyword && test=develop
cjld Feb 20, 2019
03a7de2
fix && test=develop
cjld Feb 20, 2019
4f39900
code polish && test=develop
cjld Feb 20, 2019
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/all_reduce_op_handle.cc
@@ -53,7 +53,7 @@ AllReduceOpHandle::AllReduceOpHandle(ir::Node *node,
 #endif

 void AllReduceOpHandle::RunImpl() {
-  platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second);
+  platform::RecordEvent record_event(Name());

   WaitInputVarGenerated();
   auto in_var_handles = DynamicCast<VarHandle>(this->Inputs());
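The same one-line change repeats across the op handles and RPC clients below: RecordEvent (and RecordRPCEvent) now takes only an event name, and the profiled span is simply the lifetime of the guard object. A minimal sketch of that scoped-usage pattern follows; the DoWork function and the "do_work" event name are illustrative, not taken from the PR.

#include "paddle/fluid/platform/profiler.h"

// Sketch only: assumes the post-change constructor that needs no DeviceContext.
void DoWork() {
  paddle::platform::RecordEvent record_event("do_work");  // event starts here
  // ... the work being profiled ...
}  // event ends when record_event goes out of scope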
2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/broadcast_op_handle.cc
@@ -22,7 +22,7 @@ namespace framework {
 namespace details {

 void BroadcastOpHandle::RunImpl() {
-  platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second);
+  platform::RecordEvent record_event(Name());

   if (places_.size() == 1) return;

paddle/fluid/framework/details/fused_broadcast_op_handle.cc
@@ -22,7 +22,7 @@ namespace framework {
 namespace details {

 void FusedBroadcastOpHandle::RunImpl() {
-  platform::RecordEvent record_event(Name(), dev_ctxes_.begin()->second);
+  platform::RecordEvent record_event(Name());

   if (places_.size() == 1UL) return;

2 changes: 1 addition & 1 deletion paddle/fluid/framework/details/reduce_op_handle.cc
@@ -139,7 +139,7 @@ void ReduceOpHandle::GatherSelectedRows(
 #endif

 void ReduceOpHandle::RunImpl() {
-  platform::RecordEvent record_event(Name(), dev_ctxes_.cbegin()->second);
+  platform::RecordEvent record_event(Name());

   if (places_.size() == 1) return;
   // the input and output may have dummy var.
paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc
@@ -63,7 +63,7 @@ FeedFetchList ScopeBufferedSSAGraphExecutor::Run(
     eptr = std::current_exception();
   }

-  platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr);
+  platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun");
   ++drop_scope_counter_;

   bool stream_end = false;
paddle/fluid/framework/details/threaded_ssa_graph_executor.cc
@@ -37,7 +37,7 @@ ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor(
 FeedFetchList ThreadedSSAGraphExecutor::Run(
     const std::vector<std::string> &fetch_tensors) {
   std::unique_ptr<platform::RecordEvent> event(
-      new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr));
+      new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare"));
   std::unordered_map<OpHandleBase *, size_t> pending_ops;
   std::unordered_set<VarHandleBase *> pending_vars;
   auto ready_vars = std::make_shared<BlockingQueue<VarHandleBase *>>();
4 changes: 1 addition & 3 deletions paddle/fluid/framework/operator.cc
@@ -177,9 +177,7 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
   // in concurrency scenerio. Here use an `if` to fix this issue.
   // Please not remove the `if`, ask @Superjomn if there are any concern.
   if (platform::IsProfileEnabled()) {
-    platform::DeviceContextPool& pool =
-        platform::DeviceContextPool::Instance();
-    platform::RecordEvent record_event(Type(), pool.Get(place));
+    platform::RecordEvent record_event(Type());
     RunImpl(scope, place);
   } else {
     RunImpl(scope, place);
8 changes: 2 additions & 6 deletions paddle/fluid/inference/tests/test_helper.h
@@ -171,9 +171,7 @@ void TestInference(const std::string& dirname,
   // Enable the profiler
   paddle::platform::EnableProfiler(state);
   {
-    paddle::platform::RecordEvent record_event(
-        "init_program",
-        paddle::platform::DeviceContextPool::Instance().Get(place));
+    paddle::platform::RecordEvent record_event("init_program");
     inference_program = InitProgram(&executor, scope, dirname, is_combined);
   }

@@ -230,9 +228,7 @@ void TestInference(const std::string& dirname,

   // Run repeat times to profile the performance
   for (int i = 0; i < repeat; ++i) {
-    paddle::platform::RecordEvent record_event(
-        "run_inference",
-        paddle::platform::DeviceContextPool::Instance().Get(place));
+    paddle::platform::RecordEvent record_event("run_inference");

     if (PrepareContext) {
       // Note: if you change the inference_program, you need to call
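For context, the test above brackets each phase with its own event after calling EnableProfiler. A condensed sketch of that flow with the simplified constructor follows; the DisableProfiler call, its sorting key, and the output path are assumptions about the profiler's usual counterpart, not shown in this excerpt.

#include "paddle/fluid/platform/profiler.h"

// Sketch only: enable the profiler, record two scoped events, then
// (assumed) disable it to aggregate and report the collected events.
void ProfileOnce() {
  paddle::platform::EnableProfiler(paddle::platform::ProfilerState::kCPU);
  {
    paddle::platform::RecordEvent record_event("init_program");
    // ... build the program ...
  }
  {
    paddle::platform::RecordEvent record_event("run_inference");
    // ... run it ...
  }
  // Assumed counterpart of EnableProfiler in this codebase.
  paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kTotal,
                                    "/tmp/profile");
}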
10 changes: 5 additions & 5 deletions paddle/fluid/operators/distributed/brpc/brpc_client.cc
@@ -80,7 +80,7 @@ VarHandlePtr BRPCClient::AsyncSendVar(const std::string& ep,
   google::protobuf::Closure* done = brpc::NewCallback(
       &HandleSendResponse, cntl, response, var_h, ch_ptr, ch_ctx, this);

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   ch_ctx->stub->SendVariable(cntl, &request, response, done);

@@ -184,7 +184,7 @@ VarHandlePtr BRPCClient::_AsyncGetVar(const std::string& ep,
   google::protobuf::Closure* done = brpc::NewCallback(
       &HandleGetResponse, cntl, response, var_h, ch_ptr, ch_ctx, this);

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   if (method_name == kGetMonomerRPC) {
     ch_ctx->stub->GetMonomerVariable(cntl, &req, response, done);

@@ -272,7 +272,7 @@ VarHandlePtr BRPCClient::AsyncPrefetchVar(const std::string& ep,
       &cntl->request_attachment(), out_var_name_val,
       false, 0, table_name_val);

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   google::protobuf::Closure* done = brpc::NewCallback(
       &HandleGetResponse, cntl, response, var_h, ch_ptr, ch_ctx, this);

@@ -311,7 +311,7 @@ VarHandlePtr BRPCClient::AsyncSendFetchBarrier(const std::string& ep,
   VarHandlePtr var_h(
       new VarHandle(ep, method, FETCH_BARRIER_MESSAGE, nullptr, nullptr));

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   google::protobuf::Closure* done = brpc::NewCallback(
       &HandleFetchBarrierResponse, cntl, response, var_h, ch_ptr, ch_ctx, this);

@@ -406,7 +406,7 @@ VarHandlePtr BRPCClient::AsyncSendVarMessage(
   sendrecv::VoidMessage* response = new sendrecv::VoidMessage();
   cntl->set_timeout_ms(time_out);

-  platform::RecordRPCEvent record_event(method_name, nullptr);
+  platform::RecordRPCEvent record_event(method_name);

   VarHandlePtr var_h(
       new VarHandle(ep, method_name, req.varname(), nullptr, nullptr));
16 changes: 8 additions & 8 deletions paddle/fluid/operators/distributed/grpc/grpc_client.cc
@@ -89,7 +89,7 @@ VarHandlePtr GRPCClient::AsyncSendVar(const std::string& ep,
   // stub context
   s->response_call_back_ = nullptr;

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   auto call = s->stub_g_.PrepareUnaryCall(
       s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_);

@@ -184,7 +184,7 @@ VarHandlePtr GRPCClient::_AsyncGetVar(
   // stub context
   s->response_call_back_ = ProcGetResponse;

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   auto call =
       s->stub_g_.PrepareUnaryCall(s->context_.get(), rpc_path, buf, &cq_);

@@ -235,7 +235,7 @@ VarHandlePtr GRPCClient::AsyncPrefetchVar(const std::string& ep,
   // stub context
   s->response_call_back_ = ProcGetResponse;

-  platform::RecordRPCEvent record_event(method, p_ctx);
+  platform::RecordRPCEvent record_event(method);

   auto call = s->stub_g_.PrepareUnaryCall(
       s->context_.get(), "/sendrecv.SendRecvService/PrefetchVariable", req,

@@ -265,7 +265,7 @@ VarHandlePtr GRPCClient::AsyncSendBatchBarrier(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(BATCH_BARRIER_MESSAGE);

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));

@@ -290,7 +290,7 @@ VarHandlePtr GRPCClient::AsyncSendFetchBarrier(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(FETCH_BARRIER_MESSAGE);

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   auto rpc = s->stub_->AsyncGetVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));

@@ -317,7 +317,7 @@ VarHandlePtr GRPCClient::AsyncGetMonomerBarrier(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(var_name);

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   auto rpc = s->stub_->AsyncGetMonomerBarrier(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));

@@ -342,7 +342,7 @@ VarHandlePtr GRPCClient::AsyncSendComplete(const std::string& ep,
   sendrecv::VariableMessage req;
   req.set_varname(COMPLETE_MESSAGE);

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));

@@ -372,7 +372,7 @@ VarHandlePtr GRPCClient::AsyncCheckpointNotify(const std::string& ep,
   req.set_varname(CHECKPOINT_SAVE_MESSAGE);
   req.set_out_varname(dir);

-  platform::RecordRPCEvent record_event(method, nullptr);
+  platform::RecordRPCEvent record_event(method);

   auto rpc = s->stub_->AsyncCheckpointNotify(s->context_.get(), req, &cq_);
   rpc->Finish(&s->reply_, &s->status_, reinterpret_cast<void*>(s));
4 changes: 2 additions & 2 deletions paddle/fluid/operators/distributed/grpc/grpc_serde.cc
@@ -38,7 +38,7 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
                            ::grpc::ByteBuffer* msg, const std::string& out_name,
                            const int trainer_id,
                            const std::string& table_name) {
-  platform::RecordRPCEvent record_event("serial", &ctx);
+  platform::RecordRPCEvent record_event("serial");
   VarMsg request;
   TensorPayload* payload = nullptr;

@@ -147,7 +147,7 @@ void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg,
                                const platform::DeviceContext& ctx,
                                const framework::Scope* scope,
                                framework::Variable** var, int* trainer_id) {
-  platform::RecordRPCEvent record_event("deserial", &ctx);
+  platform::RecordRPCEvent record_event("deserial");
   operators::distributed::GRPCVariableResponse resp(scope, &ctx);
   PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!");
   *var = resp.GetVar();
4 changes: 1 addition & 3 deletions paddle/fluid/operators/reader/read_op.cc
@@ -85,9 +85,7 @@ class ReadOp : public framework::OperatorBase {
     std::vector<framework::LoDTensor> ins;

     // For profiling
-    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-    auto& ctx = *pool.Get(dev_place);
-    platform::RecordEvent record_event(Type(), &ctx);
+    platform::RecordEvent record_event(Type());

     reader->ReadNext(&ins);
     if (ins.empty()) {
6 changes: 5 additions & 1 deletion paddle/fluid/platform/CMakeLists.txt
@@ -88,7 +88,11 @@ cc_library(timer SRCS timer.cc)
 cc_test(timer_test SRCS timer_test.cc DEPS timer)

 cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS})
-cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
+if(WITH_GPU)
+  nv_library(profiler SRCS profiler.cc profiler.cu DEPS device_context device_tracer)
+else()
+  cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer)
+endif()
 cc_test(profiler_test SRCS profiler_test.cc DEPS profiler)

 nv_test(float16_gpu_test SRCS float16_test.cu DEPS lod_tensor)