11 changes: 8 additions & 3 deletions paddle/fluid/framework/details/async_ssa_graph_executor.cc
@@ -87,9 +87,14 @@ void ProcessGraph(std::vector<ir::Graph *> graphs, Scope *scope) {
   // init communicator here
   if (send_varname_to_ctx.size() > 0) {
     VLOG(3) << "this is distribute mode, will use communicator";
-    operators::distributed::Communicator::Init(send_varname_to_ctx,
-                                               recv_varname_to_ctx, scope);
-    operators::distributed::Communicator::GetInstance()->Start();
+
+    if (operators::distributed::Communicator::GetInstance() == nullptr) {
+      operators::distributed::Communicator::Init(send_varname_to_ctx,
+                                                 recv_varname_to_ctx, scope);
+      operators::distributed::Communicator::GetInstance()->Start();
+    } else {
+      VLOG(3) << "communicator has been initialized, skip";
+    }
   }
 #endif
 }
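The nullptr check added above makes ProcessGraph safe to call more than once: only the first call initializes and starts the communicator singleton, and later calls just log and skip. A minimal compilable sketch of the init-once singleton shape this relies on (simplified: no arguments, no thread safety; not the real Paddle declarations):

#include <iostream>
#include <memory>

class Communicator {
 public:
  // Returns nullptr until Init() has been called, which is exactly
  // what the guard in ProcessGraph tests for.
  static Communicator *GetInstance() { return communicator_.get(); }

  static void Init() {
    if (communicator_ == nullptr) {
      communicator_.reset(new Communicator());
    }
  }

  void Start() { std::cout << "communicator started\n"; }

 private:
  Communicator() = default;
  static std::unique_ptr<Communicator> communicator_;
};

std::unique_ptr<Communicator> Communicator::communicator_;

int main() {
  if (Communicator::GetInstance() == nullptr) {  // first call: init + start
    Communicator::Init();
    Communicator::GetInstance()->Start();
  }  // a second ProcessGraph-style call would skip this branch
}

Note that the check-then-Init sequence is only safe because graph construction happens on one thread; a concurrent caller would need std::call_once or a mutex.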
@@ -133,13 +133,6 @@ class AsyncSSAGraphBuilder : public MultiDevSSAGraphBuilderBase {
       VLOG(1) << "set recv op do_not_run to true";
       node->Op()->SetAttr("do_not_run", 1);
       node->Op()->Flush();
-    } else if (node->Name() == "lookup_table" || node->Name() == "nce" ||
-               node->Name() == "hierarchical_sigmoid") {
-      // in async_mode, we do not need remote prefetch, because communicator
-      // will do async parameter recv.
-      VLOG(1) << "set " << node->Name() << " op remote_prefetch to false";
-      node->Op()->SetAttr("remote_prefetch", false);
-      node->Op()->Flush();
     }
     return false;
   }
2 changes: 2 additions & 0 deletions paddle/fluid/framework/operator.h
@@ -248,6 +248,8 @@ class ExecutionContext {
     return op_.Attr<T>(name);
   }
 
+  bool HasAttr(const std::string& name) const { return op_.HasAttr(name); }
+
   bool HasInput(const std::string& name) const;
 
   bool HasOutput(const std::string& name) const;
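Adding ExecutionContext::HasAttr (forwarding to OperatorBase::HasAttr) lets a kernel probe for an attribute before reading it. That matters together with the first file in this diff: the graph pass no longer forces remote_prefetch on lookup_table/nce/hierarchical_sigmoid, so a kernel can no longer assume the attribute was set for it. A self-contained sketch of the probe-before-read pattern using a toy context class (illustrative only, not Paddle's ExecutionContext):

#include <iostream>
#include <string>
#include <unordered_map>

// Toy stand-in for ExecutionContext, just enough for HasAttr/Attr.
class Ctx {
 public:
  bool HasAttr(const std::string &name) const { return attrs_.count(name) > 0; }
  bool Attr(const std::string &name) const { return attrs_.at(name); }
  void Set(const std::string &name, bool v) { attrs_[name] = v; }

 private:
  std::unordered_map<std::string, bool> attrs_;
};

int main() {
  Ctx ctx;  // "remote_prefetch" deliberately left unset
  // Probe first; reading an absent attribute would throw.
  bool remote_prefetch =
      ctx.HasAttr("remote_prefetch") && ctx.Attr("remote_prefetch");
  std::cout << "remote_prefetch = " << remote_prefetch << std::endl;  // prints 0
}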
4 changes: 3 additions & 1 deletion paddle/fluid/memory/allocation/allocator_facade.cc
@@ -295,7 +295,9 @@ class AllocatorFacadePrivate {
 
 // Pimpl. Make interface clean.
 AllocatorFacade::AllocatorFacade() : m_(new AllocatorFacadePrivate()) {}
-AllocatorFacade::~AllocatorFacade() { delete m_; }
+// delete m_ may cause core dump when the destructor of python in conflict with
+// cpp.
+AllocatorFacade::~AllocatorFacade() {}
 
 AllocatorFacade& AllocatorFacade::Instance() {
   static AllocatorFacade instance;
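The emptied destructor deliberately leaks AllocatorFacadePrivate. AllocatorFacade::Instance() is a function-local static, so its destructor runs during static destruction at process exit; when the process also hosts a Python interpreter, that teardown can touch state Python has already destroyed (the core dump the comment mentions). Leaking is harmless here because the OS reclaims the memory at exit anyway. A generic sketch of the pattern (not Paddle code):

#include <cstdio>

struct Impl {
  // Imagine this touches CUDA or other global state.
  ~Impl() { std::puts("tearing down device state"); }
};

class Facade {
 public:
  static Facade &Instance() {
    static Facade instance;  // destroyed during static destruction
    return instance;
  }
  // Intentionally do NOT delete m_: skipping ~Impl avoids touching
  // state that may already be gone at interpreter shutdown.
  ~Facade() {}

 private:
  Facade() : m_(new Impl()) {}
  Impl *m_;
};

int main() { Facade::Instance(); }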
52 changes: 37 additions & 15 deletions paddle/fluid/operators/distributed/communicator.cc
@@ -73,14 +73,26 @@ Communicator::Communicator(const RpcCtxMap &send_varname_to_ctx,
   VLOG(0) << "communicator_max_merge_var_num: "
           << FLAGS_communicator_max_merge_var_num;
   VLOG(0) << "communicator_fake_rpc: " << FLAGS_communicator_fake_rpc;
-  send_scope_.reset(new Scope());
-  for (auto &iter : send_varname_to_ctx_) {
-    send_varname_to_queue_[iter.first] =
-        std::make_shared<BlockingQueue<std::shared_ptr<Variable>>>(
-            FLAGS_communicator_send_queue_size);
+
+  if (send_varname_to_ctx.size() == 0) {
+    VLOG(0) << "nothing need to be send, will not start send_thread";
+  } else {
+    send_scope_.reset(new Scope());
+    for (auto &iter : send_varname_to_ctx_) {
+      send_varname_to_queue_[iter.first] =
+          std::make_shared<BlockingQueue<std::shared_ptr<Variable>>>(
+              FLAGS_communicator_send_queue_size);
+    }
+    send_threadpool_.reset(
+        new ::ThreadPool(FLAGS_communicator_thread_pool_size));
+  }
+
+  if (recv_varname_to_ctx.size() == 0) {
+    VLOG(0) << "nothing need to be received, will not start recv_thread";
+  } else {
+    recv_threadpool_.reset(
+        new ::ThreadPool(FLAGS_communicator_thread_pool_size));
   }
-  send_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
-  recv_threadpool_.reset(new ::ThreadPool(FLAGS_communicator_thread_pool_size));
 }
 
 Communicator::~Communicator() {
@@ -157,18 +169,28 @@ void Communicator::SendThread() {
       task_f.wait();
     }
     auto after_run_send_graph = GetCurrentUS();
-    auto send_graph_use_time = after_run_send_graph - before_run_send_graph;
-    if (send_graph_use_time > 100) {
-      VLOG(1) << "run send graph use time "
-              << after_run_send_graph - before_run_send_graph;
-    }
-    if (!FLAGS_communicator_independent_recv_thread) {
-      RecvAll();
-    }
+
+    VLOG(3) << "run send graph use time "
+            << after_run_send_graph - before_run_send_graph;
+    RecvNonIndependent();
   }
   VLOG(0) << "communicator stopped, send thread exit";
 }
 
+void Communicator::RecvNonIndependent() {
+  if (!FLAGS_communicator_independent_recv_thread) {
+    return;
+  }
+
+  auto grad_num = grad_num_.load();
+  if (grad_num > 0) {
+    RecvAll();
+    grad_num_.store(0);
+  } else {
+    std::this_thread::sleep_for(std::chrono::milliseconds(10));
+  }
+}
+
 void Communicator::RecvAll() {
   VLOG(3) << "parallel run recv graph";
   if (!running_) return;
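RecvNonIndependent factors the non-independent-recv path out of SendThread's loop body and adds a poll-and-sleep policy: an atomic counter of pending gradients decides between pulling all parameters and backing off for 10 ms. A stripped-down, compilable sketch of that pattern (names simplified; the real method also returns early based on FLAGS_communicator_independent_recv_thread):

#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

std::atomic<int> grad_num{0};  // bumped whenever a gradient is queued

void RecvAll() { std::cout << "recv all parameters" << std::endl; }

// Mirrors the grad_num_ logic above: receive only when work has piled up,
// otherwise sleep briefly instead of spinning.
void RecvNonIndependent() {
  int n = grad_num.load();
  if (n > 0) {
    RecvAll();
    // Counts added between load() and store(0) are cleared without
    // triggering another recv; the loop tolerates that imprecision.
    grad_num.store(0);
  } else {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
  }
}

int main() {
  grad_num.store(3);
  RecvNonIndependent();  // pending work -> recv
  RecvNonIndependent();  // nothing pending -> sleeps ~10 ms
}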
3 changes: 3 additions & 0 deletions paddle/fluid/operators/distributed/communicator.h
@@ -167,12 +167,15 @@ class Communicator {
   void Start();
   void Stop();
 
+  bool IsRunning() { return running_; }
+
   // send grad
   void Send(const std::string& var_name, const framework::Scope& scope);
 
  private:
   // recv all parameter
   void RecvAll();
+  void RecvNonIndependent();
   void SendThread();
   void RecvThread();
 
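The new IsRunning() accessor gives callers a way to avoid starting the communicator twice. A hypothetical call-site guard (not part of this PR) combining it with the GetInstance() null check from the first file:

// Hypothetical fragment: start the singleton only if it exists
// and is not already running.
auto *comm = operators::distributed::Communicator::GetInstance();
if (comm != nullptr && !comm->IsRunning()) {
  comm->Start();
}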