Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
33 commits
Select commit Hold shift + click to select a range
225d331
refine profiler && add runtime tracer
cjld Jan 12, 2019
fc6dc7d
test=develop
cjld Jan 12, 2019
41c82ca
test=develop
cjld Jan 12, 2019
e9a625f
test=develop
cjld Jan 12, 2019
98a7543
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cjld Jan 12, 2019
a84497c
test=develop
cjld Jan 12, 2019
c431b6a
test=develop
cjld Jan 13, 2019
e069fab
test=develop
cjld Jan 13, 2019
31be7bf
test=develop
cjld Jan 14, 2019
d17f79e
test=develop
cjld Jan 15, 2019
bb49a5b
fix bug && test=develop
cjld Jan 18, 2019
c2cb082
add thread id map && test=develop
cjld Jan 20, 2019
18f9158
test=develop
cjld Jan 20, 2019
eeb2aa7
testing
cjld Feb 3, 2019
5510f31
bug fix
cjld Feb 3, 2019
b59cd77
Merge branch 'profiler_refine_tmp' into profiler_refine
cjld Feb 3, 2019
fbd3122
remove cuda event && refine code && test=develop
cjld Feb 3, 2019
29c461f
Merge branch 'develop' of https://github.com/PaddlePaddle/Paddle into…
cjld Feb 3, 2019
dda480c
test=develop
cjld Feb 3, 2019
fa15b9f
test=develop
cjld Feb 3, 2019
6188131
test=develop
cjld Feb 3, 2019
02158db
fix windows temp file && test=develop
cjld Feb 15, 2019
a1ca223
test=develop
cjld Feb 17, 2019
58dea48
fix windows bug && test=develop
cjld Feb 17, 2019
e46c6b9
fix start up issue && test=develop
cjld Feb 17, 2019
b00bee2
code polish && test=develop
cjld Feb 18, 2019
058bcd8
remove unused code && test=develop
cjld Feb 18, 2019
f0c9f10
add some cupti cbid && test=develop
cjld Feb 18, 2019
f878edf
add FLAGS_multiple_of_cupti_buffer_size && test=develop
cjld Feb 19, 2019
3f29bb8
fix compile error && test=develop
cjld Feb 20, 2019
12ecf7d
add keyword && test=develop
cjld Feb 20, 2019
03a7de2
fix && test=develop
cjld Feb 20, 2019
4f39900
code polish && test=develop
cjld Feb 20, 2019
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 24 additions & 13 deletions paddle/fluid/platform/device_tracer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -49,12 +49,20 @@ DeviceTracer *tracer = nullptr;
#ifdef PADDLE_WITH_CUPTI

namespace {
// TODO(panyx0718): Revisit the buffer size here.
uint64_t kBufSize = 32 * 1024;
// Best performance: the same size with CUPTI device buffer size(8M)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It is just an experimentally determined best-performing value.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed

uint64_t kBufSize = 1024 * 1024 * 8;
uint64_t kAlignSize = 8;
std::unordered_map<CUpti_CallbackId, std::string> runtime_cbid_str,
driver_cbid_str;

// Warns (once per process) that CUPTI reported a record with an invalid
// timestamp, and points the user at FLAGS_multiple_of_cupti_buffer_size,
// which enlarges the CUPTI activity buffers so fewer records are dropped.
void PrintCuptiHint() {
  // Not synchronized: nearby call sites note that only one thread records
  // trace events, so a plain static flag suffices — TODO confirm if more
  // callers are added.
  static bool showed = false;
  if (showed) return;
  showed = true;
  // Fixed typo in the user-facing message: "occured" -> "occurred".
  LOG(WARNING) << "Invalid timestamp occurred. Please try increasing the "
                  "FLAGS_multiple_of_cupti_buffer_size.";
}

#define ALIGN_BUFFER(buffer, align) \
(((uintptr_t)(buffer) & ((align)-1)) \
? ((buffer) + (align) - ((uintptr_t)(buffer) & ((align)-1))) \
Expand Down Expand Up @@ -140,15 +148,15 @@ void DisableActivity() {
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMCPY));
CUPTI_CALL(
dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONCURRENT_KERNEL));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DEVICE));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DEVICE));
// Disable all other activity record kinds.
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONTEXT));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_CONTEXT));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_DRIVER));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_RUNTIME));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMSET));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_NAME));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MARKER));
CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_OVERHEAD));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MEMSET));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_NAME));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_MARKER));
// CUPTI_CALL(dynload::cuptiActivityDisable(CUPTI_ACTIVITY_KIND_OVERHEAD));
}

void CUPTIAPI bufferRequested(uint8_t **buffer, size_t *size,
Expand Down Expand Up @@ -284,8 +292,9 @@ class DeviceTracerImpl : public DeviceTracer {
uint64_t end_ns, int64_t device_id, int64_t stream_id,
uint32_t correlation_id, uint64_t bytes) {
// 0 means timestamp information could not be collected for the kernel.
if (start_ns == 0 || end_ns == 0) {
if (start_ns == 0 || end_ns == 0 || start_ns == end_ns) {
VLOG(3) << name << " cannot be traced";
PrintCuptiHint();
return;
}
// NOTE(liangdun): lock is not needed, only one thread call this function.
Expand All @@ -298,8 +307,9 @@ class DeviceTracerImpl : public DeviceTracer {
int64_t device_id, int64_t stream_id,
uint32_t correlation_id) {
// 0 means timestamp information could not be collected for the kernel.
if (start == 0 || end == 0) {
if (start == 0 || end == 0 || start == end) {
VLOG(3) << correlation_id << " cannot be traced";
PrintCuptiHint();
return;
}
// NOTE(liangdun): lock is not needed, only one thread call this function.
Expand Down Expand Up @@ -372,8 +382,9 @@ class DeviceTracerImpl : public DeviceTracer {
for (auto &tmp : correlations_pairs)
for (auto &pair : tmp) correlations_[pair.first] = pair.second;
for (const KernelRecord &r : kernel_records_) {
if (correlations_.find(r.correlation_id) != correlations_.end()) {
Event *e = correlations_.at(r.correlation_id);
auto c = correlations_.find(r.correlation_id);
if (c != correlations_.end() && c->second != nullptr) {
Event *e = c->second;
e->AddCudaElapsedTime(r.start_ns, r.end_ns);
}
}
Expand All @@ -400,7 +411,7 @@ class DeviceTracerImpl : public DeviceTracer {
auto *event = profile_pb.add_events();
event->set_type(proto::Event::GPUKernel);
auto c = correlations_.find(r.correlation_id);
if (c != correlations_.end()) {
if (c != correlations_.end() && c->second != nullptr) {
event->set_name(c->second->name());
event->set_detail_info(r.name);
find++;
Expand Down
27 changes: 27 additions & 0 deletions paddle/fluid/platform/init.cc
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ limitations under the License. */
#include "paddle/fluid/string/split.h"
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/dynload/cupti.h"
#endif
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/init.h"
Expand All @@ -30,6 +31,9 @@ limitations under the License. */

DEFINE_int32(paddle_num_threads, 1,
"Number of threads for each paddle instance.");
DEFINE_int32(multiple_of_cupti_buffer_size, 1,
"Multiple of the CUPTI device buffer size. If the timestamps have "
"been dropped when you are profiling, try increasing this value.");

namespace paddle {
namespace framework {
Expand Down Expand Up @@ -78,7 +82,30 @@ void InitP2P(std::vector<int> devices) {
#endif
}

// Scales the CUPTI activity buffer attributes by
// FLAGS_multiple_of_cupti_buffer_size so that fewer profiling records are
// dropped. Per the surrounding comment in InitDevices, CUPTI attributes
// should be set before any CUDA context is created (keyword:
// CUpti_ActivityAttribute). No-op when the flag is 1 or when the build
// does not define PADDLE_WITH_CUPTI.
void InitCupti() {
#ifdef PADDLE_WITH_CUPTI
if (FLAGS_multiple_of_cupti_buffer_size == 1) return;
size_t attrValue = 0, attrValueSize = sizeof(size_t);
// Reads the current value of `attr`, multiplies it by the flag, logs the
// new value, and writes it back. PADDLE_ENFORCE(!...) treats any non-zero
// CUPTI status as failure. (No comments inside the macro body: a `//`
// before a trailing backslash would swallow the line continuation.)
#define MULTIPLY_ATTR_VALUE(attr) \
{ \
PADDLE_ENFORCE(!platform::dynload::cuptiActivityGetAttribute( \
attr, &attrValueSize, &attrValue)); \
attrValue *= FLAGS_multiple_of_cupti_buffer_size; \
VLOG(1) << "Set " #attr " " << attrValue << " byte"; \
PADDLE_ENFORCE(!platform::dynload::cuptiActivitySetAttribute( \
attr, &attrValueSize, &attrValue)); \
}
MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE);
MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_DEVICE_BUFFER_SIZE_CDP);
MULTIPLY_ATTR_VALUE(CUPTI_ACTIVITY_ATTR_PROFILING_SEMAPHORE_POOL_SIZE);
#undef MULTIPLY_ATTR_VALUE
#endif
}

void InitDevices(bool init_p2p) {
// CUPTI attribute should be set before any CUDA context is created (see CUPTI
// documentation).
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There should be a link here.

Copy link
Contributor Author

@cjld cjld Feb 20, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The link may expire in the future, I put a keyword CUpti_ActivityAttribute in the comment.

InitCupti();
/*Init all available devices by default */
std::vector<int> devices;
#ifdef PADDLE_WITH_CUDA
Expand Down
12 changes: 11 additions & 1 deletion paddle/fluid/platform/profiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -198,11 +198,19 @@ RecordBlock::~RecordBlock() {
ClearCurBlock();
}

// Blocks the host until every visible CUDA device has finished all work
// issued so far, so that pending activity is complete before the profiler
// state changes.
void SynchronizeAllDevice() {
  const int device_count = GetCUDADeviceCount();
  for (int dev_id = 0; dev_id < device_count; ++dev_id) {
    SetDeviceId(dev_id);
    PADDLE_ENFORCE(cudaDeviceSynchronize());
  }
}

void EnableProfiler(ProfilerState state) {
PADDLE_ENFORCE(state != ProfilerState::kDisabled,
"Can't enable profiling, since the input state is ",
"ProfilerState::kDisabled");

SynchronizeAllDevice();
std::lock_guard<std::mutex> l(profiler_mu);
if (state == g_state) {
return;
Expand All @@ -223,6 +231,7 @@ void EnableProfiler(ProfilerState state) {
}

void ResetProfiler() {
SynchronizeAllDevice();
GetDeviceTracer()->Reset();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What do you want here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fixed

std::lock_guard<std::mutex> guard(g_all_event_lists_mutex);
for (auto it = g_all_event_lists.begin(); it != g_all_event_lists.end();
Expand Down Expand Up @@ -450,6 +459,7 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,

void DisableProfiler(EventSortingKey sorted_key,
const std::string& profile_path) {
SynchronizeAllDevice();
std::lock_guard<std::mutex> l(profiler_mu);
if (g_state == ProfilerState::kDisabled) return;
// Mark the profiling stop.
Expand Down
3 changes: 2 additions & 1 deletion python/paddle/fluid/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,8 @@ def __bootstrap__():
'eager_delete_tensor_gb', 'fast_eager_deletion_mode',
'allocator_strategy', 'reader_queue_speed_test_mode',
'print_sub_graph_dir', 'pe_profile_fname', 'warpctc_dir',
'inner_op_parallelism', 'enable_parallel_graph'
'inner_op_parallelism', 'enable_parallel_graph',
'multiple_of_cupti_buffer_size'
]
if 'Darwin' not in sysstr:
read_env_flags.append('use_pinned_memory')
Expand Down