We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 1ef06be · commit d818a7e (copy full SHA for d818a7e)
torchtune/training/memory.py
@@ -310,8 +310,9 @@ def log_memory_stats(
310
"""
311
device_support = get_device_support()
312
_log.info(
313
- f"{message}"
314
- f"\n\t{device_support.device_name} peak memory allocation: {stats['peak_memory_alloc']:.2f} GiB"
315
- f"\n\t{device_support.device_name} peak memory reserved: {stats.get('peak_memory_reserved', 0):.2f} GiB"
316
- f"\n\t{device_support.device_name} peak memory active: {stats['peak_memory_active']:.2f} GiB"
+ f"{message}\n"
+ + "\n".join(
+ f"\t{device_support.device_name} {key.replace('_', ' ')}: {value:.2f} GiB"
+ for key, value in stats.items()
317
+ )
318
)
0 commit comments