@@ -34,6 +34,7 @@ import re
 import subprocess
 import sys
 import time
+from functools import reduce
 
 from compare_perf_tests import LogParser
 
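Python 3 dropped the reduce builtin and moved it into functools, so the added import keeps any existing reduce() call sites working on both interpreters. A minimal standalone sketch (the lambda is illustrative, not code from this file):

    from functools import reduce

    # Works on Python 2 and 3; the bare builtin reduce() is Python 2 only.
    assert reduce(lambda acc, x: acc + x, [1, 2, 3, 4]) == 10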
@@ -65,7 +66,7 @@ class BenchmarkDriver(object):
 
     def _invoke(self, cmd):
         return self._subprocess.check_output(
-            cmd, stderr=self._subprocess.STDOUT)
+            cmd, stderr=self._subprocess.STDOUT, universal_newlines=True)
 
     @property
     def test_harness(self):
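On Python 3, check_output() returns bytes by default, which would break the text-oriented LogParser downstream; universal_newlines=True makes it decode the child's output and return str on both interpreters. An illustrative sketch (the echo command is an assumption, not part of the driver):

    import subprocess

    subprocess.check_output(['echo', 'hi'])                           # Python 3: b'hi\n'
    subprocess.check_output(['echo', 'hi'], universal_newlines=True)  # both: 'hi\n'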
@@ -144,7 +145,7 @@ class BenchmarkDriver(object):
             verbose, measure_memory, quantile, gather_metadata)
         output = self._invoke(cmd)
         results = self.parser.results_from_string(output)
-        return results.items()[0][1] if test else results
+        return list(results.items())[0][1] if test else results
 
     def _cmd_run(self, test, num_samples, num_iters, sample_time, min_samples,
                  verbose, measure_memory, quantile, gather_metadata):
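dict.items() returns a list on Python 2 but a non-subscriptable view on Python 3, so indexing it directly raises TypeError; wrapping it in list() restores indexing on both. A standalone sketch with a made-up results dict:

    results = {'Ackermann': object()}
    items = results.items()
    # items[0][1]        # Python 3: TypeError, 'dict_items' is not subscriptable
    list(items)[0][1]    # works on both interpreters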
@@ -219,9 +220,9 @@ class BenchmarkDriver(object):
             print(format(values))
 
         def result_values(r):
-            return map(str, [r.test_num, r.name, r.num_samples, r.min,
-                             r.samples.q1, r.median, r.samples.q3, r.max,
-                             r.max_rss])
+            return [str(value) for value in
+                    [r.test_num, r.name, r.num_samples, r.min,
+                     r.samples.q1, r.median, r.samples.q3, r.max, r.max_rss]]
 
         header = ['#', 'TEST', 'SAMPLES', 'MIN(μs)', 'Q1(μs)', 'MEDIAN(μs)',
                   'Q3(μs)', 'MAX(μs)', 'MAX_RSS(B)']
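Similarly, map() returns a lazy one-shot iterator on Python 3 instead of a list, so the rewrite to a list comprehension guarantees an indexable, reusable sequence of strings on both interpreters. In isolation:

    mapped = map(str, [1, 2.5, None])
    # mapped[0]                        # Python 3: TypeError, 'map' is not subscriptable
    [str(v) for v in [1, 2.5, None]]   # both: ['1', '2.5', 'None']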
@@ -303,7 +304,11 @@ class MarkdownReportHandler(logging.StreamHandler):
         msg = self.format(record)
         stream = self.stream
         try:
-            if (isinstance(msg, unicode) and
+            unicode_type = unicode  # Python 2
+        except NameError:
+            unicode_type = str  # Python 3
+        try:
+            if (isinstance(msg, unicode_type) and
                     getattr(stream, 'encoding', None)):
                 stream.write(msg.encode(stream.encoding))
             else:
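The name unicode exists only on Python 2; referencing it on Python 3 raises NameError, which the shim catches to fall back to str (every Python 3 string is already unicode). The same try/except NameError idiom in isolation:

    try:
        text_type = unicode   # defined on Python 2 only
    except NameError:
        text_type = str       # Python 3
    assert isinstance(u'MEDIAN(μs)', text_type)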
@@ -415,10 +420,10 @@ class BenchmarkDoctor(object):
         setup, ratio = BenchmarkDoctor._setup_overhead(measurements)
         setup = 0 if ratio < 0.05 else setup
         runtime = min(
-            [(result.samples.min - correction) for i_series in
-             [BenchmarkDoctor._select(measurements, num_iters=i)
-              for correction in [(setup / i) for i in [1, 2]]
-              ] for result in i_series])
+            [(result.samples.min - correction) for correction, i_series in
+             [(correction, BenchmarkDoctor._select(measurements, num_iters=i))
+              for i, correction in [(i, setup // i) for i in [1, 2]]]
+             for result in i_series])
 
         threshold = 1000
         if threshold < runtime:
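Two Python 3 changes appear to meet here: comprehension variables no longer leak into the enclosing scope (the old expression relied on correction escaping the inner comprehension), and / on integers now yields a float, so the rewrite threads each correction through explicit (correction, series) pairs and uses setup // i for floor division. Both pitfalls in a standalone sketch:

    [x for x in [1, 2]]
    # print(x)    # Python 2 prints 2 (leaked variable); Python 3 raises NameError

    5 / 2         # Python 2: 2; Python 3: 2.5
    5 // 2        # both: 2 (floor division)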
@@ -473,7 +478,7 @@ class BenchmarkDoctor(object):
 
     @staticmethod
     def _reasonable_setup_time(measurements):
-        setup = min([result.setup
+        setup = min([result.setup or 0
                      for result in BenchmarkDoctor._select(measurements)])
         if 200000 < setup:  # 200 ms
             BenchmarkDoctor.log_runtime.error(
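The guard suggests result.setup can be None (presumably when a log carries no setup measurement). Python 2's min() silently ordered None before every number, but Python 3 refuses to compare None with int and raises TypeError, so `or 0` normalizes the value first. In isolation:

    setups = [None, 300000]
    # min(setups)                 # Python 3: TypeError, '<' not supported
    min(s or 0 for s in setups)   # both: 0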
@@ -537,7 +542,7 @@ class BenchmarkDoctor(object):
 
         def capped(s):
             return min(s, 200)
-        run_args = [(capped(num_samples), 1), (capped(num_samples / 2), 2)]
+        run_args = [(capped(num_samples), 1), (capped(num_samples // 2), 2)]
         opts = self.driver.args.optimization
         opts = opts if isinstance(opts, list) else [opts]
         self.log.debug(
@@ -691,6 +696,7 @@ def parse_args(args):
     subparsers = parser.add_subparsers(
         title='Swift benchmark driver commands',
         help='See COMMAND -h for additional arguments', metavar='COMMAND')
+    subparsers.required = True
 
     shared_benchmarks_parser = argparse.ArgumentParser(add_help=False)
     benchmarks_group = shared_benchmarks_parser.add_mutually_exclusive_group()
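argparse subparsers were implicitly required on Python 2 but became optional on Python 3, so an invocation with no COMMAND would parse successfully and fail later instead of printing a usage error; setting required = True restores the old behavior. A standalone sketch:

    import argparse

    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True
    subparsers.add_parser('run')
    # parser.parse_args([]) now exits with:
    #   error: the following arguments are required: COMMAND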