diff --git a/.ci/Dockerfile b/.ci/Dockerfile index 46adf92dd1a..a7b75d6c97f 100644 --- a/.ci/Dockerfile +++ b/.ci/Dockerfile @@ -1,5 +1,5 @@ -# Last updated 4/8/2020 (to rebuild the docker image, update this timestamp) -FROM cirrusci/flutter:latest +# Last updated 10/22/2020 (to rebuild the docker image, update this timestamp) +FROM cirrusci/flutter:stable-web RUN sudo apt-get update && \ sudo apt-get upgrade --yes && \ diff --git a/.cirrus.yml b/.cirrus.yml index c3eb5a1c407..673bf9ba34e 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -11,7 +11,7 @@ task: activate_script: pub global activate flutter_plugin_tools matrix: - name: analyze - script: ./script/incremental_build.sh analyze + script: ./script/incremental_build.sh analyze --custom-analysis=web_benchmarks/testing/test_app - name: publishable script: ./script/check_publish.sh depends_on: @@ -31,6 +31,16 @@ task: - ./script/incremental_build.sh java-test # must come after apk build depends_on: - analyze + - name: web_benchmarks_test + script: + - ./script/install_chromium.sh + - export CHROME_EXECUTABLE=$(pwd)/.chromium/chrome-linux/chrome + - flutter config --enable-web + - cd packages/web_benchmarks/testing/test_app + - flutter packages get + - cd ../.. + - flutter packages get + - dart testing/web_benchmarks_test.dart task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' diff --git a/.gitignore b/.gitignore index 9cbde5567b9..0d71f02078b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,9 @@ GeneratedPluginRegistrant.java *instrumentscli*.trace *.cipd +# Build directories are produced when building using the Flutter CLI. +build + +# This file is produced as a back-up when web_benchmarks fails to parse a +# Chrome trace. +chrome-trace.json diff --git a/packages/web_benchmarks/CHANGELOG.md b/packages/web_benchmarks/CHANGELOG.md new file mode 100644 index 00000000000..46889e1035e --- /dev/null +++ b/packages/web_benchmarks/CHANGELOG.md @@ -0,0 +1,3 @@ +## 0.0.1 - Initial release. + +* Provide a benchmark server (host-side) and a benchmark client (browser-side). diff --git a/packages/web_benchmarks/LICENSE b/packages/web_benchmarks/LICENSE new file mode 100644 index 00000000000..bc67b8f9556 --- /dev/null +++ b/packages/web_benchmarks/LICENSE @@ -0,0 +1,27 @@ +Copyright 2019 The Chromium Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/packages/web_benchmarks/README.md b/packages/web_benchmarks/README.md
new file mode 100644
index 00000000000..ba6a1cb44ad
--- /dev/null
+++ b/packages/web_benchmarks/README.md
@@ -0,0 +1,15 @@
+# web_benchmarks
+
+A benchmark harness for Flutter Web apps. Currently only supports running
+benchmarks in Chrome.
+
+# Writing a benchmark
+
+An example benchmark can be found in [testing/web_benchmarks_test.dart][1].
+
+A web benchmark is made of two parts: a client and a server. The client is the
+code that runs in the browser together with the benchmark code. The server
+serves the app's code and assets. Additionally, the server communicates with
+the browser to extract the performance traces.
+
+[1]: https://github.com/flutter/packages/blob/master/packages/web_benchmarks/testing/web_benchmarks_test.dart
diff --git a/packages/web_benchmarks/lib/client.dart b/packages/web_benchmarks/lib/client.dart
new file mode 100644
index 00000000000..3238540aa9a
--- /dev/null
+++ b/packages/web_benchmarks/lib/client.dart
@@ -0,0 +1,383 @@
+// Copyright 2014 The Flutter Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'dart:async';
+import 'dart:convert' show json;
+import 'dart:html' as html;
+import 'dart:math' as math;
+
+import 'package:meta/meta.dart';
+
+import 'src/recorder.dart';
+export 'src/recorder.dart';
+
+/// Signature for a function that creates a [Recorder].
+typedef RecorderFactory = Recorder Function();
+
+/// List of all benchmarks that run in the devicelab.
+///
+/// When adding a new benchmark, add it to this map. Make sure that the name
+/// of your benchmark is unique.
+Map<String, RecorderFactory> _benchmarks;
+
+final LocalBenchmarkServerClient _client = LocalBenchmarkServerClient();
+
+/// Starts a local benchmark client to run [benchmarks].
+///
+/// Usually used in combination with a benchmark server, which instructs the
+/// client to run each benchmark in sequence.
+///
+/// When used without a server, prompts the user to select a benchmark to
+/// run next.
+Future<void> runBenchmarks(Map<String, RecorderFactory> benchmarks) async {
+  assert(benchmarks != null);
+
+  // Set local benchmarks.
+  _benchmarks = benchmarks;
+
+  // Check if the benchmark server wants us to run a specific benchmark.
+  final String nextBenchmark = await _client.requestNextBenchmark();
+
+  if (nextBenchmark == LocalBenchmarkServerClient.kManualFallback) {
+    _fallbackToManual(
+        'The server did not tell us which benchmark to run next.');
+    return;
+  }
+
+  await _runBenchmark(nextBenchmark);
+  html.window.location.reload();
+}
+
+Future<void> _runBenchmark(String benchmarkName) async {
+  final RecorderFactory recorderFactory = _benchmarks[benchmarkName];
+
+  if (recorderFactory == null) {
+    _fallbackToManual('Benchmark $benchmarkName not found.');
+    return;
+  }
+
+  await runZoned<Future<void>>(
+    () async {
+      final Recorder recorder = recorderFactory();
+      final Runner runner = recorder.isTracingEnabled && !_client.isInManualMode
+          ?
Runner( + recorder: recorder, + setUpAllDidRun: () => + _client.startPerformanceTracing(benchmarkName), + tearDownAllWillRun: _client.stopPerformanceTracing, + ) + : Runner(recorder: recorder); + + final Profile profile = await runner.run(); + if (!_client.isInManualMode) { + await _client.sendProfileData(profile); + } else { + _printResultsToScreen(profile); + print(profile); + } + }, + zoneSpecification: ZoneSpecification( + print: (Zone self, ZoneDelegate parent, Zone zone, String line) async { + if (_client.isInManualMode) { + parent.print(zone, '[$benchmarkName] $line'); + } else { + await _client.printToConsole(line); + } + }, + handleUncaughtError: ( + Zone self, + ZoneDelegate parent, + Zone zone, + Object error, + StackTrace stackTrace, + ) async { + if (_client.isInManualMode) { + parent.print(zone, '[$benchmarkName] $error, $stackTrace'); + parent.handleUncaughtError(zone, error, stackTrace); + } else { + await _client.reportError(error, stackTrace); + } + }, + ), + ); +} + +void _fallbackToManual(String error) { + html.document.body.appendHtml(''' +
+    <div id="manual-panel">
+      <h3>$error</h3>
+
+      <p>Choose one of the following benchmarks:</p>
+
+      <!-- Absolutely position the list so the buttons receive the clicks. -->
+      <ul style="position: absolute">
+        ${_benchmarks.keys.map((String name) => '<li><button id="$name">$name</button></li>').join('\n')}
+      </ul>
+    </div>
+  ''',
+      validator: html.NodeValidatorBuilder()
+        ..allowHtml5()
+        ..allowInlineStyles());
+
+  for (final String benchmarkName in _benchmarks.keys) {
+    final html.Element button = html.document.querySelector('#$benchmarkName');
+    button.addEventListener('click', (_) {
+      final html.Element manualPanel =
+          html.document.querySelector('#manual-panel');
+      manualPanel?.remove();
+      _runBenchmark(benchmarkName);
+    });
+  }
+}
+
+/// Visualizes results on the Web page for manual inspection.
+void _printResultsToScreen(Profile profile) {
+  html.document.body.innerHtml = '<h2>${profile.name}</h2>';
+
+  profile.scoreData.forEach((String scoreKey, Timeseries timeseries) {
+    html.document.body.appendHtml('<h2>$scoreKey</h2>');
+    html.document.body.appendHtml('<pre>${timeseries.computeStats()}</pre>');
+    html.document.body.append(TimeseriesVisualization(timeseries).render());
+  });
+}
+
+/// Draws timeseries data and statistics on a canvas.
+class TimeseriesVisualization {
+  /// Creates a visualization for a [Timeseries].
+  TimeseriesVisualization(this._timeseries) {
+    _stats = _timeseries.computeStats();
+    _canvas = html.CanvasElement();
+    _screenWidth = html.window.screen.width;
+    _canvas.width = _screenWidth;
+    _canvas.height = (_kCanvasHeight * html.window.devicePixelRatio).round();
+    _canvas.style
+      ..width = '100%'
+      ..height = '${_kCanvasHeight}px'
+      ..outline = '1px solid green';
+    _ctx = _canvas.context2D;
+
+    // The amount of vertical space available on the chart. Because some
+    // outliers can be huge, they can dwarf all the useful values. So we
+    // limit the range to 1.5x the biggest non-outlier.
+    _maxValueChartRange = 1.5 *
+        _stats.samples
+            .where((AnnotatedSample sample) => !sample.isOutlier)
+            .map((AnnotatedSample sample) => sample.magnitude)
+            .fold(0, math.max);
+  }
+
+  static const double _kCanvasHeight = 200;
+
+  final Timeseries _timeseries;
+  TimeseriesStats _stats;
+  html.CanvasElement _canvas;
+  html.CanvasRenderingContext2D _ctx;
+  int _screenWidth;
+
+  // Used to normalize benchmark values to chart height.
+  double _maxValueChartRange;
+
+  /// Converts a sample value to vertical canvas coordinates.
+  ///
+  /// This does not work for horizontal coordinates.
+  double _normalized(double value) {
+    return _kCanvasHeight * value / _maxValueChartRange;
+  }
+
+  /// A utility for drawing lines.
+  void drawLine(num x1, num y1, num x2, num y2) {
+    _ctx.beginPath();
+    _ctx.moveTo(x1, y1);
+    _ctx.lineTo(x2, y2);
+    _ctx.stroke();
+  }
+
+  /// Renders the timeseries into a `<canvas>` and returns the canvas element.
+  html.CanvasElement render() {
+    _ctx.translate(0, _kCanvasHeight * html.window.devicePixelRatio);
+    _ctx.scale(1, -html.window.devicePixelRatio);
+
+    final double barWidth = _screenWidth / _stats.samples.length;
+    double xOffset = 0;
+    for (int i = 0; i < _stats.samples.length; i++) {
+      final AnnotatedSample sample = _stats.samples[i];
+
+      if (sample.isWarmUpValue) {
+        // Put a gray background behind warm-up samples.
+        _ctx.fillStyle = 'rgba(200,200,200,1)';
+        _ctx.fillRect(xOffset, 0, barWidth, _normalized(_maxValueChartRange));
+      }
+
+      if (sample.magnitude > _maxValueChartRange) {
+        // The sample value is so big it doesn't fit on the chart. Paint it purple.
+        _ctx.fillStyle = 'rgba(100,50,100,0.8)';
+      } else if (sample.isOutlier) {
+        // The sample is an outlier, color it light red.
+        _ctx.fillStyle = 'rgba(255,50,50,0.6)';
+      } else {
+        // A non-outlier sample, color it light blue.
+        _ctx.fillStyle = 'rgba(50,50,255,0.6)';
+      }
+
+      _ctx.fillRect(xOffset, 0, barWidth - 1, _normalized(sample.magnitude));
+      xOffset += barWidth;
+    }
+
+    // Draw a horizontal solid line corresponding to the average.
+    _ctx.lineWidth = 1;
+    drawLine(0, _normalized(_stats.average), _screenWidth,
+        _normalized(_stats.average));
+
+    // Draw a horizontal dashed line corresponding to the outlier cut-off.
+    _ctx.setLineDash(<num>[5, 5]);
+    drawLine(0, _normalized(_stats.outlierCutOff), _screenWidth,
+        _normalized(_stats.outlierCutOff));
+
+    // Draw a light red band that shows the noise (1 stddev in each direction).
+    _ctx.fillStyle = 'rgba(255,50,50,0.3)';
+    _ctx.fillRect(
+      0,
+      _normalized(_stats.average * (1 - _stats.noise)),
+      _screenWidth,
+      _normalized(2 * _stats.average * _stats.noise),
+    );
+
+    return _canvas;
+  }
+}
+
+/// Implements the client REST API for the local benchmark server.
+///
+/// The local server is optional. If it is not available, the benchmark UI must
+/// implement a manual fallback. This allows debugging benchmarks using plain
+/// `flutter run`.
+class LocalBenchmarkServerClient {
+  /// This value is returned by [requestNextBenchmark].
+  static const String kManualFallback = '__manual_fallback__';
+
+  /// Whether we fell back to manual mode.
+  ///
+  /// This happens when you run benchmarks using plain `flutter run` rather
+  /// than the devicelab test harness. The test harness spins up a special
+  /// server that provides an API for automatically picking the next benchmark
+  /// to run.
+  bool isInManualMode;
+
+  /// Asks the local server for the name of the next benchmark to run.
+  ///
+  /// Returns [kManualFallback] if the local server is not available (uses 404
+  /// as a signal).
+  Future<String> requestNextBenchmark() async {
+    final html.HttpRequest request = await _requestXhr(
+      '/next-benchmark',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(_benchmarks.keys.toList()),
+    );
+
+    // 404 is expected in the following cases:
+    // - The benchmark is run using plain `flutter run`, which does not provide
+    //   a "next-benchmark" handler.
+    // - We ran all benchmarks and the server is telling us there are no more
+    //   benchmarks to run.
+    if (request.status == 404) {
+      isInManualMode = true;
+      return kManualFallback;
+    }
+
+    isInManualMode = false;
+    return request.responseText;
+  }
+
+  void _checkNotManualMode() {
+    if (isInManualMode) {
+      throw StateError('Operation not supported in manual fallback mode.');
+    }
+  }
+
+  /// Asks the local server to begin tracing performance.
+  ///
+  /// This uses the chrome://tracing tracer, which is not available from within
+  /// the page itself, and therefore must be controlled from outside using the
+  /// DevTools Protocol.
+  Future<void> startPerformanceTracing(String benchmarkName) async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/start-performance-tracing?label=$benchmarkName',
+      method: 'POST',
+      mimeType: 'application/json',
+    );
+  }
+
+  /// Stops the performance tracing session started by [startPerformanceTracing].
+  Future<void> stopPerformanceTracing() async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/stop-performance-tracing',
+      method: 'POST',
+      mimeType: 'application/json',
+    );
+  }
+
+  /// Sends the profile data collected by the benchmark to the local benchmark
+  /// server.
+  Future<void> sendProfileData(Profile profile) async {
+    _checkNotManualMode();
+    final html.HttpRequest request = await html.HttpRequest.request(
+      '/profile-data',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(profile.toJson()),
+    );
+    if (request.status != 200) {
+      throw Exception('Failed to report profile data to benchmark server. '
+          'The server responded with status code ${request.status}.');
+    }
+  }
+
+  /// Reports an error to the benchmark server.
+  ///
+  /// The server will halt the devicelab task and log the error.
+  Future<void> reportError(dynamic error, StackTrace stackTrace) async {
+    _checkNotManualMode();
+    await html.HttpRequest.request(
+      '/on-error',
+      method: 'POST',
+      mimeType: 'application/json',
+      sendData: json.encode(<String, dynamic>{
+        'error': '$error',
+        'stackTrace': '$stackTrace',
+      }),
+    );
+  }
+
+  /// Sends a message to the benchmark server to be printed to the server's
+  /// console.
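+  ///
+  /// This is how `print` calls made inside a benchmark's zone reach the
+  /// server process when not in manual mode.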
+ Future printToConsole(String report) async { + _checkNotManualMode(); + await html.HttpRequest.request( + '/print-to-console', + method: 'POST', + mimeType: 'text/plain', + sendData: report, + ); + } + + /// This is the same as calling [html.HttpRequest.request] but it doesn't + /// crash on 404, which we use to detect `flutter run`. + Future _requestXhr( + String url, { + @required String method, + @required String mimeType, + @required dynamic sendData, + }) { + final Completer completer = Completer(); + final html.HttpRequest xhr = html.HttpRequest(); + method ??= 'GET'; + xhr.open(method, url, async: true); + xhr.overrideMimeType(mimeType); + xhr.onLoad.listen((html.ProgressEvent e) { + completer.complete(xhr); + }); + xhr.onError.listen(completer.completeError); + xhr.send(sendData); + return completer.future; + } +} diff --git a/packages/web_benchmarks/lib/server.dart b/packages/web_benchmarks/lib/server.dart new file mode 100644 index 00000000000..77300c0807f --- /dev/null +++ b/packages/web_benchmarks/lib/server.dart @@ -0,0 +1,62 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'dart:async'; +import 'dart:io' as io; + +import 'package:logging/logging.dart'; +import 'package:meta/meta.dart'; + +import 'src/benchmark_result.dart'; +import 'src/runner.dart'; + +export 'src/benchmark_result.dart'; + +/// The default port number used by the local benchmark server. +const int defaultBenchmarkServerPort = 9999; + +/// The default port number used for Chrome DevTool Protocol. +const int defaultChromeDebugPort = 10000; + +/// Builds and serves a Flutter Web app, collects raw benchmark data and +/// summarizes the result as a [BenchmarkResult]. +/// +/// [benchmarkAppDirectory] is the directory containing the app that's being +/// benchmarked. The app is expected to use `package:web_benchmarks/client.dart` +/// and call the `runBenchmarks` function to run the benchmarks. +/// +/// [entryPoint] is the path to the main app file that runs the benchmark. It +/// can be different (and typically is) from the production entry point of the +/// app. +/// +/// If [useCanvasKit] is true, builds the app in CanvasKit mode. +/// +/// [benchmarkServerPort] is the port this benchmark server serves the app on. +/// By default uses [defaultBenchmarkServerPort]. +/// +/// [chromeDebugPort] is the port Chrome uses for DevTool Protocol used to +/// extract tracing data. By default uses [defaultChromeDebugPort]. +/// +/// If [headless] is true, runs Chrome without UI. In particular, this is +/// useful in environments (e.g. CI) that doesn't have a display. +Future serveWebBenchmark({ + @required io.Directory benchmarkAppDirectory, + @required String entryPoint, + @required bool useCanvasKit, + int benchmarkServerPort = defaultBenchmarkServerPort, + int chromeDebugPort = defaultChromeDebugPort, + bool headless = true, +}) async { + // Reduce logging level. Otherwise, package:webkit_inspection_protocol is way too spammy. 
+ Logger.root.level = Level.INFO; + + return BenchmarkServer( + benchmarkAppDirectory: benchmarkAppDirectory, + entryPoint: entryPoint, + useCanvasKit: useCanvasKit, + benchmarkServerPort: benchmarkServerPort, + chromeDebugPort: chromeDebugPort, + headless: headless, + ).run(); +} diff --git a/packages/web_benchmarks/lib/src/benchmark_result.dart b/packages/web_benchmarks/lib/src/benchmark_result.dart new file mode 100644 index 00000000000..4f6c4584471 --- /dev/null +++ b/packages/web_benchmarks/lib/src/benchmark_result.dart @@ -0,0 +1,59 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'package:meta/meta.dart'; + +/// A single benchmark score value collected from the benchmark. +class BenchmarkScore { + /// Creates a benchmark score. + /// + /// [metric] and [value] must not be null. + BenchmarkScore({ + @required this.metric, + @required this.value, + }) : assert(metric != null && value != null); + + /// The name of the metric that this score is categorized under. + /// + /// Scores collected over time under the same name can be visualized as a + /// timeline. + final String metric; + + /// The result of measuring a particular metric in this benchmark run. + final num value; + + /// Serializes the benchmark metric to a JSON object. + Map toJson() { + return { + 'metric': metric, + 'value': value, + }; + } +} + +/// The result of running a benchmark. +class BenchmarkResults { + /// Constructs a result containing scores from a single run benchmark run. + BenchmarkResults(this.scores); + + /// Scores collected in a benchmark run. + final Map> scores; + + /// Serializes benchmark metrics to JSON. + Map>> toJson() { + return scores.map>>( + (String benchmarkName, List scores) { + return MapEntry>>( + benchmarkName, + scores + .map>( + (BenchmarkScore score) => { + 'metric': score.metric, + 'value': score.value, + }) + .toList(), + ); + }); + } +} diff --git a/packages/web_benchmarks/lib/src/browser.dart b/packages/web_benchmarks/lib/src/browser.dart new file mode 100644 index 00000000000..3f3db456c37 --- /dev/null +++ b/packages/web_benchmarks/lib/src/browser.dart @@ -0,0 +1,606 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'dart:async'; +import 'dart:convert' show json, utf8, LineSplitter, JsonEncoder; +import 'dart:io' as io; +import 'dart:math' as math; + +import 'package:path/path.dart' as path; +import 'package:meta/meta.dart'; +import 'package:webkit_inspection_protocol/webkit_inspection_protocol.dart'; + +import 'common.dart'; + +/// Options passed to Chrome when launching it. +class ChromeOptions { + /// Creates chrome options. + /// + /// [windowWidth], [windowHeight], and [headless] must not be null. + ChromeOptions({ + this.userDataDirectory, + this.url, + this.windowWidth = 1024, + this.windowHeight = 1024, + this.headless, + this.debugPort, + }); + + /// If not null passed as `--user-data-dir`. + final String userDataDirectory; + + /// If not null launches a Chrome tab at this URL. + final String url; + + /// The width of the Chrome window. + /// + /// This is important for screenshots and benchmarks. + final int windowWidth; + + /// The height of the Chrome window. + /// + /// This is important for screenshots and benchmarks. 
+ final int windowHeight; + + /// Launches code in "headless" mode, which allows running Chrome in + /// environments without a display, such as LUCI and Cirrus. + final bool headless; + + /// The port Chrome will use for its debugging protocol. + /// + /// If null, Chrome is launched without debugging. When running in headless + /// mode without a debug port, Chrome quits immediately. For most tests it is + /// typical to set [headless] to true and set a non-null debug port. + final int debugPort; +} + +/// A function called when the Chrome process encounters an error. +typedef ChromeErrorCallback = void Function(String); + +/// Manages a single Chrome process. +class Chrome { + Chrome._(this._chromeProcess, this._onError, this._debugConnection, + bool headless) { + if (headless) { + // In headless mode, if the Chrome process quits before it was asked to + // quit, notify the error listener. If it's not running headless, the + // developer may close the browser any time, so it's not considered to + // be an error. + _chromeProcess.exitCode.then((int exitCode) { + if (!_isStopped) { + _onError( + 'Chrome process exited prematurely with exit code $exitCode'); + } + }); + } + } + + /// Launches Chrome with the give [options]. + /// + /// The [onError] callback is called with an error message when the Chrome + /// process encounters an error. In particular, [onError] is called when the + /// Chrome process exits prematurely, i.e. before [stop] is called. + static Future launch(ChromeOptions options, + {String workingDirectory, @required ChromeErrorCallback onError}) async { + if (!io.Platform.isWindows) { + final io.ProcessResult versionResult = io.Process.runSync( + _findSystemChromeExecutable(), const ['--version']); + print('Launching ${versionResult.stdout}'); + } else { + print('Launching Chrome...'); + } + + final bool withDebugging = options.debugPort != null; + final List args = [ + if (options.userDataDirectory != null) + '--user-data-dir=${options.userDataDirectory}', + if (options.url != null) options.url, + if (io.Platform.environment['CHROME_NO_SANDBOX'] == 'true') + '--no-sandbox', + if (options.headless) '--headless', + if (withDebugging) '--remote-debugging-port=${options.debugPort}', + '--window-size=${options.windowWidth},${options.windowHeight}', + '--disable-extensions', + '--disable-popup-blocking', + // Indicates that the browser is in "browse without sign-in" (Guest session) mode. + '--bwsi', + '--no-first-run', + '--no-default-browser-check', + '--disable-default-apps', + '--disable-translate', + ]; + final io.Process chromeProcess = await io.Process.start( + _findSystemChromeExecutable(), + args, + workingDirectory: workingDirectory, + ); + + WipConnection debugConnection; + if (withDebugging) { + debugConnection = + await _connectToChromeDebugPort(chromeProcess, options.debugPort); + } + + return Chrome._(chromeProcess, onError, debugConnection, options.headless); + } + + final io.Process _chromeProcess; + final ChromeErrorCallback _onError; + final WipConnection _debugConnection; + bool _isStopped = false; + + Completer _tracingCompleter; + StreamSubscription _tracingSubscription; + List> _tracingData; + + /// Starts recording a performance trace. + /// + /// If there is already a tracing session in progress, throws an error. Call + /// [endRecordingPerformance] before starting a new tracing session. + /// + /// The [label] is for debugging convenience. 
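+  ///
+  /// A minimal sketch of a tracing session, assuming `chrome` is a [Chrome]
+  /// instance launched with a non-null [ChromeOptions.debugPort]:
+  ///
+  /// ```
+  /// await chrome.beginRecordingPerformance('my_benchmark');
+  /// // ... drive the page through the scenario being measured ...
+  /// final List<Map<String, dynamic>> trace =
+  ///     await chrome.endRecordingPerformance();
+  /// ```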
+ Future beginRecordingPerformance(String label) async { + if (_tracingCompleter != null) { + throw StateError( + 'Cannot start a new performance trace. A tracing session labeled ' + '"$label" is already in progress.'); + } + _tracingCompleter = Completer(); + _tracingData = >[]; + + // Subscribe to tracing events prior to calling "Tracing.start". Otherwise, + // we'll miss tracing data. + _tracingSubscription = + _debugConnection.onNotification.listen((WipEvent event) { + // We receive data as a sequence of "Tracing.dataCollected" followed by + // "Tracing.tracingComplete" at the end. Until "Tracing.tracingComplete" + // is received, the data may be incomplete. + if (event.method == 'Tracing.tracingComplete') { + _tracingCompleter.complete(); + _tracingSubscription.cancel(); + _tracingSubscription = null; + } else if (event.method == 'Tracing.dataCollected') { + final dynamic value = event.params['value']; + if (value is! List) { + throw FormatException( + '"Tracing.dataCollected" returned malformed data. ' + 'Expected a List but got: ${value.runtimeType}'); + } + _tracingData.addAll(event.params['value'].cast>()); + } + }); + await _debugConnection.sendCommand('Tracing.start', { + // The choice of categories is as follows: + // + // blink: + // provides everything on the UI thread, including scripting, + // style recalculations, layout, painting, and some compositor + // work. + // blink.user_timing: + // provides marks recorded using window.performance. We use marks + // to find frames that the benchmark cares to measure. + // gpu: + // provides tracing data from the GPU data + // disabled due to https://bugs.chromium.org/p/chromium/issues/detail?id=1068259 + // TODO(yjbanov): extract useful GPU data + 'categories': 'blink,blink.user_timing', + 'transferMode': 'SendAsStream', + }); + } + + /// Stops a performance tracing session started by [beginRecordingPerformance]. + /// + /// Returns all the collected tracing data unfiltered. + Future>> endRecordingPerformance() async { + await _debugConnection.sendCommand('Tracing.end'); + await _tracingCompleter.future; + final List> data = _tracingData; + _tracingCompleter = null; + _tracingData = null; + return data; + } + + /// Stops the Chrome process. + void stop() { + _isStopped = true; + _chromeProcess.kill(); + } + + /// Resolves when the Chrome process exits. + Future get whenExits async { + await _chromeProcess.exitCode; + } +} + +String _findSystemChromeExecutable() { + // On some environments, such as the Dart HHH tester, Chrome resides in a + // non-standard location and is provided via the following environment + // variable. 
+ final String envExecutable = io.Platform.environment['CHROME_EXECUTABLE']; + if (envExecutable != null) { + return envExecutable; + } + + if (io.Platform.isLinux) { + final io.ProcessResult which = + io.Process.runSync('which', ['google-chrome']); + + if (which.exitCode != 0) { + throw Exception('Failed to locate system Chrome installation.'); + } + + final String output = which.stdout; + return output.trim(); + } else if (io.Platform.isMacOS) { + return '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'; + } else if (io.Platform.isWindows) { + const String kWindowsExecutable = r'Google\Chrome\Application\chrome.exe'; + final List kWindowsPrefixes = [ + io.Platform.environment['LOCALAPPDATA'], + io.Platform.environment['PROGRAMFILES'], + io.Platform.environment['PROGRAMFILES(X86)'], + ]; + final String windowsPrefix = kWindowsPrefixes.firstWhere((String prefix) { + if (prefix == null) { + return false; + } + final String expectedPath = path.join(prefix, kWindowsExecutable); + return io.File(expectedPath).existsSync(); + }, orElse: () => '.'); + return path.join(windowsPrefix, kWindowsExecutable); + } else { + throw Exception( + 'Web benchmarks cannot run on ${io.Platform.operatingSystem}.'); + } +} + +/// Waits for Chrome to print DevTools URI and connects to it. +Future _connectToChromeDebugPort( + io.Process chromeProcess, int port) async { + chromeProcess.stdout + .transform(utf8.decoder) + .transform(const LineSplitter()) + .listen((String line) { + print('[CHROME]: $line'); + }); + + await chromeProcess.stderr + .transform(utf8.decoder) + .transform(const LineSplitter()) + .map((String line) { + print('[CHROME]: $line'); + return line; + }).firstWhere((String line) => line.startsWith('DevTools listening'), + orElse: () { + throw Exception('Expected Chrome to print "DevTools listening" string ' + 'with DevTools URL, but the string was never printed.'); + }); + + final Uri devtoolsUri = + await _getRemoteDebuggerUrl(Uri.parse('http://localhost:$port')); + print('Connecting to DevTools: $devtoolsUri'); + final ChromeConnection chromeConnection = ChromeConnection('localhost', port); + final Iterable tabs = + (await chromeConnection.getTabs()).where((ChromeTab tab) { + return tab.url.startsWith('http://localhost'); + }); + final ChromeTab tab = tabs.single; + final WipConnection debugConnection = await tab.connect(); + print('Connected to Chrome tab: ${tab.title} (${tab.url})'); + return debugConnection; +} + +/// Gets the Chrome debugger URL for the web page being benchmarked. +Future _getRemoteDebuggerUrl(Uri base) async { + final io.HttpClient client = io.HttpClient(); + final io.HttpClientRequest request = + await client.getUrl(base.resolve('/json/list')); + final io.HttpClientResponse response = await request.close(); + final List jsonObject = + await json.fuse(utf8).decoder.bind(response).single; + if (jsonObject == null || jsonObject.isEmpty) { + return base; + } + return base.resolve(jsonObject.first['webSocketDebuggerUrl']); +} + +/// Summarizes a Blink trace down to a few interesting values. +class BlinkTraceSummary { + BlinkTraceSummary._({ + @required this.averageBeginFrameTime, + @required this.averageUpdateLifecyclePhasesTime, + }) : averageTotalUIFrameTime = + averageBeginFrameTime + averageUpdateLifecyclePhasesTime; + + /// Summarizes Blink trace from the raw JSON trace. + static BlinkTraceSummary fromJson(List> traceJson) { + try { + // Convert raw JSON data to BlinkTraceEvent objects sorted by timestamp. 
+ List events = traceJson + .map(BlinkTraceEvent.fromJson) + .toList() + ..sort((BlinkTraceEvent a, BlinkTraceEvent b) => a.ts - b.ts); + + Exception noMeasuredFramesFound() => Exception( + 'No measured frames found in benchmark tracing data. This likely ' + 'indicates a bug in the benchmark. For example, the benchmark failed ' + 'to pump enough frames. It may also indicate a change in Chrome\'s ' + 'tracing data format. Check if Chrome version changed recently and ' + 'adjust the parsing code accordingly.', + ); + + // Use the pid from the first "measured_frame" event since the event is + // emitted by the script running on the process we're interested in. + // + // We previously tried using the "CrRendererMain" event. However, for + // reasons unknown, Chrome in the devicelab refuses to emit this event + // sometimes, causing to flakes. + final BlinkTraceEvent firstMeasuredFrameEvent = events.firstWhere( + (BlinkTraceEvent event) => event.isBeginMeasuredFrame, + orElse: () => throw noMeasuredFramesFound(), + ); + + if (firstMeasuredFrameEvent == null) { + // This happens in benchmarks that do not measure frames, such as some + // of the text layout benchmarks. + return null; + } + + final int tabPid = firstMeasuredFrameEvent.pid; + + // Filter out data from unrelated processes + events = events + .where((BlinkTraceEvent element) => element.pid == tabPid) + .toList(); + + // Extract frame data. + final List frames = []; + int skipCount = 0; + BlinkFrame frame = BlinkFrame(); + for (final BlinkTraceEvent event in events) { + if (event.isBeginFrame) { + frame.beginFrame = event; + } else if (event.isUpdateAllLifecyclePhases) { + frame.updateAllLifecyclePhases = event; + if (frame.endMeasuredFrame != null) { + frames.add(frame); + } else { + skipCount += 1; + } + frame = BlinkFrame(); + } else if (event.isBeginMeasuredFrame) { + frame.beginMeasuredFrame = event; + } else if (event.isEndMeasuredFrame) { + frame.endMeasuredFrame = event; + } + } + + print('Extracted ${frames.length} measured frames.'); + print('Skipped $skipCount non-measured frames.'); + + if (frames.isEmpty) { + throw noMeasuredFramesFound(); + } + + // Compute averages and summarize. + return BlinkTraceSummary._( + averageBeginFrameTime: _computeAverageDuration( + frames.map((BlinkFrame frame) => frame.beginFrame).toList()), + averageUpdateLifecyclePhasesTime: _computeAverageDuration(frames + .map((BlinkFrame frame) => frame.updateAllLifecyclePhases) + .toList()), + ); + } catch (_, __) { + final io.File traceFile = io.File('./chrome-trace.json'); + io.stderr.writeln( + 'Failed to interpret the Chrome trace contents. The trace was saved in ${traceFile.path}'); + traceFile.writeAsStringSync( + const JsonEncoder.withIndent(' ').convert(traceJson)); + rethrow; + } + } + + /// The average duration of "WebViewImpl::beginFrame" events. + /// + /// This event contains all of scripting time of an animation frame, plus an + /// unknown small amount of work browser does before and after scripting. + final Duration averageBeginFrameTime; + + /// The average duration of "WebViewImpl::updateAllLifecyclePhases" events. + /// + /// This event contains style, layout, painting, and compositor computations, + /// which are not included in the scripting time. This event does not + /// include GPU time, which happens on a separate thread. + final Duration averageUpdateLifecyclePhasesTime; + + /// The average sum of [averageBeginFrameTime] and + /// [averageUpdateLifecyclePhasesTime]. 
+ /// + /// This value contains the vast majority of work the UI thread performs in + /// any given animation frame. + final Duration averageTotalUIFrameTime; + + @override + String toString() => '$BlinkTraceSummary(' + 'averageBeginFrameTime: ${averageBeginFrameTime.inMicroseconds / 1000}ms, ' + 'averageUpdateLifecyclePhasesTime: ${averageUpdateLifecyclePhasesTime.inMicroseconds / 1000}ms)'; +} + +/// Contains events pertaining to a single frame in the Blink trace data. +class BlinkFrame { + /// Corresponds to 'WebViewImpl::beginFrame' event. + BlinkTraceEvent beginFrame; + + /// Corresponds to 'WebViewImpl::updateAllLifecyclePhases' event. + BlinkTraceEvent updateAllLifecyclePhases; + + /// Corresponds to 'measured_frame' begin event. + BlinkTraceEvent beginMeasuredFrame; + + /// Corresponds to 'measured_frame' end event. + BlinkTraceEvent endMeasuredFrame; +} + +/// Takes a list of events that have non-null [BlinkTraceEvent.tdur] computes +/// their average as a [Duration] value. +Duration _computeAverageDuration(List events) { + // Compute the sum of "tdur" fields of the last kMeasuredSampleCount events. + final double sum = events + .skip(math.max(events.length - kMeasuredSampleCount, 0)) + .fold(0.0, (double previousValue, BlinkTraceEvent event) { + if (event.tdur == null) { + throw FormatException('Trace event lacks "tdur" field: $event'); + } + return previousValue + event.tdur; + }); + final int sampleCount = math.min(events.length, kMeasuredSampleCount); + return Duration(microseconds: sum ~/ sampleCount); +} + +/// An event collected by the Blink tracer (in Chrome accessible using chrome://tracing). +/// +/// See also: +/// * https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview +class BlinkTraceEvent { + BlinkTraceEvent._({ + @required this.args, + @required this.cat, + @required this.name, + @required this.ph, + @required this.pid, + @required this.tid, + @required this.ts, + @required this.tts, + @required this.tdur, + }); + + /// Parses an event from its JSON representation. + /// + /// Sample event encoded as JSON (the data is bogus, this just shows the format): + /// + /// ``` + /// { + /// "name": "myName", + /// "cat": "category,list", + /// "ph": "B", + /// "ts": 12345, + /// "pid": 123, + /// "tid": 456, + /// "args": { + /// "someArg": 1, + /// "anotherArg": { + /// "value": "my value" + /// } + /// } + /// } + /// ``` + /// + /// For detailed documentation of the format see: + /// + /// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview + static BlinkTraceEvent fromJson(Map json) { + return BlinkTraceEvent._( + args: json['args'], + cat: json['cat'], + name: json['name'], + ph: json['ph'], + pid: _readInt(json, 'pid'), + tid: _readInt(json, 'tid'), + ts: _readInt(json, 'ts'), + tts: _readInt(json, 'tts'), + tdur: _readInt(json, 'tdur'), + ); + } + + /// Event-specific data. + final Map args; + + /// Event category. + final String cat; + + /// Event name. + final String name; + + /// Event "phase". + final String ph; + + /// Process ID of the process that emitted the event. + final int pid; + + /// Thread ID of the thread that emitted the event. + final int tid; + + /// Timestamp in microseconds using tracer clock. + final int ts; + + /// Timestamp in microseconds using thread clock. + final int tts; + + /// Event duration in microseconds. 
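+  ///
+  /// Only complete duration events (`ph == 'X'`) are expected to populate
+  /// this field; for other phases it may be null.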
+ final int tdur; + + /// A "begin frame" event contains all of the scripting time of an animation + /// frame (JavaScript, WebAssembly), plus a negligible amount of internal + /// browser overhead. + /// + /// This event does not include non-UI thread scripting, such as web workers, + /// service workers, and CSS Paint paintlets. + /// + /// This event is a duration event that has its `tdur` populated. + bool get isBeginFrame => ph == 'X' && name == 'WebViewImpl::beginFrame'; + + /// An "update all lifecycle phases" event contains UI thread computations + /// related to an animation frame that's outside the scripting phase. + /// + /// This event includes style recalculation, layer tree update, layout, + /// painting, and parts of compositing work. + /// + /// This event is a duration event that has its `tdur` populated. + bool get isUpdateAllLifecyclePhases => + ph == 'X' && name == 'WebViewImpl::updateAllLifecyclePhases'; + + /// Whether this is the beginning of a "measured_frame" event. + /// + /// This event is a custom event emitted by our benchmark test harness. + /// + /// See also: + /// * `recorder.dart`, which emits this event. + bool get isBeginMeasuredFrame => ph == 'b' && name == 'measured_frame'; + + /// Whether this is the end of a "measured_frame" event. + /// + /// This event is a custom event emitted by our benchmark test harness. + /// + /// See also: + /// * `recorder.dart`, which emits this event. + bool get isEndMeasuredFrame => ph == 'e' && name == 'measured_frame'; + + @override + String toString() => '$BlinkTraceEvent(' + 'args: ${json.encode(args)}, ' + 'cat: $cat, ' + 'name: $name, ' + 'ph: $ph, ' + 'pid: $pid, ' + 'tid: $tid, ' + 'ts: $ts, ' + 'tts: $tts, ' + 'tdur: $tdur)'; +} + +/// Read an integer out of [json] stored under [key]. +/// +/// Since JSON does not distinguish between `int` and `double`, extra +/// validation and conversion is needed. +/// +/// Returns null if the value is null. +int _readInt(Map json, String key) { + final num jsonValue = json[key]; + + if (jsonValue == null) { + return null; + } + + return jsonValue.toInt(); +} diff --git a/packages/web_benchmarks/lib/src/common.dart b/packages/web_benchmarks/lib/src/common.dart new file mode 100644 index 00000000000..4792bc3f2ec --- /dev/null +++ b/packages/web_benchmarks/lib/src/common.dart @@ -0,0 +1,12 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +/// This library contains code that's common between the client and the server. +/// +/// The code must be compilable both as a command-line program and as a web +/// program. +library web_benchmarks.common; + +/// The number of samples we use to collect statistics from. +const int kMeasuredSampleCount = 100; diff --git a/packages/web_benchmarks/lib/src/recorder.dart b/packages/web_benchmarks/lib/src/recorder.dart new file mode 100644 index 00000000000..5c81c2a18ce --- /dev/null +++ b/packages/web_benchmarks/lib/src/recorder.dart @@ -0,0 +1,1237 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +import 'dart:async'; +import 'dart:html' as html; +import 'dart:js_util' as js_util; +import 'dart:math' as math; +import 'dart:ui'; + +import 'package:meta/meta.dart'; + +import 'package:flutter/gestures.dart'; +import 'package:flutter/foundation.dart'; +import 'package:flutter/services.dart'; +import 'package:flutter/scheduler.dart'; +import 'package:flutter/rendering.dart'; +import 'package:flutter/widgets.dart'; + +import 'common.dart'; + +/// The number of samples from warm-up iterations. +/// +/// We warm-up the benchmark prior to measuring to allow JIT and caches to settle. +const int _kWarmUpSampleCount = 200; + +/// The total number of samples collected by a benchmark. +const int kTotalSampleCount = _kWarmUpSampleCount + kMeasuredSampleCount; + +/// A benchmark metric that includes frame-related computations prior to +/// submitting layer and picture operations to the underlying renderer, such as +/// HTML and CanvasKit. During this phase we compute transforms, clips, and +/// other information needed for rendering. +const String kProfilePrerollFrame = 'preroll_frame'; + +/// A benchmark metric that includes submitting layer and picture information +/// to the renderer. +const String kProfileApplyFrame = 'apply_frame'; + +/// Measures the amount of time [action] takes. +Duration timeAction(VoidCallback action) { + final Stopwatch stopwatch = Stopwatch()..start(); + action(); + stopwatch.stop(); + return stopwatch.elapsed; +} + +/// A function that performs asynchronous work. +typedef AsyncVoidCallback = Future Function(); + +/// An [AsyncVoidCallback] that doesn't do anything. +/// +/// This is used just so we don't have to deal with null all over the place. +Future _dummyAsyncVoidCallback() async {} + +/// Runs the benchmark using the given [recorder]. +/// +/// Notifies about "set up" and "tear down" events via the [setUpAllDidRun] +/// and [tearDownAllWillRun] callbacks. +@sealed +class Runner { + /// Creates a runner for the [recorder]. + /// + /// All arguments must not be null. + Runner({ + @required this.recorder, + this.setUpAllDidRun = _dummyAsyncVoidCallback, + this.tearDownAllWillRun = _dummyAsyncVoidCallback, + }); + + /// The recorder that will run and record the benchmark. + final Recorder recorder; + + /// Called immediately after [Recorder.setUpAll] future is resolved. + /// + /// This is useful, for example, to kick off a profiler or a tracer such that + /// the "set up" computations are not included in the metrics. + final AsyncVoidCallback setUpAllDidRun; + + /// Called just before calling [Recorder.tearDownAll]. + /// + /// This is useful, for example, to stop a profiler or a tracer such that + /// the "tear down" computations are not included in the metrics. + final AsyncVoidCallback tearDownAllWillRun; + + /// Runs the benchmark and reports the results. + Future run() async { + await recorder.setUpAll(); + await setUpAllDidRun(); + final Profile profile = await recorder.run(); + await tearDownAllWillRun(); + await recorder.tearDownAll(); + return profile; + } +} + +/// Base class for benchmark recorders. +/// +/// Each benchmark recorder has a [name] and a [run] method at a minimum. +abstract class Recorder { + Recorder._(this.name, this.isTracingEnabled); + + /// Whether this recorder requires tracing using Chrome's DevTools Protocol's + /// "Tracing" API. + final bool isTracingEnabled; + + /// The name of the benchmark. + /// + /// The results displayed in the Flutter Dashboard will use this name as a + /// prefix. 
+ final String name; + + /// Returns the recorded profile. + /// + /// This value is only available while the benchmark is running. + Profile get profile; + + /// Whether the benchmark should continue running. + /// + /// Returns `false` if the benchmark collected enough data and it's time to + /// stop. + bool shouldContinue() => profile.shouldContinue(); + + /// Called once before all runs of this benchmark recorder. + /// + /// This is useful for doing one-time setup work that's needed for the + /// benchmark. + Future setUpAll() async {} + + /// The implementation of the benchmark that will produce a [Profile]. + Future run(); + + /// Called once after all runs of this benchmark recorder. + /// + /// This is useful for doing one-time clean up work after the benchmark is + /// complete. + Future tearDownAll() async {} +} + +/// A recorder for benchmarking raw execution of Dart code. +/// +/// This is useful for benchmarks that don't need frames or widgets. +/// +/// Example: +/// +/// ``` +/// class BenchForLoop extends RawRecorder { +/// BenchForLoop() : super(name: benchmarkName); +/// +/// static const String benchmarkName = 'for_loop'; +/// +/// @override +/// void body(Profile profile) { +/// profile.record('loop', () { +/// double x = 0; +/// for (int i = 0; i < 10000000; i++) { +/// x *= 1.5; +/// } +/// }); +/// } +/// } +/// ``` +abstract class RawRecorder extends Recorder { + /// Creates a raw benchmark recorder with a name. + /// + /// [name] must not be null. + RawRecorder({@required String name}) : super._(name, false); + + /// The body of the benchmark. + /// + /// This is the part that records measurements of the benchmark. + void body(Profile profile); + + @override + Profile get profile => _profile; + Profile _profile; + + @override + @nonVirtual + Future run() async { + _profile = Profile(name: name); + do { + await Future.delayed(Duration.zero); + body(_profile); + } while (shouldContinue()); + return _profile; + } +} + +/// A recorder for benchmarking interactions with the engine without the +/// framework by directly exercising [SceneBuilder]. +/// +/// To implement a benchmark, extend this class and implement [onDrawFrame]. +/// +/// Example: +/// +/// ``` +/// class BenchDrawCircle extends SceneBuilderRecorder { +/// BenchDrawCircle() : super(name: benchmarkName); +/// +/// static const String benchmarkName = 'draw_circle'; +/// +/// @override +/// void onDrawFrame(SceneBuilder sceneBuilder) { +/// final PictureRecorder pictureRecorder = PictureRecorder(); +/// final Canvas canvas = Canvas(pictureRecorder); +/// final Paint paint = Paint()..color = const Color.fromARGB(255, 255, 0, 0); +/// final Size windowSize = window.physicalSize; +/// canvas.drawCircle(windowSize.center(Offset.zero), 50.0, paint); +/// final Picture picture = pictureRecorder.endRecording(); +/// sceneBuilder.addPicture(picture); +/// } +/// } +/// ``` +abstract class SceneBuilderRecorder extends Recorder { + /// Creates a [SceneBuilder] benchmark recorder. + /// + /// [name] must not be null. + SceneBuilderRecorder({@required String name}) : super._(name, true); + + @override + Profile get profile => _profile; + Profile _profile; + + /// Called from [Window.onBeginFrame]. + @mustCallSuper + void onBeginFrame() {} + + /// Called on every frame. + /// + /// An implementation should exercise the [sceneBuilder] to build a frame. + /// However, it must not call [SceneBuilder.build] or [Window.render]. + /// Instead the benchmark harness will call them and time them appropriately. 
+ void onDrawFrame(SceneBuilder sceneBuilder); + + @override + Future run() { + final Completer profileCompleter = Completer(); + _profile = Profile(name: name); + + window.onBeginFrame = (_) { + try { + startMeasureFrame(profile); + onBeginFrame(); + } catch (error, stackTrace) { + profileCompleter.completeError(error, stackTrace); + rethrow; + } + }; + window.onDrawFrame = () { + try { + _profile.record('drawFrameDuration', () { + final SceneBuilder sceneBuilder = SceneBuilder(); + onDrawFrame(sceneBuilder); + _profile.record('sceneBuildDuration', () { + final Scene scene = sceneBuilder.build(); + _profile.record('windowRenderDuration', () { + window.render(scene); + }, reported: false); + }, reported: false); + }, reported: true); + endMeasureFrame(); + + if (shouldContinue()) { + window.scheduleFrame(); + } else { + profileCompleter.complete(_profile); + } + } catch (error, stackTrace) { + profileCompleter.completeError(error, stackTrace); + rethrow; + } + }; + window.scheduleFrame(); + return profileCompleter.future; + } +} + +/// A recorder for benchmarking interactions with the framework by creating +/// widgets. +/// +/// To implement a benchmark, extend this class and implement [createWidget]. +/// +/// Example: +/// +/// ``` +/// class BenchListView extends WidgetRecorder { +/// BenchListView() : super(name: benchmarkName); +/// +/// static const String benchmarkName = 'bench_list_view'; +/// +/// @override +/// Widget createWidget() { +/// return Directionality( +/// textDirection: TextDirection.ltr, +/// child: _TestListViewWidget(), +/// ); +/// } +/// } +/// +/// class _TestListViewWidget extends StatefulWidget { +/// @override +/// State createState() { +/// return _TestListViewWidgetState(); +/// } +/// } +/// +/// class _TestListViewWidgetState extends State<_TestListViewWidget> { +/// ScrollController scrollController; +/// +/// @override +/// void initState() { +/// super.initState(); +/// scrollController = ScrollController(); +/// Timer.run(() async { +/// bool forward = true; +/// while (true) { +/// await scrollController.animateTo( +/// forward ? 300 : 0, +/// curve: Curves.linear, +/// duration: const Duration(seconds: 1), +/// ); +/// forward = !forward; +/// } +/// }); +/// } +/// +/// @override +/// Widget build(BuildContext context) { +/// return ListView.builder( +/// controller: scrollController, +/// itemCount: 10000, +/// itemBuilder: (BuildContext context, int index) { +/// return Text('Item #$index'); +/// }, +/// ); +/// } +/// } +/// ``` +abstract class WidgetRecorder extends Recorder implements FrameRecorder { + /// Creates a widget benchmark recorder. + /// + /// [name] must not be null. + /// + /// If [useCustomWarmUp] is true, delegates the benchmark warm-up to the + /// benchmark implementation instead of using a built-in strategy. The + /// benchmark is expected to call [Profile.stopWarmingUp] to signal that + /// the warm-up phase is finished. + WidgetRecorder({ + @required String name, + this.useCustomWarmUp = false, + }) : super._(name, true); + + /// Creates a widget to be benchmarked. + /// + /// The widget must create its own animation to drive the benchmark. The + /// animation should continue indefinitely. The benchmark harness will stop + /// pumping frames automatically. + Widget createWidget(); + + final List _didStopCallbacks = []; + @override + void registerDidStop(VoidCallback fn) { + _didStopCallbacks.add(fn); + } + + @override + Profile profile; + Completer _runCompleter; + + /// Whether to delimit warm-up frames in a custom way. 
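+  ///
+  /// When true, the benchmark is expected to call [Profile.stopWarmingUp] to
+  /// signal that the warm-up phase is finished; otherwise the built-in
+  /// warm-up strategy is used.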
+ final bool useCustomWarmUp; + + Stopwatch _drawFrameStopwatch; + + @override + @mustCallSuper + void frameWillDraw() { + startMeasureFrame(profile); + _drawFrameStopwatch = Stopwatch()..start(); + } + + @override + @mustCallSuper + void frameDidDraw() { + endMeasureFrame(); + profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed, + reported: true); + + if (shouldContinue()) { + window.scheduleFrame(); + } else { + for (final VoidCallback fn in _didStopCallbacks) { + fn(); + } + _runCompleter.complete(); + } + } + + @override + void _onError(dynamic error, StackTrace stackTrace) { + _runCompleter.completeError(error, stackTrace); + } + + @override + Future run() async { + _runCompleter = Completer(); + final Profile localProfile = + profile = Profile(name: name, useCustomWarmUp: useCustomWarmUp); + final _RecordingWidgetsBinding binding = + _RecordingWidgetsBinding.ensureInitialized(); + final Widget widget = createWidget(); + + registerEngineBenchmarkValueListener(kProfilePrerollFrame, (num value) { + localProfile.addDataPoint( + kProfilePrerollFrame, + Duration(microseconds: value.toInt()), + reported: false, + ); + }); + registerEngineBenchmarkValueListener(kProfileApplyFrame, (num value) { + localProfile.addDataPoint( + kProfileApplyFrame, + Duration(microseconds: value.toInt()), + reported: false, + ); + }); + + binding._beginRecording(this, widget); + + try { + await _runCompleter.future; + return localProfile; + } finally { + stopListeningToEngineBenchmarkValues(kProfilePrerollFrame); + stopListeningToEngineBenchmarkValues(kProfileApplyFrame); + _runCompleter = null; + profile = null; + } + } +} + +/// A recorder for measuring the performance of building a widget from scratch +/// starting from an empty frame. +/// +/// The recorder will call [createWidget] and render it, then it will pump +/// another frame that clears the screen. It repeats this process, measuring the +/// performance of frames that render the widget and ignoring the frames that +/// clear the screen. +abstract class WidgetBuildRecorder extends Recorder implements FrameRecorder { + /// Creates a widget build benchmark recorder. + /// + /// [name] must not be null. + WidgetBuildRecorder({@required String name}) : super._(name, true); + + /// Creates a widget to be benchmarked. + /// + /// The widget is not expected to animate as we only care about construction + /// of the widget. If you are interested in benchmarking an animation, + /// consider using [WidgetRecorder]. + Widget createWidget(); + + final List _didStopCallbacks = []; + @override + void registerDidStop(VoidCallback fn) { + _didStopCallbacks.add(fn); + } + + @override + Profile profile; + Completer _runCompleter; + + Stopwatch _drawFrameStopwatch; + + /// Whether in this frame we should call [createWidget] and render it. + /// + /// If false, then this frame will clear the screen. + bool showWidget = true; + + /// The state that hosts the widget under test. + _WidgetBuildRecorderHostState _hostState; + + Widget _getWidgetForFrame() { + if (showWidget) { + return createWidget(); + } else { + return null; + } + } + + @override + @mustCallSuper + void frameWillDraw() { + if (showWidget) { + startMeasureFrame(profile); + _drawFrameStopwatch = Stopwatch()..start(); + } + } + + @override + @mustCallSuper + void frameDidDraw() { + // Only record frames that show the widget. 
+ if (showWidget) { + endMeasureFrame(); + profile.addDataPoint('drawFrameDuration', _drawFrameStopwatch.elapsed, + reported: true); + } + + if (shouldContinue()) { + showWidget = !showWidget; + _hostState._setStateTrampoline(); + } else { + for (final VoidCallback fn in _didStopCallbacks) { + fn(); + } + _runCompleter.complete(); + } + } + + @override + void _onError(dynamic error, StackTrace stackTrace) { + _runCompleter.completeError(error, stackTrace); + } + + @override + Future run() async { + _runCompleter = Completer(); + final Profile localProfile = profile = Profile(name: name); + final _RecordingWidgetsBinding binding = + _RecordingWidgetsBinding.ensureInitialized(); + binding._beginRecording(this, _WidgetBuildRecorderHost(this)); + + try { + await _runCompleter.future; + return localProfile; + } finally { + _runCompleter = null; + profile = null; + } + } +} + +/// Hosts widgets created by [WidgetBuildRecorder]. +class _WidgetBuildRecorderHost extends StatefulWidget { + const _WidgetBuildRecorderHost(this.recorder); + + final WidgetBuildRecorder recorder; + + @override + State createState() => + recorder._hostState = _WidgetBuildRecorderHostState(); +} + +class _WidgetBuildRecorderHostState extends State<_WidgetBuildRecorderHost> { + // This is just to bypass the @protected on setState. + void _setStateTrampoline() { + setState(() {}); + } + + @override + Widget build(BuildContext context) { + return SizedBox.expand( + child: widget.recorder._getWidgetForFrame(), + ); + } +} + +/// Series of time recordings indexed in time order. +/// +/// It can calculate [average], [standardDeviation] and [noise]. If the amount +/// of data collected is higher than [_kMeasuredSampleCount], then these +/// calculations will only apply to the latest [_kMeasuredSampleCount] data +/// points. +class Timeseries { + /// Creates an empty timeseries. + /// + /// [name], [isReported], and [useCustomWarmUp] must not be null. + Timeseries(this.name, this.isReported, {this.useCustomWarmUp = false}) + : _warmUpFrameCount = useCustomWarmUp ? 0 : null; + + /// The label of this timeseries used for debugging and result inspection. + final String name; + + /// Whether this timeseries is reported to the benchmark dashboard. + /// + /// If `true` a new benchmark card is created for the timeseries and is + /// visible on the dashboard. + /// + /// If `false` the data is stored but it does not show up on the dashboard. + /// Use unreported metrics for metrics that are useful for manual inspection + /// but that are too fine-grained to be useful for tracking on the dashboard. + final bool isReported; + + /// Whether to delimit warm-up frames in a custom way. + final bool useCustomWarmUp; + + /// The number of frames ignored as warm-up frames, used only + /// when [useCustomWarmUp] is true. + int _warmUpFrameCount; + + /// The number of frames ignored as warm-up frames. + int get warmUpFrameCount => + useCustomWarmUp ? _warmUpFrameCount : count - kMeasuredSampleCount; + + /// List of all the values that have been recorded. + /// + /// This list has no limit. + final List _allValues = []; + + /// The total amount of data collected, including ones that were dropped + /// because of the sample size limit. + int get count => _allValues.length; + + /// Extracts useful statistics out of this timeseries. + /// + /// See [TimeseriesStats] for more details. 
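+  ///
+  /// A minimal usage sketch. Custom warm-up is used so that stats can be
+  /// computed over a handful of hand-fed samples (the default strategy
+  /// assumes at least [kMeasuredSampleCount] measured samples):
+  ///
+  /// ```
+  /// final Timeseries series =
+  ///     Timeseries('frame_time', true, useCustomWarmUp: true);
+  /// series.add(16.6, isWarmUpValue: false);
+  /// series.add(17.1, isWarmUpValue: false);
+  /// final TimeseriesStats stats = series.computeStats();
+  /// print(stats); // average, noise, outlier counts, etc.
+  /// ```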
+ TimeseriesStats computeStats() {
+ final int finalWarmUpFrameCount = warmUpFrameCount;
+
+ assert(finalWarmUpFrameCount >= 0 && finalWarmUpFrameCount < count);
+
+ // The first few values we simply discard and never look at. They're from the warm-up phase.
+ final List warmUpValues =
+ _allValues.sublist(0, finalWarmUpFrameCount);
+
+ // Values we analyze.
+ final List candidateValues =
+ _allValues.sublist(finalWarmUpFrameCount);
+
+ // The average that includes outliers.
+ final double dirtyAverage = _computeAverage(name, candidateValues);
+
+ // The standard deviation that includes outliers.
+ final double dirtyStandardDeviation =
+ _computeStandardDeviationForPopulation(name, candidateValues);
+
+ // Any value that's higher than this is considered an outlier.
+ final double outlierCutOff = dirtyAverage + dirtyStandardDeviation;
+
+ // Candidates with outliers removed.
+ final Iterable cleanValues =
+ candidateValues.where((double value) => value <= outlierCutOff);
+
+ // Outlier candidates.
+ final Iterable outliers =
+ candidateValues.where((double value) => value > outlierCutOff);
+
+ // Final statistics.
+ final double cleanAverage = _computeAverage(name, cleanValues);
+ final double standardDeviation =
+ _computeStandardDeviationForPopulation(name, cleanValues);
+ final double noise =
+ cleanAverage > 0.0 ? standardDeviation / cleanAverage : 0.0;
+
+ // Compute the outlier average. If there are no outliers, the outlier
+ // average is the same as the clean value average. In other words, in a
+ // perfect benchmark with no noise the difference between the average and
+ // the outlier average is zero, which is the best possible outcome. Noise
+ // produces a positive difference between the two.
+ final double outlierAverage =
+ outliers.isNotEmpty ? _computeAverage(name, outliers) : cleanAverage;
+
+ final List annotatedValues = [
+ for (final double warmUpValue in warmUpValues)
+ AnnotatedSample(
+ magnitude: warmUpValue,
+ isOutlier: warmUpValue > outlierCutOff,
+ isWarmUpValue: true,
+ ),
+ for (final double candidate in candidateValues)
+ AnnotatedSample(
+ magnitude: candidate,
+ isOutlier: candidate > outlierCutOff,
+ isWarmUpValue: false,
+ ),
+ ];
+
+ return TimeseriesStats(
+ name: name,
+ average: cleanAverage,
+ outlierCutOff: outlierCutOff,
+ outlierAverage: outlierAverage,
+ standardDeviation: standardDeviation,
+ noise: noise,
+ cleanSampleCount: cleanValues.length,
+ outlierSampleCount: outliers.length,
+ samples: annotatedValues,
+ );
+ }
+
+ /// Adds a value to this timeseries.
+ void add(double value, {@required bool isWarmUpValue}) {
+ if (value < 0.0) {
+ throw StateError(
+ 'Timeseries $name: negative metric values are not supported. Got: $value',
+ );
+ }
+ _allValues.add(value);
+ if (useCustomWarmUp && isWarmUpValue) {
+ _warmUpFrameCount += 1;
+ }
+ }
+}
+
+/// Various statistics about a [Timeseries].
+///
+/// See the docs on the individual fields for more details.
+@sealed
+class TimeseriesStats {
+ /// Creates statistics for a time series.
+ const TimeseriesStats({
+ @required this.name,
+ @required this.average,
+ @required this.outlierCutOff,
+ @required this.outlierAverage,
+ @required this.standardDeviation,
+ @required this.noise,
+ @required this.cleanSampleCount,
+ @required this.outlierSampleCount,
+ @required this.samples,
+ });
+
+ /// The label used to refer to the corresponding timeseries.
+ final String name;
+
+ /// The average value of the measured samples without outliers.
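+ ///
+ /// Computed as the arithmetic mean of the clean samples, i.e. those at or
+ /// below [outlierCutOff].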
+ final double average;
+
+ /// The standard deviation in the measured samples without outliers.
+ final double standardDeviation;
+
+ /// The noise as a multiple of the [average] value computed from clean samples.
+ ///
+ /// This value can be multiplied by 100.0 to get noise as a percentage of
+ /// the average.
+ ///
+ /// If [average] is zero, the noise is reported as zero, treating the result
+ /// as a perfect score.
+ final double noise;
+
+ /// The maximum value a sample can have without being considered an outlier.
+ ///
+ /// See [Timeseries.computeStats] for details on how this value is computed.
+ final double outlierCutOff;
+
+ /// The average of outlier samples.
+ ///
+ /// This value can be used to judge how badly we jank, when we jank.
+ ///
+ /// Another useful metric is the difference between [outlierAverage] and
+ /// [average]. The smaller the difference, the more predictable the
+ /// performance of the corresponding benchmark.
+ final double outlierAverage;
+
+ /// The number of measured samples after outliers are removed.
+ final int cleanSampleCount;
+
+ /// The number of outliers.
+ final int outlierSampleCount;
+
+ /// All collected samples, annotated with statistical information.
+ ///
+ /// See [AnnotatedSample] for more details.
+ final List samples;
+
+ /// Outlier average divided by clean average.
+ ///
+ /// This is a measure of performance consistency. The higher this number,
+ /// the worse the jank when it happens. Smaller is better, with 1.0 being
+ /// the perfect score. If [average] is zero, this value defaults to 1.0.
+ double get outlierRatio => average > 0.0
+ ? outlierAverage / average
+ : 1.0; // this can only happen in a perfect benchmark that reports only zeros
+
+ @override
+ String toString() {
+ final StringBuffer buffer = StringBuffer();
+ buffer.writeln(
+ '$name: (samples: $cleanSampleCount clean/$outlierSampleCount outliers/'
+ '${cleanSampleCount + outlierSampleCount} measured/'
+ '${samples.length} total)');
+ buffer.writeln(' | average: $average μs');
+ buffer.writeln(' | outlier average: $outlierAverage μs');
+ buffer.writeln(' | outlier/clean ratio: ${outlierRatio}x');
+ buffer.writeln(' | noise: ${_ratioToPercent(noise)}');
+ return buffer.toString();
+ }
+}
+
+/// Annotates a single measurement with statistical information.
+@sealed
+class AnnotatedSample {
+ /// Creates an annotated measurement sample.
+ const AnnotatedSample({
+ @required this.magnitude,
+ @required this.isOutlier,
+ @required this.isWarmUpValue,
+ });
+
+ /// The non-negative raw result of the measurement.
+ final double magnitude;
+
+ /// Whether this sample was considered an outlier.
+ final bool isOutlier;
+
+ /// Whether this sample was taken during the warm-up phase.
+ ///
+ /// If this value is `true`, this sample does not participate in
+ /// statistical computations. However, the sample is still shown in the
+ /// visualization of results so that the benchmark can be inspected
+ /// manually to make sure there's a predictable warm-up regression slope.
+ final bool isWarmUpValue;
+}
+
+/// Base class for a profile collected from running a benchmark.
+class Profile {
+ /// Creates an empty profile.
+ ///
+ /// [name] and [useCustomWarmUp] must not be null.
+ Profile({@required this.name, this.useCustomWarmUp = false})
+ : assert(name != null),
+ _isWarmingUp = useCustomWarmUp;
+
+ /// The name of the benchmark that produced this profile.
+ final String name;
+
+ /// Whether to delimit warm-up frames in a custom way.
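+ ///
+ /// When false, each timeseries treats its oldest `count - kMeasuredSampleCount`
+ /// values as warm-up frames. When true, the warm-up phase ends only when
+ /// [stopWarmingUp] is called.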
+ final bool useCustomWarmUp; + + /// Whether we are measuring warm-up frames currently. + bool get isWarmingUp => _isWarmingUp; + + bool _isWarmingUp; + + /// Stop the warm-up phase. + /// + /// Call this method only when [useCustomWarmUp] and [isWarmingUp] are both + /// true. + /// Call this method only once for each profile. + void stopWarmingUp() { + if (!useCustomWarmUp) { + throw Exception( + '`stopWarmingUp` should be used only when `useCustomWarmUp` is true.'); + } else if (!_isWarmingUp) { + throw Exception('Warm-up already stopped.'); + } else { + _isWarmingUp = false; + } + } + + /// This data will be used to display cards in the Flutter Dashboard. + final Map scoreData = {}; + + /// This data isn't displayed anywhere. It's stored for completeness purposes. + final Map extraData = {}; + + /// Invokes [callback] and records the duration of its execution under [key]. + Duration record(String key, VoidCallback callback, + {@required bool reported}) { + final Duration duration = timeAction(callback); + addDataPoint(key, duration, reported: reported); + return duration; + } + + /// Adds a timed sample to the timeseries corresponding to [key]. + /// + /// Set [reported] to `true` to report the timeseries to the dashboard UI. + /// + /// Set [reported] to `false` to store the data, but not show it on the + /// dashboard UI. + void addDataPoint(String key, Duration duration, {@required bool reported}) { + scoreData + .putIfAbsent( + key, + () => Timeseries(key, reported, useCustomWarmUp: useCustomWarmUp), + ) + .add(duration.inMicroseconds.toDouble(), isWarmUpValue: isWarmingUp); + } + + /// Decides whether the data collected so far is sufficient to stop, or + /// whether the benchmark should continue collecting more data. + /// + /// The signals used are sample size, noise, and duration. + /// + /// If any of the timeseries doesn't satisfy the noise requirements, this + /// method will return true (asking the benchmark to continue collecting + /// data). + bool shouldContinue() { + // If there are no `Timeseries` in the `scoreData`, then we haven't + // recorded anything yet. Don't stop. + if (scoreData.isEmpty) { + return true; + } + + // We have recorded something, but do we have enough samples? If every + // timeseries has collected enough samples, stop the benchmark. + return !scoreData.keys + .every((String key) => scoreData[key].count >= kTotalSampleCount); + } + + /// Returns a JSON representation of the profile that will be sent to the + /// server. + Map toJson() { + final List scoreKeys = []; + final Map json = { + 'name': name, + 'scoreKeys': scoreKeys, + }; + + for (final String key in scoreData.keys) { + final Timeseries timeseries = scoreData[key]; + + if (timeseries.isReported) { + scoreKeys.add('$key.average'); + // Report `outlierRatio` rather than `outlierAverage`, because + // the absolute value of outliers is less interesting than the + // ratio. 
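+ // For example, a reported timeseries named 'drawFrameDuration'
+ // contributes 'drawFrameDuration.average' and
+ // 'drawFrameDuration.outlierRatio' to 'scoreKeys', alongside the
+ // '<key>.average', '<key>.outlierAverage', '<key>.outlierRatio', and
+ // '<key>.noise' values added below.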
+ scoreKeys.add('$key.outlierRatio');
+ }
+
+ final TimeseriesStats stats = timeseries.computeStats();
+ json['$key.average'] = stats.average;
+ json['$key.outlierAverage'] = stats.outlierAverage;
+ json['$key.outlierRatio'] = stats.outlierRatio;
+ json['$key.noise'] = stats.noise;
+ }
+
+ json.addAll(extraData);
+
+ return json;
+ }
+
+ @override
+ String toString() {
+ final StringBuffer buffer = StringBuffer();
+ buffer.writeln('name: $name');
+ for (final String key in scoreData.keys) {
+ final Timeseries timeseries = scoreData[key];
+ final TimeseriesStats stats = timeseries.computeStats();
+ buffer.writeln(stats.toString());
+ }
+ for (final String key in extraData.keys) {
+ final dynamic value = extraData[key];
+ if (value is List) {
+ buffer.writeln('$key:');
+ for (final dynamic item in value) {
+ buffer.writeln(' - $item');
+ }
+ } else {
+ buffer.writeln('$key: $value');
+ }
+ }
+ return buffer.toString();
+ }
+}
+
+/// Computes the arithmetic mean (or average) of the given [values].
+double _computeAverage(String label, Iterable values) {
+ if (values.isEmpty) {
+ throw StateError(
+ '$label: attempted to compute an average of an empty value list.');
+ }
+
+ final double sum = values.reduce((double a, double b) => a + b);
+ return sum / values.length;
+}
+
+/// Computes population standard deviation.
+///
+/// Unlike sample standard deviation, which divides by N - 1, this divides by N.
+///
+/// See also:
+///
+/// * https://en.wikipedia.org/wiki/Standard_deviation
+double _computeStandardDeviationForPopulation(
+ String label, Iterable population) {
+ if (population.isEmpty) {
+ throw StateError(
+ '$label: attempted to compute the standard deviation of an empty population.');
+ }
+ final double mean = _computeAverage(label, population);
+ final double sumOfSquaredDeltas = population.fold(
+ 0.0,
+ (double previous, double value) => previous += math.pow(value - mean, 2),
+ );
+ return math.sqrt(sumOfSquaredDeltas / population.length);
+}
+
+String _ratioToPercent(double value) {
+ return '${(value * 100).toStringAsFixed(2)}%';
+}
+
+/// Implemented by recorders that use [_RecordingWidgetsBinding] to receive
+/// frame life-cycle calls.
+abstract class FrameRecorder {
+ /// Adds a callback that will be called by the recorder when it stops recording.
+ void registerDidStop(VoidCallback cb);
+
+ /// Called just before calling [SchedulerBinding.handleDrawFrame].
+ void frameWillDraw();
+
+ /// Called immediately after calling [SchedulerBinding.handleDrawFrame].
+ void frameDidDraw();
+
+ /// Reports an error.
+ ///
+ /// The implementation is expected to halt benchmark execution as soon as possible.
+ void _onError(dynamic error, StackTrace stackTrace);
+}
+
+/// A variant of [WidgetsBinding] that collaborates with a [Recorder] to decide
+/// when to stop pumping frames.
+///
+/// A normal [WidgetsBinding] typically always pumps frames whenever a widget
+/// instructs it to do so by calling [scheduleFrame] (transitively via
+/// `setState`). This binding will stop pumping new frames as soon as benchmark
+/// parameters are satisfactory (e.g. when the metric noise levels become low
+/// enough).
+class _RecordingWidgetsBinding extends BindingBase
+ with
+ GestureBinding,
+ SchedulerBinding,
+ ServicesBinding,
+ PaintingBinding,
+ SemanticsBinding,
+ RendererBinding,
+ WidgetsBinding {
+ /// Makes an instance of [_RecordingWidgetsBinding] the current binding.
+ static _RecordingWidgetsBinding ensureInitialized() {
+ if (WidgetsBinding.instance == null) {
+ _RecordingWidgetsBinding();
+ }
+ return WidgetsBinding.instance;
+ }
+
+ FrameRecorder _recorder;
+ bool _hasErrored = false;
+
+ /// To short-circuit all frame lifecycle methods when the benchmark has
+ /// stopped collecting data.
+ bool _benchmarkStopped = false;
+
+ void _beginRecording(FrameRecorder recorder, Widget widget) {
+ if (_recorder != null) {
+ throw Exception(
+ 'Cannot call _RecordingWidgetsBinding._beginRecording more than once',
+ );
+ }
+ final FlutterExceptionHandler originalOnError = FlutterError.onError;
+
+ recorder.registerDidStop(() {
+ _benchmarkStopped = true;
+ });
+
+ // Fail hard and fast on errors. Benchmarks should not have any errors.
+ FlutterError.onError = (FlutterErrorDetails details) {
+ _haltBenchmarkWithError(details.exception, details.stack);
+ originalOnError(details);
+ };
+ _recorder = recorder;
+ runApp(widget);
+ }
+
+ void _haltBenchmarkWithError(dynamic error, StackTrace stackTrace) {
+ if (_hasErrored) {
+ return;
+ }
+ _recorder._onError(error, stackTrace);
+ _hasErrored = true;
+ }
+
+ @override
+ void handleBeginFrame(Duration rawTimeStamp) {
+ // Don't keep on truckin' if there's an error or the benchmark has stopped.
+ if (_hasErrored || _benchmarkStopped) {
+ return;
+ }
+ try {
+ super.handleBeginFrame(rawTimeStamp);
+ } catch (error, stackTrace) {
+ _haltBenchmarkWithError(error, stackTrace);
+ rethrow;
+ }
+ }
+
+ @override
+ void scheduleFrame() {
+ // Don't keep on truckin' if there's an error or the benchmark has stopped.
+ if (_hasErrored || _benchmarkStopped) {
+ return;
+ }
+ super.scheduleFrame();
+ }
+
+ @override
+ void handleDrawFrame() {
+ // Don't keep on truckin' if there's an error or the benchmark has stopped.
+ if (_hasErrored || _benchmarkStopped) {
+ return;
+ }
+ try {
+ _recorder.frameWillDraw();
+ super.handleDrawFrame();
+ _recorder.frameDidDraw();
+ } catch (error, stackTrace) {
+ _haltBenchmarkWithError(error, stackTrace);
+ rethrow;
+ }
+ }
+}
+
+int _currentFrameNumber = 1;
+
+/// If [_calledStartMeasureFrame] is true, we have called [startMeasureFrame]
+/// but have not yet called its pairing [endMeasureFrame].
+///
+/// This flag ensures that [startMeasureFrame] and [endMeasureFrame] are always
+/// called in pairs, with [startMeasureFrame] followed by [endMeasureFrame].
+bool _calledStartMeasureFrame = false;
+
+/// Whether we are recording a measured frame.
+///
+/// This flag ensures that we always stop measuring a frame if we have
+/// started one. It is necessary because warm-up frames are not measured.
+bool _isMeasuringFrame = false;
+
+/// Adds a marker indicating the beginning of frame rendering.
+///
+/// This adds an event to the performance trace used to find measured frames in
+/// Chrome tracing data. The tracing data contains all frames, but some
+/// benchmarks are only interested in a subset of frames. For example,
+/// [WidgetBuildRecorder] only measures frames that build widgets, and ignores
+/// frames that clear the screen.
+///
+/// Warm-up frames are not measured. If [profile.isWarmingUp] is true,
+/// this function does nothing.
+void startMeasureFrame(Profile profile) {
+ if (_calledStartMeasureFrame) {
+ throw Exception('`startMeasureFrame` called twice in a row.');
+ }
+
+ _calledStartMeasureFrame = true;
+
+ if (!profile.isWarmingUp) {
+ // Tell the browser to mark the beginning of the frame.
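+ // The mark shows up in the Chrome trace as a user-timing event. Its
+ // matching 'measured_frame_end#<n>' mark and the 'measured_frame' measure
+ // are emitted by [endMeasureFrame], which is how measured frames are later
+ // located in the tracing data.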
+ html.window.performance.mark('measured_frame_start#$_currentFrameNumber');
+
+ _isMeasuringFrame = true;
+ }
+}
+
+/// Signals the end of a measured frame.
+///
+/// See [startMeasureFrame] for details on what this instrumentation is used
+/// for.
+///
+/// Warm-up frames are not measured. If [profile.isWarmingUp] was true
+/// when the corresponding [startMeasureFrame] was called,
+/// this function does nothing.
+void endMeasureFrame() {
+ if (!_calledStartMeasureFrame) {
+ throw Exception(
+ '`startMeasureFrame` has not been called before calling `endMeasureFrame`');
+ }
+
+ _calledStartMeasureFrame = false;
+
+ if (_isMeasuringFrame) {
+ // Tell the browser to mark the end of the frame, and measure the duration.
+ html.window.performance.mark('measured_frame_end#$_currentFrameNumber');
+ html.window.performance.measure(
+ 'measured_frame',
+ 'measured_frame_start#$_currentFrameNumber',
+ 'measured_frame_end#$_currentFrameNumber',
+ );
+
+ // Increment the current frame number.
+ _currentFrameNumber += 1;
+
+ _isMeasuringFrame = false;
+ }
+}
+
+/// A function that receives a benchmark value from the framework.
+typedef EngineBenchmarkValueListener = void Function(num value);
+
+// Maps from a value label name to a listener.
+final Map _engineBenchmarkListeners =
+ {};
+
+/// Registers a [listener] for engine benchmark values labeled by [name].
+///
+/// Only one listener may be registered per label; attempting to register a
+/// second listener for the same label throws a [StateError].
+void registerEngineBenchmarkValueListener(
+ String name, EngineBenchmarkValueListener listener) {
+ if (listener == null) {
+ throw ArgumentError(
+ 'Listener must not be null. To stop listening to engine benchmark values '
+ 'under label "$name", call stopListeningToEngineBenchmarkValues(\'$name\').',
+ );
+ }
+
+ if (_engineBenchmarkListeners.containsKey(name)) {
+ throw StateError('A listener for "$name" is already registered.\n'
+ 'Call `stopListeningToEngineBenchmarkValues` to unregister the previous '
+ 'listener before registering a new one.');
+ }
+
+ if (_engineBenchmarkListeners.isEmpty) {
+ // The first listener is being registered. Register the global listener.
+ js_util.setProperty(html.window, '_flutter_internal_on_benchmark',
+ _dispatchEngineBenchmarkValue);
+ }
+
+ _engineBenchmarkListeners[name] = listener;
+}
+
+/// Stops listening to engine benchmark values under the label [name].
+void stopListeningToEngineBenchmarkValues(String name) {
+ _engineBenchmarkListeners.remove(name);
+ if (_engineBenchmarkListeners.isEmpty) {
+ // The last listener unregistered. Remove the global listener.
+ js_util.setProperty(html.window, '_flutter_internal_on_benchmark', null);
+ }
+}
+
+// Dispatches a benchmark value reported by the engine to the relevant listener.
+//
+// If there are no listeners registered for [name], ignores the value.
+void _dispatchEngineBenchmarkValue(String name, double value) {
+ final EngineBenchmarkValueListener listener = _engineBenchmarkListeners[name];
+ if (listener != null) {
+ listener(value);
+ }
+}
diff --git a/packages/web_benchmarks/lib/src/runner.dart b/packages/web_benchmarks/lib/src/runner.dart
new file mode 100644
index 00000000000..beaa1e1da5a
--- /dev/null
+++ b/packages/web_benchmarks/lib/src/runner.dart
@@ -0,0 +1,305 @@
+// Copyright 2014 The Flutter Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import 'dart:async';
+import 'dart:convert' show json;
+import 'dart:io' as io;
+
+import 'package:logging/logging.dart';
+import 'package:meta/meta.dart';
+import 'package:path/path.dart' as path;
+import 'package:process/process.dart';
+import 'package:shelf/shelf.dart';
+import 'package:shelf/shelf_io.dart' as shelf_io;
+import 'package:shelf_static/shelf_static.dart';
+
+import 'benchmark_result.dart';
+import 'browser.dart';
+
+/// The default port number used by the local benchmark server.
+const int defaultBenchmarkServerPort = 9999;
+
+/// The default port number used for the Chrome DevTools Protocol.
+const int defaultChromeDebugPort = 10000;
+
+/// Builds and serves a Flutter Web app, collects raw benchmark data, and
+/// summarizes the result as a [BenchmarkResults].
+class BenchmarkServer {
+ /// Creates a benchmark server.
+ ///
+ /// [benchmarkAppDirectory] is the directory containing the app that's being
+ /// benchmarked. The app is expected to use `package:web_benchmarks/client.dart`
+ /// and call the `runBenchmarks` function to run the benchmarks.
+ ///
+ /// [entryPoint] is the path to the main app file that runs the benchmark. It
+ /// can be different (and typically is) from the production entry point of the
+ /// app.
+ ///
+ /// If [useCanvasKit] is true, builds the app in CanvasKit mode.
+ ///
+ /// [benchmarkServerPort] is the port this benchmark server serves the app on.
+ ///
+ /// [chromeDebugPort] is the port Chrome uses for the DevTools Protocol, which
+ /// is used to extract tracing data.
+ ///
+ /// If [headless] is true, runs Chrome without UI. In particular, this is
+ /// useful in environments (e.g. CI) that don't have a display.
+ BenchmarkServer({
+ @required this.benchmarkAppDirectory,
+ @required this.entryPoint,
+ @required this.useCanvasKit,
+ @required this.benchmarkServerPort,
+ @required this.chromeDebugPort,
+ @required this.headless,
+ });
+
+ final ProcessManager _processManager = const LocalProcessManager();
+
+ /// The directory containing the app that's being benchmarked.
+ ///
+ /// The app is expected to use `package:web_benchmarks/client.dart`
+ /// and call the `runBenchmarks` function to run the benchmarks.
+ final io.Directory benchmarkAppDirectory;
+
+ /// The path to the main app file that runs the benchmark.
+ ///
+ /// It can be different (and typically is) from the production entry point of
+ /// the app.
+ final String entryPoint;
+
+ /// Whether to build the app in CanvasKit mode.
+ final bool useCanvasKit;
+
+ /// The port this benchmark server serves the app on.
+ final int benchmarkServerPort;
+
+ /// The port Chrome uses for the DevTools Protocol, which is used to extract
+ /// tracing data.
+ final int chromeDebugPort;
+
+ /// Whether to run Chrome without UI.
+ ///
+ /// This is useful in environments (e.g. CI) that don't have a display.
+ final bool headless;
+
+ /// Builds and serves the benchmark app, and collects benchmark results.
+ Future<BenchmarkResults> run() async {
+ // Reduce logging level. Otherwise, package:webkit_inspection_protocol is way too spammy.
+ Logger.root.level = Level.INFO;
+
+ if (!_processManager.canRun('flutter')) {
+ throw Exception(
+ 'flutter executable is not runnable. Make sure it\'s in the PATH.');
+ }
+
+ final io.ProcessResult buildResult = await _processManager.run(
+ <String>[
+ 'flutter',
+ 'build',
+ 'web',
+ '--dart-define=FLUTTER_WEB_ENABLE_PROFILING=true',
+ if (useCanvasKit) '--dart-define=FLUTTER_WEB_USE_SKIA=true',
+ '--profile',
+ '-t',
+ entryPoint,
+ ],
+ workingDirectory: benchmarkAppDirectory.path,
+ );
+
+ if (buildResult.exitCode != 0) {
+ io.stderr.writeln(buildResult.stdout);
+ io.stderr.writeln(buildResult.stderr);
+ throw Exception('Failed to build the benchmark.');
+ }
+
+ final Completer<List<Map<String, dynamic>>> profileData =
+ Completer<List<Map<String, dynamic>>>();
+ final List<Map<String, dynamic>> collectedProfiles =
+ <Map<String, dynamic>>[];
+ List<String> benchmarks;
+ Iterator<String> benchmarkIterator;
+
+ // This future fixes a race condition between the web page loading and
+ // asking to run a benchmark, and us connecting to Chrome's DevTools port.
+ // Sometimes one wins. Other times, the other wins.
+ Future<Chrome> whenChromeIsReady;
+ Chrome chrome;
+ io.HttpServer server;
+ Cascade cascade = Cascade();
+ List<Map<String, dynamic>> latestPerformanceTrace;
+ cascade = cascade.add((Request request) async {
+ try {
+ chrome ??= await whenChromeIsReady;
+ if (request.requestedUri.path.endsWith('/profile-data')) {
+ final Map<String, dynamic> profile =
+ json.decode(await request.readAsString());
+ final String benchmarkName = profile['name'];
+ if (benchmarkName != benchmarkIterator.current) {
+ profileData.completeError(Exception(
+ 'Browser returned benchmark results from a wrong benchmark.\n'
+ 'Requested to run benchmark ${benchmarkIterator.current}, but '
+ 'got results for $benchmarkName.',
+ ));
+ server.close();
+ }
+
+ // Trace data is null when the benchmark is not frame-based, such as RawRecorder.
+ if (latestPerformanceTrace != null) {
+ final BlinkTraceSummary traceSummary =
+ BlinkTraceSummary.fromJson(latestPerformanceTrace);
+ profile['totalUiFrame.average'] =
+ traceSummary.averageTotalUIFrameTime.inMicroseconds;
+ profile['scoreKeys'] ??=
+ <dynamic>[]; // using dynamic for consistency with JSON
+ profile['scoreKeys'].add('totalUiFrame.average');
+ latestPerformanceTrace = null;
+ }
+ collectedProfiles.add(profile);
+ return Response.ok('Profile received');
+ } else if (request.requestedUri.path
+ .endsWith('/start-performance-tracing')) {
+ latestPerformanceTrace = null;
+ await chrome.beginRecordingPerformance(
+ request.requestedUri.queryParameters['label']);
+ return Response.ok('Started performance tracing');
+ } else if (request.requestedUri.path
+ .endsWith('/stop-performance-tracing')) {
+ latestPerformanceTrace = await chrome.endRecordingPerformance();
+ return Response.ok('Stopped performance tracing');
+ } else if (request.requestedUri.path.endsWith('/on-error')) {
+ final Map<String, dynamic> errorDetails =
+ json.decode(await request.readAsString());
+ server.close();
+ // Keep the stack trace as a string. It's thrown in the browser, not this Dart VM.
+ final String errorMessage =
+ 'Caught browser-side error: ${errorDetails['error']}\n${errorDetails['stackTrace']}';
+ if (!profileData.isCompleted) {
+ profileData.completeError(errorMessage);
+ } else {
+ io.stderr.writeln(errorMessage);
+ }
+ return Response.ok('');
+ } else if (request.requestedUri.path.endsWith('/next-benchmark')) {
+ if (benchmarks == null) {
+ benchmarks =
+ (json.decode(await request.readAsString())).cast<String>();
+ benchmarkIterator = benchmarks.iterator;
+ }
+ if (benchmarkIterator.moveNext()) {
+ final String nextBenchmark = benchmarkIterator.current;
+ print('Launching benchmark "$nextBenchmark"');
+ return Response.ok(nextBenchmark);
+ } else {
+ profileData.complete(collectedProfiles);
+ return Response.notFound('Finished running benchmarks.');
+ }
+ } else if (request.requestedUri.path.endsWith('/print-to-console')) {
+ // A passthrough used by
+ // `dev/benchmarks/macrobenchmarks/lib/web_benchmarks.dart`
+ // to print information.
+ final String message = await request.readAsString();
+ print('[Gallery] $message');
+ return Response.ok('Reported.');
+ } else {
+ io.stderr
+ .writeln('Unrecognized URL path: ${request.requestedUri.path}');
+ return Response.notFound(
+ 'This request is not handled by the profile-data handler.');
+ }
+ } catch (error, stackTrace) {
+ if (!profileData.isCompleted) {
+ profileData.completeError(error, stackTrace);
+ } else {
+ io.stderr.writeln('Caught error: $error');
+ io.stderr.writeln('$stackTrace');
+ }
+ return Response.internalServerError(body: '$error');
+ }
+ }).add(createStaticHandler(
+ path.join(benchmarkAppDirectory.path, 'build', 'web'),
+ ));
+
+ server = await io.HttpServer.bind('localhost', benchmarkServerPort);
+ try {
+ shelf_io.serveRequests(server, cascade.handler);
+
+ final String dartToolDirectory =
+ path.join(benchmarkAppDirectory.path, '.dart_tool');
+ final String userDataDir = io.Directory(dartToolDirectory)
+ .createTempSync('chrome_user_data_')
+ .path;
+
+ final ChromeOptions options = ChromeOptions(
+ url: 'http://localhost:$benchmarkServerPort/index.html',
+ userDataDirectory: userDataDir,
+ windowHeight: 1024,
+ windowWidth: 1024,
+ headless: headless,
+ debugPort: chromeDebugPort,
+ );
+
+ print('Launching Chrome.');
+ whenChromeIsReady = Chrome.launch(
+ options,
+ onError: (String error) {
+ if (!profileData.isCompleted) {
+ profileData.completeError(Exception(error));
+ } else {
+ io.stderr.writeln('Chrome error: $error');
+ }
+ },
+ workingDirectory: benchmarkAppDirectory.path,
+ );
+
+ print('Waiting for the benchmark to report benchmark profile.');
+ final List<Map<String, dynamic>> profiles = await profileData.future;
+
+ print('Received profile data');
+ final Map<String, List<BenchmarkScore>> results =
+ <String, List<BenchmarkScore>>{};
+ for (final Map<String, dynamic> profile in profiles) {
+ final String benchmarkName = profile['name'];
+ if (benchmarkName.isEmpty) {
+ throw 'Benchmark name is empty';
+ }
+
+ final List<String> scoreKeys = List<String>.from(profile['scoreKeys']);
+ if (scoreKeys == null || scoreKeys.isEmpty) {
+ throw 'No score keys in benchmark "$benchmarkName"';
+ }
+ for (final String scoreKey in scoreKeys) {
+ if (scoreKey == null || scoreKey.isEmpty) {
+ throw 'Score key is empty in benchmark "$benchmarkName". 
' + 'Received [${scoreKeys.join(', ')}]'; + } + } + + final List scores = []; + for (final String key in profile.keys) { + if (key == 'name' || key == 'scoreKeys') { + continue; + } + scores.add(BenchmarkScore( + metric: key, + value: profile[key], + )); + } + results[benchmarkName] = scores; + } + return BenchmarkResults(results); + } finally { + if (headless) { + chrome?.stop(); + } else { + // In non-headless mode wait for the developer to close Chrome + // manually. Otherwise, they won't get a chance to debug anything. + print( + 'Benchmark finished. Chrome running in windowed mode. Close ' + 'Chrome manually to continue.', + ); + await chrome?.whenExits; + } + server?.close(); + } + } +} diff --git a/packages/web_benchmarks/pubspec.yaml b/packages/web_benchmarks/pubspec.yaml new file mode 100644 index 00000000000..fa98baba081 --- /dev/null +++ b/packages/web_benchmarks/pubspec.yaml @@ -0,0 +1,23 @@ +name: web_benchmarks +description: A benchmark harness for performance-testing Flutter apps in Chrome. +version: 0.0.1 +homepage: https://github.com/flutter/packages/tree/master/packages/web_benchmarks + +environment: + sdk: ">=2.7.0 <3.0.0" + flutter: ">=1.17.0 <2.0.0" + +# Using +2 upper limit on some packages to allow null-safe versions +dependencies: + flutter: + sdk: flutter + flutter_test: + sdk: flutter + logging: ">=0.11.4 <1.0.0" + meta: ">=1.0.0 <2.0.0" + path: ">=1.7.0 <2.0.0" + process: ">=3.0.13 <5.0.0" + shelf: ">=0.7.5 <1.0.0" + shelf_static: ">=0.2.8 <1.0.0" + test: ">=1.15.0 <3.0.0" + webkit_inspection_protocol: ">=0.7.3 <1.0.0" diff --git a/packages/web_benchmarks/testing/test_app/.gitignore b/packages/web_benchmarks/testing/test_app/.gitignore new file mode 100644 index 00000000000..9d532b18a01 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/.gitignore @@ -0,0 +1,41 @@ +# Miscellaneous +*.class +*.log +*.pyc +*.swp +.DS_Store +.atom/ +.buildlog/ +.history +.svn/ + +# IntelliJ related +*.iml +*.ipr +*.iws +.idea/ + +# The .vscode folder contains launch configuration and tasks you configure in +# VS Code which you may wish to be included in version control, so this line +# is commented out by default. +#.vscode/ + +# Flutter/Dart/Pub related +**/doc/api/ +**/ios/Flutter/.last_build_id +.dart_tool/ +.flutter-plugins +.flutter-plugins-dependencies +.packages +.pub-cache/ +.pub/ +/build/ + +# Web related +lib/generated_plugin_registrant.dart + +# Symbolication related +app.*.symbols + +# Obfuscation related +app.*.map.json diff --git a/packages/web_benchmarks/testing/test_app/.metadata b/packages/web_benchmarks/testing/test_app/.metadata new file mode 100644 index 00000000000..5e875f26787 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/.metadata @@ -0,0 +1,10 @@ +# This file tracks properties of this Flutter project. +# Used by Flutter tool to assess capabilities and perform upgrades etc. +# +# This file should be version controlled and should not be manually edited. + +version: + revision: d26268bb9e6d713a73d6148da7fa75936d442741 + channel: master + +project_type: app diff --git a/packages/web_benchmarks/testing/test_app/README.md b/packages/web_benchmarks/testing/test_app/README.md new file mode 100644 index 00000000000..c3bb8e47809 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/README.md @@ -0,0 +1,3 @@ +# test_app + +An example app for web benchmarks testing. 
diff --git a/packages/web_benchmarks/testing/test_app/analysis_options.yaml b/packages/web_benchmarks/testing/test_app/analysis_options.yaml new file mode 100644 index 00000000000..2597fd11794 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/analysis_options.yaml @@ -0,0 +1,7 @@ +include: ../../../../analysis_options.yaml + +linter: + rules: + # This is test code. Do not enforce docs. + package_api_docs: false + public_member_api_docs: false diff --git a/packages/web_benchmarks/testing/test_app/lib/aboutpage.dart b/packages/web_benchmarks/testing/test_app/lib/aboutpage.dart new file mode 100644 index 00000000000..337f7025369 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/lib/aboutpage.dart @@ -0,0 +1,27 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'package:flutter/material.dart'; + +const ValueKey backKey = ValueKey('backKey'); + +class AboutPage extends StatelessWidget { + @override + Widget build(BuildContext context) { + return Scaffold( + appBar: AppBar( + leading: BackButton( + key: backKey, + onPressed: () => Navigator.of(context).pop(), + ), + ), + body: Center( + child: Text( + 'This is a sample app.', + style: Theme.of(context).textTheme.headline3, + ), + ), + ); + } +} diff --git a/packages/web_benchmarks/testing/test_app/lib/benchmarks/runner.dart b/packages/web_benchmarks/testing/test_app/lib/benchmarks/runner.dart new file mode 100644 index 00000000000..b35bfa9cbb3 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/lib/benchmarks/runner.dart @@ -0,0 +1,99 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'package:flutter/material.dart'; +import 'package:flutter_test/flutter_test.dart'; +import 'package:web_benchmarks/client.dart'; + +import '../aboutpage.dart' show backKey; +import '../homepage.dart' show textKey, aboutPageKey; +import '../main.dart'; + +/// A recorder that measures frame building durations. 
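+/// Subclasses drive the app by overriding [automate], which is scheduled to
+/// run shortly after the widget tree built by [createWidget] is rendered, and
+/// can await [animationStops] to wait until no more frames are scheduled.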
+abstract class AppRecorder extends WidgetRecorder { + AppRecorder({@required this.benchmarkName}) : super(name: benchmarkName); + + final String benchmarkName; + + Future automate(); + + @override + Widget createWidget() { + Future.delayed(const Duration(milliseconds: 400), automate); + return MyApp(); + } + + Future animationStops() async { + while (WidgetsBinding.instance.hasScheduledFrame) { + await Future.delayed(const Duration(milliseconds: 200)); + } + } +} + +class ScrollRecorder extends AppRecorder { + ScrollRecorder() : super(benchmarkName: 'scroll'); + + @override + Future automate() async { + final ScrollableState scrollable = + Scrollable.of(find.byKey(textKey).evaluate().single); + await scrollable.position.animateTo( + 30000, + curve: Curves.linear, + duration: const Duration(seconds: 20), + ); + } +} + +class PageRecorder extends AppRecorder { + PageRecorder() : super(benchmarkName: 'page'); + + bool _completed = false; + + @override + bool shouldContinue() => profile.shouldContinue() || !_completed; + + @override + Future automate() async { + final LiveWidgetController controller = + LiveWidgetController(WidgetsBinding.instance); + for (int i = 0; i < 10; ++i) { + print('Testing round $i...'); + await controller.tap(find.byKey(aboutPageKey)); + await animationStops(); + await controller.tap(find.byKey(backKey)); + await animationStops(); + } + _completed = true; + } +} + +class TapRecorder extends AppRecorder { + TapRecorder() : super(benchmarkName: 'tap'); + + bool _completed = false; + + @override + bool shouldContinue() => profile.shouldContinue() || !_completed; + + @override + Future automate() async { + final LiveWidgetController controller = + LiveWidgetController(WidgetsBinding.instance); + for (int i = 0; i < 10; ++i) { + print('Testing round $i...'); + await controller.tap(find.byIcon(Icons.add)); + await animationStops(); + } + _completed = true; + } +} + +Future main() async { + await runBenchmarks({ + 'scroll': () => ScrollRecorder(), + 'page': () => PageRecorder(), + 'tap': () => TapRecorder(), + }); +} diff --git a/packages/web_benchmarks/testing/test_app/lib/homepage.dart b/packages/web_benchmarks/testing/test_app/lib/homepage.dart new file mode 100644 index 00000000000..7c7ac211246 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/lib/homepage.dart @@ -0,0 +1,91 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +import 'package:flutter/material.dart'; + +const ValueKey textKey = ValueKey('textKey'); +const ValueKey aboutPageKey = ValueKey('aboutPageKey'); + +class HomePage extends StatefulWidget { + const HomePage({Key key, this.title}) : super(key: key); + + final String title; + + @override + _HomePageState createState() => _HomePageState(); +} + +class _HomePageState extends State { + int _counter = 0; + + void _incrementCounter() { + setState(() { + _counter++; + }); + } + + @override + Widget build(BuildContext context) { + return Scaffold( + appBar: AppBar( + title: Text(widget.title), + actions: [ + IconButton( + key: aboutPageKey, + icon: const Icon(Icons.alternate_email), + onPressed: () => Navigator.of(context).pushNamed('about'), + ), + ], + ), + body: Center( + child: ListView.builder( + itemExtent: 80, + itemBuilder: (BuildContext context, int index) { + if (index == 0) { + return Column( + key: textKey, + mainAxisAlignment: MainAxisAlignment.center, + children: [ + const Text('You have pushed the button this many times:'), + Text( + '$_counter', + style: Theme.of(context).textTheme.headline4, + ), + ], + ); + } else { + return SizedBox( + height: 80, + child: Padding( + padding: const EdgeInsets.all(12), + child: Card( + elevation: 8, + child: Row( + mainAxisAlignment: MainAxisAlignment.center, + crossAxisAlignment: CrossAxisAlignment.center, + children: [ + Text( + 'Line $index', + style: Theme.of(context).textTheme.headline5, + ), + Expanded(child: Container()), + const Icon(Icons.camera), + const Icon(Icons.face), + ], + ), + ), + ), + ); + } + }, + ), + ), + floatingActionButton: FloatingActionButton( + onPressed: _incrementCounter, + tooltip: 'Increment', + child: const Icon(Icons.add), + ), + ); + } +} diff --git a/packages/web_benchmarks/testing/test_app/lib/main.dart b/packages/web_benchmarks/testing/test_app/lib/main.dart new file mode 100644 index 00000000000..c25696bcb63 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/lib/main.dart @@ -0,0 +1,30 @@ +// Copyright 2014 The Flutter Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import 'package:flutter/material.dart'; + +import 'aboutpage.dart'; +import 'homepage.dart'; + +void main() { + runApp(MyApp()); +} + +class MyApp extends StatelessWidget { + @override + Widget build(BuildContext context) { + return MaterialApp( + title: 'Flutter Demo', + theme: ThemeData( + primarySwatch: Colors.blue, + visualDensity: VisualDensity.adaptivePlatformDensity, + ), + initialRoute: 'home', + routes: { + 'home': (_) => const HomePage(title: 'Flutter Demo Home Page'), + 'about': (_) => AboutPage(), + }, + ); + } +} diff --git a/packages/web_benchmarks/testing/test_app/pubspec.yaml b/packages/web_benchmarks/testing/test_app/pubspec.yaml new file mode 100644 index 00000000000..4d9a9f63878 --- /dev/null +++ b/packages/web_benchmarks/testing/test_app/pubspec.yaml @@ -0,0 +1,23 @@ +name: test_app +description: An example app for web benchmarks testing. 
+
+publish_to: 'none'
+
+version: 1.0.0+1
+
+environment:
+ sdk: ">=2.7.0 <3.0.0"
+
+dependencies:
+ cupertino_icons: ^0.1.3
+ flutter:
+ sdk: flutter
+ web_benchmarks:
+ path: ../../
+
+dev_dependencies:
+ flutter_test:
+ sdk: flutter
+
+flutter:
+ uses-material-design: true
diff --git a/packages/web_benchmarks/testing/test_app/web/favicon.png b/packages/web_benchmarks/testing/test_app/web/favicon.png
new file mode 100644
index 00000000000..8aaa46ac1ae
Binary files /dev/null and b/packages/web_benchmarks/testing/test_app/web/favicon.png differ
diff --git a/packages/web_benchmarks/testing/test_app/web/icons/Icon-192.png b/packages/web_benchmarks/testing/test_app/web/icons/Icon-192.png
new file mode 100644
index 00000000000..b749bfef074
Binary files /dev/null and b/packages/web_benchmarks/testing/test_app/web/icons/Icon-192.png differ
diff --git a/packages/web_benchmarks/testing/test_app/web/icons/Icon-512.png b/packages/web_benchmarks/testing/test_app/web/icons/Icon-512.png
new file mode 100644
index 00000000000..88cfd48dff1
Binary files /dev/null and b/packages/web_benchmarks/testing/test_app/web/icons/Icon-512.png differ
diff --git a/packages/web_benchmarks/testing/test_app/web/index.html b/packages/web_benchmarks/testing/test_app/web/index.html
new file mode 100644
index 00000000000..c52c3523781
--- /dev/null
+++ b/packages/web_benchmarks/testing/test_app/web/index.html
@@ -0,0 +1,33 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="UTF-8">
+ <meta content="IE=Edge" http-equiv="X-UA-Compatible">
+ <meta name="description" content="A new Flutter project.">
+
+ <!-- iOS meta tags & icons -->
+ <meta name="apple-mobile-web-app-capable" content="yes">
+ <meta name="apple-mobile-web-app-status-bar-style" content="black">
+ <meta name="apple-mobile-web-app-title" content="test_app">
+ <link rel="apple-touch-icon" href="icons/Icon-192.png">
+
+ <!-- Favicon -->
+ <link rel="icon" type="image/png" href="favicon.png"/>
+
+ <title>test_app</title>
+ <link rel="manifest" href="manifest.json">
+</head>
+<body>
+ <!-- This script installs service_worker.js to provide PWA functionality to
+ application. For more information, see:
+ https://developers.google.com/web/fundamentals/primers/service-workers -->
+ <script>
+ if ('serviceWorker' in navigator) {
+ window.addEventListener('load', function () {
+ navigator.serviceWorker.register('flutter_service_worker.js');
+ });
+ }
+ </script>
+ <script src="main.dart.js" type="application/javascript"></script>
+</body>
+</html>
diff --git a/packages/web_benchmarks/testing/test_app/web/manifest.json b/packages/web_benchmarks/testing/test_app/web/manifest.json
new file mode 100644
index 00000000000..13a23690776
--- /dev/null
+++ b/packages/web_benchmarks/testing/test_app/web/manifest.json
@@ -0,0 +1,23 @@
+{
+ "name": "test_app",
+ "short_name": "test_app",
+ "start_url": ".",
+ "display": "standalone",
+ "background_color": "#0175C2",
+ "theme_color": "#0175C2",
+ "description": "A new Flutter project.",
+ "orientation": "portrait-primary",
+ "prefer_related_applications": false,
+ "icons": [
+ {
+ "src": "icons/Icon-192.png",
+ "sizes": "192x192",
+ "type": "image/png"
+ },
+ {
+ "src": "icons/Icon-512.png",
+ "sizes": "512x512",
+ "type": "image/png"
+ }
+ ]
+}
diff --git a/packages/web_benchmarks/testing/web_benchmarks_test.dart b/packages/web_benchmarks/testing/web_benchmarks_test.dart
new file mode 100644
index 00000000000..2b881211e0a
--- /dev/null
+++ b/packages/web_benchmarks/testing/web_benchmarks_test.dart
@@ -0,0 +1,51 @@
+// Copyright 2014 The Flutter Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +import 'dart:convert' show JsonEncoder; +import 'dart:io'; + +import 'package:test/test.dart'; + +import 'package:web_benchmarks/server.dart'; + +Future main() async { + test('Can run a web benchmark', () async { + final BenchmarkResults taskResult = await serveWebBenchmark( + benchmarkAppDirectory: Directory('testing/test_app'), + entryPoint: 'lib/benchmarks/runner.dart', + useCanvasKit: false, + ); + + for (final String benchmarkName in ['scroll', 'page', 'tap']) { + for (final String metricName in [ + 'preroll_frame', + 'apply_frame', + 'drawFrameDuration', + ]) { + for (final String valueName in [ + 'average', + 'outlierAverage', + 'outlierRatio', + 'noise', + ]) { + expect( + taskResult.scores[benchmarkName].where((BenchmarkScore score) => + score.metric == '$metricName.$valueName'), + hasLength(1), + ); + } + } + expect( + taskResult.scores[benchmarkName].where( + (BenchmarkScore score) => score.metric == 'totalUiFrame.average'), + hasLength(1), + ); + } + + expect( + const JsonEncoder.withIndent(' ').convert(taskResult.toJson()), + isA(), + ); + }, timeout: Timeout.none); +} diff --git a/script/install_chromium.sh b/script/install_chromium.sh new file mode 100755 index 00000000000..0ad868219b8 --- /dev/null +++ b/script/install_chromium.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e +set -x + +# The build of Chromium used to test web functionality. +# +# Chromium builds can be located here: https://commondatastorage.googleapis.com/chromium-browser-snapshots/index.html?prefix=Linux_x64/ +CHROMIUM_BUILD=768968 + +mkdir .chromium +wget "https://www.googleapis.com/download/storage/v1/b/chromium-browser-snapshots/o/Linux_x64%2F${CHROMIUM_BUILD}%2Fchrome-linux.zip?alt=media" -O .chromium/chromium.zip +unzip .chromium/chromium.zip -d .chromium/ +export CHROME_EXECUTABLE=$(pwd)/.chromium/chrome-linux/chrome +echo $CHROME_EXECUTABLE +$CHROME_EXECUTABLE --version