@@ -1,10 +1,10 @@
 #!/usr/bin/env bash
 
-# This file is sourced when running various Spark classes.
+# This file is sourced when running various Spark programs.
 # Copy it as spark-env.sh and edit that to configure Spark for your site.
 
 # Options read when launching programs locally with
-# ./bin/spark-example or ./bin/spark-submit
+# ./bin/run-example or ./bin/spark-submit
 # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
 # - SPARK_PUBLIC_DNS, to set the public dns name of the driver program
 # - SPARK_CLASSPATH, default classpath entries to append
@@ -13,15 +13,15 @@
 # - SPARK_LOCAL_IP, to set the IP address Spark binds to on this node
 # - SPARK_PUBLIC_DNS, to set the public DNS name of the driver program
 # - SPARK_CLASSPATH, default classpath entries to append
-# - SPARK_LOCAL_DIRS, shuffle directories to use on this node
+# - SPARK_LOCAL_DIRS, storage directories to use on this node for shuffle and RDD data
 # - MESOS_NATIVE_LIBRARY, to point to your libmesos.so if you use Mesos
 
 # Options read in YARN client mode
 # - SPARK_YARN_APP_JAR, Path to your application's JAR file (required)
-# - SPARK_WORKER_INSTANCES, Number of workers to start (Default: 2)
-# - SPARK_WORKER_CORES, Number of cores for the workers (Default: 1).
-# - SPARK_WORKER_MEMORY, Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
-# - SPARK_MASTER_MEMORY, Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
+# - SPARK_EXECUTOR_INSTANCES, Number of workers to start (Default: 2)
+# - SPARK_EXECUTOR_CORES, Number of cores for the workers (Default: 1).
+# - SPARK_EXECUTOR_MEMORY, Memory per Worker (e.g. 1000M, 2G) (Default: 1G)
+# - SPARK_DRIVER_MEMORY, Memory for Master (e.g. 1000M, 2G) (Default: 512 Mb)
 # - SPARK_YARN_APP_NAME, The name of your application (Default: Spark)
 # - SPARK_YARN_QUEUE, The hadoop queue to use for allocation requests (Default: 'default')
 # - SPARK_YARN_DIST_FILES, Comma separated list of files to be distributed with the job.
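
For reference, a spark-env.sh derived from this template might fill in the node-local and standalone/Mesos options described above roughly as follows; every address, path, and size here is a placeholder, not a recommendation from this change:

# Sketch of node-local settings; all values are illustrative placeholders.
export SPARK_LOCAL_IP=192.168.1.10              # IP address Spark binds to on this node
export SPARK_PUBLIC_DNS=node1.example.com       # public DNS name of the driver program
export SPARK_CLASSPATH="/opt/extra-jars/*"      # default classpath entries to append
export SPARK_LOCAL_DIRS="/mnt/disk1/spark,/mnt/disk2/spark"  # storage dirs for shuffle and RDD data
export MESOS_NATIVE_LIBRARY=/usr/local/lib/libmesos.so       # only if you run on Mesos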
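Likewise, a YARN client mode configuration written against the renamed variables introduced here could look like the sketch below; the jar path, app name, and resource sizes are hypothetical examples, chosen only to show the shape of the settings:

# Sketch of YARN client mode settings using the renamed variables; values are examples only.
export SPARK_YARN_APP_JAR=/path/to/your-app.jar   # required: your application's JAR file
export SPARK_EXECUTOR_INSTANCES=4                 # number of workers to start (default: 2)
export SPARK_EXECUTOR_CORES=2                     # cores per worker (default: 1)
export SPARK_EXECUTOR_MEMORY=2G                   # memory per worker (default: 1G)
export SPARK_DRIVER_MEMORY=1G                     # driver memory (default: 512 MB)
export SPARK_YARN_APP_NAME="my-spark-app"         # application name (default: Spark)
export SPARK_YARN_QUEUE=default                   # hadoop queue for allocation requests
export SPARK_YARN_DIST_FILES=/path/to/file1,/path/to/file2   # files distributed with the job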