Commit dcfcc47

Merge branch 'upmaster' into SPARK-36406
2 parents 25828f2 + 0b6af46 commit dcfcc47

548 files changed (+29536 additions, -17996 deletions)

.github/workflows/build_and_test.yml

Lines changed: 2 additions & 2 deletions
@@ -220,7 +220,7 @@ jobs:
       SPARK_LOCAL_IP: localhost
       SKIP_UNIDOC: true
       SKIP_MIMA: true
-      METASPACE_SIZE: 128m
+      METASPACE_SIZE: 1g
     steps:
     - name: Checkout Spark repository
       uses: actions/checkout@v2
@@ -414,7 +414,7 @@ jobs:
       run: |
         apt-get install -y libcurl4-openssl-dev libgit2-dev libssl-dev libxml2-dev
         Rscript -e "install.packages(c('devtools'), repos='https://cloud.r-project.org/')"
-        Rscript -e "devtools::install_github('jimhester/lintr@v2.0.1')"
+        Rscript -e "devtools::install_version('lintr', version='2.0.1', repos='https://cloud.r-project.org')"
         ./R/install-dev.sh
     - name: Install JavaScript linter dependencies
       run: |

README.md

Lines changed: 2 additions & 2 deletions
@@ -4,12 +4,12 @@ Spark is a unified analytics engine for large-scale data processing. It provides
 high-level APIs in Scala, Java, Python, and R, and an optimized engine that
 supports general computation graphs for data analysis. It also supports a
 rich set of higher-level tools including Spark SQL for SQL and DataFrames,
-MLlib for machine learning, GraphX for graph processing,
+pandas API on Spark for pandas workloads, MLlib for machine learning, GraphX for graph processing,
 and Structured Streaming for stream processing.

 <https://spark.apache.org/>

-[![GitHub Action Build](https://github.com/apache/spark/actions/workflows/build_and_test.yml/badge.svg?branch=master)](https://github.com/apache/spark/actions/workflows/build_and_test.yml?query=branch%3Amaster)
+[![GitHub Action Build](https://github.com/apache/spark/actions/workflows/build_and_test.yml/badge.svg?branch=master&event=push)](https://github.com/apache/spark/actions/workflows/build_and_test.yml?query=branch%3Amaster+event%3Apush)
 [![Jenkins Build](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-3.2/badge/icon)](https://amplab.cs.berkeley.edu/jenkins/job/spark-master-test-sbt-hadoop-3.2)
 [![AppVeyor Build](https://img.shields.io/appveyor/ci/ApacheSoftwareFoundation/spark/master.svg?style=plastic&logo=appveyor)](https://ci.appveyor.com/project/ApacheSoftwareFoundation/spark)
 [![PySpark Coverage](https://codecov.io/gh/apache/spark/branch/master/graph/badge.svg)](https://codecov.io/gh/apache/spark)

build/sbt-launch-lib.bash

Lines changed: 4 additions & 2 deletions
@@ -117,9 +117,11 @@ addDebugger () {
 # so they need not be dicked around with individually.
 get_mem_opts () {
   local mem=${1:-$sbt_default_mem}
-  local codecache=128
+  local codecache=$(( $mem / 8 ))
+  (( $codecache > 128 )) || codecache=128
+  (( $codecache < 2048 )) || codecache=2048

-  echo "-Xms256m -Xmx${mem}m -XX:ReservedCodeCacheSize=${codecache}m"
+  echo "-Xms${mem}m -Xmx${mem}m -XX:ReservedCodeCacheSize=${codecache}m"
 }

 require_arg () {
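
The new sizing rule derives the JVM code cache from the requested sbt heap instead of pinning it at 128 MB, clamps the result to the 128-2048 MB range, and now sets -Xms equal to -Xmx. A minimal Java mirror of the same arithmetic, purely as an illustration of the worked values (the authoritative logic is the bash above; the class and method names here are made up):

// Illustrative only: mirrors the new get_mem_opts sizing rule from build/sbt-launch-lib.bash.
public final class SbtMemOpts {

  // ReservedCodeCacheSize in MB: heap/8, clamped to [128, 2048].
  static int codeCacheMb(int heapMb) {
    return Math.min(2048, Math.max(128, heapMb / 8));
  }

  public static void main(String[] args) {
    for (int heapMb : new int[] {512, 4096, 32768}) {
      System.out.printf("-Xms%dm -Xmx%dm -XX:ReservedCodeCacheSize=%dm%n",
          heapMb, heapMb, codeCacheMb(heapMb));
    }
    // 512  MB heap -> code cache 128  MB (floor)
    // 4096 MB heap -> code cache 512  MB (heap/8)
    // 32768 MB heap -> code cache 2048 MB (cap)
  }
}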

common/network-common/pom.xml

Lines changed: 4 additions & 0 deletions
@@ -91,6 +91,10 @@
       <groupId>org.apache.commons</groupId>
       <artifactId>commons-crypto</artifactId>
     </dependency>
+    <dependency>
+      <groupId>com.google.crypto.tink</groupId>
+      <artifactId>tink</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.roaringbitmap</groupId>
       <artifactId>RoaringBitmap</artifactId>
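
The new com.google.crypto.tink:tink coordinate presumably backs the reworked RPC auth handshake shown further down. Purely as a hedged illustration of the library this dependency pulls in, and not code from this commit, Tink's high-level AEAD primitive is used roughly like this (assuming a Tink 1.6-era API):

import com.google.crypto.tink.Aead;
import com.google.crypto.tink.KeyTemplates;
import com.google.crypto.tink.KeysetHandle;
import com.google.crypto.tink.aead.AeadConfig;
import java.nio.charset.StandardCharsets;

// Illustrative Tink usage: authenticated encryption with associated data.
public final class TinkAeadDemo {
  public static void main(String[] args) throws Exception {
    AeadConfig.register();  // register AEAD key types with the runtime
    KeysetHandle keyset = KeysetHandle.generateNew(KeyTemplates.get("AES128_GCM"));
    Aead aead = keyset.getPrimitive(Aead.class);

    byte[] aad = "channel-id".getBytes(StandardCharsets.UTF_8);
    byte[] ciphertext = aead.encrypt("hello".getBytes(StandardCharsets.UTF_8), aad);
    byte[] plaintext = aead.decrypt(ciphertext, aad);  // round-trips to "hello"
    System.out.println(new String(plaintext, StandardCharsets.UTF_8));
  }
}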

common/network-common/src/main/java/org/apache/spark/network/client/StreamCallbackWithID.java

Lines changed: 10 additions & 0 deletions
@@ -17,6 +17,16 @@

 package org.apache.spark.network.client;

+import java.nio.ByteBuffer;
+
 public interface StreamCallbackWithID extends StreamCallback {
   String getID();
+
+  /**
+   * Response to return to client upon the completion of a stream. Currently only invoked in
+   * {@link org.apache.spark.network.server.TransportRequestHandler#processStreamUpload}
+   */
+  default ByteBuffer getCompletionResponse() {
+    return ByteBuffer.allocate(0);
+  }
 }
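
The new default method lets the handler of a stream upload return a payload to the uploader once the stream completes; implementations that don't care inherit the empty-buffer default. A hypothetical implementation sketch (not part of this commit) that overrides it, assuming the onData/onComplete/onFailure methods declared by the existing StreamCallback interface:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import org.apache.spark.network.client.StreamCallbackWithID;

// Hypothetical callback: buffers an uploaded stream in memory and acknowledges
// completion with a small payload instead of the default empty buffer.
public class CountingStreamCallback implements StreamCallbackWithID {
  private final String streamId;
  private final ByteArrayOutputStream received = new ByteArrayOutputStream();

  public CountingStreamCallback(String streamId) {
    this.streamId = streamId;
  }

  @Override
  public String getID() {
    return streamId;
  }

  @Override
  public void onData(String streamId, ByteBuffer buf) throws IOException {
    byte[] chunk = new byte[buf.remaining()];
    buf.get(chunk);
    received.write(chunk);
  }

  @Override
  public void onComplete(String streamId) throws IOException {
    // Normally: hand the buffered bytes off to whatever owns the stream.
  }

  @Override
  public void onFailure(String streamId, Throwable cause) throws IOException {
    received.reset();
  }

  @Override
  public ByteBuffer getCompletionResponse() {
    // Sent back to the uploader by TransportRequestHandler#processStreamUpload.
    String ack = "received " + received.size() + " bytes";
    return ByteBuffer.wrap(ack.getBytes(StandardCharsets.UTF_8));
  }
}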

common/network-common/src/main/java/org/apache/spark/network/crypto/AuthClientBootstrap.java

Lines changed: 3 additions & 3 deletions
@@ -105,15 +105,15 @@ private void doSparkAuth(TransportClient client, Channel channel)

     String secretKey = secretKeyHolder.getSecretKey(appId);
     try (AuthEngine engine = new AuthEngine(appId, secretKey, conf)) {
-      ClientChallenge challenge = engine.challenge();
+      AuthMessage challenge = engine.challenge();
       ByteBuf challengeData = Unpooled.buffer(challenge.encodedLength());
       challenge.encode(challengeData);

       ByteBuffer responseData =
         client.sendRpcSync(challengeData.nioBuffer(), conf.authRTTimeoutMs());
-      ServerResponse response = ServerResponse.decodeMessage(responseData);
+      AuthMessage response = AuthMessage.decodeMessage(responseData);

-      engine.validate(response);
+      engine.deriveSessionCipher(challenge, response);
       engine.sessionCipher().addToChannel(channel);
     }
   }
