Skip to content

Commit dd95fca

Browse files
committed
style check
1 parent a958920 commit dd95fca

9 files changed

Lines changed: 105 additions & 109 deletions

File tree

docs/security.md

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -729,8 +729,7 @@ so that non-local processes can authenticate. These delegation tokens in Kuberne
729729
shared by the Driver and its Executors. As such, there are three ways of submitting a Kerberos job:
730730

731731
In all cases you must define the environment variable: `HADOOP_CONF_DIR` or
732-
`spark.kubernetes.hadoop.configMapName` as well as either
733-
`spark.kubernetes.kerberos.krb5.path` or `spark.kubernetes.kerberos.krb5.configMapName`.
732+
`spark.kubernetes.hadoop.configMapName.`
734733

735734
It is also important to note that the KDC needs to be visible from inside the containers.
736735

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/HadoopConfExecutorFeatureStep.scala

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -23,14 +23,14 @@ import org.apache.spark.deploy.k8s.Constants._
2323
import org.apache.spark.deploy.k8s.features.hadooputils.HadoopBootstrapUtil
2424
import org.apache.spark.internal.Logging
2525

26-
/**
27-
* This step is responsible for bootstraping the container with ConfigMaps
28-
* containing Hadoop config files mounted as volumes and an ENV variable
29-
* pointed to the mounted file directory.
30-
*/
26+
/**
27+
* This step is responsible for bootstrapping the container with ConfigMaps
28+
* containing Hadoop config files mounted as volumes and an ENV variable
29+
* pointed to the mounted file directory.
30+
*/
3131
private[spark] class HadoopConfExecutorFeatureStep(
3232
kubernetesConf: KubernetesConf[KubernetesExecutorSpecificConf])
33-
extends KubernetesFeatureConfigStep with Logging {
33+
extends KubernetesFeatureConfigStep with Logging {
3434

3535
override def configurePod(pod: SparkPod): SparkPod = {
3636
val sparkConf = kubernetesConf.sparkConf

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/HadoopSparkUserExecutorFeatureStep.scala

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -24,17 +24,17 @@ import org.apache.spark.deploy.k8s.KubernetesExecutorSpecificConf
2424
import org.apache.spark.deploy.k8s.features.hadooputils.HadoopBootstrapUtil
2525
import org.apache.spark.internal.Logging
2626

27-
/**
28-
* This step is responsible for setting ENV_SPARK_USER when HADOOP_FILES are detected
29-
* however, this step would not be run if Kerberos is enabled, as Kerberos sets SPARK_USER
30-
*/
27+
/**
28+
* This step is responsible for setting ENV_SPARK_USER when HADOOP_FILES are detected
29+
* however, this step would not be run if Kerberos is enabled, as Kerberos sets SPARK_USER
30+
*/
3131
private[spark] class HadoopSparkUserExecutorFeatureStep(
3232
kubernetesConf: KubernetesConf[KubernetesExecutorSpecificConf])
33-
extends KubernetesFeatureConfigStep with Logging {
33+
extends KubernetesFeatureConfigStep with Logging {
3434

3535
override def configurePod(pod: SparkPod): SparkPod = {
3636
val sparkUserName = kubernetesConf.sparkConf.get(KERBEROS_SPARK_USER_NAME)
37-
HadoopBootstrapUtil.bootstrapSparkUserPod(sparkUserName, pod)
37+
HadoopBootstrapUtil.bootstrapSparkUserPod(sparkUserName, pod)
3838
}
3939

4040
override def getAdditionalPodSystemProperties(): Map[String, String] = Map.empty

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KerberosConfDriverFeatureStep.scala

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,13 +26,13 @@ import org.apache.spark.deploy.k8s.KubernetesDriverSpecificConf
2626
import org.apache.spark.deploy.k8s.features.hadooputils._
2727
import org.apache.spark.internal.Logging
2828

29-
/**
30-
* Runs the necessary Hadoop-based logic based on Kerberos configs and the presence of the
31-
* HADOOP_CONF_DIR. This runs various bootstrap methods defined in HadoopBootstrapUtil.
32-
*/
29+
/**
30+
* Runs the necessary Hadoop-based logic based on Kerberos configs and the presence of the
31+
* HADOOP_CONF_DIR. This runs various bootstrap methods defined in HadoopBootstrapUtil.
32+
*/
3333
private[spark] class KerberosConfDriverFeatureStep(
3434
kubernetesConf: KubernetesConf[KubernetesDriverSpecificConf])
35-
extends KubernetesFeatureConfigStep with Logging {
35+
extends KubernetesFeatureConfigStep with Logging {
3636

3737
require(kubernetesConf.hadoopConfSpec.isDefined,
3838
"Ensure that HADOOP_CONF_DIR is defined either via env or a pre-defined ConfigMap")
@@ -143,22 +143,19 @@ private[spark] class KerberosConfDriverFeatureStep(
143143
}
144144

145145
override def getAdditionalKubernetesResources(): Seq[HasMetadata] = {
146-
// HADOOP_CONF_DIR ConfigMap
147146
val hadoopConfConfigMap = for {
148147
hName <- newHadoopConfigMapName
149148
hFiles <- hadoopConfigurationFiles
150149
} yield {
151150
HadoopBootstrapUtil.buildHadoopConfigMap(hName, hFiles)
152151
}
153152

154-
// krb5 ConfigMap
155153
val krb5ConfigMap = krb5File.map { fileLocation =>
156154
HadoopBootstrapUtil.buildkrb5ConfigMap(
157155
kubernetesConf.krbConfigMapName,
158156
fileLocation)
159157
}
160158

161-
// Kerberos DT Secret
162159
val kerberosDTSecret = kerberosConfSpec.flatMap(_.dtSecret)
163160

164161
hadoopConfConfigMap.toSeq ++

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KerberosConfExecutorFeatureStep.scala

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,12 @@ import org.apache.spark.deploy.k8s.KubernetesExecutorSpecificConf
2424
import org.apache.spark.deploy.k8s.features.hadooputils.HadoopBootstrapUtil
2525
import org.apache.spark.internal.Logging
2626

27-
/**
28-
* This step is responsible for mounting the DT secret for the executors
29-
*/
27+
/**
28+
* This step is responsible for mounting the DT secret for the executors
29+
*/
3030
private[spark] class KerberosConfExecutorFeatureStep(
3131
kubernetesConf: KubernetesConf[KubernetesExecutorSpecificConf])
32-
extends KubernetesFeatureConfigStep with Logging {
32+
extends KubernetesFeatureConfigStep with Logging {
3333

3434
private val sparkConf = kubernetesConf.sparkConf
3535
private val maybeKrb5CMap = sparkConf.getOption(KRB5_CONFIG_MAP_NAME)

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/hadooputils/HadoopBootstrapUtil.scala

Lines changed: 57 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -30,18 +30,18 @@ import org.apache.spark.internal.Logging
3030

3131
private[spark] object HadoopBootstrapUtil extends Logging {
3232

33-
/**
34-
* Mounting the DT secret for both the Driver and the executors
35-
*
36-
* @param dtSecretName Name of the secret that stores the Delegation Token
37-
* @param dtSecretItemKey Name of the Item Key storing the Delegation Token
38-
* @param userName Name of the SparkUser to set SPARK_USER
39-
* @param fileLocation Optional Location of the krb5 file
40-
* @param newKrb5ConfName Optional location of the ConfigMap for Krb5
41-
* @param existingKrb5ConfName Optional name of ConfigMap for Krb5
42-
* @param pod Input pod to be appended to
43-
* @return a modified SparkPod
44-
*/
33+
/**
34+
* Mounting the DT secret for both the Driver and the executors
35+
*
36+
* @param dtSecretName Name of the secret that stores the Delegation Token
37+
* @param dtSecretItemKey Name of the Item Key storing the Delegation Token
38+
* @param userName Name of the SparkUser to set SPARK_USER
39+
* @param fileLocation Optional Location of the krb5 file
40+
* @param newKrb5ConfName Optional location of the ConfigMap for Krb5
41+
* @param existingKrb5ConfName Optional name of ConfigMap for Krb5
42+
* @param pod Input pod to be appended to
43+
* @return a modified SparkPod
44+
*/
4545
def bootstrapKerberosPod(
4646
dtSecretName: String,
4747
dtSecretItemKey: String,
@@ -138,29 +138,29 @@ private[spark] object HadoopBootstrapUtil extends Logging {
138138
SparkPod(kerberizedPod, kerberizedContainer)
139139
}
140140

141-
/**
142-
* setting ENV_SPARK_USER when HADOOP_FILES are detected
143-
*
144-
* @param sparkUserName Name of the SPARK_USER
145-
* @param pod Input pod to be appended to
146-
* @return a modified SparkPod
147-
*/
141+
/**
142+
* setting ENV_SPARK_USER when HADOOP_FILES are detected
143+
*
144+
* @param sparkUserName Name of the SPARK_USER
145+
* @param pod Input pod to be appended to
146+
* @return a modified SparkPod
147+
*/
148148
def bootstrapSparkUserPod(sparkUserName: String, pod: SparkPod): SparkPod = {
149-
val envModifiedContainer = new ContainerBuilder(pod.container)
150-
.addNewEnv()
151-
.withName(ENV_SPARK_USER)
152-
.withValue(sparkUserName)
153-
.endEnv()
149+
val envModifiedContainer = new ContainerBuilder(pod.container)
150+
.addNewEnv()
151+
.withName(ENV_SPARK_USER)
152+
.withValue(sparkUserName)
153+
.endEnv()
154154
.build()
155-
SparkPod(pod.pod, envModifiedContainer)
155+
SparkPod(pod.pod, envModifiedContainer)
156156
}
157157

158-
/**
159-
* Grabbing files in the HADOOP_CONF_DIR
160-
*
161-
* @param path location of HADOOP_CONF_DIR
162-
* @return a list of File object
163-
*/
158+
/**
159+
* Grabbing files in the HADOOP_CONF_DIR
160+
*
161+
* @param path location of HADOOP_CONF_DIR
162+
* @return a list of File object
163+
*/
164164
def getHadoopConfFiles(path: String): Seq[File] = {
165165
val dir = new File(path)
166166
if (dir.isDirectory) {
@@ -170,16 +170,16 @@ private[spark] object HadoopBootstrapUtil extends Logging {
170170
}
171171
}
172172

173-
/**
174-
* Bootstraping the container with ConfigMaps that store
175-
* Hadoop configuration files
176-
*
177-
* @param hadoopConfDir directory location of HADOOP_CONF_DIR env
178-
* @param newHadoopConfigMapName name of the new configMap for HADOOP_CONF_DIR
179-
* @param existingHadoopConfigMapName name of the pre-defined configMap for HADOOP_CONF_DIR
180-
* @param pod Input pod to be appended to
181-
* @return a modified SparkPod
182-
*/
173+
/**
174+
* Bootstrapping the container with ConfigMaps that store
175+
* Hadoop configuration files
176+
*
177+
* @param hadoopConfDir directory location of HADOOP_CONF_DIR env
178+
* @param newHadoopConfigMapName name of the new configMap for HADOOP_CONF_DIR
179+
* @param existingHadoopConfigMapName name of the pre-defined configMap for HADOOP_CONF_DIR
180+
* @param pod Input pod to be appended to
181+
* @return a modified SparkPod
182+
*/
183183
def bootstrapHadoopConfDir(
184184
hadoopConfDir: Option[String],
185185
newHadoopConfigMapName: Option[String],
@@ -237,14 +237,14 @@ private[spark] object HadoopBootstrapUtil extends Logging {
237237
SparkPod(hadoopSupportedPod, hadoopSupportedContainer)
238238
}
239239

240-
/**
241-
* Builds ConfigMap given the file location of the
242-
* krb5.conf file
243-
*
244-
* @param configMapName name of configMap for krb5
245-
* @param fileLocation location of krb5 file
246-
* @return a ConfigMap
247-
*/
240+
/**
241+
* Builds ConfigMap given the file location of the
242+
* krb5.conf file
243+
*
244+
* @param configMapName name of configMap for krb5
245+
* @param fileLocation location of krb5 file
246+
* @return a ConfigMap
247+
*/
248248
def buildkrb5ConfigMap(
249249
configMapName: String,
250250
fileLocation: String): ConfigMap = {
@@ -258,14 +258,14 @@ private[spark] object HadoopBootstrapUtil extends Logging {
258258
.build()
259259
}
260260

261-
/**
262-
* Builds ConfigMap given the ConfigMap name
263-
* and a list of Hadoop Conf files
264-
*
265-
* @param hadoopConfigMapName name of hadoopConfigMap
266-
* @param hadoopConfFiles list of hadoopFiles
267-
* @return a ConfigMap
268-
*/
261+
/**
262+
* Builds ConfigMap given the ConfigMap name
263+
* and a list of Hadoop Conf files
264+
*
265+
* @param hadoopConfigMapName name of hadoopConfigMap
266+
* @param hadoopConfFiles list of hadoopFiles
267+
* @return a ConfigMap
268+
*/
269269
def buildHadoopConfigMap(
270270
hadoopConfigMapName: String,
271271
hadoopConfFiles: Seq[File]): ConfigMap = {

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/hadooputils/HadoopKerberosLogin.scala

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -24,19 +24,19 @@ import org.apache.spark.deploy.SparkHadoopUtil
2424
import org.apache.spark.deploy.k8s.Constants._
2525
import org.apache.spark.deploy.k8s.security.KubernetesHadoopDelegationTokenManager
2626

27-
/**
28-
* This logic does all the heavy lifting for Delegation Token creation. This step
29-
* assumes that the job user has either specified a principal and keytab or ran
30-
* $kinit before running spark-submit. By running UGI.getCurrentUser we are able
31-
* to obtain the current user, either signed in via $kinit or keytab. With the
32-
* Job User principal you then retrieve the delegation token from the NameNode
33-
* and store values in DelegationToken. Lastly, the class puts the data into
34-
* a secret. All this is defined in a KerberosConfigSpec.
35-
*/
27+
/**
28+
* This logic does all the heavy lifting for Delegation Token creation. This step
29+
* assumes that the job user has either specified a principal and keytab or ran
30+
* $kinit before running spark-submit. By running UGI.getCurrentUser we are able
31+
* to obtain the current user, either signed in via $kinit or keytab. With the
32+
* Job User principal you then retrieve the delegation token from the NameNode
33+
* and store values in DelegationToken. Lastly, the class puts the data into
34+
* a secret. All this is defined in a KerberosConfigSpec.
35+
*/
3636
private[spark] object HadoopKerberosLogin {
3737
def buildSpec(
3838
submissionSparkConf: SparkConf,
39-
kubernetesResourceNamePrefix : String,
39+
kubernetesResourceNamePrefix: String,
4040
tokenManager: KubernetesHadoopDelegationTokenManager): KerberosConfigSpec = {
4141
val hadoopConf = SparkHadoopUtil.get.newConfiguration(submissionSparkConf)
4242
// The JobUserUGI will be taken from the Local Ticket Cache or via keytab+principal

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/hadooputils/KerberosConfigSpec.scala

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -18,14 +18,14 @@ package org.apache.spark.deploy.k8s.features.hadooputils
1818

1919
import io.fabric8.kubernetes.api.model.Secret
2020

21-
/**
22-
* Represents a given configuration of the Kerberos Configuration logic
23-
* <p>
24-
* - The secret containing a DT, either previously specified or built on the fly
25-
* - The name of the secret where the DT will be stored
26-
* - The data item-key on the secret which correlates with where the current DT data is stored
27-
* - The Job User's username
28-
*/
21+
/**
22+
* Represents a given configuration of the Kerberos Configuration logic
23+
* <p>
24+
* - The secret containing a DT, either previously specified or built on the fly
25+
* - The name of the secret where the DT will be stored
26+
* - The data item-key on the secret which correlates with where the current DT data is stored
27+
* - The Job User's username
28+
*/
2929
private[spark] case class KerberosConfigSpec(
3030
dtSecret: Option[Secret],
3131
dtSecretName: String,

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/security/KubernetesHadoopDelegationTokenManager.scala

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -26,19 +26,19 @@ import org.apache.spark.deploy.SparkHadoopUtil
2626
import org.apache.spark.deploy.security.HadoopDelegationTokenManager
2727
import org.apache.spark.internal.Logging
2828

29-
/**
30-
* The KubernetesHadoopDelegationTokenManager fetches Hadoop delegation tokens
31-
* on the behalf of the Kubernetes submission client. The new credentials
32-
* (called Tokens when they are serialized) are stored in Secrets accessible
33-
* to the driver and executors, when new Tokens are received they overwrite the current Secrets.
34-
*/
29+
/**
30+
* The KubernetesHadoopDelegationTokenManager fetches Hadoop delegation tokens
31+
* on the behalf of the Kubernetes submission client. The new credentials
32+
* (called Tokens when they are serialized) are stored in Secrets accessible
33+
* to the driver and executors, when new Tokens are received they overwrite the current Secrets.
34+
*/
3535
private[spark] class KubernetesHadoopDelegationTokenManager(
3636
tokenManager: HadoopDelegationTokenManager) extends Logging {
3737

3838
// HadoopUGI Util methods
3939
def getCurrentUser: UserGroupInformation = UserGroupInformation.getCurrentUser
40-
def getShortUserName : String = getCurrentUser.getShortUserName
41-
def getFileSystem(hadoopConf: Configuration) : FileSystem = FileSystem.get(hadoopConf)
40+
def getShortUserName: String = getCurrentUser.getShortUserName
41+
def getFileSystem(hadoopConf: Configuration): FileSystem = FileSystem.get(hadoopConf)
4242
def isSecurityEnabled: Boolean = UserGroupInformation.isSecurityEnabled
4343
def loginUserFromKeytabAndReturnUGI(principal: String, keytab: String): UserGroupInformation =
4444
UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab)

0 commit comments

Comments
 (0)