
Commit 6cf4ed7

Address comments

1 parent: 4b32134

File tree

4 files changed (+7, -15 lines)

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/ConfigurationUtils.scala
Lines changed: 0 additions & 9 deletions

@@ -34,15 +34,6 @@ private[spark] object ConfigurationUtils {
     fromPrefix.toMap
   }
 
-  def requireBothOrNeitherDefined(
-      opt1: Option[_],
-      opt2: Option[_],
-      errMessageWhenFirstIsMissing: String,
-      errMessageWhenSecondIsMissing: String): Unit = {
-    requireSecondIfFirstIsDefined(opt1, opt2, errMessageWhenSecondIsMissing)
-    requireSecondIfFirstIsDefined(opt2, opt1, errMessageWhenFirstIsMissing)
-  }
-
   def requireSecondIfFirstIsDefined(
       opt1: Option[_],
       opt2: Option[_],
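
For context, the removed helper only composed two calls to the surviving `requireSecondIfFirstIsDefined`, so a both-or-neither constraint can still be written as two symmetric calls. A minimal sketch, assuming a plausible body for the surviving helper (the diff shows only its signature) and a hypothetical key/cert call site:

```scala
object ConfigurationUtilsSketch {
  // Assumed body for the surviving helper: fail if opt1 is set but opt2 is not.
  def requireSecondIfFirstIsDefined(
      opt1: Option[_],
      opt2: Option[_],
      errMessageWhenSecondIsMissing: String): Unit = {
    opt1.foreach { _ =>
      require(opt2.isDefined, errMessageWhenSecondIsMissing)
    }
  }

  def main(args: Array[String]): Unit = {
    // Hypothetical call site: "both or neither" is two symmetric calls,
    // which is all the removed requireBothOrNeitherDefined bundled together.
    val keyFile: Option[String] = Some("/etc/tls/key.pem")
    val certFile: Option[String] = Some("/etc/tls/cert.pem")
    requireSecondIfFirstIsDefined(
      keyFile, certFile, "TLS key file was set but the cert file was not.")
    requireSecondIfFirstIsDefined(
      certFile, keyFile, "TLS cert file was set but the key file was not.")
    // With certFile = None, the first call would throw IllegalArgumentException.
  }
}
```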

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodFactory.scala
Lines changed: 5 additions & 2 deletions

@@ -45,8 +45,8 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
 
   import ExecutorPodFactoryImpl._
 
-  private val executorExtraClasspath = sparkConf.get(
-    org.apache.spark.internal.config.EXECUTOR_CLASS_PATH)
+  private val executorExtraClasspath =
+    sparkConf.get(org.apache.spark.internal.config.EXECUTOR_CLASS_PATH)
 
   private val executorLabels = ConfigurationUtils.parsePrefixedKeyValuePairs(
     sparkConf,
@@ -59,6 +59,9 @@ private[spark] class ExecutorPodFactoryImpl(sparkConf: SparkConf)
     !executorLabels.contains(SPARK_EXECUTOR_ID_LABEL),
     s"Custom executor labels cannot contain $SPARK_EXECUTOR_ID_LABEL as it is reserved for" +
       s" Spark.")
+  require(
+    !executorLabels.contains(SPARK_ROLE_LABEL),
+    s"Custom executor labels cannot contain $SPARK_ROLE_LABEL as it is reserved for Spark.")
 
   private val executorAnnotations =
     ConfigurationUtils.parsePrefixedKeyValuePairs(
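
The new `require` mirrors the existing `SPARK_EXECUTOR_ID_LABEL` guard, so user-supplied executor labels cannot collide with the role label Spark manages itself. A self-contained sketch of the check; the literal label keys below are assumptions, since the diff references the constants by name only:

```scala
object ReservedLabelCheck {
  // Assumed key values; the diff uses these constants by name only.
  val SPARK_EXECUTOR_ID_LABEL = "spark-exec-id"
  val SPARK_ROLE_LABEL = "spark-role"

  def validateExecutorLabels(executorLabels: Map[String, String]): Unit = {
    require(
      !executorLabels.contains(SPARK_EXECUTOR_ID_LABEL),
      s"Custom executor labels cannot contain $SPARK_EXECUTOR_ID_LABEL as it is reserved for Spark.")
    require(
      !executorLabels.contains(SPARK_ROLE_LABEL),
      s"Custom executor labels cannot contain $SPARK_ROLE_LABEL as it is reserved for Spark.")
  }

  def main(args: Array[String]): Unit = {
    validateExecutorLabels(Map("team" -> "data-eng")) // passes
    // validateExecutorLabels(Map(SPARK_ROLE_LABEL -> "executor")) // would throw IllegalArgumentException
  }
}
```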

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterSchedulerBackend.scala
Lines changed: 2 additions & 2 deletions

@@ -87,12 +87,12 @@ private[spark] class KubernetesClusterSchedulerBackend(
   private val initialExecutors = SchedulerBackendUtils.getInitialTargetExecutorNumber(conf)
 
   private val podAllocationInterval = conf.get(KUBERNETES_ALLOCATION_BATCH_DELAY)
-  require(podAllocationInterval > 0, s"Allocation batch delay " +
+  require(podAllocationInterval > 0, "Allocation batch delay " +
     s"${KUBERNETES_ALLOCATION_BATCH_DELAY} " +
     s"is ${podAllocationInterval}, should be a positive integer")
 
   private val podAllocationSize = conf.get(KUBERNETES_ALLOCATION_BATCH_SIZE)
-  require(podAllocationSize > 0, s"Allocation batch size " +
+  require(podAllocationSize > 0, "Allocation batch size " +
     s"${KUBERNETES_ALLOCATION_BATCH_SIZE} " +
     s"is ${podAllocationSize}, should be a positive integer")
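
Both edits in this file are the same cleanup: the `s` interpolator is dropped from literal fragments that contain no `${...}` substitution and kept only where interpolation happens. A small standalone illustration (the config key string here is assumed):

```scala
object InterpolatorCleanup {
  // Assumed key name, for illustration only.
  val KUBERNETES_ALLOCATION_BATCH_DELAY = "spark.kubernetes.allocation.batch.delay"

  def checkInterval(podAllocationInterval: Long): Unit = {
    // Plain quotes for the fragment with no substitution; s"..." only where
    // ${...} is actually interpolated.
    require(podAllocationInterval > 0, "Allocation batch delay " +
      s"${KUBERNETES_ALLOCATION_BATCH_DELAY} " +
      s"is ${podAllocationInterval}, should be a positive integer")
  }
}
```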

resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala
Lines changed: 0 additions & 2 deletions

@@ -133,8 +133,6 @@ object YarnSparkHadoopUtil {
 
   val ANY_HOST = "*"
 
-  val DEFAULT_NUMBER_EXECUTORS = 2
-
   // All RM requests are issued with same priority : we do not (yet) have any distinction between
   // request types (like map/reduce in hadoop for example)
   val RM_REQUEST_PRIORITY = Priority.newInstance(1)
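
Dropping `DEFAULT_NUMBER_EXECUTORS` here lines up with the `SchedulerBackendUtils.getInitialTargetExecutorNumber(conf)` call in the scheduler backend above, which suggests the default now lives in a shared helper rather than in the YARN utility. A simplified sketch of what such a helper could look like; the body is an assumption (the real logic would also need to handle dynamic allocation):

```scala
import org.apache.spark.SparkConf

object SchedulerBackendUtilsSketch {
  val DEFAULT_NUMBER_EXECUTORS = 2

  // Simplified: use an explicit spark.executor.instances setting when present,
  // otherwise fall back to the shared default.
  def getInitialTargetExecutorNumber(
      conf: SparkConf,
      numExecutors: Int = DEFAULT_NUMBER_EXECUTORS): Int = {
    conf.getInt("spark.executor.instances", numExecutors)
  }
}
```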
