Skip to content
Closed
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
package org.apache.spark.sql.execution.datasources.v2

import org.apache.spark.sql.catalyst.SQLConfHelper
import org.apache.spark.sql.catalyst.expressions.V2ExpressionUtils
import org.apache.spark.sql.catalyst.expressions.{AttributeSet, V2ExpressionUtils}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.read.{SupportsReportOrdering, SupportsReportPartitioning}
Expand All @@ -41,8 +41,19 @@ object V2ScanPartitioningAndOrdering extends Rule[LogicalPlan] with SQLConfHelpe
private def partitioning(plan: LogicalPlan) = plan.transformDown {
case d @ DataSourceV2ScanRelation(relation, scan: SupportsReportPartitioning, _, None, _) =>
val catalystPartitioning = scan.outputPartitioning() match {
case kgp: KeyGroupedPartitioning => sequenceToOption(kgp.keys().map(
V2ExpressionUtils.toCatalystOpt(_, relation, relation.funCatalog)))
case kgp: KeyGroupedPartitioning =>
val partitioning = sequenceToOption(
kgp.keys().map(V2ExpressionUtils.toCatalystOpt(_, relation, relation.funCatalog)))
if (partitioning.isEmpty) {
None
} else {
val ref = AttributeSet.fromAttributeSets(partitioning.get.map(_.references))
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How about using `partitioning.get.forall(p => p.references.subsetOf(...))` instead?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sounds good! Changed.

if (ref.subsetOf(AttributeSet(d.output))) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
if (ref.subsetOf(AttributeSet(d.output))) {
if (ref.subsetOf(d.outputSet)) {

partitioning
} else {
None
}
}
case _: UnknownPartitioning => None
case p => throw new IllegalArgumentException("Unsupported data source V2 partitioning " +
"type: " + p.getClass.getSimpleName)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -216,4 +216,20 @@ class MetadataColumnSuite extends DatasourceV2SQLBase {
.withColumn("right_all", struct($"right.*"))
checkAnswer(dfQuery, Row(1, "a", "b", Row(1, "a"), Row(1, "b")))
}

// SPARK-40429: a scan's reported KeyGroupedPartitioning should only be kept when every
// column it references is present in the scan's output (see the corresponding change in
// V2ScanPartitioningAndOrdering, which drops the partitioning otherwise). This test
// exercises both sides: a projection that excludes the partition key and one that keeps it.
test("SPARK-40429: Only set KeyGroupedPartitioning when the referenced column is in the output") {
withTable(tbl) {
// A table partitioned by `id`, with one row per distinct partition value.
sql(s"CREATE TABLE $tbl (id bigint, data string) PARTITIONED BY (id)")
sql(s"INSERT INTO $tbl VALUES (1, 'a'), (2, 'b'), (3, 'c')")
// Only metadata columns are selected: the partition key `id` is NOT in the output,
// so the reported key-grouped partitioning cannot be applied to this plan.
// NOTE(review): result order here presumably reflects partition enumeration order of
// the in-memory test catalog — confirm checkAnswer is order-insensitive for this suite.
checkAnswer(
spark.table(tbl).select("index", "_partition"),
Seq(Row(0, "3"), Row(0, "2"), Row(0, "1"))
)

// The partition key `id` is included alongside the metadata columns, so the
// partitioning's referenced column is part of the output.
checkAnswer(
spark.table(tbl).select("id", "index", "_partition"),
Seq(Row(3, 0, "3"), Row(2, 0, "2"), Row(1, 0, "1"))
)
}
}
}