File tree Expand file tree Collapse file tree
main/scala/org/apache/spark/sql
test/scala/org/apache/spark/sql Expand file tree Collapse file tree Original file line number Diff line number Diff line change @@ -1000,11 +1000,11 @@ class Dataset[T] private[sql](
10001000 // By the time we get here, since we have already run analysis, all attributes should've been
10011001 // resolved and become AttributeReference.
10021002 val cond = plan.condition.map { _.transform {
1003- case catalyst.expressions.EqualTo(a: AttributeReference, b: AttributeReference)
1003+ case e @ catalyst.expressions.BinaryComparison(a: AttributeReference, b: AttributeReference)
10041004 if a.sameRef(b) =>
1005- catalyst.expressions.EqualTo(
1005+ e.withNewChildren(Seq(
10061006 withPlan(plan.left).resolve(a.name),
1007- withPlan(plan.right).resolve(b.name))
1007+ withPlan(plan.right).resolve(b.name)))
10081008 }}
10091009
10101010 withPlan {
Original file line number Diff line number Diff line change @@ -287,4 +287,16 @@ class DataFrameJoinSuite extends QueryTest with SharedSQLContext {
287287 dfOne.join(dfTwo, $"a" === $"b", "left").queryExecution.optimizedPlan
288288 }
289289 }
290+
291+ test("SPARK-24385: Resolve ambiguity in self-joins with operators different from EqualsTo") {
292+ withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "false") {
293+ val df = spark.range(10)
294+ // these should not throw any exception
295+ df.join(df, df("id") >= df("id")).queryExecution.optimizedPlan
296+ df.join(df, df("id") <=> df("id")).queryExecution.optimizedPlan
297+ df.join(df, df("id") <= df("id")).queryExecution.optimizedPlan
298+ df.join(df, df("id") > df("id")).queryExecution.optimizedPlan
299+ df.join(df, df("id") < df("id")).queryExecution.optimizedPlan
300+ }
301+ }
290302}
You can’t perform that action at this time.
0 commit comments