@@ -20,7 +20,10 @@ package org.apache.spark.sql.catalyst.analysis
 import scala.util.control.NonFatal

 import org.apache.spark.internal.Logging
-import org.apache.spark.sql.catalyst.expressions.{Add, AttributeReference, AttributeSet, Cast, CheckOverflow, Expression, ExpressionSet, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, Literal, Multiply, PreciseTimestampConversion, PredicateHelper, Subtract, TimeAdd, TimeSub, UnaryMinus}
+import org.apache.spark.sql.catalyst.expressions.{Add, AttributeReference, AttributeSet, Cast, CheckOverflow, Expression}
+import org.apache.spark.sql.catalyst.expressions.{ExpressionSet, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual}
Member: nit: Do we need these imports?

Member Author: They are needed.

Contributor: Maybe just import org.apache.spark.sql.catalyst.expressions._?

Member (@kiszk, Dec 18, 2017): Sorry, this splits one long line into two. @cloud-fan's suggestion looks good to keep them simple.

+import org.apache.spark.sql.catalyst.expressions.{Literal, Multiply, PreciseTimestampConversion, PredicateHelper}
+import org.apache.spark.sql.catalyst.expressions.{PromotePrecision, Subtract, TimeAdd, TimeSub, UnaryMinus}
 import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
 import org.apache.spark.sql.catalyst.plans.logical.{EventTimeWatermark, LogicalPlan}
 import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark._
@@ -238,6 +241,8 @@ object StreamingJoinHelper extends PredicateHelper with Logging {
           collect(child, !negate)
         case CheckOverflow(child, _) =>
           collect(child, negate)
+        case PromotePrecision(child) =>
+          collect(child, negate)
         case Cast(child, dataType, _) =>
           dataType match {
             case _: NumericType | _: TimestampType => collect(child, negate)
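For readers without the surrounding file: collect walks a watermark-constraint expression, flipping the accumulated sign under negating nodes and looking straight through wrappers that do not change the value, and the new case extends that look-through to PromotePrecision. Below is a minimal standalone sketch of that pattern; the mini-AST is hypothetical, not Spark's Catalyst classes.

// A self-contained sketch (hypothetical Expr hierarchy) of the look-through
// pattern used by collect() above: sign-flipping nodes recurse with the flag
// negated, while value-preserving wrappers are passed through unchanged.
object CollectSketch {
  sealed trait Expr
  case class Attr(name: String) extends Expr
  case class Neg(child: Expr) extends Expr               // models UnaryMinus
  case class CheckOverflow(child: Expr) extends Expr     // value-preserving wrapper
  case class PromotePrecision(child: Expr) extends Expr  // value-preserving wrapper

  // Returns each attribute with a flag telling whether it appears negated.
  def collect(expr: Expr, negate: Boolean): Seq[(String, Boolean)] = expr match {
    case Attr(n)                 => Seq((n, negate))
    case Neg(child)              => collect(child, !negate)
    case CheckOverflow(child)    => collect(child, negate)
    case PromotePrecision(child) => collect(child, negate) // the case this PR adds
  }

  def main(args: Array[String]): Unit = {
    // Without the PromotePrecision case, this sketch would throw a MatchError.
    println(collect(PromotePrecision(Neg(Attr("eventTime"))), negate = false))
    // prints: List((eventTime,true))
  }
}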
@@ -70,10 +70,12 @@ case class MakeDecimal(child: Expression, precision: Int, scale: Int) extends UnaryExpression
 case class PromotePrecision(child: Expression) extends UnaryExpression {
   override def dataType: DataType = child.dataType
   override def eval(input: InternalRow): Any = child.eval(input)
   /** Just a simple pass-through for code generation. */
   override def genCode(ctx: CodegenContext): ExprCode = child.genCode(ctx)
   override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = ev.copy("")
   override def prettyName: String = "promote_precision"
   override def sql: String = child.sql
+  override lazy val canonicalized: Expression = child.canonicalized
 }
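The added canonicalized override is what lets the planner treat PromotePrecision(e) and e as semantically equal without rewriting the plan. A rough illustration of the effect, assuming spark-catalyst is on the classpath (a sketch, not code from this PR):

import org.apache.spark.sql.catalyst.expressions.{AttributeReference, PromotePrecision}
import org.apache.spark.sql.types.DecimalType

object CanonicalizedSketch {
  def main(args: Array[String]): Unit = {
    val a = AttributeReference("a", DecimalType(10, 2))()
    // semanticEquals compares canonicalized forms, so the wrapper no longer
    // makes the two expressions look different.
    println(PromotePrecision(a).semanticEquals(a)) // true with this change
  }
}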

@@ -614,7 +614,6 @@ object SimplifyCasts extends Rule[LogicalPlan] {
 object RemoveDispensableExpressions extends Rule[LogicalPlan] {
   def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
     case UnaryPositive(child) => child
-    case PromotePrecision(child) => child
   }
 }
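With the PromotePrecision case removed here, the optimizer no longer strips the wrapper from plans; semantic comparisons go through the canonicalized override above instead, and consumers such as StreamingJoinHelper look through it explicitly. A quick way to observe the rule's new behavior, again assuming spark-catalyst on the classpath (illustrative sketch only):

import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, PromotePrecision, UnaryPositive}
import org.apache.spark.sql.catalyst.optimizer.RemoveDispensableExpressions
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, Project}
import org.apache.spark.sql.types.DecimalType

object RemoveDispensableSketch {
  def main(args: Array[String]): Unit = {
    val a = AttributeReference("a", DecimalType(10, 2))()
    val plan = Project(Seq(Alias(UnaryPositive(PromotePrecision(a)), "x")()), LocalRelation(a))
    // UnaryPositive is still stripped as dispensable, but PromotePrecision
    // now survives optimization.
    println(RemoveDispensableExpressions(plan))
  }
}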
