File tree Expand file tree Collapse file tree
catalyst/src/main/scala/org/apache/spark/sql/internal
core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet Expand file tree Collapse file tree Original file line number Diff line number Diff line change @@ -695,8 +695,8 @@ object SQLConf {
695695 val PARQUET_FILTER_PUSHDOWN_INFILTERTHRESHOLD =
696696 buildConf(" spark.sql.parquet.pushdown.inFilterThreshold" )
697697 .doc(" The maximum number of values to filter push-down optimization for IN predicate. " +
698- " Large threshold won't necessarily provide much better performance. " +
699- " The experiment argued that 300 is the limit threshold. " +
698+ " If the number of values exceeds this threshold, Spark will instead push down " +
699+ " a range filter bounded by the minimum and maximum of those values. " +
700700 " By setting this value to 0 this feature can be disabled. " +
701701 s " This configuration only has an effect when ' ${PARQUET_FILTER_PUSHDOWN_ENABLED .key}' is " +
702702 " enabled." )
Original file line number Diff line number Diff line change @@ -599,7 +599,8 @@ class ParquetFilters(
599599 createFilterHelper(pred, canPartialPushDownConjuncts = false )
600600 .map(FilterApi .not)
601601
602- case sources.In (name, values) if values.nonEmpty && canMakeFilterOn(name, values.head) =>
602+ case sources.In (name, values) if pushDownInFilterThreshold > 0 &&
603+ values.nonEmpty && canMakeFilterOn(name, values.head) =>
603604 if (values.length <= pushDownInFilterThreshold) {
604605 values.flatMap { v =>
605606 makeEq.lift(nameToParquetField(name).fieldType)
You can’t perform that action at this time.
0 commit comments