@@ -78,8 +78,8 @@ public Filter[] pushFilters(Filter[] filters) {
         for (Filter filter : filters) {
             OpticFilter opticFilter = FilterFactory.toPlanFilter(filter);
             if (opticFilter != null) {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Pushing down filter: {}", filter);
+                if (logger.isInfoEnabled()) {
+                    logger.info("Pushing down filter: {}", filter);
                 }
                 opticFilters.add(opticFilter);
                 this.pushedFilters.add(filter);
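For context, a minimal sketch of the Spark program side that exercises pushFilters. The format name and column are placeholders, not taken from this repository; an equality predicate like the one below arrives as an org.apache.spark.sql.sources.EqualTo filter, and any filter that FilterFactory.toPlanFilter cannot convert comes back null and is left for Spark to evaluate itself.

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;
    import static org.apache.spark.sql.functions.col;

    SparkSession session = SparkSession.builder().master("local[*]").getOrCreate();
    Dataset<Row> rows = session.read()
        .format("yourConnector") // hypothetical data source name
        .load()
        .filter(col("lastName").equalTo("Smith")); // reaches pushFilters as EqualTo
    rows.show(); // pushdown happens during query planning, once an action runs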
@@ -108,8 +108,8 @@ public boolean pushLimit(int limit) {
         if (readContext.planAnalysisFoundNoRows()) {
             return false;
         }
-        if (logger.isDebugEnabled()) {
-            logger.debug("Pushing down limit: {}", limit);
+        if (logger.isInfoEnabled()) {
+            logger.info("Pushing down limit: {}", limit);
         }
         readContext.pushDownLimit(limit);
         return true;
@@ -123,8 +123,8 @@ public boolean pushTopN(SortOrder[] orders, int limit) {
         // This will be invoked when the user calls both orderBy and limit in their Spark program. If the user only
         // calls limit, then only pushLimit is called and this will not be called. If the user only calls orderBy and
         // not limit, then neither this nor pushLimit will be called.
-        if (logger.isDebugEnabled()) {
-            logger.debug("Pushing down topN: {}; limit: {}", Arrays.asList(orders), limit);
+        if (logger.isInfoEnabled()) {
+            logger.info("Pushing down topN: {}; limit: {}", Arrays.asList(orders), limit);
         }
         readContext.pushDownTopN(orders, limit);
         return true;
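A minimal sketch of the three triggering conditions described in the comment above, again with a placeholder format name:

    // orderBy plus limit: Spark plans a top-N, so pushTopN is called.
    session.read().format("yourConnector").load()
        .orderBy("lastName").limit(10).show();

    // limit alone: only pushLimit is called.
    session.read().format("yourConnector").load().limit(10).show();

    // orderBy alone: neither pushTopN nor pushLimit is called.
    session.read().format("yourConnector").load().orderBy("lastName").show();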
@@ -146,13 +146,13 @@ public boolean pushAggregation(Aggregation aggregation) {
         if (supportCompletePushDown(aggregation)) {
             if (aggregation.groupByExpressions().length > 0) {
                 Expression expr = aggregation.groupByExpressions()[0];
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Pushing down groupBy + count on: {}", expr.describe());
+                if (logger.isInfoEnabled()) {
+                    logger.info("Pushing down groupBy + count on: {}", expr.describe());
                 }
                 readContext.pushDownGroupByCount(expr);
             } else {
-                if (logger.isDebugEnabled()) {
-                    logger.debug("Pushing down count()");
+                if (logger.isInfoEnabled()) {
+                    logger.info("Pushing down count()");
                 }
                 readContext.pushDownCount();
             }
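Likewise for pushAggregation, a hedged sketch of the two branches (placeholder format name and column): a grouped count takes the pushDownGroupByCount branch, while an ungrouped count takes pushDownCount.

    // groupBy plus count: the first groupBy expression is pushed down
    // via pushDownGroupByCount.
    session.read().format("yourConnector").load()
        .groupBy("state").count().show();

    // count() with no grouping takes the pushDownCount branch.
    long total = session.read().format("yourConnector").load().count();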
@@ -185,6 +185,7 @@ public void pruneColumns(StructType requiredSchema) {
                 logger.debug("The schema to push down is equal to the existing schema, so not pushing it down.");
             }
         } else {
+            // Keeping this at debug level as it can be fairly verbose.
             if (logger.isDebugEnabled()) {
                 logger.debug("Pushing down required schema: {}", requiredSchema.json());
             }