Skip to content

Commit 51e3a5f

Browse files
committed
Bumped some debug logging up to info
Logging the pushdown operations at info level is helpful for confirming that pushdown is actually happening, and these messages are only logged once per query.
1 parent 17580b7 commit 51e3a5f

File tree

1 file changed

+11
-10
lines changed

1 file changed

+11
-10
lines changed

src/main/java/com/marklogic/spark/reader/MarkLogicScanBuilder.java

Lines changed: 11 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -78,8 +78,8 @@ public Filter[] pushFilters(Filter[] filters) {
7878
for (Filter filter : filters) {
7979
OpticFilter opticFilter = FilterFactory.toPlanFilter(filter);
8080
if (opticFilter != null) {
81-
if (logger.isDebugEnabled()) {
82-
logger.debug("Pushing down filter: {}", filter);
81+
if (logger.isInfoEnabled()) {
82+
logger.info("Pushing down filter: {}", filter);
8383
}
8484
opticFilters.add(opticFilter);
8585
this.pushedFilters.add(filter);
@@ -108,8 +108,8 @@ public boolean pushLimit(int limit) {
108108
if (readContext.planAnalysisFoundNoRows()) {
109109
return false;
110110
}
111-
if (logger.isDebugEnabled()) {
112-
logger.debug("Pushing down limit: {}", limit);
111+
if (logger.isInfoEnabled()) {
112+
logger.info("Pushing down limit: {}", limit);
113113
}
114114
readContext.pushDownLimit(limit);
115115
return true;
@@ -123,8 +123,8 @@ public boolean pushTopN(SortOrder[] orders, int limit) {
123123
// This will be invoked when the user calls both orderBy and limit in their Spark program. If the user only
124124
// calls limit, then only pushLimit is called and this will not be called. If the user only calls orderBy and
125125
// not limit, then neither this nor pushLimit will be called.
126-
if (logger.isDebugEnabled()) {
127-
logger.debug("Pushing down topN: {}; limit: {}", Arrays.asList(orders), limit);
126+
if (logger.isInfoEnabled()) {
127+
logger.info("Pushing down topN: {}; limit: {}", Arrays.asList(orders), limit);
128128
}
129129
readContext.pushDownTopN(orders, limit);
130130
return true;
@@ -146,13 +146,13 @@ public boolean pushAggregation(Aggregation aggregation) {
146146
if (supportCompletePushDown(aggregation)) {
147147
if (aggregation.groupByExpressions().length > 0) {
148148
Expression expr = aggregation.groupByExpressions()[0];
149-
if (logger.isDebugEnabled()) {
150-
logger.debug("Pushing down groupBy + count on: {}", expr.describe());
149+
if (logger.isInfoEnabled()) {
150+
logger.info("Pushing down groupBy + count on: {}", expr.describe());
151151
}
152152
readContext.pushDownGroupByCount(expr);
153153
} else {
154-
if (logger.isDebugEnabled()) {
155-
logger.debug("Pushing down count()");
154+
if (logger.isInfoEnabled()) {
155+
logger.info("Pushing down count()");
156156
}
157157
readContext.pushDownCount();
158158
}
@@ -185,6 +185,7 @@ public void pruneColumns(StructType requiredSchema) {
185185
logger.debug("The schema to push down is equal to the existing schema, so not pushing it down.");
186186
}
187187
} else {
188+
// Keeping this at debug level as it can be fairly verbose.
188189
if (logger.isDebugEnabled()) {
189190
logger.debug("Pushing down required schema: {}", requiredSchema.json());
190191
}

0 commit comments

Comments (0)