@@ -48,7 +48,7 @@
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
-import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.search.TopScoreDocCollectorManager;
 import org.apache.lucene.util.Version;
 import org.opengrok.indexer.analysis.AbstractAnalyzer;
 import org.opengrok.indexer.analysis.CompatibleAnalyser;
@@ -132,7 +132,7 @@ public class SearchEngine {
     int cachePages = RuntimeEnvironment.getInstance().getCachePages();
     int totalHits = 0;
     private ScoreDoc[] hits;
-    private TopScoreDocCollector collector;
+    private TopScoreDocCollectorManager collectorManager;
     private IndexSearcher searcher;
     boolean allCollected;
     private final ArrayList<SuperIndexSearcher> searcherList = new ArrayList<>();
@@ -205,18 +205,17 @@ private void searchMultiDatabase(List<Project> projectList, boolean paging) thro
     }

     private void searchIndex(IndexSearcher searcher, boolean paging) throws IOException {
-        collector = TopScoreDocCollector.create(hitsPerPage * cachePages, Short.MAX_VALUE);
+        collectorManager = new TopScoreDocCollectorManager(hitsPerPage * cachePages, Short.MAX_VALUE);
         Statistics stat = new Statistics();
-        searcher.search(query, collector);
-        totalHits = collector.getTotalHits();
+        hits = searcher.search(query, collectorManager).scoreDocs;
+        totalHits = searcher.count(query);
         stat.report(LOGGER, Level.FINEST, "search via SearchEngine done",
                 "search.latency", new String[]{"category", "engine",
                         "outcome", totalHits > 0 ? "success" : "empty"});
         if (!paging && totalHits > 0) {
-            collector = TopScoreDocCollector.create(totalHits, Short.MAX_VALUE);
-            searcher.search(query, collector);
+            collectorManager = new TopScoreDocCollectorManager(totalHits, Short.MAX_VALUE);
+            hits = searcher.search(query, collectorManager).scoreDocs;
         }
-        hits = collector.topDocs().scoreDocs;
         StoredFields storedFields = searcher.storedFields();
         for (ScoreDoc hit : hits) {
             int docId = hit.doc;
@@ -412,14 +411,13 @@ public void results(int start, int end, List<Hit> ret) {
         // TODO check if below fits for if end=old hits.length, or it should include it
         if (end > hits.length && !allCollected) {
             //do the requery, we want more than 5 pages
-            collector = TopScoreDocCollector.create(totalHits, Short.MAX_VALUE);
+            collectorManager = new TopScoreDocCollectorManager(totalHits, Short.MAX_VALUE);
             try {
-                searcher.search(query, collector);
+                hits = searcher.search(query, collectorManager).scoreDocs;
             } catch (Exception e) { // this exception should never be hit, since search() will hit this before
                 LOGGER.log(
                         Level.WARNING, SEARCH_EXCEPTION_MSG, e);
             }
-            hits = collector.topDocs().scoreDocs;
             StoredFields storedFields = null;
             try {
                 storedFields = searcher.storedFields();