use of org.apache.lucene.index.IndexReader in project intellij-community by JetBrains.
the class NexusIndexerTest, method _testIteratingAddedArtifacts.
public void _testIteratingAddedArtifacts() throws Exception {
    if (ignore())
        return;
    IndexingContext c = addContext("virtual", myIndexDir, null, null);
    addArtifact(c, "group1", "id1", "version1", "x:/path1");
    addArtifact(c, "group2", "id2", "version2", "x:/path2");
    addArtifact(c, "group3", "id3", "version3", "x:/path3");
    IndexReader r = c.getIndexReader();
    assertEquals(5, r.numDocs());
    List<String> result = new ArrayList<String>();
    for (int i = 0; i < r.numDocs(); i++) {
        // third document becomes deleted somehow...
        Document d = r.document(i);
        String uinfo = d.get(ArtifactInfo.UINFO);
        result.add(uinfo);
    }
    System.out.println(result);
}
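The in-code remark about the third document being deleted points at a pitfall in this loop: numDocs() counts live documents, while document IDs run up to maxDoc(), so once an index contains deletions the loop can both skip live documents and request deleted slots. A minimal sketch of a defensive variant, assuming the Lucene 2.x/3.x IndexReader API this test is written against (isDeleted(int) was removed in Lucene 4); the helper method name is hypothetical:

private static List<String> collectUinfos(IndexReader r) throws IOException {
    // Walk every doc ID up to maxDoc() and skip deleted slots instead of using
    // numDocs() as the loop bound; document(i) is unspecified for deleted IDs.
    List<String> result = new ArrayList<String>();
    for (int i = 0; i < r.maxDoc(); i++) {
        if (r.isDeleted(i)) {
            continue;
        }
        Document d = r.document(i);
        result.add(d.get(ArtifactInfo.UINFO));
    }
    return result;
}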
use of org.apache.lucene.index.IndexReader in project libresonic by Libresonic.
the class SearchService, method getRandomAlbumsId3.
/**
 * Returns a number of random albums, using ID3 tag.
 *
 * @param count Number of albums to return.
 * @param musicFolders Only return albums from these folders.
 * @return List of random albums.
 */
public List<Album> getRandomAlbumsId3(int count, List<MusicFolder> musicFolders) {
    List<Album> result = new ArrayList<Album>();
    IndexReader reader = null;
    try {
        reader = createIndexReader(ALBUM_ID3);
        Searcher searcher = new IndexSearcher(reader);
        List<SpanTermQuery> musicFolderQueries = new ArrayList<SpanTermQuery>();
        for (MusicFolder musicFolder : musicFolders) {
            musicFolderQueries.add(new SpanTermQuery(new Term(FIELD_FOLDER_ID, NumericUtils.intToPrefixCoded(musicFolder.getId()))));
        }
        Query query = new SpanOrQuery(musicFolderQueries.toArray(new SpanQuery[musicFolderQueries.size()]));
        TopDocs topDocs = searcher.search(query, null, Integer.MAX_VALUE);
        List<ScoreDoc> scoreDocs = Lists.newArrayList(topDocs.scoreDocs);
        Random random = new Random(System.currentTimeMillis());
        while (!scoreDocs.isEmpty() && result.size() < count) {
            int index = random.nextInt(scoreDocs.size());
            Document doc = searcher.doc(scoreDocs.remove(index).doc);
            int id = Integer.valueOf(doc.get(FIELD_ID));
            try {
                addIfNotNull(albumDao.getAlbum(id), result);
            } catch (Exception x) {
                LOG.warn("Failed to get album file " + id, x);
            }
        }
    } catch (Throwable x) {
        LOG.error("Failed to search for random albums.", x);
    } finally {
        FileUtil.closeQuietly(reader);
    }
    return result;
}
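For context, a sketch of how this method might be called from elsewhere in the application; the settingsService and searchService bean names and the getAllMusicFolders() call are assumptions for illustration, not taken from the snippet above:

// Hypothetical usage: pick ten random albums across all configured music folders.
// settingsService and searchService are assumed, externally wired beans.
List<MusicFolder> folders = settingsService.getAllMusicFolders();
List<Album> randomAlbums = searchService.getRandomAlbumsId3(10, folders);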
use of org.apache.lucene.index.IndexReader in project libresonic by Libresonic.
the class SearchService, method search.
public SearchResult search(SearchCriteria criteria, List<MusicFolder> musicFolders, IndexType indexType) {
    SearchResult result = new SearchResult();
    int offset = criteria.getOffset();
    int count = criteria.getCount();
    result.setOffset(offset);
    IndexReader reader = null;
    try {
        reader = createIndexReader(indexType);
        Searcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new LibresonicAnalyzer();
        MultiFieldQueryParser queryParser = new MultiFieldQueryParser(LUCENE_VERSION, indexType.getFields(), analyzer, indexType.getBoosts());
        BooleanQuery query = new BooleanQuery();
        query.add(queryParser.parse(analyzeQuery(criteria.getQuery())), BooleanClause.Occur.MUST);
        List<SpanTermQuery> musicFolderQueries = new ArrayList<SpanTermQuery>();
        for (MusicFolder musicFolder : musicFolders) {
            if (indexType == ALBUM_ID3 || indexType == ARTIST_ID3) {
                musicFolderQueries.add(new SpanTermQuery(new Term(FIELD_FOLDER_ID, NumericUtils.intToPrefixCoded(musicFolder.getId()))));
            } else {
                musicFolderQueries.add(new SpanTermQuery(new Term(FIELD_FOLDER, musicFolder.getPath().getPath())));
            }
        }
        query.add(new SpanOrQuery(musicFolderQueries.toArray(new SpanQuery[musicFolderQueries.size()])), BooleanClause.Occur.MUST);
        TopDocs topDocs = searcher.search(query, null, offset + count);
        result.setTotalHits(topDocs.totalHits);
        int start = Math.min(offset, topDocs.totalHits);
        int end = Math.min(start + count, topDocs.totalHits);
        for (int i = start; i < end; i++) {
            Document doc = searcher.doc(topDocs.scoreDocs[i].doc);
            switch (indexType) {
                case SONG:
                case ARTIST:
                case ALBUM:
                    MediaFile mediaFile = mediaFileService.getMediaFile(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(mediaFile, result.getMediaFiles());
                    break;
                case ARTIST_ID3:
                    Artist artist = artistDao.getArtist(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(artist, result.getArtists());
                    break;
                case ALBUM_ID3:
                    Album album = albumDao.getAlbum(Integer.valueOf(doc.get(FIELD_ID)));
                    addIfNotNull(album, result.getAlbums());
                    break;
                default:
                    break;
            }
        }
    } catch (Throwable x) {
        LOG.error("Failed to execute Lucene search.", x);
    } finally {
        FileUtil.closeQuietly(reader);
    }
    return result;
}
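Both Libresonic snippets restrict hits to the given folders with a SpanOrQuery over prefix-coded folder IDs. A small sketch of that pattern in isolation, assuming the Lucene 3.x NumericUtils API used above (numeric fields are indexed in prefix-coded form, so a plain decimal string in a Term would not match); the "folderId" field name and the ID values are illustrative:

// Build an OR filter over numeric folder IDs, mirroring how the methods above
// construct their folder restriction (Lucene 3.x prefix-coded terms).
List<SpanTermQuery> folderQueries = new ArrayList<SpanTermQuery>();
for (int folderId : new int[] { 1, 2, 3 }) {
    folderQueries.add(new SpanTermQuery(new Term("folderId", NumericUtils.intToPrefixCoded(folderId))));
}
Query folderFilter = new SpanOrQuery(folderQueries.toArray(new SpanQuery[folderQueries.size()]));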
use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
the class CollationTestBase, method testFarsiTermRangeQuery.
public void testFarsiTermRangeQuery(Analyzer analyzer, BytesRef firstBeg, BytesRef firstEnd, BytesRef secondBeg, BytesRef secondEnd) throws Exception {
    Directory farsiIndex = newDirectory();
    IndexWriter writer = new IndexWriter(farsiIndex, new IndexWriterConfig(analyzer));
    Document doc = new Document();
    doc.add(new TextField("content", "ساب", Field.Store.YES));
    doc.add(new StringField("body", "body", Field.Store.YES));
    writer.addDocument(doc);
    writer.close();
    IndexReader reader = DirectoryReader.open(farsiIndex);
    IndexSearcher search = newSearcher(reader);
    // Unicode order would include U+0633 in [ U+062F - U+0698 ], but Farsi
    // orders the U+0698 character before the U+0633 character, so the single
    // index Term below should NOT be returned by a TermRangeQuery
    // with a Farsi Collator (or an Arabic one for the case when Farsi is
    // not supported).
    Query csrq = new TermRangeQuery("content", firstBeg, firstEnd, true, true);
    ScoreDoc[] result = search.search(csrq, 1000).scoreDocs;
    assertEquals("The index Term should not be included.", 0, result.length);
    csrq = new TermRangeQuery("content", secondBeg, secondEnd, true, true);
    result = search.search(csrq, 1000).scoreDocs;
    assertEquals("The index Term should be included.", 1, result.length);
    reader.close();
    farsiIndex.close();
}
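The analyzer and the BytesRef range bounds are supplied by the caller. One way they might be produced, assuming java.text.Collator together with Lucene's CollationKeyAnalyzer from the analyzers-common module; the locale and the boundary strings here are illustrative, not taken from the test:

// Hypothetical caller-side setup: index with collation keys and derive the range
// bounds from the same Collator, so TermRangeQuery compares in collation order.
Collator collator = Collator.getInstance(new Locale("fa"));
Analyzer analyzer = new CollationKeyAnalyzer(collator);
BytesRef firstBeg = new BytesRef(collator.getCollationKey("\u062F").toByteArray());
BytesRef firstEnd = new BytesRef(collator.getCollationKey("\u0698").toByteArray());
// secondBeg and secondEnd would be built the same way from a range that does
// contain the indexed term under the collator's ordering.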
use of org.apache.lucene.index.IndexReader in project lucene-solr by apache.
the class QueryElevationComponent, method inform.
@Override
public void inform(SolrCore core) {
    IndexSchema schema = core.getLatestSchema();
    String a = initArgs.get(FIELD_TYPE);
    if (a != null) {
        FieldType ft = schema.getFieldTypes().get(a);
        if (ft == null) {
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown FieldType: '" + a + "' used in QueryElevationComponent");
        }
        analyzer = ft.getQueryAnalyzer();
    }
    SchemaField sf = schema.getUniqueKeyField();
    if (sf == null) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "QueryElevationComponent requires the schema to have a uniqueKeyField.");
    }
    idSchemaFT = sf.getType();
    idField = sf.getName();
    // register the EditorialMarkerFactory
    String excludeName = initArgs.get(QueryElevationParams.EXCLUDE_MARKER_FIELD_NAME, "excluded");
    if (excludeName == null || excludeName.equals("") == true) {
        excludeName = "excluded";
    }
    ExcludedMarkerFactory excludedMarkerFactory = new ExcludedMarkerFactory();
    core.addTransformerFactory(excludeName, excludedMarkerFactory);
    ElevatedMarkerFactory elevatedMarkerFactory = new ElevatedMarkerFactory();
    String markerName = initArgs.get(QueryElevationParams.EDITORIAL_MARKER_FIELD_NAME, "elevated");
    if (markerName == null || markerName.equals("") == true) {
        markerName = "elevated";
    }
    core.addTransformerFactory(markerName, elevatedMarkerFactory);
    forceElevation = initArgs.getBool(QueryElevationParams.FORCE_ELEVATION, forceElevation);
    try {
        synchronized (elevationCache) {
            elevationCache.clear();
            String f = initArgs.get(CONFIG_FILE);
            if (f == null) {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "QueryElevationComponent must specify argument: '" + CONFIG_FILE + "' -- path to elevate.xml");
            }
            boolean exists = false;
            // check if using ZooKeeper
            ZkController zkController = core.getCoreContainer().getZkController();
            if (zkController != null) {
                // TODO : shouldn't have to keep reading the config name when it has been read before
                exists = zkController.configFileExists(zkController.getZkStateReader().readConfigName(core.getCoreDescriptor().getCloudDescriptor().getCollectionName()), f);
            } else {
                File fC = new File(core.getResourceLoader().getConfigDir(), f);
                File fD = new File(core.getDataDir(), f);
                if (fC.exists() == fD.exists()) {
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "QueryElevationComponent missing config file: '" + f + "\n" + "either: " + fC.getAbsolutePath() + " or " + fD.getAbsolutePath() + " must exist, but not both.");
                }
                if (fC.exists()) {
                    exists = true;
                    log.info("Loading QueryElevation from: " + fC.getAbsolutePath());
                    Config cfg = new Config(core.getResourceLoader(), f);
                    elevationCache.put(null, loadElevationMap(cfg));
                }
            }
            // in other words, we think this is in the data dir, not the conf dir
            if (!exists) {
                // preload the first data
                RefCounted<SolrIndexSearcher> searchHolder = null;
                try {
                    searchHolder = core.getNewestSearcher(false);
                    IndexReader reader = searchHolder.get().getIndexReader();
                    getElevationMap(reader, core);
                } finally {
                    if (searchHolder != null)
                        searchHolder.decref();
                }
            }
        }
    } catch (Exception ex) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error initializing QueryElevationComponent.", ex);
    }
}