Use of gate.creole.annic.apache.lucene.search.Hits in project gate-core by GateNLP: class LuceneIndexer, method getNamesOfSerializedFiles.
/**
 * Returns the ids of the serialized files recorded in the index for the
 * given document.
 *
 * @param documentID id of the document to look up
 * @return set of serialized-file document ids found in the index
 * @throws IndexException if the underlying lucene index cannot be read
 */
public Set<String> getNamesOfSerializedFiles(String documentID) throws IndexException {
  // Resolve the on-disk index directory from the stored location URL.
  URL indexURL = (URL) parameters.get(Constants.INDEX_LOCATION_URL);
  String location;
  try {
    location = new File(indexURL.toURI()).getAbsolutePath();
  } catch (URISyntaxException use) {
    // Not a strictly valid URI: fall back to the raw file portion of the URL.
    location = new File(indexURL.getFile()).getAbsolutePath();
  }
  Set<String> names = new HashSet<String>();
  try {
    // Query for all index entries belonging to this document id.
    TermQuery query = new TermQuery(new Term(Constants.DOCUMENT_ID, documentID));
    gate.creole.annic.apache.lucene.search.Searcher searcher = new IndexSearcher(location);
    try {
      // Execute the query; each hit contributes its serialized-file id.
      Hits hits = searcher.search(query);
      for (int i = 0; i < hits.length(); i++) {
        names.add(hits.doc(i).get(Constants.DOCUMENT_ID_FOR_SERIALIZED_FILE));
      }
      return names;
    } finally {
      searcher.close();
    }
  } catch (IOException ioe) {
    throw new IndexException(ioe);
  }
}
Use of gate.creole.annic.apache.lucene.search.Hits in project gate-core by GateNLP: class StatsCalculator, method freq.
/**
 * Allows retrieving frequencies for the given parameters. Please make
 * sure that you close the searcher on your own. Failing to do so may
 * result in many files being opened at the same time and that can
 * cause problems with your OS.
 *
 * @param searcher caller-owned searcher to run the query against (not closed here)
 * @param corpusToSearchIn corpus id restriction; null or blank means "all corpora"
 * @param annotationSetToSearchIn annotation set restriction; null or blank means "all sets"
 * @param annotationType annotation type whose frequency is counted; must not be null
 * @param featureName optional feature name of the annotation type
 * @param value optional feature value; requires featureName to be non-null
 * @return total frequency summed over all matching documents
 * @throws SearchException on invalid arguments or index I/O failure
 */
public static int freq(IndexSearcher searcher, String corpusToSearchIn, String annotationSetToSearchIn, String annotationType, String featureName, String value) throws SearchException {
  try {
    // treat blank restrictions the same as null, i.e. "no restriction"
    corpusToSearchIn = corpusToSearchIn == null || corpusToSearchIn.trim().length() == 0 ? null : corpusToSearchIn.trim();
    annotationSetToSearchIn = annotationSetToSearchIn == null || annotationSetToSearchIn.trim().length() == 0 ? null : annotationSetToSearchIn.trim();
    if (annotationType == null)
      throw new SearchException("Annotation Type cannot be null");
    // term that contains a value to be searched in the index
    Term term = null;
    if (featureName == null && value == null) {
      term = new Term("contents", annotationType, "*");
    } else if (featureName != null && value == null) {
      term = new Term("contents", annotationType + "." + featureName, "**");
    } else if (featureName == null) {
      // value was supplied without a feature name — inconsistent arguments
      throw new SearchException("FeatureName cannot be null");
    } else {
      term = new Term("contents", value, annotationType + "." + featureName);
    }
    // term query
    TermQuery tq = new TermQuery(term);
    // indicates whether we want to use booleanQuery
    boolean useBooleanQuery = false;
    BooleanQuery bq = new BooleanQuery();
    if (corpusToSearchIn != null) {
      // restrict the search to the given corpus (required clause)
      PhraseQuery cq = new PhraseQuery();
      cq.add(new Term(Constants.CORPUS_ID, corpusToSearchIn), 0, true);
      bq.add(cq, true, false);
      useBooleanQuery = true;
    }
    if (annotationSetToSearchIn != null) {
      // restrict the search to the given annotation set (required clause)
      PhraseQuery aq = new PhraseQuery();
      aq.add(new Term(Constants.ANNOTATION_SET_ID, annotationSetToSearchIn), 0, true);
      bq.add(aq, true, false);
      useBooleanQuery = true;
    }
    Hits corpusHits = null;
    if (useBooleanQuery) {
      bq.add(tq, true, false);
      corpusHits = searcher.search(bq);
    } else {
      corpusHits = searcher.search(tq);
    }
    List<?>[] firstTermPositions = searcher.getFirstTermPositions();
    // if no result available, the frequency is zero
    if (firstTermPositions[0].size() == 0) {
      return 0;
    }
    int size = 0;
    // sum the recorded term frequency of every hit
    for (int hitIndex = 0; hitIndex < corpusHits.length(); hitIndex++) {
      // Integer.valueOf instead of the deprecated new Integer(...) — consistent
      // with the other callers of getFirstTermPositions() in this codebase
      int index = firstTermPositions[0].indexOf(Integer.valueOf(corpusHits.id(hitIndex)));
      if (index < 0) {
        // defensive: a hit with no recorded first-term positions would previously
        // have thrown IndexOutOfBoundsException; it contributes nothing instead
        continue;
      }
      // we fetch the recorded frequency for the query issued
      Integer freq = (Integer) firstTermPositions[4].get(index);
      size += freq.intValue();
    }
    return size;
  } catch (IOException ioe) {
    throw new SearchException(ioe);
  } finally {
    // always reset the searcher's term-position caches, even on failure
    searcher.initializeTermPositions();
  }
}
Use of gate.creole.annic.apache.lucene.search.Hits in project gate-core by GateNLP: class LuceneSearchThread, method search.
/**
 * This method collects the necessary information from lucene and uses
 * it when the next method is called
 *
 * @param query query supplied by the user
 * @param patternWindow number of tokens to refer on left and right
 * context
 * @param indexLocation location of the index the searcher should
 * search in
 * @param corpusToSearchIn corpus id to restrict the search to (may be null)
 * @param annotationSetToSearchIn annotation set id to restrict the search to (may be null)
 * @param luceneSearcher an instance of lucene search from where the
 * instance of SearchThread is invoked
 * @return true iff search was successful false otherwise
 * @throws SearchException if the index cannot be read or the query is invalid
 */
@SuppressWarnings("unchecked")
public boolean search(String query, int patternWindow, String indexLocation, String corpusToSearchIn, String annotationSetToSearchIn, LuceneSearcher luceneSearcher) throws SearchException {
  this.query = query;
  this.contextWindow = patternWindow;
  this.indexLocation = indexLocation;
  this.queryParser = new QueryParser();
  this.luceneSearcher = luceneSearcher;
  /*
   * reset all parameters that keep track of where we are in our
   * searching. These parameters are used mostly to keep track of
   * where to start fetching the next results from
   */
  searchResultInfoMap = new HashMap<String, List<QueryItem>>();
  serializedFileIDIndex = 0;
  queryItemIndex = 0;
  serializedFilesIDsList = new ArrayList<String>();
  ftpIndex = -1;
  success = false;
  fwdIterationEnded = false;
  try {
    // normalise the index location to forward slashes (replaces the old
    // character-by-character loop, which did exactly this)
    indexLocation = indexLocation.replace('\\', '/');
    /*
     * for each different location there can be different
     * baseTokenAnnotationType each index will have their index
     * Definition file stored under the index directory so first see
     * if given location is a valid directory
     */
    File locationFile = new File(indexLocation);
    if (!locationFile.isDirectory()) {
      System.out.println("Skipping the invalid Index Location :" + indexLocation);
      return false;
    }
    if (!indexLocation.endsWith("/")) {
      indexLocation += "/";
    }
    // otherwise let us read the index definition file
    locationFile = new File(indexLocation + "LuceneIndexDefinition.xml");
    // check if this file is available
    if (!locationFile.exists()) {
      System.out.println("Index Definition file not found - Skipping the invalid Index Location :" + indexLocation + "LuceneIndexDefinition.xml");
      return false;
    }
    Map<String, Object> indexInformation = null;
    // other wise read this file
    XStream xstream = new XStream(new StaxDriver());
    // NOTE(review): FileReader uses the platform default charset — confirm the
    // index definition file is always written with the same charset
    try (FileReader fileReader = new FileReader(indexLocation + "LuceneIndexDefinition.xml")) {
      // Saving was accomplished by using XML serialization of the map.
      indexInformation = (Map<String, Object>) xstream.fromXML(fileReader);
    }
    // find out if the current index was indexed by annicIndexPR
    String indexedWithANNICIndexPR = (String) indexInformation.get(Constants.CORPUS_INDEX_FEATURE);
    if (indexedWithANNICIndexPR == null || !indexedWithANNICIndexPR.equals(Constants.CORPUS_INDEX_FEATURE_VALUE)) {
      System.out.println("This corpus was not indexed by Annic Index PR - Skipping the invalid Index");
      return false;
    }
    // find out the baseTokenAnnotationType name, stripping any namespace-like
    // prefix up to the last '.'
    baseTokenAnnotationType = ((String) indexInformation.get(Constants.BASE_TOKEN_ANNOTATION_TYPE)).trim();
    int separatorIndex = baseTokenAnnotationType.lastIndexOf('.');
    if (separatorIndex >= 0) {
      baseTokenAnnotationType = baseTokenAnnotationType.substring(separatorIndex + 1);
    }
    // create various Queries from the user's query
    Query[] luceneQueries = queryParser.parse("contents", query, baseTokenAnnotationType, corpusToSearchIn, annotationSetToSearchIn);
    if (queryParser.needValidation()) {
      if (DEBUG)
        System.out.println("Validation enabled!");
    } else {
      if (DEBUG)
        System.out.println("Validation disabled!");
    }
    // create an instance of Index Searcher
    LuceneIndexSearcher searcher = new LuceneIndexSearcher(indexLocation);
    try {
      // we need to iterate through one query at a time
      for (int luceneQueryIndex = 0; luceneQueryIndex < luceneQueries.length; luceneQueryIndex++) {
        /*
         * this call reinitializes the first Term positions arraylists
         * which are being used to store the results
         */
        searcher.initializeTermPositions();
        /*
         * and now execute the query result of which will be stored in
         * hits
         */
        Hits hits = searcher.search(luceneQueries[luceneQueryIndex]);
        /*
         * and so now find out the positions of the first terms in the
         * returned results. first term position is the position of the
         * first term in the found pattern
         */
        List<?>[] firstTermPositions = searcher.getFirstTermPositions();
        // if no result available for this query, move on to the next one
        if (firstTermPositions[0].size() == 0) {
          // do nothing
          continue;
        }
        // collect the per-hit information
        for (int hitIndex = 0; hitIndex < hits.length(); hitIndex++) {
          int index = firstTermPositions[0].indexOf(Integer.valueOf(hits.id(hitIndex)));
          // we fetch all the first term positions for the query
          // issued
          List<?> ftp = (List<?>) firstTermPositions[1].get(index);
          /*
           * pattern length (in terms of total number of annotations
           * following one other)
           */
          int patLen = ((Integer) firstTermPositions[2].get(index)).intValue();
          /*
           * and the type of query (if it has only one annotation in it,
           * or multiple terms following them)
           */
          int qType = ((Integer) firstTermPositions[3].get(index)).intValue();
          // find out the documentID
          String serializedFileID = hits.doc(hitIndex).get(Constants.DOCUMENT_ID_FOR_SERIALIZED_FILE);
          QueryItem queryItem = new QueryItem();
          queryItem.annotationSetName = hits.doc(hitIndex).get(Constants.ANNOTATION_SET_ID).intern();
          queryItem.id = hits.id(hitIndex);
          queryItem.documentID = hits.doc(hitIndex).get(Constants.DOCUMENT_ID).intern();
          queryItem.ftp = ftp;
          queryItem.patLen = patLen;
          queryItem.qType = qType;
          queryItem.query = luceneQueries[luceneQueryIndex];
          queryItem.queryString = queryParser.getQueryString(luceneQueryIndex).intern();
          /*
           * all these information go in the top level arrayList. we
           * create separate arrayList for each individual document
           * where each element in the arrayList provides information
           * about different query issued over it
           */
          List<QueryItem> queryItemsList = searchResultInfoMap.get(serializedFileID);
          if (queryItemsList == null) {
            queryItemsList = new ArrayList<QueryItem>();
            queryItemsList.add(queryItem);
            searchResultInfoMap.put(serializedFileID, queryItemsList);
            serializedFilesIDsList.add(serializedFileID);
          } else {
            // // before inserting we check if it is already added
            // if(!doesAlreadyExist(queryItem, queryItemsList)) {
            queryItemsList.add(queryItem);
            // }
          }
        }
      }
    } finally {
      searcher.close();
    }
    // if any result possible, return true
    success = !searchResultInfoMap.isEmpty();
  } catch (IOException | gate.creole.ir.SearchException e) {
    throw new SearchException(e);
  }
  return success;
}
Use of gate.creole.annic.apache.lucene.search.Hits in project gate-core by GateNLP: class LuceneSearcher, method getIndexedAnnotationSetNames.
/**
 * This method returns a set of annotation set names that are indexed. Each
 * entry has the following format:
 * <p>
 * corpusName;annotationSetName
 * </p>
 * where, the corpusName is the name of the corpus the annotationSetName
 * belongs to. As a side effect it also (re)populates annotationTypesMap
 * with corpusID;annotSet;type -&gt; list-of-feature-names entries.
 *
 * @return the indexed corpus/annotation-set combinations
 * @throws SearchException if the index cannot be read
 */
@Override
public String[] getIndexedAnnotationSetNames() throws SearchException {
  String indexLocation;
  try {
    indexLocation = new File(((URL) datastore.getIndexer().getParameters().get(Constants.INDEX_LOCATION_URL)).toURI()).getAbsolutePath();
  } catch (URISyntaxException use) {
    // URL is not a strict URI: fall back to its raw file part
    indexLocation = new File(((URL) datastore.getIndexer().getParameters().get(Constants.INDEX_LOCATION_URL)).getFile()).getAbsolutePath();
  }
  annotationTypesMap = new HashMap<String, List<String>>();
  Set<String> toReturn = new HashSet<String>();
  try {
    IndexReader reader = IndexReader.open(indexLocation);
    try {
      // lets first obtain stored corpora
      TermEnum terms = reader.terms(new Term(Constants.ANNOTATION_SET_ID, ""));
      if (terms == null) {
        return new String[0];
      }
      // iterating over terms and finding out names of annotation sets indexed
      Set<String> annotSets = new HashSet<String>();
      boolean foundAnnotSet = false;
      do {
        Term t = terms.term();
        if (t == null)
          continue;
        if (t.field().equals(Constants.ANNOTATION_SET_ID)) {
          annotSets.add(t.text());
          foundAnnotSet = true;
        } else {
          // terms are sorted by field, so once we leave the
          // ANNOTATION_SET_ID field we are done
          if (foundAnnotSet)
            break;
        }
      } while (terms.next());
      // but not all documents belong to corpora
      for (String annotSet : annotSets) {
        Term term = new Term(Constants.ANNOTATION_SET_ID, annotSet);
        TermQuery tq = new TermQuery(term);
        try {
          gate.creole.annic.apache.lucene.search.Searcher searcher = new IndexSearcher(indexLocation);
          try {
            Hits annotSetHits = searcher.search(tq);
            for (int i = 0; i < annotSetHits.length(); i++) {
              Document luceneDoc = annotSetHits.doc(i);
              String corpusID = luceneDoc.get(Constants.CORPUS_ID);
              if (corpusID == null)
                corpusID = "";
              toReturn.add(corpusID + ";" + annotSet);
              // lets create a boolean query
              // NOTE(review): annotSetTerm/atq duplicate tq, so bq repeats the
              // same clause twice — TODO confirm this is intended
              Term annotSetTerm = new Term(Constants.ANNOTATION_SET_ID, annotSet);
              TermQuery atq = new TermQuery(annotSetTerm);
              BooleanQuery bq = new BooleanQuery();
              bq.add(tq, true, false);
              bq.add(atq, true, false);
              gate.creole.annic.apache.lucene.search.Searcher indexFeatureSearcher = new IndexSearcher(indexLocation);
              try {
                // FIX: run the query on indexFeatureSearcher — it was opened for
                // exactly this purpose but the outer searcher was used instead,
                // leaving indexFeatureSearcher opened and closed without use
                Hits indexFeaturesHits = indexFeatureSearcher.search(bq);
                for (int j = 0; j < indexFeaturesHits.length(); j++) {
                  Document aDoc = indexFeaturesHits.doc(j);
                  String indexedFeatures = aDoc.get(Constants.INDEXED_FEATURES);
                  if (indexedFeatures != null) {
                    String[] features = indexedFeatures.split(";");
                    for (String aFeature : features) {
                      // AnnotationType.FeatureName
                      int index = aFeature.indexOf(".");
                      if (index == -1) {
                        continue;
                      }
                      String type = aFeature.substring(0, index);
                      String featureName = aFeature.substring(index + 1);
                      String key = corpusID + ";" + annotSet + ";" + type;
                      List<String> listOfFeatures = annotationTypesMap.get(key);
                      if (listOfFeatures == null) {
                        listOfFeatures = new ArrayList<String>();
                        annotationTypesMap.put(key, listOfFeatures);
                      }
                      if (!listOfFeatures.contains(featureName)) {
                        listOfFeatures.add(featureName);
                      }
                    }
                  }
                }
              } finally {
                indexFeatureSearcher.close();
              }
            }
          } finally {
            searcher.close();
          }
        } catch (IOException ioe) {
          ioe.printStackTrace();
          throw new SearchException(ioe);
        }
      }
    } finally {
      reader.close();
    }
  } catch (IOException ioe) {
    throw new SearchException(ioe);
  }
  return toReturn.toArray(new String[0]);
}
Aggregations