Use of edu.stanford.muse.datacache.Blob in project ePADD: class IndexUtils, method getBlobsForAttachmentTypes.
/**
 * Returns the set of all blobs whose filename extension matches ANY of the
 * given attachment types. The special type "none" is valid and matches
 * attachments that have no filename extension.
 */
public static Set<Blob> getBlobsForAttachmentTypes(Collection<? extends Document> docs, String[] attachmentTypes) {
    // a set gives O(1) membership tests; LinkedHashSet keeps insertion order
    Set<String> wantedTypes = new LinkedHashSet<>();
    Collections.addAll(wantedTypes, attachmentTypes);

    Set<Blob> matchingBlobs = new LinkedHashSet<>();
    for (Document doc : docs) {
        // only email documents carry attachments
        if (!(doc instanceof EmailDocument))
            continue;
        EmailDocument emailDoc = (EmailDocument) doc;
        if (emailDoc.attachments == null)
            continue;
        for (Blob attachment : emailDoc.attachments) {
            String ext = Util.getExtension(attachment.filename);
            // attachments without an extension are bucketed under "none"
            String normalizedExt = (ext == null) ? "none" : ext.toLowerCase();
            if (wantedTypes.contains(normalizedExt))
                matchingBlobs.add(attachment);
        }
    }
    return matchingBlobs;
}
Use of edu.stanford.muse.datacache.Blob in project ePADD: class Indexer, method indexAttachments.
/**
 * Indexes the text content of all attachments of the given email document
 * into the attachments Lucene index (iwriter_blob), one Lucene document per
 * attachment.
 *
 * @param e                email whose attachments are to be indexed
 * @param blobStore        store used to assign each blob a sequential id and
 *                         to extract its (metadata, body) content
 * @param processedBlobSet blobs already indexed, to dedup blobs shared by
 *                         multiple docs; may be null (no dedup)
 * @param stats            optional accumulator for indexing statistics; may be null
 * @return true iff content extraction succeeded for every attachment
 *         processed; false if at least one blob's content could not be fetched
 *         (indexing of the remaining attachments still continues)
 * @throws IOException on Lucene write errors
 */
private synchronized boolean indexAttachments(EmailDocument e, BlobStore blobStore, Set<Blob> processedBlobSet, IndexStats stats) throws IOException {
    boolean result = true;
    // bail out if no attachments
    if (e.attachments == null)
        return true;
    // separates attachment metadata from attachment body in the combined text
    final String DELIMITER = "\n";
    for (Blob b : e.attachments) {
        if (processedBlobSet != null && processedBlobSet.contains(b))
            // skip if already processed (blob may be shared by multiple docs)
            continue;
        /*int id_int = iwriter_blob.numDocs();
        String id = Integer.toString(++id_int);*/
        int id_int = blobStore.index(b);
        String id = Integer.toString(id_int);
        // mark processed before content extraction so a failing blob is not retried
        if (processedBlobSet != null)
            processedBlobSet.add(b);
        // remember the id -> blob mapping for later lookups from search results
        attachmentDocIdToBlob.put(id, b);
        // not to be confused with edu.stanford.muse.index.Document
        org.apache.lucene.document.Document doc = new org.apache.lucene.document.Document();
        // content.first = metadata, content.second = extracted body text
        Pair<String, String> content = b.getContent(blobStore);
        if (content == null) {
            // failed to process blob
            result = false;
            log.warn("Failed to fetch content from: " + b.filename + " content type: " + b.contentType + " size: " + b.getSize());
            // but try to continue the process
            continue;
        }
        // imp: for id, should use Field.Index.NOT_ANALYZED field should be http://vuknikolic.wordpress.com/2011/01/03/lucenes-field-options-store-and-index-aka-rtfm/
        // note: id for attachments index is just sequential numbers, 1, 2, 3. etc.
        // it is not the full unique id (<folder>-<num>) that the emails index has.
        // NOTE: docid, emaildocid and languages fields can be stored without position (hence ft as FieldType)
        // because user can not do a phrase query on these fields.
        doc.add(new Field("docId", id, ft));
        // Field type ft instead of StoredFiled so as to be able to search over this field
        doc.add(new Field("emailDocId", e.getUniqueId(), ft));
        String documentText = content.first + DELIMITER + content.second;
        // we'll store all languages detected in the doc as a field in the index
        Set<String> languages = Languages.getAllLanguages(documentText);
        String lang_str = Util.join(languages, LANGUAGE_FIELD_DELIMITER);
        doc.add(new Field("languages", lang_str, ft));
        // optional named-entity recognition pass; names get their own searchable field
        if (edu.stanford.muse.Config.OPENNLP_NER) {
            Set<String> names = setNameFieldsOpenNLP(documentText, doc);
            // just some connector for storing the field
            String s = Util.join(names, NAMES_FIELD_DELIMITER);
            doc.add(new Field("names", s, full_ft));
            if (stats != null)
                stats.nIndexedNames_blob += names.size();
        }
        // log.info ("blob metadata = " + content.first);
        // meta data does not contain the fileName
        doc.add(new Field("meta", content.first, full_ft));
        doc.add(new Field("fileName", b.filename, full_ft));
        doc.add(new Field("body", content.second, full_ft));
        iwriter_blob.addDocument(doc);
        // log.info("Indexed attachment #" + id + " : text = '" + documentText + "' names = '" + s + "'");
        if (stats != null) {
            stats.indexedTextLength_blob += documentText.length();
        }
    }
    return result;
}
Use of edu.stanford.muse.datacache.Blob in project ePADD: class SearchResult, method selectBlobs.
/**
 * Filters the input result set down to documents that have at least one
 * attachment matching the requested file size ("attachmentFilesize"),
 * explicit extensions ("attachmentExtension") and attachment-type groups
 * ("attachmentType") from the query params. Matching blobs are recorded in
 * each doc's AttachmentHLInfo for highlighting.
 * <p>
 * this map is used only by attachments page right now, not advanced search.
 * TODO: make adv. search page also use it
 */
public static SearchResult selectBlobs(SearchResult inputSet) {
    String neededFilesize = JSPHelper.getParam(inputSet.queryParams, "attachmentFilesize");

    // extensions to match: lower-case strings, no "." included
    Set<String> extensionsToMatch = new LinkedHashSet<>();
    String[] extensions = JSPHelper.getParams(inputSet.queryParams, "attachmentExtension").toArray(new String[0]);
    if (!Util.nullOrEmpty(extensions)) {
        for (String s : extensions)
            extensionsToMatch.add(s.trim().toLowerCase());
    }

    // or the given extensions with extensions due to attachment type.
    // Front end should uniformly pass attachment types as extensions like mp3;mov;ogg.
    // Earlier it was passing video, audio, doc etc. To accommodate both cases we first
    // check if there is a mapping from the type to actual extensions using .get(t);
    // if no such mapping is present we assume the input is already of the form
    // mp3;mov;ogg and work on that.
    String[] types = JSPHelper.getParams(inputSet.queryParams, "attachmentType").toArray(new String[0]);
    if (!Util.nullOrEmpty(types)) {
        for (String t : types) {
            String exts = Config.attachmentTypeToExtensions.get(t);
            if (exts == null)
                exts = t;
            Collections.addAll(extensionsToMatch, exts.split(";"));
        }
    }

    // whether the user selected "others" in the attachment-type drop-down of export.jsp
    boolean isOtherSelected = extensionsToMatch.contains("others");
    // all extensions covered by the known attachment-type options; "others" matches
    // any extension NOT in this list
    List<String> attachmentTypeOptions = Config.attachmentTypeToExtensions.values().stream()
            .map(x -> Util.tokenize(x, ";"))
            .flatMap(Collection::stream)
            .collect(Collectors.toList());

    SearchResult outputSet = filterDocsByDate(inputSet);
    Map<Document, Pair<BodyHLInfo, AttachmentHLInfo>> outputDocs = new HashMap<>();
    for (Document k : outputSet.matchedDocs.keySet()) {
        EmailDocument ed = (EmailDocument) k;
        // guard: docs without attachments previously caused an NPE here
        if (ed.attachments == null)
            continue;
        Set<Blob> matchedBlobs = new HashSet<>();
        for (Blob b : ed.attachments) {
            if (!Util.filesizeCheck(neededFilesize, b.getSize()))
                continue;
            if (!Util.nullOrEmpty(extensionsToMatch)) {
                String ext = Util.splitIntoFileBaseAndExtension(b.getName()).getSecond();
                if (ext == null)
                    continue;
                ext = ext.toLowerCase();
                // keep this attachment only if either
                // 1. "others" is selected and this extension is not covered by any known
                //    attachment type [Q. What if there is a file with extension .others?], or
                // 2. this extension was explicitly requested
                boolean matchesOthers = isOtherSelected && !attachmentTypeOptions.contains(ext);
                boolean matchesExplicit = extensionsToMatch.contains(ext);
                if (!matchesOthers && !matchesExplicit)
                    continue;
            }
            // ok, we've survived all filters, add b
            matchedBlobs.add(b);
        }
        // record the doc only if at least one of its attachments matched
        if (!matchedBlobs.isEmpty()) {
            BodyHLInfo bhlinfo = inputSet.matchedDocs.get(k).first;
            AttachmentHLInfo attachmentHLInfo = inputSet.matchedDocs.get(k).second;
            attachmentHLInfo.addMultipleInfo(matchedBlobs);
            outputDocs.put(k, new Pair<>(bhlinfo, attachmentHLInfo));
        }
    }
    return new SearchResult(outputDocs, inputSet.archive, inputSet.queryParams, inputSet.commonHLInfo, inputSet.regexToHighlight);
}
Use of edu.stanford.muse.datacache.Blob in project ePADD: class SearchResult, method filterForAttachmentEntities.
/**
 ******************************ATTACHMENT SPECIFIC FILTERS************************************
 */
/**
 * returns only those docs with attachments matching params[attachmentEntity]
 * (this field is or-delimiter separated)
 * Todo: review usage of this and BlobStore.getKeywordsForBlob()
 */
private static SearchResult filterForAttachmentEntities(SearchResult inputSet) {
    String val = JSPHelper.getParam(inputSet.queryParams, "attachmentEntity");
    if (Util.nullOrEmpty(val))
        return inputSet;
    // entities to look for, lower-cased, split on the or-delimiter
    Set<String> entities = Util.splitFieldForOr(val.toLowerCase());
    BlobStore blobStore = inputSet.archive.blobStore;
    Map<Document, Pair<BodyHLInfo, AttachmentHLInfo>> outputDocs = new HashMap<>();
    inputSet.matchedDocs.keySet().forEach((Document k) -> {
        EmailDocument ed = (EmailDocument) k;
        // Here.. check for all attachments of ed for match.
        Collection<Blob> blobs = ed.attachments;
        // guard: docs without attachments previously caused an NPE here
        if (blobs == null)
            return;
        Set<Blob> matchedBlobs = new HashSet<>();
        for (Blob blob : blobs) {
            Collection<String> keywords = blobStore.getKeywordsForBlob(blob);
            if (keywords == null)
                continue;
            // intersect on a COPY: calling retainAll directly on the returned
            // collection would destructively modify the blob store's keyword list
            Set<String> matchedKeywords = new HashSet<>(keywords);
            matchedKeywords.retainAll(entities);
            // non-empty intersection means this blob is of interest
            if (!matchedKeywords.isEmpty())
                matchedBlobs.add(blob);
        }
        // record the doc only if at least one of its attachments matched
        if (!matchedBlobs.isEmpty()) {
            BodyHLInfo bhlinfo = inputSet.matchedDocs.get(k).first;
            AttachmentHLInfo attachmentHLInfo = inputSet.matchedDocs.get(k).second;
            attachmentHLInfo.addMultipleInfo(matchedBlobs);
            outputDocs.put(k, new Pair<>(bhlinfo, attachmentHLInfo));
        }
    });
    return new SearchResult(outputDocs, inputSet.archive, inputSet.queryParams, inputSet.commonHLInfo, inputSet.regexToHighlight);
}
Use of edu.stanford.muse.datacache.Blob in project ePADD: class SearchResult, method searchForTerm.
/**
 * returns SearchResult containing docs and attachments matching the given term.
 * Searches in the order subject, body (or original body), attachments, as
 * enabled by the "termSubject"/"termBody"/"termOriginalBody"/"termAttachments"
 * query params.
 *
 * @param inputSet Input search result object on which this term filtering needs to be done
 * @param term     term to search for (in lucene syntax; highlighting handled downstream)
 * @return searchresult obj
 */
public static SearchResult searchForTerm(SearchResult inputSet, String term) {
    // go in the order subject, body, attachment
    Set<Document> docsForTerm = new LinkedHashSet<>();
    if ("on".equals(JSPHelper.getParam(inputSet.queryParams, "termSubject"))) {
        Indexer.QueryOptions options = new Indexer.QueryOptions();
        options.setQueryType(Indexer.QueryType.SUBJECT);
        docsForTerm.addAll(inputSet.archive.docsForQuery(term, options));
    }
    if ("on".equals(JSPHelper.getParam(inputSet.queryParams, "termBody"))) {
        Indexer.QueryOptions options = new Indexer.QueryOptions();
        options.setQueryType(Indexer.QueryType.FULL);
        docsForTerm.addAll(inputSet.archive.docsForQuery(term, options));
    } else if ("on".equals(JSPHelper.getParam(inputSet.queryParams, "termOriginalBody"))) {
        // this is an else because we don't want to look at both body and body original
        Indexer.QueryOptions options = new Indexer.QueryOptions();
        options.setQueryType(Indexer.QueryType.ORIGINAL);
        docsForTerm.addAll(inputSet.archive.docsForQuery(term, options));
    }

    SearchResult outputSet;
    if ("on".equals(JSPHelper.getParam(inputSet.queryParams, "termAttachments"))) {
        Map<Document, Pair<BodyHLInfo, AttachmentHLInfo>> attachmentSearchResult = new HashMap<>();
        Set<Blob> blobsForTerm = inputSet.archive.blobsForQuery(term);
        // iterate over 'all attachments' of docs present in 'inputSet'
        inputSet.matchedDocs.keySet().forEach(d -> {
            EmailDocument edoc = (EmailDocument) d;
            // guard against docs with no attachments (null list)
            Set<Blob> commonAttachments = (edoc.attachments == null)
                    ? new HashSet<>()
                    : new HashSet<>(edoc.attachments);
            commonAttachments.retainAll(blobsForTerm);
            boolean termInBody = docsForTerm.contains(d);
            if (!commonAttachments.isEmpty()) {
                // term matched at least one attachment (and possibly the body too);
                // record matches in the attachment highlighter, and in the body
                // highlighter as well if the body also matched
                BodyHLInfo bhlinfo = inputSet.matchedDocs.get(d).first;
                AttachmentHLInfo attachmentHLInfo = inputSet.matchedDocs.get(d).second;
                if (termInBody)
                    bhlinfo.addTerm(term);
                attachmentHLInfo.addMultipleInfo(commonAttachments);
                attachmentSearchResult.put(d, new Pair<>(bhlinfo, attachmentHLInfo));
            } else if (termInBody) {
                // the document had the term only in its body and not in any attachment;
                // keep its info in bodyHLInfo only
                BodyHLInfo bhlinfo = inputSet.matchedDocs.get(d).first;
                AttachmentHLInfo attachmentHLInfo = inputSet.matchedDocs.get(d).second;
                bhlinfo.addTerm(term);
                attachmentSearchResult.put(d, new Pair<>(bhlinfo, attachmentHLInfo));
            }
        });
        outputSet = new SearchResult(attachmentSearchResult, inputSet.archive, inputSet.queryParams, inputSet.commonHLInfo, inputSet.regexToHighlight);
    } else {
        // just retain only those documents in inputSet.matchedDocs which are present in docsForTerm set.
        inputSet.matchedDocs.keySet().retainAll(docsForTerm);
        outputSet = inputSet;
    }

    // Add term to common highlighting info (as it is without parsing) for highlighting.
    // The term will be in lucene syntax (OR,AND etc.)
    // lucene highlighter will take care of highlighting that.
    outputSet.commonHLInfo.addTerm(term);
    return outputSet;
}
Aggregations