Use of edu.uci.ics.textdb.api.exception.DataFlowException in project textdb by TextDB.
The class FuzzyTokenMatcher, method processOneInputTuple:
@Override
public Tuple processOneInputTuple(Tuple inputTuple) throws TextDBException {
    // Collect the spans from the tuple's payload that are relevant to this matcher.
    ListField<Span> payloadField = inputTuple.getField(SchemaConstants.PAYLOAD);
    List<Span> relevantSpans = filterRelevantSpans(payloadField.getValue());
    List<Span> matchResults = new ArrayList<>();
    /*
     * The source operator returns spans even for those fields which did not
     * satisfy the threshold criterion. So if two attributes A,B have 10 and
     * 5 matching tokens, and we set threshold to 10, the number of spans
     * returned is 15. So we need to filter those 5 spans for attribute B.
     */
    for (String fieldName : this.predicate.getAttributeNames()) {
        AttributeType fieldType = this.inputSchema.getAttribute(fieldName).getAttributeType();
        // Only TEXT and STRING attributes are supported; reject anything else.
        if (fieldType != AttributeType.TEXT && fieldType != AttributeType.STRING) {
            throw new DataFlowException("FuzzyTokenMatcher: Fields other than TEXT or STRING are not supported");
        }
        // Gather this field's spans whose key is one of the query tokens.
        List<Span> spansForField = new ArrayList<>();
        for (Span span : relevantSpans) {
            if (span.getAttributeName().equals(fieldName)
                    && predicate.getQueryTokens().contains(span.getKey())) {
                spansForField.add(span);
            }
        }
        // Keep the field's spans only if it meets the threshold on its own.
        if (spansForField.size() >= predicate.getThreshold()) {
            matchResults.addAll(spansForField);
        }
    }
    // No field satisfied the threshold: drop the tuple.
    if (matchResults.isEmpty()) {
        return null;
    }
    // Append the matches to the tuple's existing span list and pass it on.
    ListField<Span> spanListField = inputTuple.getField(predicate.getSpanListName());
    spanListField.getValue().addAll(matchResults);
    return inputTuple;
}
Use of edu.uci.ics.textdb.api.exception.DataFlowException in project textdb by TextDB.
The class FuzzyTokenMatcherSourceOperator, method createLuceneQueryObject:
/**
 * Builds the Lucene query for a fuzzy token search: one SHOULD clause per
 * query token across the predicate's attributes, with a minimum-should-match
 * equal to the predicate's threshold.
 *
 * @param predicate supplies the query tokens, attribute names, analyzer and threshold
 * @return the assembled boolean query
 * @throws DataFlowException if any token fails to parse
 */
public static Query createLuceneQueryObject(FuzzyTokenPredicate predicate) throws DataFlowException {
    try {
        /*
         * By default the boolean query takes 1024 clauses as the max limit.
         * One SHOULD clause is added per query token and the input query has
         * no limitation on the number of tokens, so the limit must be sized
         * from the token count — not just the threshold, which can be far
         * smaller than the number of clauses actually added.
         */
        int clauseCount = Math.max(predicate.getQueryTokens().size(), predicate.getThreshold());
        if (clauseCount > 1024) {
            // NOTE: this mutates a JVM-wide static limit shared by all queries.
            BooleanQuery.setMaxClauseCount(clauseCount + 1);
        }
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        // A document matches when at least `threshold` of the SHOULD clauses match.
        builder.setMinimumNumberShouldMatch(predicate.getThreshold());
        MultiFieldQueryParser parser = new MultiFieldQueryParser(
                predicate.getAttributeNames().stream().toArray(String[]::new),
                LuceneAnalyzerConstants.getLuceneAnalyzer(predicate.getLuceneAnalyzerStr()));
        for (String token : predicate.getQueryTokens()) {
            builder.add(parser.parse(token), Occur.SHOULD);
        }
        return builder.build();
    } catch (ParseException e) {
        throw new DataFlowException(e);
    }
}
Use of edu.uci.ics.textdb.api.exception.DataFlowException in project textdb by TextDB.
The class Join, method close:
@Override
public void close() throws TextDBException {
    // Closing an already-closed operator is a no-op.
    if (cursor == CLOSED) {
        return;
    }
    // Close BOTH child operators even if the first close fails; the original
    // code leaked outerOperator (and never reset state) when
    // innerOperator.close() threw. The first failure is rethrown at the end.
    Exception closeError = null;
    try {
        innerOperator.close();
    } catch (Exception e) {
        closeError = e;
    }
    try {
        outerOperator.close();
    } catch (Exception e) {
        if (closeError == null) {
            closeError = e;
        }
    }
    // Release the cached inner tuple list and mark the operator closed
    // regardless of whether the child closes succeeded.
    innerTupleList = null;
    innerTupleListCursor = 0;
    cursor = CLOSED;
    if (closeError != null) {
        throw new DataFlowException(closeError.getMessage(), closeError);
    }
}
Use of edu.uci.ics.textdb.api.exception.DataFlowException in project textdb by TextDB.
The class NlpSentimentOperator, method open:
@Override
public void open() throws TextDBException {
    // Opening an already-opened operator is a no-op.
    if (cursor != CLOSED) {
        return;
    }
    if (inputOperator == null) {
        throw new DataFlowException(ErrorMessages.INPUT_OPERATOR_NOT_SPECIFIED);
    }
    inputOperator.open();
    Schema inputSchema = inputOperator.getOutputSchema();
    // check if input schema is present
    // (throws DataFlowException rather than raw RuntimeException, consistent
    // with the declared `throws TextDBException` and with NlpSplitOperator.open)
    if (!inputSchema.containsField(predicate.getInputAttributeName())) {
        throw new DataFlowException(String.format("input attribute %s is not in the input schema %s",
                predicate.getInputAttributeName(), inputSchema.getAttributeNames()));
    }
    // check if attribute type is valid: sentiment analysis only applies to text-like fields
    AttributeType inputAttributeType =
            inputSchema.getAttribute(predicate.getInputAttributeName()).getAttributeType();
    boolean isValidType = inputAttributeType.equals(AttributeType.STRING)
            || inputAttributeType.equals(AttributeType.TEXT);
    if (!isValidType) {
        throw new DataFlowException(String.format("input attribute %s must have type String or Text, its actual type is %s",
                predicate.getInputAttributeName(), inputAttributeType));
    }
    // generate output schema by transforming the input schema
    outputSchema = transformSchema(inputOperator.getOutputSchema());
    cursor = OPENED;
    // setup NLP sentiment analysis pipeline (tokenize -> sentence split -> parse -> sentiment)
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit, parse, sentiment");
    sentimentPipeline = new StanfordCoreNLP(props);
}
Use of edu.uci.ics.textdb.api.exception.DataFlowException in project textdb by TextDB.
The class NlpSplitOperator, method open:
@Override
public void open() throws TextDBException {
    // Opening an already-opened operator is a no-op.
    if (cursor != CLOSED) {
        return;
    }
    if (inputOperator == null) {
        throw new DataFlowException(ErrorMessages.INPUT_OPERATOR_NOT_SPECIFIED);
    }
    inputOperator.open();
    Schema upstreamSchema = inputOperator.getOutputSchema();
    String attributeName = predicate.getInputAttributeName();
    // The attribute to be split must exist in the upstream schema.
    if (!upstreamSchema.containsField(attributeName)) {
        throw new DataFlowException(String.format(
                "input attribute %s is not in the input schema %s",
                attributeName, upstreamSchema.getAttributeNames()));
    }
    // Only STRING and TEXT attributes can be split.
    AttributeType attributeType = upstreamSchema.getAttribute(attributeName).getAttributeType();
    if (!(attributeType.equals(AttributeType.STRING) || attributeType.equals(AttributeType.TEXT))) {
        throw new DataFlowException(String.format(
                "input attribute %s must have type String or Text, its actual type is %s",
                attributeName, attributeType));
    }
    // The output schema depends on the configured output format
    // (OneToOne vs. OneToMany).
    outputSchema = transformSchema(inputOperator.getOutputSchema());
    cursor = OPENED;
}
Aggregations