Use of org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode in project lucene-solr by apache.
From the class AnalyzerQueryNodeProcessor, the method postProcessNode:
@Override
protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
  if (node instanceof TextableQueryNode && !(node instanceof WildcardQueryNode)
      && !(node instanceof FuzzyQueryNode) && !(node instanceof RegexpQueryNode)
      && !(node.getParent() instanceof RangeQueryNode)) {
    FieldQueryNode fieldNode = ((FieldQueryNode) node);
    String text = fieldNode.getTextAsString();
    String field = fieldNode.getFieldAsString();
    CachingTokenFilter buffer = null;
    PositionIncrementAttribute posIncrAtt = null;
    int numTokens = 0;
    int positionCount = 0;
    boolean severalTokensAtSamePosition = false;
    try {
      try (TokenStream source = this.analyzer.tokenStream(field, text)) {
        buffer = new CachingTokenFilter(source);
        buffer.reset();
        if (buffer.hasAttribute(PositionIncrementAttribute.class)) {
          posIncrAtt = buffer.getAttribute(PositionIncrementAttribute.class);
        }
        try {
          while (buffer.incrementToken()) {
            numTokens++;
            int positionIncrement = (posIncrAtt != null) ? posIncrAtt.getPositionIncrement() : 1;
            if (positionIncrement != 0) {
              positionCount += positionIncrement;
            } else {
              severalTokensAtSamePosition = true;
            }
          }
        } catch (IOException e) {
          // ignore
        }
        // rewind the buffer stream
        // will never throw on subsequent reset calls
        buffer.reset();
      } catch (IOException e) {
        throw new RuntimeException(e);
      }
      if (!buffer.hasAttribute(CharTermAttribute.class)) {
        return new NoTokenFoundQueryNode();
      }
      CharTermAttribute termAtt = buffer.getAttribute(CharTermAttribute.class);
      if (numTokens == 0) {
        return new NoTokenFoundQueryNode();
      } else if (numTokens == 1) {
        String term = null;
        try {
          boolean hasNext;
          hasNext = buffer.incrementToken();
          assert hasNext == true;
          term = termAtt.toString();
        } catch (IOException e) {
          // safe to ignore, because we know the number of tokens
        }
        fieldNode.setText(term);
        return fieldNode;
      } else if (severalTokensAtSamePosition || !(node instanceof QuotedFieldQueryNode)) {
        if (positionCount == 1 || !(node instanceof QuotedFieldQueryNode)) {
          if (positionCount == 1) {
            // simple case: only one position, with synonyms
            LinkedList<QueryNode> children = new LinkedList<>();
            for (int i = 0; i < numTokens; i++) {
              String term = null;
              try {
                boolean hasNext = buffer.incrementToken();
                assert hasNext == true;
                term = termAtt.toString();
              } catch (IOException e) {
                // safe to ignore, because we know the number of tokens
              }
              children.add(new FieldQueryNode(field, term, -1, -1));
            }
            return new GroupQueryNode(new SynonymQueryNode(children));
          } else {
            // multiple positions
            QueryNode q = new BooleanQueryNode(Collections.<QueryNode>emptyList());
            QueryNode currentQuery = null;
            for (int i = 0; i < numTokens; i++) {
              String term = null;
              try {
                boolean hasNext = buffer.incrementToken();
                assert hasNext == true;
                term = termAtt.toString();
              } catch (IOException e) {
                // safe to ignore, because we know the number of tokens
              }
              if (posIncrAtt != null && posIncrAtt.getPositionIncrement() == 0) {
                if (!(currentQuery instanceof BooleanQueryNode)) {
                  QueryNode t = currentQuery;
                  currentQuery = new SynonymQueryNode(Collections.<QueryNode>emptyList());
                  ((BooleanQueryNode) currentQuery).add(t);
                }
                ((BooleanQueryNode) currentQuery).add(new FieldQueryNode(field, term, -1, -1));
              } else {
                if (currentQuery != null) {
                  if (this.defaultOperator == Operator.OR) {
                    q.add(currentQuery);
                  } else {
                    q.add(new ModifierQueryNode(currentQuery, Modifier.MOD_REQ));
                  }
                }
                currentQuery = new FieldQueryNode(field, term, -1, -1);
              }
            }
            if (this.defaultOperator == Operator.OR) {
              q.add(currentQuery);
            } else {
              q.add(new ModifierQueryNode(currentQuery, Modifier.MOD_REQ));
            }
            if (q instanceof BooleanQueryNode) {
              q = new GroupQueryNode(q);
            }
            return q;
          }
        } else {
          // phrase query:
          MultiPhraseQueryNode mpq = new MultiPhraseQueryNode();
          List<FieldQueryNode> multiTerms = new ArrayList<>();
          int position = -1;
          int i = 0;
          int termGroupCount = 0;
          for (; i < numTokens; i++) {
            String term = null;
            int positionIncrement = 1;
            try {
              boolean hasNext = buffer.incrementToken();
              assert hasNext == true;
              term = termAtt.toString();
              if (posIncrAtt != null) {
                positionIncrement = posIncrAtt.getPositionIncrement();
              }
            } catch (IOException e) {
              // safe to ignore, because we know the number of tokens
            }
            if (positionIncrement > 0 && multiTerms.size() > 0) {
              for (FieldQueryNode termNode : multiTerms) {
                if (this.positionIncrementsEnabled) {
                  termNode.setPositionIncrement(position);
                } else {
                  termNode.setPositionIncrement(termGroupCount);
                }
                mpq.add(termNode);
              }
              // Only increment once for each "group" of
              // terms that were in the same position:
              termGroupCount++;
              multiTerms.clear();
            }
            position += positionIncrement;
            multiTerms.add(new FieldQueryNode(field, term, -1, -1));
          }
          for (FieldQueryNode termNode : multiTerms) {
            if (this.positionIncrementsEnabled) {
              termNode.setPositionIncrement(position);
            } else {
              termNode.setPositionIncrement(termGroupCount);
            }
            mpq.add(termNode);
          }
          return mpq;
        }
      } else {
        TokenizedPhraseQueryNode pq = new TokenizedPhraseQueryNode();
        int position = -1;
        for (int i = 0; i < numTokens; i++) {
          String term = null;
          int positionIncrement = 1;
          try {
            boolean hasNext = buffer.incrementToken();
            assert hasNext == true;
            term = termAtt.toString();
            if (posIncrAtt != null) {
              positionIncrement = posIncrAtt.getPositionIncrement();
            }
          } catch (IOException e) {
            // safe to ignore, because we know the number of tokens
          }
          FieldQueryNode newFieldNode = new FieldQueryNode(field, term, -1, -1);
          if (this.positionIncrementsEnabled) {
            position += positionIncrement;
            newFieldNode.setPositionIncrement(position);
          } else {
            newFieldNode.setPositionIncrement(i);
          }
          pq.add(newFieldNode);
        }
        return pq;
      }
    } finally {
      if (buffer != null) {
        try {
          buffer.close();
        } catch (IOException e) {
          // safe to ignore
        }
      }
    }
  }
  return node;
}
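For context, this processor is normally run as part of StandardQueryParser's processor pipeline rather than being called directly. The following minimal sketch is not taken from the source above (the field name "body" and the query text are illustrative); it only shows how the analysis step is triggered when an unquoted multi-term value is parsed:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class AnalyzerProcessorSketch {
  public static void main(String[] args) throws QueryNodeException {
    // AnalyzerQueryNodeProcessor picks up this analyzer from the query config handler.
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    // "wi-fi router" analyzes into several tokens; the processor decides whether the
    // resulting FieldQueryNodes become a boolean group, a synonym group, or a phrase node.
    Query query = parser.parse("wi-fi router", "body");
    System.out.println(query);
  }
}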
Use of org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode in project lucene-solr by apache.
From the class PointQueryNodeProcessor, the method postProcessNode:
@Override
protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
  if (node instanceof FieldQueryNode && !(node.getParent() instanceof RangeQueryNode)) {
    QueryConfigHandler config = getQueryConfigHandler();
    if (config != null) {
      FieldQueryNode fieldNode = (FieldQueryNode) node;
      FieldConfig fieldConfig = config.getFieldConfig(fieldNode.getFieldAsString());
      if (fieldConfig != null) {
        PointsConfig numericConfig = fieldConfig.get(ConfigurationKeys.POINTS_CONFIG);
        if (numericConfig != null) {
          NumberFormat numberFormat = numericConfig.getNumberFormat();
          String text = fieldNode.getTextAsString();
          Number number = null;
          if (text.length() > 0) {
            try {
              number = numberFormat.parse(text);
            } catch (ParseException e) {
              throw new QueryNodeParseException(new MessageImpl(QueryParserMessages.COULD_NOT_PARSE_NUMBER,
                  fieldNode.getTextAsString(), numberFormat.getClass().getCanonicalName()), e);
            }
            if (Integer.class.equals(numericConfig.getType())) {
              number = number.intValue();
            } else if (Long.class.equals(numericConfig.getType())) {
              number = number.longValue();
            } else if (Double.class.equals(numericConfig.getType())) {
              number = number.doubleValue();
            } else if (Float.class.equals(numericConfig.getType())) {
              number = number.floatValue();
            }
          } else {
            throw new QueryNodeParseException(new MessageImpl(QueryParserMessages.NUMERIC_CANNOT_BE_EMPTY,
                fieldNode.getFieldAsString()));
          }
          PointQueryNode lowerNode = new PointQueryNode(fieldNode.getField(), number, numberFormat);
          PointQueryNode upperNode = new PointQueryNode(fieldNode.getField(), number, numberFormat);
          return new PointRangeQueryNode(lowerNode, upperNode, true, true, numericConfig);
        }
      }
    }
  }
  return node;
}
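This processor only rewrites a FieldQueryNode when a PointsConfig is registered for its field. Below is a minimal sketch of that setup, not taken from the source above; the field name "price" and the query text are illustrative:

import java.text.NumberFormat;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.queryparser.flexible.standard.config.PointsConfig;
import org.apache.lucene.search.Query;

public class PointsConfigSketch {
  public static void main(String[] args) throws QueryNodeException {
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    // Register a points configuration for the "price" field so that
    // ConfigurationKeys.POINTS_CONFIG is present in that field's FieldConfig.
    Map<String, PointsConfig> pointsConfigMap = new HashMap<>();
    pointsConfigMap.put("price", new PointsConfig(NumberFormat.getIntegerInstance(Locale.ROOT), Integer.class));
    parser.setPointsConfigMap(pointsConfigMap);
    // "price:42" is parsed as a FieldQueryNode and rewritten by the processor into a
    // PointRangeQueryNode with 42 as both the (inclusive) lower and upper bound.
    Query query = parser.parse("price:42", "body");
    System.out.println(query);
  }
}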
Use of org.apache.lucene.queryparser.flexible.core.nodes.FieldQueryNode in project lucene-solr by apache.
From the class TermRangeQueryNodeProcessor, the method postProcessNode:
@Override
protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
  if (node instanceof TermRangeQueryNode) {
    TermRangeQueryNode termRangeNode = (TermRangeQueryNode) node;
    FieldQueryNode upper = termRangeNode.getUpperBound();
    FieldQueryNode lower = termRangeNode.getLowerBound();
    DateTools.Resolution dateRes = null;
    boolean inclusive = false;
    Locale locale = getQueryConfigHandler().get(ConfigurationKeys.LOCALE);
    if (locale == null) {
      locale = Locale.getDefault();
    }
    TimeZone timeZone = getQueryConfigHandler().get(ConfigurationKeys.TIMEZONE);
    if (timeZone == null) {
      timeZone = TimeZone.getDefault();
    }
    CharSequence field = termRangeNode.getField();
    String fieldStr = null;
    if (field != null) {
      fieldStr = field.toString();
    }
    FieldConfig fieldConfig = getQueryConfigHandler().getFieldConfig(fieldStr);
    if (fieldConfig != null) {
      dateRes = fieldConfig.get(ConfigurationKeys.DATE_RESOLUTION);
    }
    if (termRangeNode.isUpperInclusive()) {
      inclusive = true;
    }
    String part1 = lower.getTextAsString();
    String part2 = upper.getTextAsString();
    try {
      DateFormat df = DateFormat.getDateInstance(DateFormat.SHORT, locale);
      df.setLenient(true);
      if (part1.length() > 0) {
        Date d1 = df.parse(part1);
        part1 = DateTools.dateToString(d1, dateRes);
        lower.setText(part1);
      }
      if (part2.length() > 0) {
        Date d2 = df.parse(part2);
        if (inclusive) {
          // The user can only specify the date, not the time, so make sure
          // the time is set to the latest possible time of that date to really
          // include all documents:
          Calendar cal = Calendar.getInstance(timeZone, locale);
          cal.setTime(d2);
          cal.set(Calendar.HOUR_OF_DAY, 23);
          cal.set(Calendar.MINUTE, 59);
          cal.set(Calendar.SECOND, 59);
          cal.set(Calendar.MILLISECOND, 999);
          d2 = cal.getTime();
        }
        part2 = DateTools.dateToString(d2, dateRes);
        upper.setText(part2);
      }
    } catch (Exception e) {
      // not a date
      Analyzer analyzer = getQueryConfigHandler().get(ConfigurationKeys.ANALYZER);
      if (analyzer != null) {
        // because we call utf8ToString, this will only work with the default TermToBytesRefAttribute
        part1 = analyzer.normalize(lower.getFieldAsString(), part1).utf8ToString();
        part2 = analyzer.normalize(lower.getFieldAsString(), part2).utf8ToString();
        lower.setText(part1);
        upper.setText(part2);
      }
    }
  }
  return node;
}
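The locale, time zone, and date resolution that this processor reads from the config handler can be set on StandardQueryParser. The following minimal sketch is not taken from the source above; the field name "created" and the range values are illustrative, and the bounds are quoted so the slashes are not treated as regular-expression syntax:

import java.util.Locale;
import java.util.TimeZone;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.DateTools;
import org.apache.lucene.queryparser.flexible.core.QueryNodeException;
import org.apache.lucene.queryparser.flexible.standard.StandardQueryParser;
import org.apache.lucene.search.Query;

public class TermRangeDateSketch {
  public static void main(String[] args) throws QueryNodeException {
    StandardQueryParser parser = new StandardQueryParser(new StandardAnalyzer());
    parser.setLocale(Locale.US);                         // used for DateFormat.getDateInstance(SHORT, locale)
    parser.setTimeZone(TimeZone.getTimeZone("UTC"));     // used when pushing the inclusive upper bound to 23:59:59.999
    parser.setDateResolution(DateTools.Resolution.DAY);  // resolution passed to DateTools.dateToString
    // Bounds that parse as dates are rewritten to DateTools strings; bounds that do not
    // parse fall through to Analyzer.normalize in the catch block above.
    Query query = parser.parse("created:[\"1/1/2017\" TO \"12/31/2017\"]", "body");
    System.out.println(query);
  }
}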