Use of org.apache.lucene.util.BytesRefBuilder in project elasticsearch by elastic.
The class BlobStoreRepository, method maybeRecalculateMetadataHash.
/**
* This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
* The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the
* comparison of the files on a per-segment / per-commit level.
*/
private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Exception {
    final StoreFileMetaData metadata;
    if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
        if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
            // we might have multiple parts even though the file is small... make sure we read all of it.
            try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
                BytesRefBuilder builder = new BytesRefBuilder();
                Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
                // reset the file info's metadata hash
                BytesRef hash = fileInfo.metadata().hash();
                assert hash.length == 0;
                hash.bytes = builder.bytes();
                hash.offset = 0;
                hash.length = builder.length();
            }
        }
    }
}
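The snippet accumulates the blob parts into a BytesRefBuilder and then points the existing, empty hash BytesRef at the builder's internal buffer. A minimal, self-contained sketch of the same accumulate-then-expose pattern is shown below; the class name, helper name, and chunk size are illustrative and not part of Elasticsearch:

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public final class HashFillSketch {

    // Hypothetical helper: accumulate a stream of known length into a BytesRefBuilder
    // and return an independent BytesRef copy of the collected bytes.
    public static BytesRef readAll(InputStream stream, long length) throws IOException {
        BytesRefBuilder builder = new BytesRefBuilder();
        builder.grow((int) length);               // pre-size the internal buffer
        byte[] chunk = new byte[1024];
        int read;
        while ((read = stream.read(chunk)) != -1) {
            builder.append(chunk, 0, read);       // append the bytes just read
        }
        return builder.toBytesRef();              // copy out; the builder stays reusable
    }
}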
Use of org.apache.lucene.util.BytesRefBuilder in project elasticsearch by elastic.
The class ScriptSortBuilder, method build.
@Override
public SortFieldAndFormat build(QueryShardContext context) throws IOException {
    final SearchScript searchScript = context.getSearchScript(script, ScriptContext.Standard.SEARCH);
    MultiValueMode valueMode = null;
    if (sortMode != null) {
        valueMode = MultiValueMode.fromString(sortMode.toString());
    }
    boolean reverse = (order == SortOrder.DESC);
    if (valueMode == null) {
        valueMode = reverse ? MultiValueMode.MAX : MultiValueMode.MIN;
    }
    final Nested nested = resolveNested(context, nestedPath, nestedFilter);
    final IndexFieldData.XFieldComparatorSource fieldComparatorSource;
    switch (type) {
        case STRING:
            fieldComparatorSource = new BytesRefFieldComparatorSource(null, null, valueMode, nested) {
                LeafSearchScript leafScript;

                @Override
                protected SortedBinaryDocValues getValues(LeafReaderContext context) throws IOException {
                    leafScript = searchScript.getLeafSearchScript(context);
                    final BinaryDocValues values = new BinaryDocValues() {
                        final BytesRefBuilder spare = new BytesRefBuilder();

                        @Override
                        public BytesRef get(int docID) {
                            leafScript.setDocument(docID);
                            spare.copyChars(leafScript.run().toString());
                            return spare.get();
                        }
                    };
                    return FieldData.singleton(values, null);
                }

                @Override
                protected void setScorer(Scorer scorer) {
                    leafScript.setScorer(scorer);
                }
            };
            break;
        case NUMBER:
            fieldComparatorSource = new DoubleValuesComparatorSource(null, Double.MAX_VALUE, valueMode, nested) {
                LeafSearchScript leafScript;

                @Override
                protected SortedNumericDoubleValues getValues(LeafReaderContext context) throws IOException {
                    leafScript = searchScript.getLeafSearchScript(context);
                    final NumericDoubleValues values = new NumericDoubleValues() {
                        @Override
                        public double get(int docID) {
                            leafScript.setDocument(docID);
                            return leafScript.runAsDouble();
                        }
                    };
                    return FieldData.singleton(values, null);
                }

                @Override
                protected void setScorer(Scorer scorer) {
                    leafScript.setScorer(scorer);
                }
            };
            break;
        default:
            throw new QueryShardException(context, "custom script sort type [" + type + "] not supported");
    }
    return new SortFieldAndFormat(new SortField("_script", fieldComparatorSource, reverse), DocValueFormat.RAW);
}
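In the STRING branch, each script result is encoded to UTF-8 by reusing a single BytesRefBuilder spare, and get() hands back a view over the builder's internal buffer. A minimal sketch of that reuse pattern outside of doc values; the class name and sample values are made up for illustration:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public final class SpareBuilderSketch {
    public static void main(String[] args) {
        BytesRefBuilder spare = new BytesRefBuilder();
        for (String value : new String[] { "alpha", "beta", "gamma" }) {
            spare.copyChars(value);            // encode the chars as UTF-8, reusing the buffer
            BytesRef view = spare.get();       // a view over the builder's internal buffer
            System.out.println(view.utf8ToString());
            // Note: the view is overwritten by the next copyChars(); deep-copy it if it must outlive the loop.
        }
    }
}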
Use of org.apache.lucene.util.BytesRefBuilder in project elasticsearch by elastic.
The class TermSuggester, method queryTerms.
private static List<Token> queryTerms(SuggestionContext suggestion, CharsRefBuilder spare) throws IOException {
    final List<Token> result = new ArrayList<>();
    final String field = suggestion.getField();
    DirectCandidateGenerator.analyze(suggestion.getAnalyzer(), suggestion.getText(), field, new DirectCandidateGenerator.TokenConsumer() {
        @Override
        public void nextToken() {
            Term term = new Term(field, BytesRef.deepCopyOf(fillBytesRef(new BytesRefBuilder())));
            result.add(new Token(term, offsetAttr.startOffset(), offsetAttr.endOffset()));
        }
    }, spare);
    return result;
}
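The token bytes are deep-copied because get() on a BytesRefBuilder returns a view over a reusable buffer, while the Term needs to own its bytes. A short sketch of that copy-before-keep pattern; the helper and its inputs are hypothetical:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public final class DeepCopySketch {

    // Hypothetical helper: build one Term per token text, deep-copying the bytes
    // because the builder's buffer is overwritten on every iteration.
    public static List<Term> toTerms(String field, Iterable<String> tokens) {
        List<Term> terms = new ArrayList<>();
        BytesRefBuilder spare = new BytesRefBuilder();
        for (String token : tokens) {
            spare.copyChars(token);                               // reuse the shared buffer
            terms.add(new Term(field, BytesRef.deepCopyOf(spare.get())));
        }
        return terms;
    }
}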
Use of org.apache.lucene.util.BytesRefBuilder in project elasticsearch by elastic.
The class Correction, method join.
public BytesRef join(BytesRef separator, BytesRefBuilder result, BytesRef preTag, BytesRef postTag) {
    BytesRef[] toJoin = new BytesRef[this.candidates.length];
    int len = separator.length * this.candidates.length - 1;
    for (int i = 0; i < toJoin.length; i++) {
        Candidate candidate = candidates[i];
        if (preTag == null || candidate.userInput) {
            toJoin[i] = candidate.term;
        } else {
            final int maxLen = preTag.length + postTag.length + candidate.term.length;
            // just allocate once
            final BytesRefBuilder highlighted = new BytesRefBuilder();
            highlighted.grow(maxLen);
            if (i == 0 || candidates[i - 1].userInput) {
                highlighted.append(preTag);
            }
            highlighted.append(candidate.term);
            if (toJoin.length == i + 1 || candidates[i + 1].userInput) {
                highlighted.append(postTag);
            }
            toJoin[i] = highlighted.get();
        }
        len += toJoin[i].length;
    }
    result.grow(len);
    return WordScorer.join(separator, result, toJoin);
}
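WordScorer.join is internal to Elasticsearch; a simplified stand-in that concatenates BytesRefs with a separator into a caller-provided BytesRefBuilder could look like the sketch below. The class name and the printed example are illustrative only:

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public final class JoinSketch {

    // Simplified stand-in for WordScorer.join: append the terms separated by 'separator'
    // into the caller-provided builder and return a view of the joined bytes.
    public static BytesRef join(BytesRef separator, BytesRefBuilder result, BytesRef... terms) {
        result.clear();
        for (int i = 0; i < terms.length; i++) {
            if (i > 0) {
                result.append(separator);
            }
            result.append(terms[i]);
        }
        return result.get();
    }

    public static void main(String[] args) {
        BytesRefBuilder result = new BytesRefBuilder();
        BytesRef joined = join(new BytesRef(" "), result,
                new BytesRef("<em>hello</em>"), new BytesRef("world"));
        System.out.println(joined.utf8ToString());   // prints: <em>hello</em> world
    }
}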
Use of org.apache.lucene.util.BytesRefBuilder in project stargate-core by tuplejump.
The class LuceneUtils, method tsTerm.
public static Term tsTerm(long ts) {
    BytesRefBuilder tsBytes = new BytesRefBuilder();
    NumericUtils.longToPrefixCoded(ts, NumericUtils.PRECISION_STEP_DEFAULT, tsBytes);
    return new Term(CF_TS_INDEXED, tsBytes);
}
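A typical way to use the returned term is to wrap it in a TermQuery, for example to look up rows indexed under a given timestamp. The sketch below is illustrative only and assumes the stargate-core LuceneUtils class (and its CF_TS_INDEXED field) is on the classpath:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;

public final class TsTermUsageSketch {
    public static void main(String[] args) {
        long ts = System.currentTimeMillis();
        Term term = LuceneUtils.tsTerm(ts);       // prefix-coded timestamp term on CF_TS_INDEXED
        TermQuery query = new TermQuery(term);    // e.g. to match or delete entries by timestamp
        System.out.println(query);
    }
}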