Use of org.opensearch.common.io.stream.StreamOutput in project OpenSearch by opensearch-project.
Class SearchContextId, method encode:
public static String encode(List<SearchPhaseResult> searchPhaseResults, Map<String, AliasFilter> aliasFilter, Version version) {
    final Map<ShardId, SearchContextIdForNode> shards = new HashMap<>();
    for (SearchPhaseResult searchPhaseResult : searchPhaseResults) {
        final SearchShardTarget target = searchPhaseResult.getSearchShardTarget();
        shards.put(
            target.getShardId(),
            new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId())
        );
    }
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.setVersion(version);
        Version.writeVersion(version, out); // version prefix so the reader can set its stream version before reading
        out.writeMap(shards, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
        out.writeMap(aliasFilter, StreamOutput::writeString, (o, v) -> v.writeTo(o));
        // the raw stream bytes become the opaque, URL-safe search context id
        return Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(out.bytes()));
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
}
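The encoded string is just Base64 over the raw StreamOutput bytes, so reading it back mirrors the writes field for field. Below is a minimal sketch of that read side (not the project's actual decode method); it assumes ShardId, SearchContextIdForNode, and AliasFilter expose the usual StreamInput constructors and are accessible from the calling code, and it takes a NamedWriteableRegistry because AliasFilter may carry named writeables such as query builders.

static void decodeSketch(String id, NamedWriteableRegistry registry) throws IOException {
    final byte[] bytes = Base64.getUrlDecoder().decode(id);
    try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), registry)) {
        final Version version = Version.readVersion(in); // mirrors Version.writeVersion(version, out)
        in.setVersion(version);
        // the two reads mirror the two writeMap calls in encode, in the same order
        final Map<ShardId, SearchContextIdForNode> shards = in.readMap(ShardId::new, SearchContextIdForNode::new);
        final Map<String, AliasFilter> aliasFilter = in.readMap(StreamInput::readString, AliasFilter::new);
        // ... shards and aliasFilter can now be used to rebuild the search context ...
    }
}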
Use of org.opensearch.common.io.stream.StreamOutput in project OpenSearch by opensearch-project.
Class PublicationTransportHandler, method serializeDiffClusterState:
private static BytesReference serializeDiffClusterState(Diff<ClusterState> diff, Version nodeVersion) throws IOException {
    final BytesStreamOutput bStream = new BytesStreamOutput();
    try (StreamOutput stream = new OutputStreamStreamOutput(CompressorFactory.COMPRESSOR.threadLocalOutputStream(bStream))) {
        stream.setVersion(nodeVersion);
        stream.writeBoolean(false); // false marks the payload as a diff rather than a full cluster state
        diff.writeTo(stream);
    }
    return bStream.bytes();
}
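The read side reverses the wrapping: the compressed bytes go back through the compressor's input stream, and the leading boolean tells the receiver whether a full cluster state (true) or a diff (false) follows. Here is a minimal sketch of that path, stopping before the diff itself is deserialized; it assumes the Compressor in use exposes threadLocalInputStream alongside the threadLocalOutputStream used above.

private static boolean isFullClusterState(BytesReference compressed, Version nodeVersion) throws IOException {
    try (StreamInput in = new InputStreamStreamInput(
            CompressorFactory.COMPRESSOR.threadLocalInputStream(compressed.streamInput()))) {
        in.setVersion(nodeVersion);
        // the flag written by the serializer: true = full ClusterState, false = Diff<ClusterState>
        return in.readBoolean();
    }
}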
Use of org.opensearch.common.io.stream.StreamOutput in project OpenSearch by opensearch-project.
Class SearchHit, method writeTo:
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeFloat(score);
    out.writeOptionalText(id);
    if (out.getVersion().before(Version.V_2_0_0)) {
        // pre-2.0 receivers still expect the (since removed) mapping type on the wire
        out.writeOptionalText(SINGLE_MAPPING_TYPE);
    }
    out.writeOptionalWriteable(nestedIdentity);
    out.writeLong(version);
    out.writeZLong(seqNo);
    out.writeVLong(primaryTerm);
    out.writeBytesReference(source);
    if (explanation == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true); // boolean prefix: tells the reader an explanation follows
        writeExplanation(out, explanation);
    }
    if (out.getVersion().onOrAfter(LegacyESVersion.V_7_8_0)) {
        out.writeMap(documentFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream));
        out.writeMap(metaFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream));
    } else {
        // older receivers expect document and metadata fields merged into a single map
        writeFields(out, this.getFields());
    }
    if (highlightFields == null) {
        out.writeVInt(0);
    } else {
        out.writeVInt(highlightFields.size());
        for (HighlightField highlightField : highlightFields.values()) {
            highlightField.writeTo(out);
        }
    }
    sortValues.writeTo(out);
    if (matchedQueries.length == 0) {
        out.writeVInt(0);
    } else {
        out.writeVInt(matchedQueries.length);
        for (String matchedFilter : matchedQueries) {
            out.writeString(matchedFilter);
        }
    }
    out.writeOptionalWriteable(shard);
    if (innerHits == null) {
        out.writeVInt(0);
    } else {
        out.writeVInt(innerHits.size());
        for (Map.Entry<String, SearchHits> entry : innerHits.entrySet()) {
            out.writeString(entry.getKey());
            entry.getValue().writeTo(out);
        }
    }
}
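The method shows the two conventions that run through OpenSearch wire code: version gates (out.getVersion().before(...) / onOrAfter(...)) so that older nodes keep receiving only the fields they understand, and a boolean prefix for optional values that have no writeOptional* helper. A minimal, hypothetical Writeable following the same conventions (the class and its fields are invented for illustration):

// Hypothetical class; only the serialization conventions mirror SearchHit.
class ExampleDoc implements Writeable {
    private final float score;
    private final String id;          // may be null
    private final String details;     // may be null; written with an explicit boolean prefix below
    private final long primaryTerm;   // version-gated in this sketch

    ExampleDoc(StreamInput in) throws IOException {
        score = in.readFloat();
        id = in.readOptionalString();
        // boolean prefix: the same shape SearchHit uses for explanation, which has no writeOptional helper
        details = in.readBoolean() ? in.readString() : null;
        // version gate: only read the field if the sender was new enough to have written it
        primaryTerm = in.getVersion().onOrAfter(Version.V_2_0_0) ? in.readVLong() : 0L;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeFloat(score);
        out.writeOptionalString(id);  // null-safe counterpart of readOptionalString
        if (details == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeString(details);
        }
        if (out.getVersion().onOrAfter(Version.V_2_0_0)) {
            out.writeVLong(primaryTerm); // skipped entirely for older receivers
        }
    }
}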
Use of org.opensearch.common.io.stream.StreamOutput in project OpenSearch by opensearch-project.
Class BucketHelpersTests, method testReturnMultiValueObject:
public void testReturnMultiValueObject() {
    MultiBucketsAggregation agg = new MultiBucketsAggregation() {
        @Override
        public List<? extends Bucket> getBuckets() {
            return null;
        }

        @Override
        public String getName() {
            return "foo";
        }

        @Override
        public String getType() {
            return null;
        }

        @Override
        public Map<String, Object> getMetadata() {
            return null;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return null;
        }
    };
    InternalMultiBucketAggregation.InternalBucket bucket = new InternalMultiBucketAggregation.InternalBucket() {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
        }

        @Override
        public Object getKey() {
            return null;
        }

        @Override
        public String getKeyAsString() {
            return null;
        }

        @Override
        public long getDocCount() {
            return 0;
        }

        @Override
        public Aggregations getAggregations() {
            return null;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            return null;
        }

        @Override
        public Object getProperty(String containingAggName, List<String> path) {
            // a multi-valued metric, which resolveBucketValue cannot reduce to a single number
            return mock(InternalTDigestPercentiles.class);
        }
    };
    AggregationExecutionException e = expectThrows(
        AggregationExecutionException.class,
        () -> BucketHelpers.resolveBucketValue(agg, bucket, "foo>bar", BucketHelpers.GapPolicy.SKIP)
    );
    assertThat(
        e.getMessage(),
        equalTo(
            "buckets_path must reference either a number value or a single value numeric "
                + "metric aggregation, but [foo] contains multiple values. Please specify which to use."
        )
    );
}
Use of org.opensearch.common.io.stream.StreamOutput in project OpenSearch by opensearch-project.
Class TransportDecompressorTests, method testIncrementalMultiPageCompression:
public void testIncrementalMultiPageCompression() throws IOException {
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        try (StreamOutput deflateStream = new OutputStreamStreamOutput(
                CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(output)))) {
            for (int i = 0; i < 10000; ++i) {
                deflateStream.writeInt(i);
            }
        }
        BytesReference bytes = output.bytes();
        TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE);
        // split the compressed payload into three chunks to exercise partial-buffer handling
        int split1 = (int) (bytes.length() * 0.3);
        int split2 = (int) (bytes.length() * 0.65);
        BytesReference inbound1 = bytes.slice(0, split1);
        BytesReference inbound2 = bytes.slice(split1, split2 - split1);
        BytesReference inbound3 = bytes.slice(split2, bytes.length() - split2);
        int bytesConsumed1 = decompressor.decompress(inbound1);
        assertEquals(inbound1.length(), bytesConsumed1);
        assertFalse(decompressor.isEOS());
        int bytesConsumed2 = decompressor.decompress(inbound2);
        assertEquals(inbound2.length(), bytesConsumed2);
        assertFalse(decompressor.isEOS());
        int bytesConsumed3 = decompressor.decompress(inbound3);
        assertEquals(inbound3.length(), bytesConsumed3);
        assertTrue(decompressor.isEOS()); // end of the deflate stream is reached only after the last chunk
        ReleasableBytesReference reference1 = decompressor.pollDecompressedPage();
        ReleasableBytesReference reference2 = decompressor.pollDecompressedPage();
        ReleasableBytesReference reference3 = decompressor.pollDecompressedPage();
        assertNull(decompressor.pollDecompressedPage());
        BytesReference composite = CompositeBytesReference.of(reference1, reference2, reference3);
        assertEquals(4 * 10000, composite.length()); // 10,000 ints, 4 bytes each
        StreamInput streamInput = composite.streamInput();
        for (int i = 0; i < 10000; ++i) {
            assertEquals(i, streamInput.readInt());
        }
        Releasables.close(reference1, reference2, reference3);
    }
}
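For contrast, the decompressor can also consume the whole compressed payload in a single call; the three-way split above exists only to exercise partial-buffer handling. A minimal sketch that reuses the bytes produced above and only the calls already shown in this test (the exact page count depends on the recycler's byte page size, typically 16 KiB, so the 40,000 decompressed bytes land on a few pages):

TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE);
int consumed = decompressor.decompress(bytes); // feed the entire compressed payload at once
assertEquals(bytes.length(), consumed);
assertTrue(decompressor.isEOS());
List<ReleasableBytesReference> pages = new ArrayList<>();
ReleasableBytesReference page;
while ((page = decompressor.pollDecompressedPage()) != null) {
    pages.add(page);
}
BytesReference composite = CompositeBytesReference.of(pages.toArray(new ReleasableBytesReference[0]));
assertEquals(4 * 10000, composite.length()); // 10,000 ints, 4 bytes each
Releasables.close(pages);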