Usage example of org.opensearch.common.io.stream.OutputStreamStreamOutput in the OpenSearch project (opensearch-project).
Taken from the class TermVectorsUnitTests, method testStreamRequestLegacyVersion.
/**
 * Verifies that a {@link TermVectorsRequest} written with a legacy wire version
 * (which still carried document types) serializes the type as "_doc", and that
 * the bytes can be read back as a normal request when the reader is set to the
 * same legacy version.
 *
 * Repeats with randomized flags to cover different boolean combinations.
 */
public void testStreamRequestLegacyVersion() throws IOException {
    for (int i = 0; i < 10; i++) {
        TermVectorsRequest request = new TermVectorsRequest("index", "id");
        request.offsets(random().nextBoolean());
        request.fieldStatistics(random().nextBoolean());
        request.payloads(random().nextBoolean());
        request.positions(random().nextBoolean());
        request.termStatistics(random().nextBoolean());
        String pref = random().nextBoolean() ? "somePreference" : null;
        request.preference(pref);
        request.doc(new BytesArray("{}"), randomBoolean(), XContentType.JSON);

        // Write using an older version which still contains types.
        ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
        OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
        out.setVersion(LegacyESVersion.V_7_2_0);
        request.writeTo(out);

        // First check the type on the stream was written as "_doc" by manually
        // parsing the stream until the type field is reached.
        ByteArrayInputStream opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
        InputStreamStreamInput opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer);
        TaskId.readFromStream(opensearchBuffer);
        if (opensearchBuffer.readBoolean()) {
            new ShardId(opensearchBuffer);
        }
        opensearchBuffer.readOptionalString();
        assertThat(opensearchBuffer.readString(), equalTo("_doc"));

        // Now read the stream as normal to check it is parsed correctly if
        // received from an older node.
        opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray());
        opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer);
        opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0);
        TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer);

        assertThat(req2.offsets(), equalTo(request.offsets()));
        assertThat(req2.fieldStatistics(), equalTo(request.fieldStatistics()));
        assertThat(req2.payloads(), equalTo(request.payloads()));
        assertThat(req2.positions(), equalTo(request.positions()));
        assertThat(req2.termStatistics(), equalTo(request.termStatistics()));
        // FIX: assert on the deserialized request (req2). The original asserted
        // on `request`, which holds trivially since those values were just set
        // above and so never exercised the round trip.
        assertThat(req2.preference(), equalTo(pref));
        assertThat(req2.routing(), equalTo(null));
        assertEquals(new BytesArray("{}"), req2.doc());
        assertEquals(XContentType.JSON, req2.xContentType());
    }
}
Usage example of org.opensearch.common.io.stream.OutputStreamStreamOutput in the OpenSearch project (opensearch-project).
Taken from the class TermVectorsUnitTests, method testStreamResponse.
/**
 * Round-trips both a populated and an empty {@link TermVectorsResponse}
 * through the stream wire format and verifies the deserialized copies.
 */
public void testStreamResponse() throws Exception {
    // A response carrying a standard term vector must survive serialization intact.
    TermVectorsResponse written = new TermVectorsResponse("a", "c");
    written.setExists(true);
    writeStandardTermVector(written);
    checkIfStandardTermVector(roundTrip(written));

    // An empty term vector must still round-trip and report existence.
    written = new TermVectorsResponse("a", "c");
    writeEmptyTermVector(written);
    assertTrue(roundTrip(written).isExists());
}

/** Serializes the given response to a byte buffer and reads it back through the stream API. */
private static TermVectorsResponse roundTrip(TermVectorsResponse response) throws Exception {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    response.writeTo(new OutputStreamStreamOutput(buffer));
    ByteArrayInputStream bytesIn = new ByteArrayInputStream(buffer.toByteArray());
    return new TermVectorsResponse(new InputStreamStreamInput(bytesIn));
}
Usage example of org.opensearch.common.io.stream.OutputStreamStreamOutput in the OpenSearch project (opensearch-project).
Taken from the class SignificanceHeuristicTests, method testStreamResponse.
// test that stream output can actually be read - does not replace bwc test
public void testStreamResponse() throws Exception {
    Version version = randomVersion(random());
    InternalMappedSignificantTerms<?, ?> sigTerms = getRandomSignificantTerms(getRandomSignificanceheuristic());

    // Serialize under the randomly chosen wire version.
    ByteArrayOutputStream outBuffer = new ByteArrayOutputStream();
    OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer);
    out.setVersion(version);
    if (version.before(LegacyESVersion.V_7_8_0)) {
        // Older wire formats carried the pipeline tree alongside the aggregation.
        sigTerms.mergePipelineTreeForBWCSerialization(PipelineAggregator.PipelineTree.EMPTY);
    }
    out.writeNamedWriteable(sigTerms);

    // Deserialize with a registry populated through SearchModule's side effects.
    SearchModule searchModule = new SearchModule(Settings.EMPTY, emptyList());
    NamedWriteableRegistry registry = new NamedWriteableRegistry(searchModule.getNamedWriteables());
    StreamInput in = new NamedWriteableAwareStreamInput(
        new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())),
        registry
    );
    in.setVersion(version);
    InternalMappedSignificantTerms<?, ?> read = (InternalMappedSignificantTerms<?, ?>) in.readNamedWriteable(
        InternalAggregation.class
    );

    // The heuristic and the first bucket's statistics must survive the round trip.
    assertEquals(sigTerms.significanceHeuristic, read.significanceHeuristic);
    SignificantTerms.Bucket expected = sigTerms.getBuckets().get(0);
    SignificantTerms.Bucket actual = read.getBuckets().get(0);
    assertThat(expected.getKeyAsString(), equalTo(actual.getKeyAsString()));
    assertThat(expected.getSupersetDf(), equalTo(actual.getSupersetDf()));
    assertThat(expected.getSubsetDf(), equalTo(actual.getSubsetDf()));
    assertThat(actual.getSubsetSize(), equalTo(10L));
    assertThat(actual.getSupersetSize(), equalTo(20L));
}
Usage example of org.opensearch.common.io.stream.OutputStreamStreamOutput in the OpenSearch project (opensearch-project).
Taken from the class TransportDecompressorTests, method testMultiPageCompression.
/**
 * Compresses 10000 ints (40000 bytes), then verifies the decompressor consumes
 * the whole compressed input, reaches end-of-stream, and yields exactly three
 * pages whose concatenation decodes back to the original sequence.
 */
public void testMultiPageCompression() throws IOException {
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        try (
            StreamOutput deflateStream = new OutputStreamStreamOutput(
                CompressorFactory.COMPRESSOR.threadLocalOutputStream(Streams.flushOnCloseStream(output))
            )
        ) {
            for (int value = 0; value < 10000; ++value) {
                deflateStream.writeInt(value);
            }
        }

        BytesReference compressed = output.bytes();
        TransportDecompressor decompressor = new TransportDecompressor(PageCacheRecycler.NON_RECYCLING_INSTANCE);
        assertEquals(compressed.length(), decompressor.decompress(compressed));
        assertTrue(decompressor.isEOS());

        // Expect exactly three decompressed pages and nothing more.
        ReleasableBytesReference first = decompressor.pollDecompressedPage();
        ReleasableBytesReference second = decompressor.pollDecompressedPage();
        ReleasableBytesReference third = decompressor.pollDecompressedPage();
        assertNull(decompressor.pollDecompressedPage());

        BytesReference combined = CompositeBytesReference.of(first, second, third);
        assertEquals(4 * 10000, combined.length());
        StreamInput streamInput = combined.streamInput();
        for (int expected = 0; expected < 10000; ++expected) {
            assertEquals(expected, streamInput.readInt());
        }
        Releasables.close(first, second, third);
    }
}
Usage example of org.opensearch.common.io.stream.OutputStreamStreamOutput in the OpenSearch project (opensearch-project).
Taken from the class TranslogHeaderTests, method writeHeaderWithoutTerm.
/**
 * Writes a translog header in the older VERSION_CHECKPOINTS format, which —
 * per the method name — omits the primary term field: just the Lucene codec
 * header followed by the length-prefixed translog UUID bytes. Used to
 * simulate translog files produced by older versions.
 */
static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException {
    final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel));
    // Lucene codec header: magic + codec name + version.
    CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS);
    final BytesRef uuid = new BytesRef(translogUUID);
    // Length-prefixed UUID bytes, matching the legacy on-disk layout.
    out.writeInt(uuid.length);
    out.writeBytes(uuid.bytes, uuid.offset, uuid.length);
    channel.force(true);
    // 43 = codec header + 4-byte length prefix + UUID bytes.
    // NOTE(review): this assumes the fixed-length UUIDs used by these tests —
    // confirm against the callers' translogUUID values.
    assertThat(channel.position(), equalTo(43L));
}
Aggregations