Use of org.opensearch.common.io.stream.StreamInput in project OpenSearch by opensearch-project.
The class PercolateQueryBuilder, method createStore.
static PercolateQuery.QueryStore createStore(MappedFieldType queryBuilderFieldType, QueryShardContext context) {
    Version indexVersion = context.indexVersionCreated();
    NamedWriteableRegistry registry = context.getWriteableRegistry();
    return ctx -> {
        LeafReader leafReader = ctx.reader();
        BinaryDocValues binaryDocValues = leafReader.getBinaryDocValues(queryBuilderFieldType.name());
        if (binaryDocValues == null) {
            return docId -> null;
        }
        return docId -> {
            if (binaryDocValues.advanceExact(docId)) {
                BytesRef qbSource = binaryDocValues.binaryValue();
                try (InputStream in = new ByteArrayInputStream(qbSource.bytes, qbSource.offset, qbSource.length)) {
                    try (StreamInput input = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(in, qbSource.length), registry)) {
                        input.setVersion(indexVersion);
                        // Query builder's content is stored via BinaryFieldMapper, which has a custom encoding
                        // to encode multiple binary values into a single binary doc values field.
                        // This is the reason we first need to read the number of values and
                        // then the length of the field value in bytes.
                        int numValues = input.readVInt();
                        assert numValues == 1;
                        int valueLength = input.readVInt();
                        assert valueLength > 0;
                        QueryBuilder queryBuilder = input.readNamedWriteable(QueryBuilder.class);
                        assert in.read() == -1;
                        queryBuilder = Rewriteable.rewrite(queryBuilder, context);
                        return queryBuilder.toQuery(context);
                    }
                }
            } else {
                return null;
            }
        };
    };
}
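The NamedWriteableAwareStreamInput pattern used above is not tied to Lucene doc values. The sketch below is a minimal illustration of the same round trip over a plain byte array, assuming a NamedWriteableRegistry that already contains the standard QueryBuilder entries (for example, one built from SearchModule); the helper name roundTripQueryBuilder is hypothetical and not part of PercolateQueryBuilder.

// Minimal sketch: serialize a QueryBuilder as a named writeable and read it back.
// Assumes `registry` already knows the QueryBuilder implementations being written.
static QueryBuilder roundTripQueryBuilder(QueryBuilder original, NamedWriteableRegistry registry) throws IOException {
    byte[] bytes;
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.writeNamedWriteable(original);                    // writes the builder's name plus its own fields
        bytes = BytesReference.toBytes(out.bytes());
    }
    try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), registry)) {
        return in.readNamedWriteable(QueryBuilder.class);     // looks the reader up by name in the registry
    }
}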
Use of org.opensearch.common.io.stream.StreamInput in project OpenSearch by opensearch-project.
The class RankEvalResponseTests, method testSerialization.
public void testSerialization() throws IOException {
    RankEvalResponse randomResponse = createRandomResponse();
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        randomResponse.writeTo(output);
        try (StreamInput in = output.bytes().streamInput()) {
            RankEvalResponse deserializedResponse = new RankEvalResponse(in);
            assertEquals(randomResponse.getMetricScore(), deserializedResponse.getMetricScore(), Double.MIN_VALUE);
            assertEquals(randomResponse.getPartialResults(), deserializedResponse.getPartialResults());
            assertEquals(randomResponse.getFailures().keySet(), deserializedResponse.getFailures().keySet());
            assertNotSame(randomResponse, deserializedResponse);
            assertEquals(-1, in.read());
        }
    }
}
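The write-then-read-back shape of this test generalizes to any Writeable. Below is a minimal sketch of such a helper; the method name copyViaStream is invented for illustration, and the Writeable.Reader argument would typically be a constructor reference such as RankEvalResponse::new.

// Generic round-trip sketch mirroring the test above: serialize, deserialize, and
// confirm that every written byte was consumed.
static <T extends Writeable> T copyViaStream(T original, Writeable.Reader<T> reader) throws IOException {
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        original.writeTo(output);
        try (StreamInput in = output.bytes().streamInput()) {
            T copy = reader.read(in);
            assert in.read() == -1 : "serialization should consume exactly the bytes that were written";
            return copy;
        }
    }
}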
Use of org.opensearch.common.io.stream.StreamInput in project OpenSearch by opensearch-project.
The class ClusterStateDiffIT, method testClusterStateDiffSerialization.
public void testClusterStateDiffSerialization() throws Exception {
    NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables());
    DiscoveryNode masterNode = randomNode("master");
    DiscoveryNode otherNode = randomNode("other");
    DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(masterNode).add(otherNode).localNodeId(masterNode.getId()).build();
    ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
    ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode, namedWriteableRegistry);
    int iterationCount = randomIntBetween(10, 300);
    for (int iteration = 0; iteration < iterationCount; iteration++) {
        ClusterState previousClusterState = clusterState;
        ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs;
        int changesCount = randomIntBetween(1, 4);
        ClusterState.Builder builder = null;
        for (int i = 0; i < changesCount; i++) {
            if (i > 0) {
                clusterState = builder.build();
            }
            switch (randomInt(5)) {
                case 0:
                    builder = randomNodes(clusterState);
                    break;
                case 1:
                    builder = randomRoutingTable(clusterState);
                    break;
                case 2:
                    builder = randomBlocks(clusterState);
                    break;
                case 3:
                    builder = randomClusterStateCustoms(clusterState);
                    break;
                case 4:
                    builder = randomMetadataChanges(clusterState);
                    break;
                case 5:
                    builder = randomCoordinationMetadata(clusterState);
                    break;
                default:
                    throw new IllegalArgumentException("Shouldn't be here");
            }
        }
        clusterState = builder.incrementVersion().build();
        if (randomIntBetween(0, 10) < 1) {
            // Update cluster state via full serialization from time to time
            clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().getLocalNode(), namedWriteableRegistry);
        } else {
            // Update cluster states using diffs
            Diff<ClusterState> diffBeforeSerialization = clusterState.diff(previousClusterState);
            BytesStreamOutput os = new BytesStreamOutput();
            diffBeforeSerialization.writeTo(os);
            byte[] diffBytes = BytesReference.toBytes(os.bytes());
            Diff<ClusterState> diff;
            try (StreamInput input = StreamInput.wrap(diffBytes)) {
                StreamInput namedInput = new NamedWriteableAwareStreamInput(input, namedWriteableRegistry);
                diff = ClusterState.readDiffFrom(namedInput, previousClusterStateFromDiffs.nodes().getLocalNode());
                clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs);
            }
        }
        try {
            // Check non-diffable elements
            assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version()));
            assertThat(clusterStateFromDiffs.coordinationMetadata(), equalTo(clusterState.coordinationMetadata()));
            // Check nodes
            assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes()));
            assertThat(clusterStateFromDiffs.nodes().getLocalNodeId(), equalTo(previousClusterStateFromDiffs.nodes().getLocalNodeId()));
            assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes()));
            for (ObjectCursor<String> node : clusterStateFromDiffs.nodes().getNodes().keys()) {
                DiscoveryNode node1 = clusterState.nodes().get(node.value);
                DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value);
                assertThat(node1.getVersion(), equalTo(node2.getVersion()));
                assertThat(node1.getAddress(), equalTo(node2.getAddress()));
                assertThat(node1.getAttributes(), equalTo(node2.getAttributes()));
            }
            // Check routing table
            assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version()));
            assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting()));
            // Check cluster blocks
            assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterState.blocks().global()));
            assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterState.blocks().indices()));
            assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterState.blocks().disableStatePersistence()));
            // Check metadata
            assertThat(clusterStateFromDiffs.metadata().version(), equalTo(clusterState.metadata().version()));
            assertThat(clusterStateFromDiffs.metadata().clusterUUID(), equalTo(clusterState.metadata().clusterUUID()));
            assertThat(clusterStateFromDiffs.metadata().transientSettings(), equalTo(clusterState.metadata().transientSettings()));
            assertThat(clusterStateFromDiffs.metadata().persistentSettings(), equalTo(clusterState.metadata().persistentSettings()));
            assertThat(clusterStateFromDiffs.metadata().indices(), equalTo(clusterState.metadata().indices()));
            assertThat(clusterStateFromDiffs.metadata().templates(), equalTo(clusterState.metadata().templates()));
            assertThat(clusterStateFromDiffs.metadata().customs(), equalTo(clusterState.metadata().customs()));
            assertThat(clusterStateFromDiffs.metadata().equalsAliases(clusterState.metadata()), is(true));
            // JSON Serialization test - make sure that both states produce similar JSON
            assertNull(differenceBetweenMapsIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)));
            // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order
            // however, serialized size should remain the same
            assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length));
        } catch (AssertionError error) {
            logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString());
            throw error;
        }
    }
    logger.info("Final cluster state:[{}]", clusterState.toString());
}
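The part of this loop that exercises StreamInput directly is the diff round trip. The sketch below isolates that step as a standalone helper; the name applySerializedDiff and its parameter list are hypothetical, and it assumes the caller already has both cluster states, the receiving node's DiscoveryNode, and a populated NamedWriteableRegistry.

// Hypothetical helper: serialize a cluster state diff, then apply it on the "receiving" side.
static ClusterState applySerializedDiff(ClusterState newState, ClusterState previousState, DiscoveryNode localNode, NamedWriteableRegistry registry) throws IOException {
    Diff<ClusterState> diff = newState.diff(previousState);
    byte[] bytes;
    try (BytesStreamOutput os = new BytesStreamOutput()) {
        diff.writeTo(os);
        bytes = BytesReference.toBytes(os.bytes());
    }
    try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), registry)) {
        // The registry is needed because customs carried inside the diff are named writeables.
        return ClusterState.readDiffFrom(in, localNode).apply(previousState);
    }
}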
Use of org.opensearch.common.io.stream.StreamInput in project OpenSearch by opensearch-project.
The class ConcurrentSeqNoVersioningIT, method runLinearizabilityChecker.
@SuppressForbidden(reason = "system out is ok for a command line tool")
private static void runLinearizabilityChecker(FileInputStream fileInputStream, long primaryTerm, long seqNo) throws IOException {
    StreamInput is = new InputStreamStreamInput(Base64.getDecoder().wrap(fileInputStream));
    is = new NamedWriteableAwareStreamInput(is, createNamedWriteableRegistry());
    LinearizabilityChecker.History history = readHistory(is);
    Version initialVersion = new Version(primaryTerm, seqNo);
    boolean result = new LinearizabilityChecker().isLinearizable(new CASSequentialSpec(initialVersion), history, missingResponseGenerator());
    System.out.println(LinearizabilityChecker.visualize(new CASSequentialSpec(initialVersion), history, missingResponseGenerator()));
    System.out.println("Linearizable?: " + result);
}
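Here the checker wraps a Base64-decoding InputStream in an InputStreamStreamInput, so the history file can be produced by any Base64-encoding writer. Below is a minimal sketch of a matching write/read pair, assuming only java.util.Base64 and the stream wrappers used above; the temporary file and the string payload are illustrative.

// Sketch: write a value through a Base64-encoding stream, then read it back the same way
// the linearizability checker reads its history file.
Path file = Files.createTempFile("history", ".b64");   // illustrative location
try (StreamOutput out = new OutputStreamStreamOutput(Base64.getEncoder().wrap(Files.newOutputStream(file)))) {
    out.writeString("some recorded event");
}
try (StreamInput in = new InputStreamStreamInput(Base64.getDecoder().wrap(Files.newInputStream(file)))) {
    String event = in.readString();                     // round-trips the value written above
}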
Use of org.opensearch.common.io.stream.StreamInput in project OpenSearch by opensearch-project.
The class AbstractSimpleTransportTestCase, method testTransportStatsWithException.
public void testTransportStatsWithException() throws Exception {
    MockTransportService serviceC = buildService("TS_C", version0, Settings.EMPTY);
    CountDownLatch receivedLatch = new CountDownLatch(1);
    CountDownLatch sendResponseLatch = new CountDownLatch(1);
    Exception ex = new RuntimeException("boom");
    ex.setStackTrace(new StackTraceElement[0]);
    serviceB.registerRequestHandler("internal:action", ThreadPool.Names.SAME, TestRequest::new, (request, channel, task) -> {
        // don't block on a network thread here
        threadPool.generic().execute(new AbstractRunnable() {
            @Override
            public void onFailure(Exception e) {
                try {
                    channel.sendResponse(e);
                } catch (IOException e1) {
                    throw new UncheckedIOException(e1);
                }
            }

            @Override
            protected void doRun() throws Exception {
                receivedLatch.countDown();
                sendResponseLatch.await();
                onFailure(ex);
            }
        });
    });
    serviceC.start();
    serviceC.acceptIncomingRequests();
    CountDownLatch responseLatch = new CountDownLatch(1);
    AtomicReference<TransportException> receivedException = new AtomicReference<>(null);
    TransportResponseHandler<TransportResponse> transportResponseHandler = new TransportResponseHandler<TransportResponse>() {
        @Override
        public TransportResponse read(StreamInput in) {
            return TransportResponse.Empty.INSTANCE;
        }

        @Override
        public void handleResponse(TransportResponse response) {
            responseLatch.countDown();
        }

        @Override
        public void handleException(TransportException exp) {
            receivedException.set(exp);
            responseLatch.countDown();
        }

        @Override
        public String executor() {
            return ThreadPool.Names.SAME;
        }
    };
    // nothing transmitted / read yet
    TransportStats stats = serviceC.transport.getStats();
    assertEquals(0, stats.getRxCount());
    assertEquals(0, stats.getTxCount());
    assertEquals(0, stats.getRxSize().getBytes());
    assertEquals(0, stats.getTxSize().getBytes());
    ConnectionProfile.Builder builder = new ConnectionProfile.Builder();
    builder.addConnections(1, TransportRequestOptions.Type.BULK, TransportRequestOptions.Type.PING, TransportRequestOptions.Type.RECOVERY, TransportRequestOptions.Type.REG, TransportRequestOptions.Type.STATE);
    try (Transport.Connection connection = serviceC.openConnection(serviceB.getLocalNode(), builder.build())) {
        assertBusy(() -> {
            // netty for instance invokes this concurrently so we better use assert busy here
            // request has been sent
            TransportStats transportStats = serviceC.transport.getStats();
            assertEquals(1, transportStats.getRxCount());
            assertEquals(1, transportStats.getTxCount());
            assertEquals(25, transportStats.getRxSize().getBytes());
            assertEquals(51, transportStats.getTxSize().getBytes());
        });
        serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler);
        receivedLatch.await();
        assertBusy(() -> {
            // netty for instance invokes this concurrently so we better use assert busy here
            // request has been sent
            TransportStats transportStats = serviceC.transport.getStats();
            assertEquals(1, transportStats.getRxCount());
            assertEquals(2, transportStats.getTxCount());
            assertEquals(25, transportStats.getRxSize().getBytes());
            assertEquals(111, transportStats.getTxSize().getBytes());
        });
        sendResponseLatch.countDown();
        responseLatch.await();
        // exception response has been received
        stats = serviceC.transport.getStats();
        assertEquals(2, stats.getRxCount());
        assertEquals(2, stats.getTxCount());
        TransportException exception = receivedException.get();
        assertNotNull(exception);
        BytesStreamOutput streamOutput = new BytesStreamOutput();
        exception.writeTo(streamOutput);
        String failedMessage = "Unexpected read bytes size. The transport exception that was received=" + exception;
        // 53 bytes are the non-exception message bytes that have been received. It should include the initial
        // handshake message and the header, version, etc bytes in the exception message.
        assertEquals(failedMessage, 53 + streamOutput.bytes().length(), stats.getRxSize().getBytes());
        assertEquals(111, stats.getTxSize().getBytes());
    } finally {
        serviceC.close();
    }
}
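In this test, read(StreamInput) discards the wire payload and returns TransportResponse.Empty.INSTANCE because only the exception path matters. When a handler needs to carry data back, read is where the response is deserialized. Below is a minimal sketch of that shape; the EchoResponse type and its single field are invented for illustration and are not part of the test case.

// Hypothetical response type and handler sketch: read(StreamInput) rebuilds the response from the wire.
class EchoResponse extends TransportResponse {
    final String message;

    EchoResponse(StreamInput in) throws IOException {
        message = in.readString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(message);
    }
}

TransportResponseHandler<EchoResponse> handler = new TransportResponseHandler<EchoResponse>() {
    @Override
    public EchoResponse read(StreamInput in) throws IOException {
        return new EchoResponse(in);   // invoked by the transport layer with the response bytes
    }

    @Override
    public void handleResponse(EchoResponse response) { /* use response.message */ }

    @Override
    public void handleException(TransportException exp) { /* surface the failure */ }

    @Override
    public String executor() {
        return ThreadPool.Names.SAME;
    }
};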