use of org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput in project elasticsearch by elastic.
the class IndicesService method loadIntoContext.
/**
* Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
* value into the {@link SearchContext#queryResult() context's query result}. The combination of load + compute allows
* a single load operation to make other requests with the same key wait until it is loaded, and then reuse
* the same cached value.
*/
public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception {
assert canCache(request, context);
final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
boolean[] loadedFromCache = new boolean[] { true };
BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
queryPhase.execute(context);
try {
context.queryResult().writeToNoId(out);
} catch (IOException e) {
throw new AssertionError("Could not serialize response", e);
}
loadedFromCache[0] = false;
});
if (loadedFromCache[0]) {
// restore the cached query result into the context
final QuerySearchResult result = context.queryResult();
StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);
result.readFromWithId(context.id(), in);
result.shardTarget(context.shardTarget());
} else if (context.queryResult().searchTimedOut()) {
// we have to invalidate the cache entry if we cached a query result from a request that timed out.
// we can't really throw exceptions in the loading part to signal a timed out search to the outside world since if there are
// multiple requests that wait for the cache entry to be calculated they'd all fail with the same exception.
// instead we allow caching such a result for the time being, return the timed-out result for all other searches with that cache
// key, and invalidate the result in the thread that caused the timeout. This ends up being simpler and eventually correct since
// running a search that times out concurrently will likely time out again if it's run while we have this `stale` result in the
// cache. One other option is to not cache requests that time out at all...
indicesRequestCache.invalidate(new IndexShardCacheEntity(context.indexShard()), directoryReader, request.cacheKey());
}
}
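The restore path above only works because the raw cached bytes are wrapped in a NamedWriteableAwareStreamInput before readFromWithId runs, so any named writeables embedded in the query result can be resolved against the registry. Below is a minimal sketch of that wrapping pattern in isolation, assuming an Elasticsearch 5.x classpath; SearchModule is used only as a convenient source of registry entries, and the term query is just an illustrative payload.

import java.util.Collections;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchModule;

public class NamedWriteableRoundTripSketch {
    public static void main(String[] args) throws Exception {
        // registry populated with the search-related named writeables (query builders, aggregations, ...)
        NamedWriteableRegistry registry = new NamedWriteableRegistry(
                new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedWriteables());
        QueryBuilder query = QueryBuilders.termQuery("field", "value");
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // writeNamedWriteable prefixes the payload with the writeable's registered name
            out.writeNamedWriteable(query);
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                // the wrapper uses that name to look up the matching reader in the registry
                QueryBuilder copy = in.readNamedWriteable(QueryBuilder.class);
                System.out.println(query.equals(copy)); // true for a faithful round trip
            }
        }
    }
}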
use of org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput in project elasticsearch by elastic.
the class TcpTransport method messageReceived.
/**
* This method handles the message-receive path for both requests and responses
*/
public final void messageReceived(BytesReference reference, Channel channel, String profileName, InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException {
final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
transportServiceAdapter.addBytesReceived(totalMessageSize);
// we have additional bytes to read, outside of the header
boolean hasMessageBytesToRead = (totalMessageSize - TcpHeader.HEADER_SIZE) > 0;
StreamInput streamIn = reference.streamInput();
boolean success = false;
try (ThreadContext.StoredContext tCtx = threadPool.getThreadContext().stashContext()) {
long requestId = streamIn.readLong();
byte status = streamIn.readByte();
Version version = Version.fromId(streamIn.readInt());
if (TransportStatus.isCompress(status) && hasMessageBytesToRead && streamIn.available() > 0) {
Compressor compressor;
try {
final int bytesConsumed = TcpHeader.REQUEST_ID_SIZE + TcpHeader.STATUS_SIZE + TcpHeader.VERSION_ID_SIZE;
compressor = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed));
} catch (NotCompressedException ex) {
int maxToRead = Math.min(reference.length(), 10);
StringBuilder sb = new StringBuilder("stream marked as compressed, but no compressor found, first [").append(maxToRead).append("] content bytes out of [").append(reference.length()).append("] readable bytes with message size [").append(messageLengthBytes).append("] are [");
for (int i = 0; i < maxToRead; i++) {
sb.append(reference.get(i)).append(",");
}
sb.append("]");
throw new IllegalStateException(sb.toString());
}
streamIn = compressor.streamInput(streamIn);
}
if (version.isCompatible(getCurrentVersion()) == false) {
throw new IllegalStateException("Received message from unsupported version: [" + version + "] minimal compatible version is: [" + getCurrentVersion().minimumCompatibilityVersion() + "]");
}
streamIn = new NamedWriteableAwareStreamInput(streamIn, namedWriteableRegistry);
streamIn.setVersion(version);
threadPool.getThreadContext().readHeaders(streamIn);
if (TransportStatus.isRequest(status)) {
handleRequest(channel, profileName, streamIn, requestId, messageLengthBytes, version, remoteAddress, status);
} else {
final TransportResponseHandler<?> handler;
if (TransportStatus.isHandshake(status)) {
handler = pendingHandshakes.remove(requestId);
} else {
TransportResponseHandler theHandler = transportServiceAdapter.onResponseReceived(requestId);
if (theHandler == null && TransportStatus.isError(status)) {
handler = pendingHandshakes.remove(requestId);
} else {
handler = theHandler;
}
}
// ignore if it's null, the adapter logs it
if (handler != null) {
if (TransportStatus.isError(status)) {
handlerResponseError(streamIn, handler);
} else {
handleResponse(remoteAddress, streamIn, handler);
}
// Check the entire message has been read
final int nextByte = streamIn.read();
// calling read() is useful to make sure the message is fully read, even if there is an EOS marker
if (nextByte != -1) {
throw new IllegalStateException("Message not fully read (response) for requestId [" + requestId + "], handler [" + handler + "], error [" + TransportStatus.isError(status) + "]; resetting");
}
}
}
success = true;
} finally {
if (success) {
IOUtils.close(streamIn);
} else {
IOUtils.closeWhileHandlingException(streamIn);
}
}
}
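A key detail above is that streamIn is wrapped in a NamedWriteableAwareStreamInput before being handed to handleRequest or handleResponse, so whatever readFrom runs next can resolve registered named writeables off the wire. The following hypothetical request type (not an actual Elasticsearch class) is a sketch of why that matters:

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.transport.TransportRequest;

// Hypothetical request: readFrom only succeeds when the stream it receives is NamedWriteable-aware
// and backed by a registry that knows the QueryBuilder implementations.
public class EchoQueryRequest extends TransportRequest {
    private QueryBuilder query;

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        query = in.readNamedWriteable(QueryBuilder.class); // would fail on a plain StreamInput with no registry
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeNamedWriteable(query);
    }
}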
use of org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput in project elasticsearch by elastic.
the class ClusterRerouteRequestTests method roundTripThroughBytes.
private ClusterRerouteRequest roundTripThroughBytes(ClusterRerouteRequest original) throws IOException {
try (BytesStreamOutput output = new BytesStreamOutput()) {
original.writeTo(output);
try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
ClusterRerouteRequest copy = new ClusterRerouteRequest();
copy.readFrom(in);
return copy;
}
}
}
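A hedged usage sketch for the helper above; createTestRequest() is a hypothetical fixture factory standing in for however the test class builds its request, and the assertions reuse the same accessors exercised elsewhere on this page.

public void testRoundTripPreservesFlags() throws IOException {
    ClusterRerouteRequest original = createTestRequest();         // hypothetical fixture factory
    ClusterRerouteRequest copy = roundTripThroughBytes(original);  // helper defined above
    assertEquals(original.isRetryFailed(), copy.isRetryFailed());
    assertEquals(original.dryRun(), copy.dryRun());
    assertEquals(original.explain(), copy.explain());
    assertEquals(original.timeout(), copy.timeout());
}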
use of org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput in project elasticsearch by elastic.
the class ClusterRerouteTests method testSerializeRequest.
public void testSerializeRequest() throws IOException {
ClusterRerouteRequest req = new ClusterRerouteRequest();
req.setRetryFailed(randomBoolean());
req.dryRun(randomBoolean());
req.explain(randomBoolean());
req.add(new AllocateEmptyPrimaryAllocationCommand("foo", 1, "bar", randomBoolean()));
req.timeout(TimeValue.timeValueMillis(randomIntBetween(0, 100)));
BytesStreamOutput out = new BytesStreamOutput();
req.writeTo(out);
BytesReference bytes = out.bytes();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(NetworkModule.getNamedWriteables());
StreamInput wrap = new NamedWriteableAwareStreamInput(bytes.streamInput(), namedWriteableRegistry);
ClusterRerouteRequest deserializedReq = new ClusterRerouteRequest();
deserializedReq.readFrom(wrap);
assertEquals(req.isRetryFailed(), deserializedReq.isRetryFailed());
assertEquals(req.dryRun(), deserializedReq.dryRun());
assertEquals(req.explain(), deserializedReq.explain());
assertEquals(req.timeout(), deserializedReq.timeout());
// allocation commands have their own tests
assertEquals(1, deserializedReq.getCommands().commands().size());
assertEquals(req.getCommands().commands().size(), deserializedReq.getCommands().commands().size());
}
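The registry built from NetworkModule.getNamedWriteables() is what lets readFrom resolve the AllocateEmptyPrimaryAllocationCommand inside the request. A minimal sketch of how an additional command type would be made resolvable, assuming a hypothetical MyCommand that implements the NamedWriteable contract for allocation commands:

List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(NetworkModule.getNamedWriteables());
// MyCommand is hypothetical: any NamedWriteable registered under the AllocationCommand category works the same way
entries.add(new NamedWriteableRegistry.Entry(AllocationCommand.class, MyCommand.NAME, MyCommand::new));
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
StreamInput in = new NamedWriteableAwareStreamInput(bytes.streamInput(), registry);
AllocationCommand command = in.readNamedWriteable(AllocationCommand.class);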
use of org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput in project elasticsearch by elastic.
the class ClusterSearchShardsResponseTests method testSerialization.
public void testSerialization() throws Exception {
Map<String, AliasFilter> indicesAndFilters = new HashMap<>();
Set<DiscoveryNode> nodes = new HashSet<>();
int numShards = randomIntBetween(1, 10);
ClusterSearchShardsGroup[] clusterSearchShardsGroups = new ClusterSearchShardsGroup[numShards];
for (int i = 0; i < numShards; i++) {
String index = randomAsciiOfLengthBetween(3, 10);
ShardId shardId = new ShardId(index, randomAsciiOfLength(12), i);
String nodeId = randomAsciiOfLength(10);
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, randomBoolean(), ShardRoutingState.STARTED);
clusterSearchShardsGroups[i] = new ClusterSearchShardsGroup(shardId, new ShardRouting[] { shardRouting });
DiscoveryNode node = new DiscoveryNode(shardRouting.currentNodeId(), new TransportAddress(TransportAddress.META_ADDRESS, randomInt(0xFFFF)), VersionUtils.randomVersion(random()));
nodes.add(node);
AliasFilter aliasFilter;
if (randomBoolean()) {
aliasFilter = new AliasFilter(RandomQueryBuilder.createQuery(random()), "alias-" + index);
} else {
aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
}
indicesAndFilters.put(index, aliasFilter);
}
ClusterSearchShardsResponse clusterSearchShardsResponse = new ClusterSearchShardsResponse(clusterSearchShardsGroups, nodes.toArray(new DiscoveryNode[nodes.size()]), indicesAndFilters);
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList());
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(searchModule.getNamedWriteables());
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(entries);
Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, Version.CURRENT);
try (BytesStreamOutput out = new BytesStreamOutput()) {
out.setVersion(version);
clusterSearchShardsResponse.writeTo(out);
try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) {
in.setVersion(version);
ClusterSearchShardsResponse deserialized = new ClusterSearchShardsResponse();
deserialized.readFrom(in);
assertArrayEquals(clusterSearchShardsResponse.getNodes(), deserialized.getNodes());
assertEquals(clusterSearchShardsResponse.getGroups().length, deserialized.getGroups().length);
for (int i = 0; i < clusterSearchShardsResponse.getGroups().length; i++) {
ClusterSearchShardsGroup clusterSearchShardsGroup = clusterSearchShardsResponse.getGroups()[i];
ClusterSearchShardsGroup deserializedGroup = deserialized.getGroups()[i];
assertEquals(clusterSearchShardsGroup.getShardId(), deserializedGroup.getShardId());
assertArrayEquals(clusterSearchShardsGroup.getShards(), deserializedGroup.getShards());
}
if (version.onOrAfter(Version.V_5_1_1_UNRELEASED)) {
assertEquals(clusterSearchShardsResponse.getIndicesAndFilters(), deserialized.getIndicesAndFilters());
} else {
assertNull(deserialized.getIndicesAndFilters());
}
}
}
}
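The version checks at the end of the test reflect the usual version-gating pattern for fields added to the wire format later (here, the indices-and-filters map from 5.1.1 on). A hedged sketch of that pattern with a hypothetical writeable, not an Elasticsearch class:

import java.io.IOException;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

public class VersionGatedThing {
    String addedIn511; // stays null when the wire version is older than 5.1.1

    void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
            out.writeOptionalString(addedIn511); // only put on the wire when the receiver understands it
        }
    }

    void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_5_1_1_UNRELEASED)) {
            addedIn511 = in.readOptionalString();
        }
    }
}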