use of org.elasticsearch.common.io.stream.StreamInput in project elasticsearch by elastic.
the class ShardSearchTransportRequestTests method testSerialization.
public void testSerialization() throws Exception {
    ShardSearchTransportRequest shardSearchTransportRequest = createShardSearchTransportRequest();
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        shardSearchTransportRequest.writeTo(output);
        try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) {
            ShardSearchTransportRequest deserializedRequest = new ShardSearchTransportRequest();
            deserializedRequest.readFrom(in);
            assertEquals(deserializedRequest.scroll(), shardSearchTransportRequest.scroll());
            assertEquals(deserializedRequest.filteringAliases(), shardSearchTransportRequest.filteringAliases());
            assertArrayEquals(deserializedRequest.indices(), shardSearchTransportRequest.indices());
            assertArrayEquals(deserializedRequest.types(), shardSearchTransportRequest.types());
            assertEquals(deserializedRequest.indicesOptions(), shardSearchTransportRequest.indicesOptions());
            assertEquals(deserializedRequest.isProfile(), shardSearchTransportRequest.isProfile());
            assertEquals(deserializedRequest.nowInMillis(), shardSearchTransportRequest.nowInMillis());
            assertEquals(deserializedRequest.source(), shardSearchTransportRequest.source());
            assertEquals(deserializedRequest.searchType(), shardSearchTransportRequest.searchType());
            assertEquals(deserializedRequest.shardId(), shardSearchTransportRequest.shardId());
            assertEquals(deserializedRequest.numberOfShards(), shardSearchTransportRequest.numberOfShards());
            assertEquals(deserializedRequest.cacheKey(), shardSearchTransportRequest.cacheKey());
            assertNotSame(deserializedRequest, shardSearchTransportRequest);
            assertEquals(deserializedRequest.indexBoost(), shardSearchTransportRequest.indexBoost(), 0.0f);
        }
    }
}
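Every example on this page follows the same write-then-read round trip, so the pattern can be factored into a helper. A minimal sketch, assuming the pre-6.x Streamable interface (writeTo/readFrom); copyViaStream is a hypothetical name, not an API the tests above define:

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;
import java.io.IOException;
import java.util.function.Supplier;

// Hypothetical helper: serialize 'original' to bytes, then read the bytes back
// into a fresh instance through a registry-aware stream.
static <T extends Streamable> T copyViaStream(T original, Supplier<T> factory,
                                              NamedWriteableRegistry registry) throws IOException {
    try (BytesStreamOutput output = new BytesStreamOutput()) {
        original.writeTo(output);
        try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), registry)) {
            T copy = factory.get();
            copy.readFrom(in);
            return copy;
        }
    }
}

With such a helper, the test body would reduce to copyViaStream(shardSearchTransportRequest, ShardSearchTransportRequest::new, namedWriteableRegistry) followed by the assertions.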
use of org.elasticsearch.common.io.stream.StreamInput in project elasticsearch by elastic.
the class IndicesService method loadIntoContext.
/**
 * Loads the cache result, computing it if needed by executing the query phase and otherwise deserializing the cached
 * value into the {@link SearchContext#queryResult() context's query result}. Combining load and compute into a single
 * load operation lets other requests with the same key wait until the value is loaded and then reuse the same cached
 * entry.
 */
public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception {
    assert canCache(request, context);
    final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
    // one-element array so the loader lambda (which may only capture effectively
    // final variables) can record whether it actually ran
    boolean[] loadedFromCache = new boolean[] { true };
    BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
        queryPhase.execute(context);
        try {
            context.queryResult().writeToNoId(out);
        } catch (IOException e) {
            throw new AssertionError("Could not serialize response", e);
        }
        loadedFromCache[0] = false;
    });
    if (loadedFromCache[0]) {
        // restore the cached query result into the context
        final QuerySearchResult result = context.queryResult();
        StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);
        result.readFromWithId(context.id(), in);
        result.shardTarget(context.shardTarget());
    } else if (context.queryResult().searchTimedOut()) {
        // We have to invalidate the cache entry if we cached a query result from a request that timed out.
        // We can't throw an exception in the loading part to signal the timeout to the outside world, because if
        // multiple requests are waiting for the cache entry to be computed they would all fail with the same exception.
        // Instead we allow caching such a result for the time being, return the timed-out result to all other searches
        // with that cache key, and invalidate the entry in the thread that caused the timeout. This ends up being
        // simpler and eventually correct, since a search that timed out will likely time out again if it is re-run
        // while this stale result is in the cache. Another option would be to not cache timed-out requests at all...
        indicesRequestCache.invalidate(new IndexShardCacheEntity(context.indexShard()), directoryReader, request.cacheKey());
    }
}
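The boolean[] holder above is a common idiom: a lambda can only capture effectively final variables, so a one-element array gives the loader a mutable slot to record whether it executed. A minimal standalone sketch of the same load-or-compute shape; SingleFlightCache is a placeholder, not the IndicesRequestCache API:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

class SingleFlightCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();

    // loadedFromCache[0] stays true when a previously cached value is returned;
    // the loader flips it to false when it computes a fresh value. Concurrent
    // callers with the same key block until the first computation finishes.
    V getOrCompute(K key, Supplier<V> loader, boolean[] loadedFromCache) {
        return cache.computeIfAbsent(key, k -> {
            loadedFromCache[0] = false;
            return loader.get();
        });
    }
}

A caller that finds loadedFromCache[0] still true knows it received a value computed elsewhere, which is how loadIntoContext decides between deserializing the cached bytes and checking whether a freshly computed result timed out and must be invalidated.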
use of org.elasticsearch.common.io.stream.StreamInput in project crate by crate.
the class SymbolSerializerTest method testValueSymbol.
@Test
public void testValueSymbol() throws Exception {
    Value v = new Value(DataTypes.STRING);
    BytesStreamOutput out = new BytesStreamOutput();
    Symbols.toStream(v, out);
    StreamInput in = StreamInput.wrap(out.bytes());
    Value v2 = (Value) Symbols.fromStream(in);
    assertEquals(v2.valueType(), DataTypes.STRING);
}
use of org.elasticsearch.common.io.stream.StreamInput in project crate by crate.
the class KillJobsRequestTest method testStreaming.
@Test
public void testStreaming() throws Exception {
    ImmutableList<UUID> toKill = ImmutableList.of(UUID.randomUUID(), UUID.randomUUID());
    KillJobsRequest r = new KillJobsRequest(toKill);
    BytesStreamOutput out = new BytesStreamOutput();
    r.writeTo(out);
    StreamInput in = StreamInput.wrap(out.bytes());
    KillJobsRequest r2 = new KillJobsRequest();
    r2.readFrom(in);
    assertThat(r.toKill(), equalTo(r2.toKill()));
}
use of org.elasticsearch.common.io.stream.StreamInput in project crate by crate.
the class NodeFetchResponseTest method testStreaming.
@Test
public void testStreaming() throws Exception {
    IntObjectHashMap<IntContainer> toFetch = new IntObjectHashMap<>();
    IntHashSet docIds = new IntHashSet(3);
    toFetch.put(1, docIds);
    IntObjectMap<Streamer[]> streamers = new IntObjectHashMap<>(1);
    streamers.put(1, new Streamer[] { DataTypes.BOOLEAN.streamer() });
    StreamBucket.Builder builder = new StreamBucket.Builder(streamers.get(1));
    builder.add(new RowN(new Object[] { true }));
    IntObjectHashMap<StreamBucket> fetched = new IntObjectHashMap<>(1);
    fetched.put(1, builder.build());
    NodeFetchResponse orig = NodeFetchResponse.forSending(fetched);
    BytesStreamOutput out = new BytesStreamOutput();
    orig.writeTo(out);
    StreamInput in = StreamInput.wrap(out.bytes());
    // the receiving side is required to set the streamers
    NodeFetchResponse streamed = NodeFetchResponse.forReceiveing(streamers);
    streamed.readFrom(in);
    assertThat((Row) Iterables.getOnlyElement(streamed.fetched().get(1)), isRow(true));
}