Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
In class BulkProcessorIT, the method testBulkProcessorFlush:
public void testBulkProcessorFlush() throws Exception {
    final CountDownLatch latch = new CountDownLatch(1);
    BulkProcessorTestListener listener = new BulkProcessorTestListener(latch);
    int numDocs = randomIntBetween(10, 100);
    try (BulkProcessor processor = BulkProcessor.builder(client(), listener)
            .setName("foo")
            .setConcurrentRequests(randomIntBetween(0, 10))
            .setBulkActions(numDocs + randomIntBetween(1, 100))
            .setFlushInterval(TimeValue.timeValueHours(24))
            .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB))
            .build()) {
        MultiGetRequestBuilder multiGetRequestBuilder = indexDocs(client(), processor, numDocs);
        assertThat(latch.await(randomInt(500), TimeUnit.MILLISECONDS), equalTo(false));
        // we really need an explicit flush as none of the bulk thresholds was reached
        processor.flush();
        latch.await();
        assertThat(listener.beforeCounts.get(), equalTo(1));
        assertThat(listener.afterCounts.get(), equalTo(1));
        assertThat(listener.bulkFailures.size(), equalTo(0));
        assertResponseItems(listener.bulkItems, numDocs);
        assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
    }
}
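The 1 GB bulk-size threshold above is expressed as a ByteSizeValue. A minimal sketch of how such a value behaves, using only the constructor and the getBytes() accessor that appear in these snippets (the printed human-readable form is an assumption):

import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;

public class ByteSizeValueSketch {
    public static void main(String[] args) {
        // 1 GB expressed as a ByteSizeValue, as in setBulkSize(...) above.
        ByteSizeValue oneGb = new ByteSizeValue(1, ByteSizeUnit.GB);
        System.out.println(oneGb.getBytes()); // 1073741824
        System.out.println(oneGb);            // a human-readable form such as "1gb" (assumed)
    }
}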
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
In class RemoteScrollableHitSourceTests, the method testTooLargeResponse:
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testTooLargeResponse() throws Exception {
    ContentTooLongException tooLong = new ContentTooLongException("too long!");
    CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
    when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
            any(HttpClientContext.class), any(FutureCallback.class))).then(new Answer<Future<HttpResponse>>() {
        @Override
        public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
            HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1];
            FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3];
            assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit());
            callback.failed(tooLong);
            return null;
        }
    });
    RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient);
    AtomicBoolean called = new AtomicBoolean();
    Consumer<Response> checkResponse = r -> called.set(true);
    Throwable e = expectThrows(RuntimeException.class,
            () -> source.doStartNextScroll(FAKE_SCROLL_ID, timeValueMillis(0), checkResponse));
    // Unwrap some artifacts added by the test
    while (e.getMessage().equals("failed")) {
        e = e.getCause();
    }
    // This next exception is what the user sees
    assertEquals("Remote responded with a chunk that was too large. Use a smaller batch size.", e.getMessage());
    // And that exception is reported as being caused by the underlying exception returned by the client
    assertSame(tooLong, e.getCause());
    assertFalse(called.get());
}
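The assertion above narrows the 100 MB buffer limit to an int with bytesAsInt(). A minimal sketch of that conversion; the behavior for sizes beyond Integer.MAX_VALUE is an assumption, not something this test exercises:

ByteSizeValue bufferLimit = new ByteSizeValue(100, ByteSizeUnit.MB);
long bytes = bufferLimit.getBytes();  // 104857600
int limit = bufferLimit.bytesAsInt(); // the same value, as an int
// Assumption: for sizes larger than Integer.MAX_VALUE bytes, bytesAsInt()
// cannot represent the size and is expected to fail rather than silently truncate.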
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
In class Netty4HttpServerTransport, the method doStart:
@Override
protected void doStart() {
    boolean success = false;
    try {
        this.serverOpenChannels = new Netty4OpenChannelsHandler(logger);
        serverBootstrap = new ServerBootstrap();
        serverBootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, HTTP_SERVER_WORKER_THREAD_NAME_PREFIX)));
        serverBootstrap.channel(NioServerSocketChannel.class);
        serverBootstrap.childHandler(configureServerChannelHandler());
        serverBootstrap.childOption(ChannelOption.TCP_NODELAY, SETTING_HTTP_TCP_NO_DELAY.get(settings));
        serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, SETTING_HTTP_TCP_KEEP_ALIVE.get(settings));
        final ByteSizeValue tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
        if (tcpSendBufferSize.getBytes() > 0) {
            serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes()));
        }
        final ByteSizeValue tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
        if (tcpReceiveBufferSize.getBytes() > 0) {
            serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes()));
        }
        serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator);
        final boolean reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings);
        serverBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
        serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, reuseAddress);
        this.boundAddress = createBoundHttpAddress();
        if (logger.isInfoEnabled()) {
            logger.info("{}", boundAddress);
        }
        success = true;
    } finally {
        if (success == false) {
            // otherwise we leak threads since we never moved to started
            doStop();
        }
    }
}
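The TCP buffer sizes here come from Setting<ByteSizeValue> definitions, and the getBytes() > 0 guard means "only override the OS default when a size was configured". A minimal sketch of declaring and reading such a setting; the key name and default are illustrative, not the real SETTING_HTTP_TCP_SEND_BUFFER_SIZE definition:

// Illustrative setting declaration, assuming Setting.byteSizeSetting has this shape.
public static final Setting<ByteSizeValue> EXAMPLE_SEND_BUFFER_SIZE =
    Setting.byteSizeSetting("example.tcp.send_buffer_size", new ByteSizeValue(-1), Setting.Property.NodeScope);

// Reading it mirrors the guard in doStart(): a non-positive size leaves the OS default alone.
ByteSizeValue sendBufferSize = EXAMPLE_SEND_BUFFER_SIZE.get(settings);
if (sendBufferSize.getBytes() > 0) {
    // apply the configured buffer size
}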
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
In class Netty4HttpServerTransportTests, the method testBadRequest:
public void testBadRequest() throws InterruptedException {
    final AtomicReference<Throwable> causeReference = new AtomicReference<>();
    final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {

        @Override
        public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
            throw new AssertionError();
        }

        @Override
        public void dispatchBadRequest(final RestRequest request, final RestChannel channel,
                final ThreadContext threadContext, final Throwable cause) {
            causeReference.set(cause);
            try {
                final ElasticsearchException e = new ElasticsearchException("you sent a bad request and you should feel bad");
                channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e));
            } catch (final IOException e) {
                throw new AssertionError(e);
            }
        }
    };
    final Settings settings;
    final int maxInitialLineLength;
    final Setting<ByteSizeValue> httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
    if (randomBoolean()) {
        maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
        settings = Settings.EMPTY;
    } else {
        maxInitialLineLength = randomIntBetween(1, 8192);
        settings = Settings.builder().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build();
    }
    try (Netty4HttpServerTransport transport = new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) {
        transport.start();
        final TransportAddress remoteAddress = randomFrom(transport.boundAddress.boundAddresses());
        try (Netty4HttpClient client = new Netty4HttpClient()) {
            final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8"));
            final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url);
            final FullHttpResponse response = client.post(remoteAddress.address(), request);
            assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST));
            assertThat(new String(response.content().array(), Charset.forName("UTF-8")),
                    containsString("you sent a bad request and you should feel bad"));
        }
    }
    assertNotNull(causeReference.get());
    assertThat(causeReference.get(), instanceOf(TooLongFrameException.class));
}
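Note how the test writes the limit back into settings as a string with a "b" suffix (maxInitialLineLength + "b"): ByteSizeValue settings are parsed from such strings. A minimal sketch of that round trip, assuming the parse helper ByteSizeValue.parseBytesSizeValue(value, settingName) behaves as its name suggests:

ByteSizeValue parsed = ByteSizeValue.parseBytesSizeValue("4096b", "http.max_initial_line_length");
assert parsed.bytesAsInt() == 4096; // "4096b" parses back to 4096 bytes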
Use of org.elasticsearch.common.unit.ByteSizeValue in project elasticsearch by elastic.
In class S3BlobStoreContainerTests, the method newBlobStore:
protected BlobStore newBlobStore() throws IOException {
    MockAmazonS3 client = new MockAmazonS3();
    String bucket = randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
    return new S3BlobStore(Settings.EMPTY, client, bucket, false, new ByteSizeValue(10, ByteSizeUnit.MB), 5, "public-read-write", "standard");
}
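The new ByteSizeValue(10, ByteSizeUnit.MB) argument is the blob store's buffer size. A minimal sketch of the equivalent unit conversions, assuming ByteSizeUnit offers TimeUnit-style conversion methods such as toBytes:

long tenMb = ByteSizeUnit.MB.toBytes(10); // 10485760 (assumed conversion helper)
ByteSizeValue viaUnit = new ByteSizeValue(10, ByteSizeUnit.MB);
ByteSizeValue viaBytes = new ByteSizeValue(tenMb, ByteSizeUnit.BYTES);
// Both describe the same size; getBytes() returns 10485760 for each.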