Example usage of org.graylog.shaded.elasticsearch7.org.elasticsearch.ElasticsearchException from the elastic/elasticsearch project.
From class Netty4HttpServerTransportTests, method testBadRequest:
public void testBadRequest() throws InterruptedException {
    // Captures the Throwable handed to dispatchBadRequest so it can be inspected once the request completes.
    final AtomicReference<Throwable> badRequestCause = new AtomicReference<>();
    final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
        @Override
        public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
            // An oversized request line must never reach the normal dispatch path.
            throw new AssertionError();
        }

        @Override
        public void dispatchBadRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
            badRequestCause.set(cause);
            try {
                final ElasticsearchException error = new ElasticsearchException("you sent a bad request and you should feel bad");
                channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, error));
            } catch (final IOException inner) {
                throw new AssertionError(inner);
            }
        }
    };
    // Either rely on the default max initial line length or configure a random explicit limit.
    final Setting<ByteSizeValue> lineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
    final Settings settings;
    final int lineLengthLimit;
    if (randomBoolean()) {
        lineLengthLimit = lineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt();
        settings = Settings.EMPTY;
    } else {
        lineLengthLimit = randomIntBetween(1, 8192);
        settings = Settings.builder().put(lineLengthSetting.getKey(), lineLengthLimit + "b").build();
    }
    try (Netty4HttpServerTransport transport =
             new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) {
        transport.start();
        final TransportAddress address = randomFrom(transport.boundAddress.boundAddresses());
        try (Netty4HttpClient client = new Netty4HttpClient()) {
            // A URL of maxInitialLineLength + 1 bytes ("/" plus padding) is guaranteed to exceed the limit.
            final String oversizedUrl = "/" + new String(new byte[lineLengthLimit], Charset.forName("UTF-8"));
            final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, oversizedUrl);
            final FullHttpResponse response = client.post(address.address(), request);
            assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST));
            assertThat(new String(response.content().array(), Charset.forName("UTF-8")),
                containsString("you sent a bad request and you should feel bad"));
        }
    }
    // The bad-request handler must have run, and the cause must be the line-length violation.
    assertNotNull(badRequestCause.get());
    assertThat(badRequestCause.get(), instanceOf(TooLongFrameException.class));
}
Example usage of org.graylog.shaded.elasticsearch7.org.elasticsearch.ElasticsearchException from the elastic/elasticsearch project.
From class TestAmazonS3, method hashCode:
/**
 * Computes a stable 32-bit hash for the given path by packing the first four
 * bytes of its MD5 digest into a big-endian {@code int}.
 *
 * @param path the path to hash; must not be {@code null}
 * @return an int built from the first four MD5 digest bytes
 * @throws ElasticsearchException if MD5 or UTF-8 were unavailable (effectively
 *         unreachable: both are mandated by the JDK specification)
 */
private int hashCode(String path) {
    try {
        final MessageDigest digest = MessageDigest.getInstance("MD5");
        final byte[] bytes = digest.digest(path.getBytes("UTF-8"));
        // Pack the first four digest bytes big-endian; explicit indices avoid
        // relying on the evaluation order of chained `i++` side effects.
        return ((bytes[0] & 0xFF) << 24) | ((bytes[1] & 0xFF) << 16) | ((bytes[2] & 0xFF) << 8) | (bytes[3] & 0xFF);
    } catch (UnsupportedEncodingException | NoSuchAlgorithmException ex) {
        // Both exceptions share identical handling, so a multi-catch replaces
        // the original duplicated catch blocks; the cause is preserved.
        throw new ElasticsearchException("cannot calculate hashcode", ex);
    }
}
Example usage of org.graylog.shaded.elasticsearch7.org.elasticsearch.ElasticsearchException from the elastic/elasticsearch project.
From class TestAmazonS3, method putObject:
/**
 * Wraps {@code putObject} to randomly simulate a write failure: with probability
 * {@code writeFailureRate} it consumes a random prefix of the input stream and then
 * throws a retryable {@code RequestTimeout} S3 exception; otherwise it delegates to
 * the real implementation.
 *
 * @throws AmazonS3Exception (status 400, error code "RequestTimeout") on a simulated failure
 * @throws ElasticsearchException if the input stream cannot be read while simulating
 */
@Override
public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
    if (shouldFail(bucketName, key, writeFailureRate)) {
        long length = metadata.getContentLength();
        // Consume a random prefix of the payload before failing, to mimic a mid-upload timeout.
        long partToRead = (long) (length * randomDouble());
        byte[] buffer = new byte[1024];
        for (long cur = 0; cur < partToRead; ) {
            try {
                final int toRead = (int) Math.min(buffer.length, partToRead - cur);
                final int read = input.read(buffer, 0, toRead);
                if (read < 0) {
                    // End of stream before the target prefix was consumed; stop reading.
                    // (The original code ignored read()'s return value entirely.)
                    break;
                }
                // Advance by the bytes actually read, not by the amount requested.
                cur += read;
            } catch (IOException ex) {
                throw new ElasticsearchException("cannot read input stream", ex);
            }
        }
        logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
        AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
        ex.setStatusCode(400);
        ex.setErrorCode("RequestTimeout");
        throw ex;
    } else {
        return super.putObject(bucketName, key, input, metadata);
    }
}
Example usage of org.graylog.shaded.elasticsearch7.org.elasticsearch.ElasticsearchException from the elastic/elasticsearch project.
From class AzureRepositoryF, method main:
/**
 * Starts a single mock node with the Azure repository plugin and blocks until
 * the JVM shutdown hook has closed it.
 */
public static void main(String[] args) throws Throwable {
    // Minimal node settings: allow CORS from any origin and name the cluster after this class.
    final Settings.Builder nodeSettings = Settings.builder()
        .put("http.cors.enabled", "true")
        .put("http.cors.allow-origin", "*")
        .put("cluster.name", AzureRepositoryF.class.getSimpleName());
    // Example for azure repo settings
    // nodeSettings.put("cloud.azure.storage.my_account1.account", "account_name");
    // nodeSettings.put("cloud.azure.storage.my_account1.key", "account_key");
    // nodeSettings.put("cloud.azure.storage.my_account1.default", true);
    // nodeSettings.put("cloud.azure.storage.my_account2.account", "account_name");
    // nodeSettings.put("cloud.azure.storage.my_account2.key", "account_key_secondary");
    final CountDownLatch shutdownLatch = new CountDownLatch(1);
    final Node node = new MockNode(nodeSettings.build(), Collections.singletonList(AzureRepositoryPlugin.class));
    // Close the node on JVM shutdown, then release the latch so main can return.
    Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                IOUtils.close(node);
            } catch (final IOException e) {
                throw new ElasticsearchException(e);
            } finally {
                shutdownLatch.countDown();
            }
        }
    }));
    node.start();
    // Block the main thread until the shutdown hook has finished closing the node.
    shutdownLatch.await();
}
Example usage of org.graylog.shaded.elasticsearch7.org.elasticsearch.ElasticsearchException from the elastic/elasticsearch project.
From class PendingClusterStatesQueueTests, method testFailedStateCleansSupersededStatesOnly:
// Verifies that failing one committed cluster state removes exactly the states it
// supersedes from the pending queue — no more, no less.
public void testFailedStateCleansSupersededStatesOnly() {
    // Build a queue of random states from several masters and commit a random subset.
    List<ClusterState> states = randomStates(scaledRandomIntBetween(10, 50), "master1", "master2", "master3", "master4");
    PendingClusterStatesQueue queue = createQueueWithStates(states);
    List<ClusterStateContext> committedContexts = randomCommitStates(queue);
    // Fail one committed state; the queue should then drop only states it supersedes.
    ClusterState toFail = randomFrom(committedContexts).state;
    queue.markAsFailed(toFail, new ElasticsearchException("boo!"));
    // Index committed contexts by state UUID for the lookups below.
    final Map<String, ClusterStateContext> committedContextsById = new HashMap<>();
    for (ClusterStateContext context : committedContexts) {
        committedContextsById.put(context.stateUUID(), context);
    }
    // now check that queue doesn't contain superseded states
    for (ClusterStateContext context : queue.pendingStates) {
        if (context.committed()) {
            assertFalse("found a committed cluster state, which is superseded by a failed state.\nFound:" + context.state + "\nfailed:" + toFail, toFail.supersedes(context.state));
        }
    }
    // check no state has been erroneously removed
    for (ClusterState state : states) {
        // Still pending: nothing to verify for this state.
        ClusterStateContext pendingContext = queue.findState(state.stateUUID());
        if (pendingContext != null) {
            continue;
        }
        // The failed state itself is expected to be gone.
        if (state.equals(toFail)) {
            continue;
        }
        // Any other removed state must have been committed and superseded by the
        // failed state, and its listener must have been notified with the failure.
        assertThat("non-committed states should never be removed", committedContextsById, hasKey(state.stateUUID()));
        final ClusterStateContext context = committedContextsById.get(state.stateUUID());
        assertThat("removed state is not superseded by failed state. \nRemoved state:" + context + "\nfailed: " + toFail, toFail.supersedes(context.state), equalTo(true));
        assertThat("removed state was failed with wrong exception", ((MockListener) context.listener).failure, notNullValue());
        assertThat("removed state was failed with wrong exception", ((MockListener) context.listener).failure.getMessage(), containsString("boo"));
    }
}
Aggregations