Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
The class Setting, method arrayToParsableString.
private static String arrayToParsableString(String[] array) {
    try {
        XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
        builder.startArray();
        for (String element : array) {
            builder.value(element);
        }
        builder.endArray();
        return builder.string();
    } catch (IOException ex) {
        throw new ElasticsearchException(ex);
    }
}
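For reference, the helper renders a string-array setting as a single parsable JSON array string. A minimal, hypothetical call site (not taken from the Setting class):

String[] hosts = { "node-1:9300", "node-2:9300" };
String parsable = arrayToParsableString(hosts);
// parsable now holds the JSON text: ["node-1:9300","node-2:9300"]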
Use of org.elasticsearch.ElasticsearchException in project crate by crate.
The class BlobRecoveryHandler, method phase1.
public void phase1() throws Exception {
    logger.debug("[{}][{}] recovery [phase1] to {}: start", request.shardId().index().name(), request.shardId().id(), request.targetNode().getName());
    StopWatch stopWatch = new StopWatch().start();
    blobTransferTarget.startRecovery();
    blobTransferTarget.createActiveTransfersSnapshot();
    sendStartRecoveryRequest();
    final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
    try {
        syncVarFiles(lastException);
    } catch (InterruptedException ex) {
        throw new ElasticsearchException("blob recovery phase1 failed", ex);
    }
    Exception exception = lastException.get();
    if (exception != null) {
        throw exception;
    }
    /*
     * As soon as the recovery starts, the target node will receive PutChunkReplicaRequests.
     * The target node will then request the bytes it is missing from the source node
     * (it is missing bytes from PutChunk/StartBlob requests that happened before the recovery).
     * We need to block here so that the target node has enough time to request the head chunks.
     *
     * E.g.
     *   target node receives chunk X with bytes 10-19
     *   target node requests bytes 0-9 from source node
     *   source node sends bytes 0-9
     *   source node sets transferTakenOver
     */
    blobTransferTarget.waitForGetHeadRequests(GET_HEAD_TIMEOUT, TimeUnit.SECONDS);
    blobTransferTarget.createActivePutHeadChunkTransfersSnapshot();
    /*
     * After receiving a getHeadRequest, the source node starts to send HeadChunks to the target.
     * Wait for all PutHeadChunk runnables to finish before ending the recovery.
     */
    blobTransferTarget.waitUntilPutHeadChunksAreFinished();
    sendFinalizeRecoveryRequest();
    blobTransferTarget.stopRecovery();
    stopWatch.stop();
    logger.debug("[{}][{}] recovery [phase1] to {}: took [{}]", request.shardId().index().name(), request.shardId().id(), request.targetNode().getName(), stopWatch.totalTime());
}
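The two wait calls above implement the hand-over described in the comment. A minimal sketch of how such a bounded wait could look, assuming a CountDownLatch-based gate; the actual BlobTransferTarget internals may differ:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

// Hypothetical gate, not the real BlobTransferTarget: phase1 blocks until the
// target node has requested its head chunks, or until the timeout elapses.
class HeadRequestGate {
    private final CountDownLatch headRequests = new CountDownLatch(1);

    // invoked by the transport layer once the target has requested the head chunks
    void onHeadChunksRequested() {
        headRequests.countDown();
    }

    // invoked from phase1; returns false if the timeout elapsed first
    boolean waitForGetHeadRequests(long timeout, TimeUnit unit) throws InterruptedException {
        return headRequests.await(timeout, unit);
    }
}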
Use of org.elasticsearch.ElasticsearchException in project crate by crate.
The class BlobRecoverySourceHandler, method sendSnapshot.
/**
 * Send the given snapshot's operations to this handler's target node.
 * <p/>
 * Operations are bulked into a single request depending on an operation-count
 * limit or a size-in-bytes limit.
 *
 * @return the total number of translog operations that were sent
 */
protected int sendSnapshot(final Translog.Snapshot snapshot) {
    int ops = 0;
    long size = 0;
    int totalOperations = 0;
    final List<Translog.Operation> operations = new ArrayList<>();
    Translog.Operation operation;
    try {
        // this exception should bubble up
        operation = snapshot.next();
    } catch (IOException ex) {
        throw new ElasticsearchException("failed to get next operation from translog", ex);
    }
    final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder()
        .withCompress(recoverySettings.compress())
        .withType(TransportRequestOptions.Type.RECOVERY)
        .withTimeout(recoverySettings.internalActionLongTimeout())
        .build();
    if (operation == null) {
        logger.trace("[{}][{}] no translog operations to send to {}", indexName, shardId, request.targetNode());
    }
    while (operation != null) {
        if (shard.state() == IndexShardState.CLOSED) {
            throw new IndexShardClosedException(request.shardId());
        }
        cancellableThreads.checkForCancel();
        operations.add(operation);
        ops += 1;
        size += operation.estimateSize();
        totalOperations++;
        // check whether this batch is past the ops or size threshold; if so, send it off
        if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().getBytes()) {
            // don't throttle translog, since we lock for phase3 indexing,
            // so we need to move it as fast as possible. Note, since we
            // index docs to replicas while the index files are recovered
            // the lock can potentially be removed, in which case, it might
            // make sense to re-enable throttling in this phase
            // if (recoverySettings.rateLimiter() != null) {
            //     recoverySettings.rateLimiter().pause(size);
            // }
            cancellableThreads.execute(new Interruptable() {
                @Override
                public void run() throws InterruptedException {
                    final RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations());
                    transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                }
            });
            if (logger.isTraceEnabled()) {
                logger.trace("[{}][{}] sent batch of [{}][{}] (total: [{}]) translog operations to {}", indexName, shardId, ops, new ByteSizeValue(size), snapshot.estimatedTotalOperations(), request.targetNode());
            }
            ops = 0;
            size = 0;
            operations.clear();
        }
        try {
            // this exception should bubble up
            operation = snapshot.next();
        } catch (IOException ex) {
            throw new ElasticsearchException("failed to get next operation from translog", ex);
        }
    }
    // send the leftover operations
    if (!operations.isEmpty()) {
        cancellableThreads.execute(new Interruptable() {
            @Override
            public void run() throws InterruptedException {
                RecoveryTranslogOperationsRequest translogOperationsRequest = new RecoveryTranslogOperationsRequest(request.recoveryId(), request.shardId(), operations, snapshot.estimatedTotalOperations());
                transportService.submitRequest(request.targetNode(), RecoveryTarget.Actions.TRANSLOG_OPS, translogOperationsRequest, recoveryOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            }
        });
    }
    if (logger.isTraceEnabled()) {
        logger.trace("[{}][{}] sent final batch of [{}][{}] (total: [{}]) translog operations to {}", indexName, shardId, ops, new ByteSizeValue(size), snapshot.estimatedTotalOperations(), request.targetNode());
    }
    return totalOperations;
}
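The flush rule in sendSnapshot (buffer operations, send a batch once either the operation count or the accumulated byte size crosses its limit, then send the leftover) is a general batching pattern. A self-contained sketch with illustrative names, none of them part of the Elasticsearch API:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;
import java.util.function.ToLongFunction;

// Illustrative batcher: buffers items and flushes once either limit is crossed.
final class Batcher<T> {
    private final int maxOps;
    private final long maxBytes;
    private final ToLongFunction<T> sizeOf;
    private final Consumer<List<T>> sender;
    private final List<T> buffer = new ArrayList<>();
    private long bufferedBytes = 0;

    Batcher(int maxOps, long maxBytes, ToLongFunction<T> sizeOf, Consumer<List<T>> sender) {
        this.maxOps = maxOps;
        this.maxBytes = maxBytes;
        this.sizeOf = sizeOf;
        this.sender = sender;
    }

    void add(T item) {
        buffer.add(item);
        bufferedBytes += sizeOf.applyAsLong(item);
        if (buffer.size() >= maxOps || bufferedBytes >= maxBytes) {
            flush();
        }
    }

    // sends whatever is buffered, mirroring the "send the leftover" step above
    void flush() {
        if (!buffer.isEmpty()) {
            sender.accept(new ArrayList<>(buffer));
            buffer.clear();
            bufferedBytes = 0;
        }
    }
}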
Use of org.elasticsearch.ElasticsearchException in project elasticsearch by elastic.
The class RandomObjects, method randomShardInfoFailure.
/**
 * Returns a tuple containing a randomized {@link Failure} (left side) and its expected
 * equivalent (right side) after the left side has been rendered as a {@link ToXContent}
 * and parsed back using a parsing method like {@link ShardInfo.Failure#fromXContent(XContentParser)}.
 *
 * @param random Random generator
 */
private static Tuple<Failure, Failure> randomShardInfoFailure(Random random) {
    String index = randomAsciiOfLength(random, 5);
    String indexUuid = randomAsciiOfLength(random, 5);
    int shardId = randomIntBetween(random, 1, 10);
    String nodeId = randomAsciiOfLength(random, 5);
    RestStatus status = randomFrom(random, RestStatus.INTERNAL_SERVER_ERROR, RestStatus.FORBIDDEN, RestStatus.NOT_FOUND);
    boolean primary = random.nextBoolean();
    ShardId shard = new ShardId(index, indexUuid, shardId);
    Exception actualException;
    ElasticsearchException expectedException;
    int type = randomIntBetween(random, 0, 3);
    switch (type) {
        case 0:
            actualException = new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES));
            expectedException = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, " + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]");
            break;
        case 1:
            actualException = new ShardNotFoundException(shard);
            expectedException = new ElasticsearchException("Elasticsearch exception [type=shard_not_found_exception, " + "reason=no such shard]");
            expectedException.setShard(shard);
            break;
        case 2:
            actualException = new IllegalArgumentException("Closed resource", new RuntimeException("Resource"));
            expectedException = new ElasticsearchException("Elasticsearch exception [type=illegal_argument_exception, " + "reason=Closed resource]", new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=Resource]"));
            break;
        case 3:
            actualException = new IndexShardRecoveringException(shard);
            expectedException = new ElasticsearchException("Elasticsearch exception [type=index_shard_recovering_exception, " + "reason=CurrentState[RECOVERING] Already recovering]");
            expectedException.setShard(shard);
            break;
        default:
            throw new UnsupportedOperationException("No randomized exceptions generated for type [" + type + "]");
    }
    Failure actual = new Failure(shard, nodeId, actualException, status, primary);
    Failure expected = new Failure(new ShardId(index, INDEX_UUID_NA_VALUE, shardId), nodeId, expectedException, status, primary);
    return Tuple.tuple(actual, expected);
}
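The snippet relies on randomized-test helpers from the Elasticsearch test framework. Hypothetical stand-ins that show their contracts (the real implementations live in the test infrastructure, not here):

import java.util.Random;

// Illustrative stand-ins only, not the ES test framework implementations.
final class RandomHelpers {

    // uniformly random int in [min, max], both bounds inclusive
    static int randomIntBetween(Random random, int min, int max) {
        return min + random.nextInt(max - min + 1);
    }

    // random lowercase ASCII string of the given length
    static String randomAsciiOfLength(Random random, int length) {
        StringBuilder sb = new StringBuilder(length);
        for (int i = 0; i < length; i++) {
            sb.append((char) ('a' + random.nextInt(26)));
        }
        return sb.toString();
    }

    // picks one of the given values at random
    @SafeVarargs
    static <T> T randomFrom(Random random, T... values) {
        return values[random.nextInt(values.length)];
    }
}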
Use of org.elasticsearch.ElasticSearchException in project storm-elastic-search by hmsonline (this project targets a pre-1.0 Elasticsearch, where the exception class still carried the camel-case spelling).
The class ElasticSearchState, method createIndices.
public void createIndices(TridentElasticSearchMapper mapper, List<TridentTuple> tuples) {
    BulkRequestBuilder bulkRequest = client.prepareBulk();
    Set<String> existingIndex = new HashSet<String>();
    for (TridentTuple tuple : tuples) {
        String indexName = mapper.mapToIndex(tuple);
        String type = mapper.mapToType(tuple);
        String key = mapper.mapToKey(tuple);
        Map<String, Object> data = mapper.mapToData(tuple);
        String parentId = mapper.mapToParentId(tuple);
        if (!existingIndex.contains(indexName) && !client.admin().indices().exists(new IndicesExistsRequest(indexName)).actionGet().isExists()) {
            createIndex(bulkRequest, indexName, mapper.mapToIndexSettings(tuple));
            createMapping(bulkRequest, indexName, type, mapper.mapToMappingSettings(tuple));
            existingIndex.add(indexName);
        }
        if (StringUtils.isBlank(parentId)) {
            bulkRequest.add(client.prepareIndex(indexName, type, key).setSource(data));
        } else {
            LOGGER.debug("parent: " + parentId);
            bulkRequest.add(client.prepareIndex(indexName, type, key).setSource(data).setParent(parentId));
        }
    }
    try {
        BulkResponse bulkResponse = bulkRequest.execute().actionGet();
        if (bulkResponse.hasFailures()) {
            // Index failed. Retry!
            throw new FailedException("Cannot create index via ES: " + bulkResponse.buildFailureMessage());
        }
    } catch (ElasticSearchException e) {
        StormElasticSearchUtils.handleElasticSearchException(getClass(), e);
    }
}
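The HashSet above saves one indices-exists round trip per repeated index name within a batch, and throwing FailedException from a Trident state update signals Storm to replay the batch, which is why a bulk failure surfaces that way. The same existence-caching pattern in isolation, with illustrative names:

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;
import java.util.function.Predicate;

// Illustrative per-batch cache: consult the cluster once per new index name,
// create the index if needed, and remember it for the rest of the batch.
final class IndexEnsurer {
    private final Set<String> seen = new HashSet<>();
    private final Predicate<String> existsInCluster; // stand-in for the indices-exists call
    private final Consumer<String> createIndex;      // stand-in for index + mapping creation

    IndexEnsurer(Predicate<String> existsInCluster, Consumer<String> createIndex) {
        this.existsInCluster = existsInCluster;
        this.createIndex = createIndex;
    }

    void ensure(String indexName) {
        if (seen.add(indexName) && !existsInCluster.test(indexName)) {
            createIndex.accept(indexName);
        }
    }
}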