Use of org.elasticsearch.ElasticsearchTimeoutException in project nifi by apache.
Class TestFetchElasticsearch, method testFetchElasticsearchOnTriggerWithExceptions.
@Test
public void testFetchElasticsearchOnTriggerWithExceptions() throws IOException {
    FetchElasticsearchTestProcessor processor = new FetchElasticsearchTestProcessor(true);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(FetchElasticsearch.INDEX, "doc");
    runner.setProperty(FetchElasticsearch.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(FetchElasticsearch.DOC_ID, "${doc_id}");
    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652140");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652141");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652141");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652141");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652141");
        }
    });
    runner.run(1, true, true);
    // This test generates an exception on execute(), which routes to failure
    runner.assertTransferCount(FetchElasticsearch.REL_FAILURE, 1);
}
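The assertions above encode a simple routing contract: connectivity and timeout failures (NoNodeAvailableException, ElasticsearchTimeoutException, ReceiveTimeoutTransportException, NodeClosedException) are expected on the retry relationship, while an ElasticsearchParseException is expected on failure. The snippet below is a minimal, hypothetical sketch of that classification; the class, method name, and string return values are illustrative assumptions, not NiFi's processor code.

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.client.transport.NoNodeAvailableException;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.transport.ReceiveTimeoutTransportException;

public final class ExceptionRouting {

    // Transient problems (no node available, timeouts, node closed) should be
    // retried; an ElasticsearchParseException indicates bad input and is terminal.
    public static String relationshipFor(Exception e) {
        if (e instanceof NoNodeAvailableException
                || e instanceof ElasticsearchTimeoutException
                || e instanceof ReceiveTimeoutTransportException
                || e instanceof NodeClosedException) {
            return "retry";
        }
        if (e instanceof ElasticsearchParseException) {
            return "failure";
        }
        return "failure";
    }
}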
Use of org.elasticsearch.ElasticsearchTimeoutException in project nifi by apache.
Class TestPutElasticsearch, method testPutElasticsearchOnTriggerWithExceptions.
@Test
public void testPutElasticsearchOnTriggerWithExceptions() throws IOException {
    PutElasticsearchTestProcessor processor = new PutElasticsearchTestProcessor(false);
    runner = TestRunners.newTestRunner(processor);
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.CLUSTER_NAME, "elasticsearch");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.HOSTS, "127.0.0.1:9300");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.PING_TIMEOUT, "5s");
    runner.setProperty(AbstractElasticsearchTransportClientProcessor.SAMPLER_INTERVAL, "5s");
    runner.setProperty(PutElasticsearch.INDEX, "doc");
    runner.setProperty(PutElasticsearch.TYPE, "status");
    runner.setValidateExpressionUsage(true);
    runner.setProperty(PutElasticsearch.ID_ATTRIBUTE, "doc_id");
    // No Node Available exception
    processor.setExceptionToThrow(new NoNodeAvailableException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652140");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Elasticsearch Timeout exception
    processor.setExceptionToThrow(new ElasticsearchTimeoutException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652141");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Receive Timeout Transport exception
    processor.setExceptionToThrow(new ReceiveTimeoutTransportException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652142");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Node Closed exception
    processor.setExceptionToThrow(new NodeClosedException(mock(StreamInput.class)));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652143");
        }
    });
    runner.run(1, true, true);
    runner.assertAllFlowFilesTransferred(FetchElasticsearch.REL_RETRY, 1);
    runner.clearTransferState();
    // Elasticsearch Parse exception
    processor.setExceptionToThrow(new ElasticsearchParseException("test"));
    runner.enqueue(docExample, new HashMap<String, String>() {
        {
            put("doc_id", "28039652144");
        }
    });
    runner.run(1, true, true);
    // This test generates an exception on execute(), which routes to failure
    runner.assertTransferCount(PutElasticsearch.REL_FAILURE, 1);
}
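The FetchElasticsearchTestProcessor and PutElasticsearchTestProcessor classes used by these tests are not shown on this page. As a rough, assumed illustration only, a test double can inject an arbitrary exception with Mockito so the code under test hits the catch branches being verified; the interface and method names below are invented for the sketch and are not NiFi APIs.

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public final class ExceptionInjectionSketch {

    // Invented interface standing in for the Elasticsearch call the processor makes.
    interface DocumentFetcher {
        String fetch(String docId) throws Exception;
    }

    // Returns a mock whose fetch() always throws the configured exception,
    // mimicking what setExceptionToThrow(...) enables in the tests above.
    public static DocumentFetcher failingFetcher(Exception exceptionToThrow) throws Exception {
        DocumentFetcher fetcher = mock(DocumentFetcher.class);
        when(fetcher.fetch(anyString())).thenThrow(exceptionToThrow);
        return fetcher;
    }
}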
Use of org.elasticsearch.ElasticsearchTimeoutException in project MSEC by Tencent.
Class ESHelper, method waitForClusterReady.
public void waitForClusterReady(final TransportClient client, ArrayList<String> ips, final ClusterHealthStatus status) throws IOException {
    Logger logger = Logger.getLogger(ESHelper.class);
    int timeout = 60;
    int node_num = 0;
    long begin = System.currentTimeMillis() / 1000L;
    long end = begin;
    Set<String> done_ips = new HashSet<>();
    try {
        logger.info("waiting for cluster state: " + status.name());
        ClusterHealthResponse healthResponse = null;
        while (true) {
            try {
                healthResponse = client.admin().cluster().prepareHealth().setWaitForStatus(status).setTimeout(TimeValue.timeValueSeconds(5)).execute().actionGet();
            } catch (NoNodeAvailableException | MasterNotDiscoveredException ex) {
                end = System.currentTimeMillis() / 1000L;
                if (end - begin >= timeout)
                    throw new IOException("Server start timeout");
                logger.info("server still starting/discovering, retry...");
                try {
                    TimeUnit.SECONDS.sleep(2);
                } catch (InterruptedException e) {
                    // interruption is ignored; the loop simply retries
                }
                continue;
            }
            if (healthResponse != null && healthResponse.isTimedOut()) {
                end = System.currentTimeMillis() / 1000L;
                if (end - begin >= timeout) // timeout
                    throw new IOException("cluster not ready, current state is " + healthResponse.getStatus().name());
                continue;
            } else {
                logger.info("cluster state ok");
                int new_node_num = healthResponse.getNumberOfNodes();
                if (new_node_num > node_num) {
                    node_num = new_node_num;
                    NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().all().get();
                    for (NodeInfo node : nodeInfos.getNodes()) {
                        if (!done_ips.contains(node.getHostname()) && ips.contains(node.getHostname())) {
                            updateStatus(node.getHostname(), "Done.");
                            done_ips.add(node.getHostname());
                        }
                    }
                    if (done_ips.size() == ips.size())
                        break;
                    end = System.currentTimeMillis() / 1000L;
                    if (end - begin >= timeout) // timeout
                        break;
                }
            }
        }
    } catch (final ElasticsearchTimeoutException e) {
        throw new IOException("ES API timeout");
    }
}
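A hedged usage sketch for the helper above. The TransportClient construction is elided because the client API differs across Elasticsearch versions, and an already-wired ESHelper instance is assumed to be available.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

public final class WaitForClusterReadyExample {

    public static void waitForGreen(ESHelper helper, TransportClient client) {
        ArrayList<String> ips = new ArrayList<>(Arrays.asList("10.0.0.1", "10.0.0.2"));
        try {
            // Blocks until the cluster reaches GREEN and all listed nodes have joined,
            // or the helper's roughly 60-second deadline expires.
            helper.waitForClusterReady(client, ips, ClusterHealthStatus.GREEN);
        } catch (IOException e) {
            // Both ES-side timeouts (ElasticsearchTimeoutException) and the helper's
            // own deadline surface here as IOException.
            System.err.println("cluster did not become ready: " + e.getMessage());
        }
    }
}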
Use of org.elasticsearch.ElasticsearchTimeoutException in project crate by crate.
Class SchemaUpdateClient, method blockingUpdateOnMaster.
public void blockingUpdateOnMaster(Index index, Mapping mappingUpdate) {
    TimeValue timeout = this.dynamicMappingUpdateTimeout;
    var response = FutureUtils.get(schemaUpdateAction.execute(new SchemaUpdateRequest(index, mappingUpdate.toString())));
    if (!response.isAcknowledged()) {
        throw new ElasticsearchTimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
    }
}
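A hypothetical call site for the method above: because an unacknowledged schema update surfaces as an ElasticsearchTimeoutException rather than a return value, a caller that wants to retry or report the condition has to catch it. The surrounding class and variable names are assumptions, not Crate's actual code.

import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.Mapping;

public final class SchemaUpdateCaller {

    // index and mappingUpdate are assumed to come from the surrounding indexing code.
    static void applyMapping(SchemaUpdateClient schemaUpdateClient, Index index, Mapping mappingUpdate) {
        try {
            schemaUpdateClient.blockingUpdateOnMaster(index, mappingUpdate);
        } catch (ElasticsearchTimeoutException e) {
            // The mapping change was submitted but not acknowledged in time;
            // a caller might retry here or propagate a clearer error.
            throw new IllegalStateException("dynamic mapping update was not acknowledged", e);
        }
    }
}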
Use of org.elasticsearch.ElasticsearchTimeoutException in project crate by crate.
Class TransportAddVotingConfigExclusionsAction, method masterOperation.
@Override
protected void masterOperation(AddVotingConfigExclusionsRequest request, ClusterState state, ActionListener<AddVotingConfigExclusionsResponse> listener) throws Exception {
    // throws IAE if no nodes matched or maximum exceeded
    resolveVotingConfigExclusionsAndCheckMaximum(request, state);
    clusterService.submitStateUpdateTask("add-voting-config-exclusions", new ClusterStateUpdateTask(Priority.URGENT) {

        private Set<VotingConfigExclusion> resolvedExclusions;

        @Override
        public ClusterState execute(ClusterState currentState) {
            assert resolvedExclusions == null : resolvedExclusions;
            resolvedExclusions = resolveVotingConfigExclusionsAndCheckMaximum(request, currentState);
            final CoordinationMetadata.Builder builder = CoordinationMetadata.builder(currentState.coordinationMetadata());
            resolvedExclusions.forEach(builder::addVotingConfigExclusion);
            final Metadata newMetadata = Metadata.builder(currentState.metadata()).coordinationMetadata(builder.build()).build();
            final ClusterState newState = ClusterState.builder(currentState).metadata(newMetadata).build();
            assert newState.getVotingConfigExclusions().size() <= MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING.get(currentState.metadata().settings());
            return newState;
        }

        @Override
        public void onFailure(String source, Exception e) {
            listener.onFailure(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            final ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.getTimeout(), logger);
            final Set<String> excludedNodeIds = resolvedExclusions.stream().map(VotingConfigExclusion::getNodeId).collect(Collectors.toSet());
            final Predicate<ClusterState> allNodesRemoved = clusterState -> {
                final Set<String> votingConfigNodeIds = clusterState.getLastCommittedConfiguration().getNodeIds();
                return excludedNodeIds.stream().noneMatch(votingConfigNodeIds::contains);
            };
            final Listener clusterStateListener = new Listener() {

                @Override
                public void onNewClusterState(ClusterState state) {
                    listener.onResponse(new AddVotingConfigExclusionsResponse());
                }

                @Override
                public void onClusterServiceClose() {
                    listener.onFailure(new ElasticsearchException("cluster service closed while waiting for voting config exclusions " + resolvedExclusions + " to take effect"));
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    listener.onFailure(new ElasticsearchTimeoutException("timed out waiting for voting config exclusions " + resolvedExclusions + " to take effect"));
                }
            };
            if (allNodesRemoved.test(newState)) {
                clusterStateListener.onNewClusterState(newState);
            } else {
                observer.waitForNextChange(clusterStateListener, allNodesRemoved);
            }
        }
    });
}
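On the caller side, the timeout installed above arrives through ActionListener.onFailure. The sketch below shows one way a listener might distinguish that case; the class name, logger, and messages are assumptions, and the import path for the response class follows upstream Elasticsearch.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsResponse;

public final class VotingExclusionsCallback {

    private static final Logger LOGGER = LogManager.getLogger(VotingExclusionsCallback.class);

    public static ActionListener<AddVotingConfigExclusionsResponse> listener() {
        return new ActionListener<AddVotingConfigExclusionsResponse>() {
            @Override
            public void onResponse(AddVotingConfigExclusionsResponse response) {
                LOGGER.info("voting config exclusions are in effect");
            }

            @Override
            public void onFailure(Exception e) {
                if (e instanceof ElasticsearchTimeoutException) {
                    // The exclusions were written to the cluster state, but the excluded
                    // nodes were still in the committed voting configuration when the
                    // request timeout elapsed.
                    LOGGER.warn("timed out waiting for voting config exclusions to take effect", e);
                } else {
                    LOGGER.error("failed to add voting config exclusions", e);
                }
            }
        };
    }
}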