Use of org.elasticsearch.cluster.block.ClusterBlockException in project rssriver by dadoonet.
The class RssRiver, method start.
@Override
public void start() {
    if (logger.isInfoEnabled()) logger.info("Starting rss stream");
    try {
        client.admin().indices().prepareCreate(indexName).execute().actionGet();
    } catch (Exception e) {
        if (ExceptionsHelper.unwrapCause(e) instanceof IndexAlreadyExistsException) {
            // that's fine, the index already exists
        } else if (ExceptionsHelper.unwrapCause(e) instanceof ClusterBlockException) {
            // ok, not recovered yet..., let's start indexing and hope we
            // recover by the first bulk.
            // TODO: a smarter approach would be to register a cluster state
            // listener here and only start sampling once the block is removed...
        } else {
            logger.warn("failed to create index [{}], disabling river...", e, indexName);
            return;
        }
    }
    try {
        pushMapping(indexName, typeName, RssToJson.buildRssMapping(typeName, raw));
    } catch (Exception e) {
        logger.warn("failed to create mapping for [{}/{}], disabling river...", e, indexName, typeName);
        return;
    }
    // Creating bulk processor
    this.bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {

        @Override
        public void beforeBulk(long executionId, BulkRequest request) {
            logger.debug("Going to execute new bulk composed of {} actions", request.numberOfActions());
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
            logger.debug("Executed bulk composed of {} actions", request.numberOfActions());
            if (response.hasFailures()) {
                // note the {} placeholder: without it, the failure message
                // passed as an argument is never rendered in the log line
                logger.warn("There were failures while executing bulk: {}", response.buildFailureMessage());
                if (logger.isDebugEnabled()) {
                    for (BulkItemResponse item : response.getItems()) {
                        if (item.isFailed()) {
                            logger.debug("Error for {}/{}/{} for {} operation: {}", item.getIndex(),
                                    item.getType(), item.getId(), item.getOpType(), item.getFailureMessage());
                        }
                    }
                }
            }
        }

        @Override
        public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
            logger.warn("Error executing bulk", failure);
        }
    })
            .setBulkActions(bulkSize)
            .setConcurrentRequests(maxConcurrentBulk)
            .setFlushInterval(bulkFlushInterval)
            .build();
    // We create as many threads as there are feeds
    threads = new ArrayList<Thread>(feedsDefinition.size());
    int threadNumber = 0;
    for (RssRiverFeedDefinition feedDefinition : feedsDefinition) {
        Thread thread = EsExecutors.daemonThreadFactory(settings.globalSettings(),
                "rss_slurper_" + threadNumber).newThread(new RSSParser(feedDefinition));
        thread.start();
        threads.add(thread);
        threadNumber++;
    }
}
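The TODO above hints at a cleaner startup path. A minimal sketch of that idea, assuming a clusterService is injected into the river and that startFeedThreads() is a hypothetical helper wrapping the thread-creation loop shown above:

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.gateway.GatewayService;

// Sketch: start the feed threads only once the "state not recovered" global
// block is gone, instead of indexing blindly and hoping for the best.
// clusterService is an assumed injected dependency; startFeedThreads() is a
// hypothetical helper, not part of the original river.
clusterService.add(new ClusterStateListener() {
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false) {
            clusterService.remove(this); // one-shot: deregister once recovered
            startFeedThreads();
        }
    }
});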
Use of org.elasticsearch.cluster.block.ClusterBlockException in project elasticsearch by elastic.
The class TribeIT, method testGlobalReadWriteBlocks.
public void testGlobalReadWriteBlocks() throws Exception {
    Settings additionalSettings = Settings.builder()
            .put("tribe.blocks.write", true)
            .put("tribe.blocks.metadata", true)
            .build();
    try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
        // Creates 2 indices, test1 on cluster1 and test2 on cluster2
        assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
        ensureGreen(cluster1.client());
        assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
        ensureGreen(cluster2.client());
        // Wait for the tribe node to connect to the two remote clusters
        assertNodes(ALL);
        // Wait for the tribe node to retrieve the indices into its cluster state
        assertIndicesExist(client(), "test1", "test2");
        // Writes are not allowed through the tribe node
        ClusterBlockException e = expectThrows(ClusterBlockException.class,
                () -> client().prepareIndex("test1", "type1").setSource("field", "value").get());
        assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/11/tribe node, write not allowed]"));
        e = expectThrows(ClusterBlockException.class,
                () -> client().prepareIndex("test2", "type2").setSource("field", "value").get());
        assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/11/tribe node, write not allowed]"));
        e = expectThrows(ClusterBlockException.class,
                () -> client().admin().indices().prepareForceMerge("test1").get());
        assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/10/tribe node, metadata not allowed]"));
        e = expectThrows(ClusterBlockException.class,
                () -> client().admin().indices().prepareForceMerge("test2").get());
        assertThat(e.getMessage(), containsString("blocked by: [BAD_REQUEST/10/tribe node, metadata not allowed]"));
    }
}
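For context, the two blocks matched by the assertions above are plain ClusterBlock constants registered by the tribe node. A sketch of how such constants might be declared against the 5.x ClusterBlock constructor; the ids, descriptions, and BAD_REQUEST status come straight from the expected messages, while the boolean flags and block levels are assumptions:

import java.util.EnumSet;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.rest.RestStatus;

// Ids 10/11, the descriptions and BAD_REQUEST are taken from the assertions
// above; retryable/disableStatePersistence flags and the levels are assumed.
public static final ClusterBlock TRIBE_METADATA_BLOCK =
        new ClusterBlock(10, "tribe node, metadata not allowed", false, false,
                RestStatus.BAD_REQUEST,
                EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE));
public static final ClusterBlock TRIBE_WRITE_BLOCK =
        new ClusterBlock(11, "tribe node, write not allowed", false, false,
                RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE));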
Use of org.elasticsearch.cluster.block.ClusterBlockException in project elasticsearch by elastic.
The class TribeIT, method testIndexWriteBlocks.
public void testIndexWriteBlocks() throws Exception {
    Settings additionalSettings = Settings.builder()
            .put("tribe.blocks.write.indices", "block_*")
            .build();
    try (Releasable tribeNode = startTribeNode(ALL, additionalSettings)) {
        // Creates 2 indices on each remote cluster: test1 and block_test1 on cluster1,
        // test2 and block_test2 on cluster2
        assertAcked(cluster1.client().admin().indices().prepareCreate("test1"));
        assertAcked(cluster1.client().admin().indices().prepareCreate("block_test1"));
        ensureGreen(cluster1.client());
        assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
        assertAcked(cluster2.client().admin().indices().prepareCreate("block_test2"));
        ensureGreen(cluster2.client());
        // Wait for the tribe node to connect to the two remote clusters
        assertNodes(ALL);
        // Wait for the tribe node to retrieve the indices into its cluster state
        assertIndicesExist(client(), "test1", "test2", "block_test1", "block_test2");
        // Writes are allowed through the tribe node for the test1/test2 indices
        client().prepareIndex("test1", "type1").setSource("field", "value").get();
        client().prepareIndex("test2", "type2").setSource("field", "value").get();
        ClusterBlockException e;
        e = expectThrows(ClusterBlockException.class,
                () -> client().prepareIndex("block_test1", "type1").setSource("foo", 0).get());
        assertThat(e.getMessage(), containsString("blocked by: [FORBIDDEN/8/index write (api)]"));
        e = expectThrows(ClusterBlockException.class,
                () -> client().prepareIndex("block_test2", "type2").setSource("foo", 0).get());
        assertThat(e.getMessage(), containsString("blocked by: [FORBIDDEN/8/index write (api)]"));
    }
}
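Outside of tests, a caller hitting such a block can inspect the exception rather than string-match its message. A minimal sketch, assuming the same client() fixture; the retry decision is driven by ClusterBlockException#retryable():

import org.elasticsearch.cluster.block.ClusterBlockException;

// Sketch: distinguish transient blocks (e.g. "state not recovered", which
// may clear on its own) from permanent ones like the index write block above.
try {
    client().prepareIndex("block_test1", "type1").setSource("foo", 0).get();
} catch (ClusterBlockException e) {
    if (e.retryable()) {
        // transient block: back off and try again later
    } else {
        throw e; // permanent block: surface it to the caller
    }
}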
Use of org.elasticsearch.cluster.block.ClusterBlockException in project elasticsearch by elastic.
The class ElasticsearchAssertions, method assertBlocked.
/**
 * Checks that all shard requests of a replicated broadcast request failed due to a cluster block
 *
 * @param replicatedBroadcastResponse the response that should only contain failed shard responses
 */
public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) {
    assertThat("all shard requests should have failed",
            replicatedBroadcastResponse.getFailedShards(),
            Matchers.equalTo(replicatedBroadcastResponse.getTotalShards()));
    for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
        ClusterBlockException clusterBlockException =
                (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class);
        assertNotNull("expected the cause of failure to be a ClusterBlockException but got "
                + exception.getCause().getMessage(), clusterBlockException);
        assertThat(clusterBlockException.blocks().size(), greaterThan(0));
        assertThat(clusterBlockException.status(), CoreMatchers.equalTo(RestStatus.FORBIDDEN));
    }
}
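A typical call site, sketched with a hypothetical index name: refresh is a replicated broadcast action, so running it against an index under a cluster block should yield only failed shard responses:

// Hypothetical usage: with a block in place on "test", every shard-level
// refresh request should fail with a FORBIDDEN ClusterBlockException.
assertBlocked(client().admin().indices().prepareRefresh("test").get());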
Use of org.elasticsearch.cluster.block.ClusterBlockException in project elasticsearch by elastic.
The class RandomObjects, method randomShardInfoFailure.
/**
 * Returns a tuple that contains a randomized {@link Failure} value (left side) and its corresponding
 * value (right side) after it has been printed out as a {@link ToXContent} and parsed back using a parsing
 * method like {@link ShardInfo.Failure#fromXContent(XContentParser)}.
 *
 * @param random Random generator
 */
private static Tuple<Failure, Failure> randomShardInfoFailure(Random random) {
    String index = randomAsciiOfLength(random, 5);
    String indexUuid = randomAsciiOfLength(random, 5);
    int shardId = randomIntBetween(random, 1, 10);
    String nodeId = randomAsciiOfLength(random, 5);
    RestStatus status = randomFrom(random, RestStatus.INTERNAL_SERVER_ERROR, RestStatus.FORBIDDEN, RestStatus.NOT_FOUND);
    boolean primary = random.nextBoolean();
    ShardId shard = new ShardId(index, indexUuid, shardId);
    Exception actualException;
    ElasticsearchException expectedException;
    int type = randomIntBetween(random, 0, 3);
    switch (type) {
        case 0:
            actualException = new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES));
            expectedException = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, "
                    + "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]");
            break;
        case 1:
            actualException = new ShardNotFoundException(shard);
            expectedException = new ElasticsearchException("Elasticsearch exception [type=shard_not_found_exception, "
                    + "reason=no such shard]");
            expectedException.setShard(shard);
            break;
        case 2:
            actualException = new IllegalArgumentException("Closed resource", new RuntimeException("Resource"));
            expectedException = new ElasticsearchException("Elasticsearch exception [type=illegal_argument_exception, "
                    + "reason=Closed resource]",
                    new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=Resource]"));
            break;
        case 3:
            actualException = new IndexShardRecoveringException(shard);
            expectedException = new ElasticsearchException("Elasticsearch exception [type=index_shard_recovering_exception, "
                    + "reason=CurrentState[RECOVERING] Already recovering]");
            expectedException.setShard(shard);
            break;
        default:
            throw new UnsupportedOperationException("No randomized exceptions generated for type [" + type + "]");
    }
    Failure actual = new Failure(shard, nodeId, actualException, status, primary);
    Failure expected = new Failure(new ShardId(index, INDEX_UUID_NA_VALUE, shardId), nodeId, expectedException, status, primary);
    return Tuple.tuple(actual, expected);
}
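A sketch of the round trip this helper supports; the serialization helpers and parser positioning below are assumptions, not the exact test harness API:

// Hypothetical round-trip check: render the "actual" failure to JSON,
// parse it back, and compare the result against the "expected" failure.
Tuple<Failure, Failure> failures = randomShardInfoFailure(random());
BytesReference bytes = XContentHelper.toXContent(failures.v1(), XContentType.JSON, false);
try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) {
    parser.nextToken(); // advance to the failure object (positioning is an assumption)
    Failure parsed = ShardInfo.Failure.fromXContent(parser);
    assertEquals(failures.v2().index(), parsed.index());
    assertEquals(failures.v2().status(), parsed.status());
    assertEquals(failures.v2().primary(), parsed.primary());
}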