use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest in project elasticsearch by elastic.
the class IndicesClusterStateServiceRandomUpdatesTests method randomlyUpdateClusterState.
public ClusterState randomlyUpdateClusterState(ClusterState state,
                                               Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                               Supplier<MockIndicesService> indicesServiceSupplier) {
    // randomly create new indices (until we have 200 max)
    for (int i = 0; i < randomInt(5); i++) {
        if (state.metaData().indices().size() > 200) {
            break;
        }
        String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder()
            .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
            .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        if (randomBoolean()) {
            settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true);
        }
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build()).waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metaData().hasIndex(name));
    }
    // randomly delete indices
    Set<String> indicesToDelete = new HashSet<>();
    int numberOfIndicesToDelete = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metaData().indices().keys().toArray(String.class))) {
        indicesToDelete.add(state.metaData().index(index).getIndex().getName());
    }
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        state = cluster.deleteIndices(state, deleteRequest);
        for (String index : indicesToDelete) {
            assertFalse(state.metaData().hasIndex(index));
        }
    }
    // randomly close indices
    int numberOfIndicesToClose = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToClose, state.metaData().indices().keys().toArray(String.class))) {
        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.closeIndices(state, closeIndexRequest);
    }
    // randomly open indices
    int numberOfIndicesToOpen = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metaData().indices().keys().toArray(String.class))) {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.openIndices(state, openIndexRequest);
    }
    // randomly update settings
    Set<String> indicesToUpdate = new HashSet<>();
    boolean containsClosedIndex = false;
    int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metaData().indices().keys().toArray(String.class))) {
        indicesToUpdate.add(state.metaData().index(index).getIndex().getName());
        if (state.metaData().index(index).getState() == IndexMetaData.State.CLOSE) {
            containsClosedIndex = true;
        }
    }
    if (indicesToUpdate.isEmpty() == false) {
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
        Settings.Builder settings = Settings.builder();
        if (containsClosedIndex == false) {
            settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
        updateSettingsRequest.settings(settings.build());
        state = cluster.updateSettings(state, updateSettingsRequest);
    }
    // randomly reroute
    if (rarely()) {
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // randomly start and fail allocated shards
    List<ShardRouting> startedShards = new ArrayList<>();
    List<FailedShard> failedShards = new ArrayList<>();
    for (DiscoveryNode node : state.nodes()) {
        IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
        MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
        for (MockIndexService indexService : indicesService) {
            for (MockIndexShard indexShard : indexService) {
                ShardRouting persistedShardRouting = indexShard.routingEntry();
                if (persistedShardRouting.initializing() && randomBoolean()) {
                    startedShards.add(persistedShardRouting);
                } else if (rarely()) {
                    failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception()));
                }
            }
        }
    }
    state = cluster.applyFailedShards(state, failedShards);
    state = cluster.applyStartedShards(state, startedShards);
    // randomly add and remove nodes (except current master)
    if (rarely()) {
        if (randomBoolean()) {
            // add node
            if (state.nodes().getSize() < 10) {
                DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
                state = ClusterState.builder(state).nodes(newNodes).build();
                // always reroute after a node joins
                state = cluster.reroute(state, new ClusterRerouteRequest());
                updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
            }
        } else {
            // remove node
            if (state.nodes().getDataNodes().size() > 3) {
                DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.deassociateDeadNodes(state, true, "removed and added a node");
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
                if (randomBoolean()) {
                    // and add it back
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(discoveryNode).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.reroute(state, new ClusterRerouteRequest());
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
            }
        }
    }
    return state;
}
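
The delete step in this test reduces to the usual DeleteIndexRequest pattern: collect the concrete index names and pass them to the request constructor. Below is a minimal, hypothetical sketch of the same pattern against a regular admin client; the client variable, the helper method, and the blocking call are illustrative assumptions and are not part of the test above.

// Hypothetical sketch (not from the test): delete a set of indices through the
// admin client and block until the master acknowledges the deletion.
// Assumed imports: org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest,
// org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse,
// org.elasticsearch.client.Client, java.util.Set.
void deleteIndices(Client client, Set<String> indicesToDelete) {
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest =
            new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        // actionGet() blocks until the delete response is available
        DeleteIndexResponse response = client.admin().indices().delete(deleteRequest).actionGet();
        if (response.isAcknowledged() == false) {
            throw new IllegalStateException("delete of " + indicesToDelete + " was not acknowledged");
        }
    }
}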
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest in project elasticsearch by elastic.
the class RestDeleteIndexAction method prepareRequest.
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(Strings.splitStringByCommaToArray(request.param("index")));
    deleteIndexRequest.timeout(request.paramAsTime("timeout", deleteIndexRequest.timeout()));
    deleteIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteIndexRequest.masterNodeTimeout()));
    deleteIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteIndexRequest.indicesOptions()));
    return channel -> client.admin().indices().delete(deleteIndexRequest, new AcknowledgedRestListener<>(channel));
}
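
The handler simply copies the index list, the timeout and master_timeout parameters, and the indices options from the REST request onto the DeleteIndexRequest before dispatching it. As a rough illustration, the same options can be set programmatically through the builder API; the index names, timeout values, and the choice of lenient indices options below are assumptions for the sketch, not taken from the handler.

// Hypothetical sketch: the options the REST handler reads from URL parameters,
// set directly on a delete request via the builder API.
DeleteIndexResponse response = client.admin().indices()
    .prepareDelete("logs-2017-01", "logs-2017-02")           // ?index=logs-2017-01,logs-2017-02
    .setTimeout(TimeValue.timeValueSeconds(30))               // ?timeout=30s
    .setMasterNodeTimeout(TimeValue.timeValueSeconds(30))     // ?master_timeout=30s
    .setIndicesOptions(IndicesOptions.lenientExpandOpen())    // e.g. ?ignore_unavailable=true
    .get();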
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest in project crate by crate.
the class TableCreator method deleteOrphanedPartitions.
/**
 * If orphaned partitions with the same table name still exist,
 * delete them beforehand, as they would create unwanted and possibly invalid
 * initial data.
 * <p>
 * This should never delete partitions of existing partitioned tables.
 */
private void deleteOrphanedPartitions(final CreateTableResponseListener listener, TableIdent tableIdent) {
    String partitionWildCard = PartitionName.templateName(tableIdent.schema(), tableIdent.name()) + "*";
    String[] orphans = indexNameExpressionResolver.concreteIndices(clusterService.state(), IndicesOptions.strictExpand(), partitionWildCard);
    if (orphans.length > 0) {
        if (logger.isDebugEnabled()) {
            logger.debug("Deleting orphaned partitions: {}", Joiner.on(", ").join(orphans));
        }
        transportActionProvider.transportDeleteIndexAction().execute(new DeleteIndexRequest(orphans), new ActionListener<DeleteIndexResponse>() {

            @Override
            public void onResponse(DeleteIndexResponse response) {
                if (!response.isAcknowledged()) {
                    warnNotAcknowledged("deleting orphans");
                }
                listener.onResponse(SUCCESS_RESULT);
            }

            @Override
            public void onFailure(Throwable e) {
                listener.onFailure(e);
            }
        });
    } else {
        listener.onResponse(SUCCESS_RESULT);
    }
}
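
For comparison, the same orphan cleanup can be sketched with the plain admin client and a blocking call instead of the transport action and its callback. This is a hypothetical variant, not Crate's implementation: the client variable is an assumption, while indexNameExpressionResolver, warnNotAcknowledged, listener, and SUCCESS_RESULT come from the surrounding class.

// Hypothetical blocking variant of the orphan cleanup above.
String[] orphans = indexNameExpressionResolver.concreteIndices(
        clusterService.state(), IndicesOptions.strictExpand(), partitionWildCard);
if (orphans.length > 0) {
    DeleteIndexResponse response =
            client.admin().indices().delete(new DeleteIndexRequest(orphans)).actionGet();
    if (!response.isAcknowledged()) {
        warnNotAcknowledged("deleting orphans");
    }
}
listener.onResponse(SUCCESS_RESULT);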
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest in project ecs-dashboard by carone1.
the class ElasticBillingDAO method initBillingBucketIndex.
// =======================
// Billing bucket methods
// =======================
private void initBillingBucketIndex(Date collectionTime) {
    String collectionDayString = DATA_DATE_FORMAT.format(collectionTime);
    billingBucketIndexDayName = BILLING_BUCKET_INDEX_NAME + "-" + collectionDayString;
    if (elasticClient.admin().indices().exists(new IndicesExistsRequest(billingBucketIndexDayName)).actionGet().isExists()) {
        // Index already exists, need to truncate it and recreate it
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(billingBucketIndexDayName);
        ActionFuture<DeleteIndexResponse> futureResult = elasticClient.admin().indices().delete(deleteIndexRequest);
        // Wait until deletion is done
        while (!futureResult.isDone()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }
    elasticClient.admin().indices().create(new CreateIndexRequest(billingBucketIndexDayName)).actionGet();
    try {
        PutMappingResponse putMappingResponse = elasticClient.admin().indices()
            .preparePutMapping(billingBucketIndexDayName)
            .setType(BILLING_BUCKET_INDEX_TYPE)
            .setSource(XContentFactory.jsonBuilder().prettyPrint()
                .startObject()
                .startObject(BILLING_BUCKET_INDEX_TYPE)
                .startObject("properties")
                .startObject(BucketBillingInfo.NAME_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(BucketBillingInfo.NAME_TAG + ANALYZED_TAG).field("type", "string").field("index", ANALYZED_INDEX).endObject()
                .startObject(BucketBillingInfo.NAMESPACE_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(BucketBillingInfo.TOTAL_OBJECTS_TAG).field("type", "long").endObject()
                .startObject(BucketBillingInfo.TOTAL_SIZE_TAG).field("type", "long").endObject()
                .startObject(BucketBillingInfo.TOTAL_SIZE_UNIT_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(BucketBillingInfo.VPOOL_ID_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(BucketBillingInfo.API_TYPE).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(COLLECTION_TIME).field("type", "date").field("format", "strict_date_optional_time||epoch_millis").endObject()
                .endObject()
                .startArray("dynamic_templates")
                .startObject()
                .startObject("notanalyzed")
                .field("match", "*")
                .field("match_mapping_type", "string")
                .startObject("mapping").field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .endObject()
                .endObject()
                .endArray()
                .endObject()
                .endObject())
            .execute().actionGet();
        if (putMappingResponse.isAcknowledged()) {
            LOGGER.info("Index Created: " + billingBucketIndexDayName);
        } else {
            LOGGER.error("Index {} did not exist. While attempting to create the index from stored ElasticSearch Templates we were unable to get an acknowledgement.", billingBucketIndexDayName);
            LOGGER.error("Error Message: {}", putMappingResponse.toString());
            throw new RuntimeException("Unable to create index " + billingBucketIndexDayName);
        }
    } catch (IOException e) {
        throw new RuntimeException("Unable to create index " + billingBucketIndexDayName + " " + e.getMessage());
    }
}
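
The Thread.sleep polling loop above is not strictly necessary: ActionFuture also exposes actionGet(), which blocks until the delete response is available and rethrows any failure. A minimal sketch of the same truncate-and-recreate step under that assumption follows, reusing the same elasticClient and index-name field; the warning log line is an illustrative addition.

// Hypothetical variant of the block above: block on the future directly
// instead of polling isDone() in a sleep loop.
if (elasticClient.admin().indices()
        .exists(new IndicesExistsRequest(billingBucketIndexDayName)).actionGet().isExists()) {
    DeleteIndexResponse deleteResponse = elasticClient.admin().indices()
            .delete(new DeleteIndexRequest(billingBucketIndexDayName))
            .actionGet();    // waits for the deletion to complete
    if (!deleteResponse.isAcknowledged()) {
        LOGGER.warn("Deletion of index {} was not acknowledged", billingBucketIndexDayName);
    }
}
elasticClient.admin().indices().create(new CreateIndexRequest(billingBucketIndexDayName)).actionGet();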
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest in project ecs-dashboard by carone1.
the class ElasticBillingDAO method initObjectBucketIndex.
// ======================
// Object bucket methods
// ======================
private void initObjectBucketIndex(Date collectionTime) {
    String collectionDayString = DATA_DATE_FORMAT.format(collectionTime);
    objectBucketIndexDayName = OBJECT_BUCKET_INDEX_NAME + "-" + collectionDayString;
    if (elasticClient.admin().indices().exists(new IndicesExistsRequest(objectBucketIndexDayName)).actionGet().isExists()) {
        // Index already exists, need to truncate it and recreate it
        DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(objectBucketIndexDayName);
        ActionFuture<DeleteIndexResponse> futureResult = elasticClient.admin().indices().delete(deleteIndexRequest);
        // Wait until deletion is done
        while (!futureResult.isDone()) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                // TODO Auto-generated catch block
                e.printStackTrace();
            }
        }
    }
    // create index
    elasticClient.admin().indices().create(new CreateIndexRequest(objectBucketIndexDayName)).actionGet();
    try {
        PutMappingResponse putMappingResponse = elasticClient.admin().indices()
            .preparePutMapping(objectBucketIndexDayName)
            .setType(OBJECT_BUCKET_INDEX_TYPE)
            .setSource(XContentFactory.jsonBuilder().prettyPrint()
                .startObject()
                .startObject(OBJECT_BUCKET_INDEX_TYPE)
                .startObject("properties")
                .startObject(ObjectBucket.CREATED_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.SOFT_QUOTA_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.FS_ACCESS_ENABLED_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.LOCKED_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.V_POOL_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.NAMESPACE_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.OWNER_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.IS_STALE_ALLOWED_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.IS_ENCRYPTION_ENABLED_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_RETENTION_TAG).field("type", "long").endObject()
                .startObject(ObjectBucket.BLOCK_SIZE_TAG).field("type", "long").endObject()
                .startObject(ObjectBucket.NOTIFICATION_SIZE_TAG).field("type", "long").endObject()
                .startObject(ObjectBucket.API_TYPE_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.RETENTION_TAG).field("type", "long").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_FILE_READ_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_FILE_WRITE_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_FILE_EXECUTE_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_DIR_READ_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_DIR_WRITE_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_DIR_EXECUTE_PERMISSION_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.DEFAULT_GROUP_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.NAME_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.NAME_TAG + ANALYZED_TAG).field("type", "string").field("index", ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.ID_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.LINK_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.CREATION_TIME_TAG).field("type", "date").field("format", "strict_date_optional_time||epoch_millis").endObject()
                .startObject(ObjectBucket.INACTIVE_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.GLOBAL_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.REMOTE_TAG).field("type", "boolean").endObject()
                .startObject(ObjectBucket.VDC_TAG).field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .startObject(ObjectBucket.INTERNAL_TAG).field("type", "boolean").endObject()
                .startObject(COLLECTION_TIME).field("type", "date").field("format", "strict_date_optional_time||epoch_millis").endObject()
                .endObject()
                .startArray("dynamic_templates")
                .startObject()
                .startObject("notanalyzed")
                .field("match", "*")
                .field("match_mapping_type", "string")
                .startObject("mapping").field("type", "string").field("index", NOT_ANALYZED_INDEX).endObject()
                .endObject()
                .endObject()
                .endArray()
                .endObject()
                .endObject())
            .execute().actionGet();
        if (putMappingResponse.isAcknowledged()) {
            LOGGER.info("Index Created: " + objectBucketIndexDayName);
        } else {
            LOGGER.error("Index {} did not exist. While attempting to create the index from stored ElasticSearch Templates we were unable to get an acknowledgement.", objectBucketIndexDayName);
            LOGGER.error("Error Message: {}", putMappingResponse.toString());
            throw new RuntimeException("Unable to create index " + objectBucketIndexDayName);
        }
    } catch (IOException e) {
        throw new RuntimeException("Unable to create index " + objectBucketIndexDayName + " " + e.getMessage());
    }
}