use of org.elasticsearch.action.admin.indices.create.CreateIndexRequest in project elasticsearch by elastic.
the class IndicesClusterStateServiceRandomUpdatesTests method randomlyUpdateClusterState.
public ClusterState randomlyUpdateClusterState(ClusterState state,
                                               Map<DiscoveryNode, IndicesClusterStateService> clusterStateServiceMap,
                                               Supplier<MockIndicesService> indicesServiceSupplier) {
    // randomly create new indices (until we have 200 max)
    for (int i = 0; i < randomInt(5); i++) {
        if (state.metaData().indices().size() > 200) {
            break;
        }
        String name = "index_" + randomAsciiOfLength(15).toLowerCase(Locale.ROOT);
        Settings.Builder settingsBuilder = Settings.builder()
            .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
            .put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        if (randomBoolean()) {
            settingsBuilder.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true);
        }
        CreateIndexRequest request = new CreateIndexRequest(name, settingsBuilder.build())
            .waitForActiveShards(ActiveShardCount.NONE);
        state = cluster.createIndex(state, request);
        assertTrue(state.metaData().hasIndex(name));
    }
    // randomly delete indices
    Set<String> indicesToDelete = new HashSet<>();
    int numberOfIndicesToDelete = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToDelete, state.metaData().indices().keys().toArray(String.class))) {
        indicesToDelete.add(state.metaData().index(index).getIndex().getName());
    }
    if (indicesToDelete.isEmpty() == false) {
        DeleteIndexRequest deleteRequest = new DeleteIndexRequest(indicesToDelete.toArray(new String[indicesToDelete.size()]));
        state = cluster.deleteIndices(state, deleteRequest);
        for (String index : indicesToDelete) {
            assertFalse(state.metaData().hasIndex(index));
        }
    }
    // randomly close indices
    int numberOfIndicesToClose = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToClose, state.metaData().indices().keys().toArray(String.class))) {
        CloseIndexRequest closeIndexRequest = new CloseIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.closeIndices(state, closeIndexRequest);
    }
    // randomly open indices
    int numberOfIndicesToOpen = randomInt(Math.min(1, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToOpen, state.metaData().indices().keys().toArray(String.class))) {
        OpenIndexRequest openIndexRequest = new OpenIndexRequest(state.metaData().index(index).getIndex().getName());
        state = cluster.openIndices(state, openIndexRequest);
    }
    // randomly update settings
    Set<String> indicesToUpdate = new HashSet<>();
    boolean containsClosedIndex = false;
    int numberOfIndicesToUpdate = randomInt(Math.min(2, state.metaData().indices().size()));
    for (String index : randomSubsetOf(numberOfIndicesToUpdate, state.metaData().indices().keys().toArray(String.class))) {
        indicesToUpdate.add(state.metaData().index(index).getIndex().getName());
        if (state.metaData().index(index).getState() == IndexMetaData.State.CLOSE) {
            containsClosedIndex = true;
        }
    }
    if (indicesToUpdate.isEmpty() == false) {
        UpdateSettingsRequest updateSettingsRequest = new UpdateSettingsRequest(indicesToUpdate.toArray(new String[indicesToUpdate.size()]));
        Settings.Builder settings = Settings.builder();
        if (containsClosedIndex == false) {
            settings.put(SETTING_NUMBER_OF_REPLICAS, randomInt(2));
        }
        settings.put("index.refresh_interval", randomIntBetween(1, 5) + "s");
        updateSettingsRequest.settings(settings.build());
        state = cluster.updateSettings(state, updateSettingsRequest);
    }
    // randomly reroute
    if (rarely()) {
        state = cluster.reroute(state, new ClusterRerouteRequest());
    }
    // randomly start and fail allocated shards
    List<ShardRouting> startedShards = new ArrayList<>();
    List<FailedShard> failedShards = new ArrayList<>();
    for (DiscoveryNode node : state.nodes()) {
        IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node);
        MockIndicesService indicesService = (MockIndicesService) indicesClusterStateService.indicesService;
        for (MockIndexService indexService : indicesService) {
            for (MockIndexShard indexShard : indexService) {
                ShardRouting persistedShardRouting = indexShard.routingEntry();
                if (persistedShardRouting.initializing() && randomBoolean()) {
                    startedShards.add(persistedShardRouting);
                } else if (rarely()) {
                    failedShards.add(new FailedShard(persistedShardRouting, "fake shard failure", new Exception()));
                }
            }
        }
    }
    state = cluster.applyFailedShards(state, failedShards);
    state = cluster.applyStartedShards(state, startedShards);
    // randomly add and remove nodes (except current master)
    if (rarely()) {
        if (randomBoolean()) {
            // add node
            if (state.nodes().getSize() < 10) {
                DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(createNode()).build();
                state = ClusterState.builder(state).nodes(newNodes).build();
                // always reroute after adding a node
                state = cluster.reroute(state, new ClusterRerouteRequest());
                updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
            }
        } else {
            // remove node
            if (state.nodes().getDataNodes().size() > 3) {
                DiscoveryNode discoveryNode = randomFrom(state.nodes().getNodes().values().toArray(DiscoveryNode.class));
                if (discoveryNode.equals(state.nodes().getMasterNode()) == false) {
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).remove(discoveryNode.getId()).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.deassociateDeadNodes(state, true, "removed and added a node");
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
                if (randomBoolean()) {
                    // and add it back
                    DiscoveryNodes newNodes = DiscoveryNodes.builder(state.nodes()).add(discoveryNode).build();
                    state = ClusterState.builder(state).nodes(newNodes).build();
                    state = cluster.reroute(state, new ClusterRerouteRequest());
                    updateNodes(state, clusterStateServiceMap, indicesServiceSupplier);
                }
            }
        }
    }
    return state;
}
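For reference, the request built in the create loop above can be constructed on its own. The sketch below is not part of the test; the index name and the shard/replica counts are placeholder values. ActiveShardCount.NONE makes the create call acknowledge without waiting for any shard copies to become active, which keeps a randomized test from blocking on allocation.

// Placeholder example, not from the test: build the same kind of CreateIndexRequest by hand.
Settings settings = Settings.builder()
    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)   // hypothetical shard count
    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) // hypothetical replica count
    .build();
CreateIndexRequest request = new CreateIndexRequest("my_index", settings)
    .waitForActiveShards(ActiveShardCount.NONE); // acknowledge without waiting for shard copies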
use of org.elasticsearch.action.admin.indices.create.CreateIndexRequest in project elasticsearch by elastic.
the class RestCreateIndexAction method prepareRequest.
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    CreateIndexRequest createIndexRequest = new CreateIndexRequest(request.param("index"));
    if (request.hasContent()) {
        createIndexRequest.source(request.content(), request.getXContentType());
    }
    createIndexRequest.updateAllTypes(request.paramAsBoolean("update_all_types", false));
    createIndexRequest.timeout(request.paramAsTime("timeout", createIndexRequest.timeout()));
    createIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", createIndexRequest.masterNodeTimeout()));
    createIndexRequest.waitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards")));
    return channel -> client.admin().indices().create(createIndexRequest,
        new AcknowledgedRestListener<CreateIndexResponse>(channel) {
            @Override
            public void addCustomFields(XContentBuilder builder, CreateIndexResponse response) throws IOException {
                response.addCustomFields(builder);
            }
        });
}
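The handler above only translates REST parameters onto the request; the same create call can be issued directly through the admin client. A minimal sketch, not taken from this class, where "my_index" and the settings are placeholders and client is any org.elasticsearch.client.Client:

// Hypothetical usage sketch: create an index via the indices admin client and wait for the response.
CreateIndexRequest createIndexRequest = new CreateIndexRequest("my_index")
    .settings(Settings.builder().put("index.number_of_shards", 1)); // placeholder settings
CreateIndexResponse response = client.admin().indices().create(createIndexRequest).actionGet();
assert response.isAcknowledged();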
use of org.elasticsearch.action.admin.indices.create.CreateIndexRequest in project elasticsearch by elastic.
the class TaskResultsService method storeResult.
public void storeResult(TaskResult taskResult, ActionListener<Void> listener) {
    ClusterState state = clusterService.state();
    if (state.routingTable().hasIndex(TASK_INDEX) == false) {
        CreateIndexRequest createIndexRequest = new CreateIndexRequest();
        createIndexRequest.settings(taskResultIndexSettings());
        createIndexRequest.index(TASK_INDEX);
        createIndexRequest.mapping(TASK_TYPE, taskResultIndexMapping(), XContentType.JSON);
        createIndexRequest.cause("auto(task api)");
        createIndexAction.execute(null, createIndexRequest, new ActionListener<CreateIndexResponse>() {
            @Override
            public void onResponse(CreateIndexResponse result) {
                doStoreResult(taskResult, listener);
            }

            @Override
            public void onFailure(Exception e) {
                if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) {
                    // we have the index, do it
                    try {
                        doStoreResult(taskResult, listener);
                    } catch (Exception inner) {
                        inner.addSuppressed(e);
                        listener.onFailure(inner);
                    }
                } else {
                    listener.onFailure(e);
                }
            }
        });
    } else {
        IndexMetaData metaData = state.getMetaData().index(TASK_INDEX);
        if (metaData.getMappings().containsKey(TASK_TYPE) == false) {
            // The index already exists but doesn't have our mapping
            client.admin().indices().preparePutMapping(TASK_INDEX)
                .setType(TASK_TYPE)
                .setSource(taskResultIndexMapping(), XContentType.JSON)
                .execute(new ActionListener<PutMappingResponse>() {
                    @Override
                    public void onResponse(PutMappingResponse putMappingResponse) {
                        doStoreResult(taskResult, listener);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        listener.onFailure(e);
                    }
                });
        } else {
            doStoreResult(taskResult, listener);
        }
    }
}
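The interesting part of this method is the create-then-tolerate-the-race pattern: the index is created eagerly, and a ResourceAlreadyExistsException from a concurrent creator is treated as success. A generalized sketch of just that pattern (method and parameter names are placeholders, not from TaskResultsService, and it goes through the admin client rather than a transport action):

// Generalized sketch: create an index up front and treat "already exists" as success,
// so concurrent callers racing to create it do not fail the overall operation.
private void ensureIndex(String index, ActionListener<Void> listener, Runnable andThen) {
    client.admin().indices().create(new CreateIndexRequest(index), new ActionListener<CreateIndexResponse>() {
        @Override
        public void onResponse(CreateIndexResponse response) {
            andThen.run();
        }

        @Override
        public void onFailure(Exception e) {
            if (ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException) {
                andThen.run(); // another caller created it first; the index is there
            } else {
                listener.onFailure(e);
            }
        }
    });
}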
use of org.elasticsearch.action.admin.indices.create.CreateIndexRequest in project elasticsearch by elastic.
the class ShrinkRequest method readFrom.
@Override
public void readFrom(StreamInput in) throws IOException {
    super.readFrom(in);
    shrinkIndexRequest = new CreateIndexRequest();
    shrinkIndexRequest.readFrom(in);
    sourceIndex = in.readString();
}
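Since stream serialization is symmetric, the matching writeTo would presumably mirror this field order. A sketch under that assumption, not copied from the class:

// Assumed counterpart, mirroring readFrom's order.
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    shrinkIndexRequest.writeTo(out); // embedded CreateIndexRequest first
    out.writeString(sourceIndex);    // then the source index name
}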
use of org.elasticsearch.action.admin.indices.create.CreateIndexRequest in project elasticsearch by elastic.
the class TransportShrinkAction method prepareCreateIndexRequest.
// static for unittesting this method
static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkRequest,
                                                                      final ClusterState state,
                                                                      final IntFunction<DocsStats> perShardDocStats,
                                                                      IndexNameExpressionResolver indexNameExpressionResolver) {
    final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex());
    final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest();
    final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index());
    final IndexMetaData metaData = state.metaData().index(sourceIndex);
    final Settings targetIndexSettings = Settings.builder()
        .put(targetIndex.settings())
        .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX)
        .build();
    int numShards = 1;
    if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) {
        numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings);
    }
    for (int i = 0; i < numShards; i++) {
        Set<ShardId> shardIds = IndexMetaData.selectShrinkShards(i, metaData, numShards);
        long count = 0;
        for (ShardId id : shardIds) {
            DocsStats docsStats = perShardDocStats.apply(id.id());
            if (docsStats != null) {
                count += docsStats.getCount();
            }
            if (count > IndexWriter.MAX_DOCS) {
                throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS
                    + "] docs - too many documents in shards " + shardIds);
            }
        }
    }
    if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
        throw new IllegalArgumentException("cannot provide a routing partition size value when shrinking an index");
    }
    targetIndex.cause("shrink_index");
    Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings);
    settingsBuilder.put("index.number_of_shards", numShards);
    targetIndex.settings(settingsBuilder);
    return new CreateIndexClusterStateUpdateRequest(targetIndex, "shrink_index", targetIndex.index(), targetIndexName, true)
        .ackTimeout(targetIndex.timeout())
        .masterNodeTimeout(targetIndex.masterNodeTimeout())
        .settings(targetIndex.settings())
        .aliases(targetIndex.aliases())
        .customs(targetIndex.customs())
        .waitForActiveShards(targetIndex.waitForActiveShards())
        .shrinkFrom(metaData.getIndex());
}
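The ShrinkRequest fed into this method embeds a CreateIndexRequest that carries the target index settings. A minimal construction sketch (the index names and shard count are placeholders, not from the source); the target's index.number_of_shards should evenly divide the source's shard count so that selectShrinkShards can group whole source shards onto each target shard:

// Placeholder example: build a shrink request whose embedded CreateIndexRequest
// supplies the target settings consumed by prepareCreateIndexRequest above.
ShrinkRequest shrinkRequest = new ShrinkRequest("target_index", "source_index");
shrinkRequest.getShrinkIndexRequest()
    .settings(Settings.builder().put("index.number_of_shards", 1)); // must evenly divide the source's shard count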