Use of org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse in project crate by crate.
From the class RepositoryService, method execute:
public CompletableFuture<Long> execute(CreateRepositoryAnalyzedStatement statement) {
    final CompletableFuture<Long> result = new CompletableFuture<>();
    final String repoName = statement.repositoryName();
    PutRepositoryRequest request = new PutRepositoryRequest(repoName);
    request.type(statement.repositoryType());
    request.settings(statement.settings());
    putRepositoryAction.execute(request, new ActionListener<PutRepositoryResponse>() {

        @Override
        public void onResponse(PutRepositoryResponse putRepositoryResponse) {
            result.complete(1L);
        }

        @Override
        public void onFailure(Throwable e) {
            final Throwable t = convertRepositoryException(e);
            // If the put-repository action fails during the verification phase, the repository
            // has already been created, but an exception is raised anyway.
            // --> drop the repository, then return the exception to the user.
            dropIfExists(repoName, new Runnable() {

                @Override
                public void run() {
                    result.completeExceptionally(t);
                }
            });
        }
    });
    return result;
}
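For comparison, the same kind of request can be submitted through Elasticsearch's standard cluster admin client, which also reports the result via an ActionListener<PutRepositoryResponse>. The following is a minimal sketch, not taken from either project; the client instance, repository name, and location path are assumed placeholders, and imports are omitted as in the snippets above.

PutRepositoryRequest request = new PutRepositoryRequest("my_repo");
request.type("fs");
request.settings(Settings.builder().put("location", "/mount/backups/my_repo").build());
client.admin().cluster().putRepository(request, new ActionListener<PutRepositoryResponse>() {

    @Override
    public void onResponse(PutRepositoryResponse response) {
        // acknowledged once the master has applied the repository to the cluster state
        assert response.isAcknowledged();
    }

    @Override
    public void onFailure(Throwable e) {
        // assumption: same Throwable-based listener signature as the crate snippet above;
        // newer Elasticsearch versions declare onFailure(Exception) instead
        e.printStackTrace();
    }
});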
Use of org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse in project elasticsearch by elastic.
From the class BlobStoreRepositoryTests, method testRetrieveSnapshots:
public void testRetrieveSnapshots() throws Exception {
    final Client client = client();
    final Path location = ESIntegTestCase.randomRepoPath(node().settings());
    final String repositoryName = "test-repo";
    logger.info("--> creating repository");
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster()
        .preparePutRepository(repositoryName)
        .setType("fs")
        .setSettings(Settings.builder().put(node().settings()).put("location", location))
        .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    logger.info("--> creating an index and indexing documents");
    final String indexName = "test-idx";
    createIndex(indexName);
    ensureGreen();
    int numDocs = randomIntBetween(10, 20);
    for (int i = 0; i < numDocs; i++) {
        String id = Integer.toString(i);
        client().prepareIndex(indexName, "type1", id).setSource("text", "sometext").get();
    }
    client().admin().indices().prepareFlush(indexName).get();
    logger.info("--> create first snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
        .prepareCreateSnapshot(repositoryName, "test-snap-1")
        .setWaitForCompletion(true)
        .setIndices(indexName)
        .get();
    final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId();
    logger.info("--> create second snapshot");
    createSnapshotResponse = client.admin().cluster()
        .prepareCreateSnapshot(repositoryName, "test-snap-2")
        .setWaitForCompletion(true)
        .setIndices(indexName)
        .get();
    final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId();
    logger.info("--> make sure the node's repository can resolve the snapshots");
    final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
    @SuppressWarnings("unchecked") final BlobStoreRepository repository =
        (BlobStoreRepository) repositoriesService.repository(repositoryName);
    final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2);
    List<SnapshotId> snapshotIds = repository.getRepositoryData().getSnapshotIds().stream()
        .sorted((s1, s2) -> s1.getName().compareTo(s2.getName()))
        .collect(Collectors.toList());
    assertThat(snapshotIds, equalTo(originalSnapshots));
}
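The check above reads snapshot IDs directly from the BlobStoreRepository instance. A minimal sketch of an equivalent check through the public Get Snapshots API, assuming the same test context (same client and repositoryName as above):

List<SnapshotInfo> snapshots = client.admin().cluster()
    .prepareGetSnapshots(repositoryName)
    .get()
    .getSnapshots();
// both "test-snap-1" and "test-snap-2" should be reported by the repository
assertThat(snapshots.size(), equalTo(2));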
Use of org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse in project elasticsearch by elastic.
From the class DedicatedClusterSnapshotRestoreIT, method testRestoreIndexWithShardsMissingInLocalGateway:
public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception {
    logger.info("--> start 2 nodes");
    Settings nodeSettings = Settings.builder()
        .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)
        .build();
    internalCluster().startNode(nodeSettings);
    internalCluster().startNode(nodeSettings);
    cluster().wipeIndices("_all");
    logger.info("--> create repository");
    PutRepositoryResponse putRepositoryResponse = client().admin().cluster()
        .preparePutRepository("test-repo")
        .setType("fs")
        .setSettings(Settings.builder().put("location", randomRepoPath()))
        .execute().actionGet();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    int numberOfShards = 6;
    logger.info("--> create an index that will have some unallocated shards");
    assertAcked(prepareCreate("test-idx", 2, Settings.builder()
        .put("number_of_shards", numberOfShards)
        .put("number_of_replicas", 0)));
    ensureGreen();
    logger.info("--> indexing some data into test-idx");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    logger.info("--> start snapshot");
    assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
        .setIndices("test-idx")
        .setWaitForCompletion(true)
        .get().getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS));
    logger.info("--> close the index");
    assertAcked(client().admin().indices().prepareClose("test-idx"));
    logger.info("--> shutdown one of the nodes that should make half of the shards unavailable");
    internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() {

        @Override
        public boolean clearData(String nodeName) {
            return true;
        }
    });
    assertThat(client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID)
        .setTimeout("1m")
        .setWaitForNodes("2")
        .execute().actionGet().isTimedOut(), equalTo(false));
    logger.info("--> restore index snapshot");
    assertThat(client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1")
        .setRestoreGlobalState(false)
        .setWaitForCompletion(true)
        .get().getRestoreInfo().successfulShards(), equalTo(6));
    ensureGreen("test-idx");
    assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    IntSet reusedShards = new IntHashSet();
    for (RecoveryState recoveryState : client().admin().indices().prepareRecoveries("test-idx").get()
            .shardRecoveryStates().get("test-idx")) {
        if (recoveryState.getIndex().reusedBytes() > 0) {
            reusedShards.add(recoveryState.getShardId().getId());
        }
    }
    logger.info("--> check that at least half of the shards had some reuse: [{}]", reusedShards);
    assertThat(reusedShards.size(), greaterThanOrEqualTo(numberOfShards / 2));
}
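A restore does not have to bring the index back under its original name: the same request builder used above also supports rename patterns. A minimal sketch of that variant (the "restored-idx" name is an illustrative placeholder, not part of the test):

RestoreSnapshotResponse restoreResponse = client().admin().cluster()
    .prepareRestoreSnapshot("test-repo", "test-snap-1")
    .setIndices("test-idx")
    .setRenamePattern("test-idx")
    .setRenameReplacement("restored-idx")
    .setWaitForCompletion(true)
    .get();
// the snapshot shards are restored into the renamed index
assertThat(restoreResponse.getRestoreInfo().failedShards(), equalTo(0));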
Use of org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse in project elasticsearch by elastic.
From the class RepositoriesIT, method testRepositoryAckTimeout:
public void testRepositoryAckTimeout() throws Exception {
    logger.info("--> creating repository test-repo-1 with 0s timeout - shouldn't ack");
    PutRepositoryResponse putRepositoryResponse = client().admin().cluster()
        .preparePutRepository("test-repo-1")
        .setType("fs")
        .setSettings(Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES))
        .setTimeout("0s")
        .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(false));
    logger.info("--> creating repository test-repo-2 with standard timeout - should ack");
    putRepositoryResponse = client().admin().cluster()
        .preparePutRepository("test-repo-2")
        .setType("fs")
        .setSettings(Settings.builder()
            .put("location", randomRepoPath())
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(5, 100), ByteSizeUnit.BYTES))
        .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    logger.info("--> deleting repository test-repo-2 with 0s timeout - shouldn't ack");
    DeleteRepositoryResponse deleteRepositoryResponse = client().admin().cluster()
        .prepareDeleteRepository("test-repo-2")
        .setTimeout("0s")
        .get();
    assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(false));
    logger.info("--> deleting repository test-repo-1 with standard timeout - should ack");
    deleteRepositoryResponse = client().admin().cluster().prepareDeleteRepository("test-repo-1").get();
    assertThat(deleteRepositoryResponse.isAcknowledged(), equalTo(true));
}
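The same zero acknowledgement timeout can be set on a PutRepositoryRequest directly rather than through the request builder. A minimal sketch under that assumption (repoPath stands in for a repository location, imports omitted as above):

PutRepositoryRequest request = new PutRepositoryRequest("test-repo-1")
    .type("fs")
    .settings(Settings.builder().put("location", repoPath).build());
request.timeout("0s"); // do not wait for acknowledgement from the other nodes
PutRepositoryResponse response = client().admin().cluster().putRepository(request).actionGet();
// with a 0s timeout the response is expected not to be acknowledged
assertThat(response.isAcknowledged(), equalTo(false));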
Use of org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse in project elasticsearch by elastic.
From the class DedicatedClusterSnapshotRestoreIT, method testSnapshotWithStuckNode:
public void testSnapshotWithStuckNode() throws Exception {
    logger.info("--> start 2 nodes");
    ArrayList<String> nodes = new ArrayList<>();
    nodes.add(internalCluster().startNode());
    nodes.add(internalCluster().startNode());
    Client client = client();
    assertAcked(prepareCreate("test-idx", 2, Settings.builder()
        .put("number_of_shards", 2)
        .put("number_of_replicas", 0)));
    ensureGreen();
    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    logger.info("--> creating repository");
    Path repo = randomRepoPath();
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster()
        .preparePutRepository("test-repo")
        .setType("mock")
        .setSettings(Settings.builder()
            .put("location", repo)
            .put("random", randomAsciiOfLength(10))
            .put("wait_after_unblock", 200))
        .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    // Pick one node and block it
    String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
    // Remove it from the list of available nodes
    nodes.remove(blockedNode);
    int numberOfFilesBeforeSnapshot = numberOfFiles(repo);
    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(false)
        .setIndices("test-idx")
        .get();
    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
    logger.info("--> execution was blocked on node [{}], aborting snapshot", blockedNode);
    ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture =
        internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
    // Make sure that abort makes some progress
    Thread.sleep(100);
    unblockNode("test-repo", blockedNode);
    logger.info("--> stopping node [{}]", blockedNode);
    stopNode(blockedNode);
    try {
        DeleteSnapshotResponse deleteSnapshotResponse = deleteSnapshotResponseFuture.actionGet();
        assertThat(deleteSnapshotResponse.isAcknowledged(), equalTo(true));
    } catch (SnapshotMissingException ex) {
        // When the master node is closed during this test, it sometimes manages to delete the snapshot files
        // before completely stopping. In this case the retried delete snapshot operation on the new master
        // can fail with SnapshotMissingException.
    }
    logger.info("--> making sure that snapshot no longer exists");
    assertThrows(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute(), SnapshotMissingException.class);
    // Subtract three files that will remain in the repository:
    //   (1) index-1,
    //   (2) index-0 (because we keep the previous version) and
    //   (3) index-latest
    assertThat("not all files were deleted during snapshot cancellation",
        numberOfFilesBeforeSnapshot, equalTo(numberOfFiles(repo) - 3));
    logger.info("--> done");
}
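An alternative way to confirm that the aborted snapshot is gone, instead of expecting SnapshotMissingException, is to list snapshots with ignore_unavailable. This is a minimal sketch and assumes a version of the Get Snapshots request builder that exposes setIgnoreUnavailable:

List<SnapshotInfo> remaining = client().admin().cluster()
    .prepareGetSnapshots("test-repo")
    .setIgnoreUnavailable(true)
    .get()
    .getSnapshots();
// the aborted "test-snap" should no longer be listed in the repository
assertThat(remaining.size(), equalTo(0));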