use of org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse in project OpenSearch by opensearch-project.
the class SnapshotBlocksIT method testRestoreSnapshotWithBlocks.
public void testRestoreSnapshotWithBlocks() {
    assertAcked(client().admin().indices().prepareDelete(INDEX_NAME, OTHER_INDEX_NAME));
    assertFalse(client().admin().indices().prepareExists(INDEX_NAME, OTHER_INDEX_NAME).get().isExists());
    logger.info("--> restoring a snapshot is blocked when the cluster is read only");
    try {
        setClusterReadOnly(true);
        assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), Metadata.CLUSTER_READ_ONLY_BLOCK);
    } finally {
        setClusterReadOnly(false);
    }
logger.info("--> creating a snapshot is allowed when the cluster is not read only");
    RestoreSnapshotResponse response = client().admin()
        .cluster()
        .prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
        .setWaitForCompletion(true)
        .execute()
        .actionGet();
    assertThat(response.status(), equalTo(RestStatus.OK));
    assertTrue(client().admin().indices().prepareExists(INDEX_NAME).get().isExists());
    assertTrue(client().admin().indices().prepareExists(OTHER_INDEX_NAME).get().isExists());
}
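The status check at the end of this test only holds because the restore is executed with setWaitForCompletion(true); when a restore is submitted without waiting, the response typically comes back as RestStatus.ACCEPTED with no RestoreInfo populated yet. A minimal sketch of the blocking variant, assuming a repository "my-repo" and snapshot "my-snap" already exist (both names are placeholders, not from the listing above):

// Minimal sketch, assuming repository "my-repo" and snapshot "my-snap" exist.
// Waiting for completion yields RestStatus.OK plus a populated RestoreInfo;
// without waiting, the call typically returns RestStatus.ACCEPTED and getRestoreInfo() is still null.
RestoreSnapshotResponse waited = client().admin()
    .cluster()
    .prepareRestoreSnapshot("my-repo", "my-snap")
    .setWaitForCompletion(true)
    .get();
assertThat(waited.status(), equalTo(RestStatus.OK));
assertThat(waited.getRestoreInfo().failedShards(), equalTo(0));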
use of org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse in project OpenSearch by opensearch-project.
the class ClusterShardLimitIT method testRestoreSnapshotOverLimit.
public void testRestoreSnapshotOverLimit() {
    Client client = client();
    logger.info("--> creating repository");
    Settings.Builder repoSettings = Settings.builder();
    repoSettings.put("location", randomRepoPath());
    repoSettings.put("compress", randomBoolean());
    repoSettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
    assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("fs").setSettings(repoSettings.build()));
    int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size();
    ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes);
    createIndex(
        "snapshot-index",
        Settings.builder()
            .put(indexSettings())
            .put(SETTING_NUMBER_OF_SHARDS, counts.getFailingIndexShards())
            .put(SETTING_NUMBER_OF_REPLICAS, counts.getFailingIndexReplicas())
            .build()
    );
    ensureGreen();
    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin()
        .cluster()
        .prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .setIndices("snapshot-index")
        .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    List<SnapshotInfo> snapshotInfos = client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots();
    assertThat(snapshotInfos.size(), equalTo(1));
    SnapshotInfo snapshotInfo = snapshotInfos.get(0);
    assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
    assertThat(snapshotInfo.version(), equalTo(Version.CURRENT));
    // Test restore after index deletion
    logger.info("--> delete indices");
    cluster().wipeIndices("snapshot-index");
    // Reduce the shard limit and fill it up
    setShardsPerNode(counts.getShardsPerNode());
    createIndex(
        "test-fill",
        Settings.builder()
            .put(indexSettings())
            .put(SETTING_NUMBER_OF_SHARDS, counts.getFirstIndexShards())
            .put(SETTING_NUMBER_OF_REPLICAS, counts.getFirstIndexReplicas())
            .build()
    );
    logger.info("--> restore one index after deletion");
    try {
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin()
            .cluster()
            .prepareRestoreSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true)
            .setIndices("snapshot-index")
            .execute()
            .actionGet();
        fail("Should not have been able to restore snapshot in full cluster");
    } catch (IllegalArgumentException e) {
        verifyException(dataNodes, counts, e);
    }
    ensureGreen();
    ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
    assertFalse(clusterState.getMetadata().hasIndex("snapshot-index"));
}
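The restore fails here because the cluster-wide shard limit has already been exhausted by the "test-fill" index. The setShardsPerNode helper is not shown in this listing; it presumably wraps a cluster settings update on the real cluster.max_shards_per_node setting, roughly as in the hypothetical sketch below:

// Hypothetical equivalent of setShardsPerNode(shardsPerNode): lower the per-node
// shard limit via a persistent cluster settings update. The setting name is the
// real cluster.max_shards_per_node setting; the helper's exact body is an assumption.
ClusterUpdateSettingsResponse settingsResponse = client().admin()
    .cluster()
    .prepareUpdateSettings()
    .setPersistentSettings(Settings.builder().put("cluster.max_shards_per_node", shardsPerNode))
    .get();
assertTrue(settingsResponse.isAcknowledged());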
use of org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse in project OpenSearch by opensearch-project.
the class OpenSearchBlobStoreRepositoryIntegTestCase method assertSuccessfulRestore.
protected static void assertSuccessfulRestore(RestoreSnapshotRequestBuilder requestBuilder) {
    RestoreSnapshotResponse response = requestBuilder.get();
    assertSuccessfulRestore(response);
}
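This overload simply executes the builder and delegates to an assertSuccessfulRestore(RestoreSnapshotResponse) variant that is not included in this listing. A plausible sketch of what that delegated assertion checks, based on the RestoreInfo carried by the response (the exact body is an assumption, not copied from the source):

// Sketch of the delegated assertion (assumed shape): the response's RestoreInfo
// should report at least one restored shard and no shard left unrestored.
protected static void assertSuccessfulRestore(RestoreSnapshotResponse response) {
    RestoreInfo restoreInfo = response.getRestoreInfo();
    assertThat(restoreInfo.successfulShards(), greaterThan(0));
    assertThat(restoreInfo.successfulShards(), equalTo(restoreInfo.totalShards()));
}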
use of org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse in project OpenSearch by opensearch-project.
the class SnapshotResiliencyTests method testSuccessfulSnapshotAndRestore.
public void testSuccessfulSnapshotAndRestore() {
    setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
    String repoName = "repo";
    String snapshotName = "snapshot";
    final String index = "test";
    final int shards = randomIntBetween(1, 10);
    final int documents = randomIntBetween(0, 100);
    final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
        testClusterNodes.nodes.values().iterator().next().clusterService.state()
    );
    final StepListener<CreateSnapshotResponse> createSnapshotResponseListener = new StepListener<>();
    continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> {
        final Runnable afterIndexing = () -> client().admin()
            .cluster()
            .prepareCreateSnapshot(repoName, snapshotName)
            .setWaitForCompletion(true)
            .execute(createSnapshotResponseListener);
        if (documents == 0) {
            afterIndexing.run();
        } else {
            final BulkRequest bulkRequest = new BulkRequest().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
            for (int i = 0; i < documents; ++i) {
                bulkRequest.add(new IndexRequest(index).source(Collections.singletonMap("foo", "bar" + i)));
            }
            final StepListener<BulkResponse> bulkResponseStepListener = new StepListener<>();
            client().bulk(bulkRequest, bulkResponseStepListener);
            continueOrDie(bulkResponseStepListener, bulkResponse -> {
                assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures());
                assertEquals(documents, bulkResponse.getItems().length);
                afterIndexing.run();
            });
        }
    });
    final StepListener<AcknowledgedResponse> deleteIndexListener = new StepListener<>();
    continueOrDie(
        createSnapshotResponseListener,
        createSnapshotResponse -> client().admin().indices().delete(new DeleteIndexRequest(index), deleteIndexListener)
    );
    final StepListener<RestoreSnapshotResponse> restoreSnapshotResponseListener = new StepListener<>();
    continueOrDie(
        deleteIndexListener,
        ignored -> client().admin()
            .cluster()
            .restoreSnapshot(new RestoreSnapshotRequest(repoName, snapshotName).waitForCompletion(true), restoreSnapshotResponseListener)
    );
    final StepListener<SearchResponse> searchResponseListener = new StepListener<>();
    continueOrDie(restoreSnapshotResponseListener, restoreSnapshotResponse -> {
        assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards());
        client().search(new SearchRequest(index).source(new SearchSourceBuilder().size(0).trackTotalHits(true)), searchResponseListener);
    });
    final AtomicBoolean documentCountVerified = new AtomicBoolean();
    continueOrDie(searchResponseListener, r -> {
        assertEquals(documents, Objects.requireNonNull(r.getHits().getTotalHits()).value);
        documentCountVerified.set(true);
    });
    runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L));
    assertNotNull(createSnapshotResponseListener.result());
    assertNotNull(restoreSnapshotResponseListener.result());
    assertTrue(documentCountVerified.get());
    SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
    assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
    final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
    Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
    assertThat(snapshotIds, hasSize(1));
    final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
    assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
    assertThat(snapshotInfo.indices(), containsInAnyOrder(index));
    assertEquals(shards, snapshotInfo.successfulShards());
    assertEquals(0, snapshotInfo.failedShards());
}
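The whole index-snapshot-delete-restore-search sequence above is driven by chained StepListener instances. The continueOrDie helper is not shown in this listing; conceptually it registers the next step on the previous listener and turns any failure into a test error, roughly as in the sketch below (the exact implementation in SnapshotResiliencyTests may differ):

// Rough sketch of the chaining helper (assumed shape, not copied from the source):
// run the next step when the previous listener completes successfully, and fail
// the test if it completed exceptionally.
private static <T> void continueOrDie(StepListener<T> listener, CheckedConsumer<T, Exception> onResponse) {
    listener.whenComplete(onResponse, e -> {
        throw new AssertionError(e);
    });
}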
use of org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse in project OpenSearch by opensearch-project.
the class SnapshotResiliencyTests method testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates.
public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() {
    setupTestCluster(randomFrom(1, 3, 5), randomIntBetween(2, 10));
    String repoName = "repo";
    String snapshotName = "snapshot";
    final String index = "test";
    final int shards = randomIntBetween(1, 10);
    final int documents = randomIntBetween(2, 100);
    TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager(
        testClusterNodes.nodes.values().iterator().next().clusterService.state()
    );
    final StepListener<CreateSnapshotResponse> createSnapshotResponseStepListener = new StepListener<>();
    continueOrDie(createRepoAndIndex(repoName, index, shards), createIndexResponse -> {
        final AtomicBoolean initiatedSnapshot = new AtomicBoolean(false);
        for (int i = 0; i < documents; ++i) {
            // Index a few documents with different field names so we trigger a dynamic mapping update for each of them
            client().bulk(
                new BulkRequest().add(new IndexRequest(index).source(Collections.singletonMap("foo" + i, "bar")))
                    .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE),
                assertNoFailureListener(bulkResponse -> {
                    assertFalse("Failures in bulk response: " + bulkResponse.buildFailureMessage(), bulkResponse.hasFailures());
                    if (initiatedSnapshot.compareAndSet(false, true)) {
                        client().admin()
                            .cluster()
                            .prepareCreateSnapshot(repoName, snapshotName)
                            .setWaitForCompletion(true)
                            .execute(createSnapshotResponseStepListener);
                    }
                })
            );
        }
    });
    final String restoredIndex = "restored";
    final StepListener<RestoreSnapshotResponse> restoreSnapshotResponseStepListener = new StepListener<>();
    continueOrDie(
        createSnapshotResponseStepListener,
        createSnapshotResponse -> client().admin()
            .cluster()
            .restoreSnapshot(
                new RestoreSnapshotRequest(repoName, snapshotName).renamePattern(index)
                    .renameReplacement(restoredIndex)
                    .waitForCompletion(true),
                restoreSnapshotResponseStepListener
            )
    );
    final StepListener<SearchResponse> searchResponseStepListener = new StepListener<>();
    continueOrDie(restoreSnapshotResponseStepListener, restoreSnapshotResponse -> {
        assertEquals(shards, restoreSnapshotResponse.getRestoreInfo().totalShards());
        client().search(
            new SearchRequest(restoredIndex).source(new SearchSourceBuilder().size(documents).trackTotalHits(true)),
            searchResponseStepListener
        );
    });
    final AtomicBoolean documentCountVerified = new AtomicBoolean();
    continueOrDie(searchResponseStepListener, r -> {
        final long hitCount = r.getHits().getTotalHits().value;
        assertThat(
            "Documents were restored but the restored index mapping was older than some documents and misses some of their fields",
            (int) hitCount,
            lessThanOrEqualTo(
                ((Map<?, ?>) clusterManagerNode.clusterService.state()
                    .metadata()
                    .index(restoredIndex)
                    .mapping()
                    .sourceAsMap()
                    .get("properties")).size()
            )
        );
        documentCountVerified.set(true);
    });
    runUntil(documentCountVerified::get, TimeUnit.MINUTES.toMillis(5L));
    assertNotNull(createSnapshotResponseStepListener.result());
    assertNotNull(restoreSnapshotResponseStepListener.result());
    SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE);
    assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false));
    final Repository repository = clusterManagerNode.repositoriesService.repository(repoName);
    Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
    assertThat(snapshotIds, hasSize(1));
    final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotIds.iterator().next());
    assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
    assertThat(snapshotInfo.indices(), containsInAnyOrder(index));
    assertEquals(shards, snapshotInfo.successfulShards());
    assertEquals(0, snapshotInfo.failedShards());
}
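This test restores the snapshot under a new index name by calling renamePattern and renameReplacement on the transport-level RestoreSnapshotRequest while bulk indexing may still be racing with the snapshot. The same rename can be expressed through the client builder API; a small hedged sketch, with repository and snapshot names as placeholders rather than values taken from the test above:

// Hedged sketch: restore the index "test" from an existing snapshot under the new
// name "restored" via the builder API instead of a raw RestoreSnapshotRequest.
RestoreSnapshotResponse renamed = client().admin()
    .cluster()
    .prepareRestoreSnapshot("repo", "snapshot")
    .setRenamePattern("test")
    .setRenameReplacement("restored")
    .setWaitForCompletion(true)
    .get();
assertEquals(0, renamed.getRestoreInfo().failedShards());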