Use of java.nio.file.Path in project elasticsearch by elastic: class SharedClusterSnapshotRestoreIT, method testDeleteRepositoryWhileSnapshotting.
public void testDeleteRepositoryWhileSnapshotting() throws Exception {
    Client client = client();
    Path repositoryLocation = randomRepoPath();
    logger.info("--> creating repository");
    PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
        .setType("mock")
        .setSettings(Settings.builder()
            .put("location", repositoryLocation)
            .put("random", randomAsciiOfLength(10))
            .put("wait_after_unblock", 200))
        .get();
    assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
    // Create index on 2 nodes and make sure each node has a primary by setting no replicas
    assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_replicas", 0)));
    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    // Pick one node and block it
    String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
    logger.info("--> snapshot");
    client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(false)
        .setIndices("test-idx")
        .get();
    logger.info("--> waiting for block to kick in");
    waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
    logger.info("--> execution was blocked on node [{}], trying to delete repository", blockedNode);
    try {
        client.admin().cluster().prepareDeleteRepository("test-repo").execute().get();
        fail("shouldn't be able to delete in-use repository");
    } catch (Exception ex) {
        logger.info("--> in-use repository deletion failed");
    }
    logger.info("--> trying to move repository to another location");
    try {
        client.admin().cluster().preparePutRepository("test-repo")
            .setType("fs")
            .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test")))
            .get();
        fail("shouldn't be able to replace in-use repository");
    } catch (Exception ex) {
        logger.info("--> in-use repository replacement failed");
    }
    logger.info("--> trying to create a repository with different name");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
        .setType("fs")
        .setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))));
    logger.info("--> unblocking blocked node");
    unblockNode("test-repo", blockedNode);
    logger.info("--> waiting for completion");
    SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
    logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
    logger.info("--> done");
    List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots("test-repo")
        .setSnapshots("test-snap")
        .get()
        .getSnapshots();
    assertThat(snapshotInfos.size(), equalTo(1));
    assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
    assertThat(snapshotInfos.get(0).shardFailures().size(), equalTo(0));
    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");
    logger.info("--> replace mock repository with real one at the same location");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
        .setType("fs")
        .setSettings(Settings.builder().put("location", repositoryLocation)));
    logger.info("--> restore index");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .execute()
        .actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
}
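
The test above leans on a single java.nio.file.Path: randomRepoPath() supplies the repository location, and resolve("test") derives the sub-directory used for the attempted replacement repository. The following standalone sketch isolates just that Path handling; the temp directory and the plain settings map are stand-ins for the test framework, not Elasticsearch APIs.

import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

public class RepoPathDemo {
    public static void main(String[] args) throws Exception {
        // stand-in for randomRepoPath(): a throwaway directory for the repository
        Path repositoryLocation = Files.createTempDirectory("test-repo");

        // repository settings carry the location as a string, as in put("location", repositoryLocation)
        Map<String, String> repoSettings = new HashMap<>();
        repoSettings.put("location", repositoryLocation.toString());

        // the "move repository" attempt points at a sub-directory of the same path
        Path movedLocation = repositoryLocation.resolve("test");
        System.out.println("original: " + repoSettings.get("location"));
        System.out.println("moved:    " + movedLocation.toAbsolutePath());
    }
}
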
Use of java.nio.file.Path in project elasticsearch by elastic: class SharedClusterSnapshotRestoreIT, method testGetSnapshotsRequest.
public void testGetSnapshotsRequest() throws Exception {
    final String repositoryName = "test-repo";
    final String indexName = "test-idx";
    final Client client = client();
    final Path repo = randomRepoPath();
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    assertAcked(client.admin().cluster().preparePutRepository(repositoryName)
        .setType("mock")
        .setSettings(Settings.builder()
            .put("location", repo)
            .put("compress", false)
            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
            .put("wait_after_unblock", 200)));
    logger.info("--> get snapshots on an empty repository");
    expectThrows(SnapshotMissingException.class,
        () -> client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot").get());
    // with ignore unavailable set to true, should not throw an exception
    GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName)
        .setIgnoreUnavailable(true)
        .addSnapshots("non-existent-snapshot")
        .get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
    logger.info("--> creating an index and indexing documents");
    // Create index on 1 node with no replicas so there is a single primary shard
    assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
    ensureGreen();
    for (int i = 0; i < 10; i++) {
        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    // make sure we return only the in-progress snapshot when taking the first snapshot on a clean repository
    // take initial snapshot with a block, making sure we only get 1 in-progress snapshot returned
    // block a node so the create snapshot operation can remain in progress
    final String initialBlockedNode = blockNodeWithIndex(repositoryName, indexName);
    ListenableActionFuture<CreateSnapshotResponse> responseListener = client.admin().cluster()
        .prepareCreateSnapshot(repositoryName, "snap-on-empty-repo")
        .setWaitForCompletion(false)
        .setIndices(indexName)
        .execute();
    // wait for block to kick in
    waitForBlock(initialBlockedNode, repositoryName, TimeValue.timeValueSeconds(60));
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo")
        .setSnapshots(randomFrom("_all", "_current", "snap-on-*", "*-on-empty-repo", "snap-on-empty-repo"))
        .get();
    assertEquals(1, getSnapshotsResponse.getSnapshots().size());
    assertEquals("snap-on-empty-repo", getSnapshotsResponse.getSnapshots().get(0).snapshotId().getName());
    // unblock node
    unblockNode(repositoryName, initialBlockedNode);
    // timeout after 10 seconds
    responseListener.actionGet(TimeValue.timeValueMillis(10000L));
    client.admin().cluster().prepareDeleteSnapshot(repositoryName, "snap-on-empty-repo").get();
    final int numSnapshots = randomIntBetween(1, 3) + 1;
    logger.info("--> take {} snapshot(s)", numSnapshots - 1);
    final String[] snapshotNames = new String[numSnapshots];
    for (int i = 0; i < numSnapshots - 1; i++) {
        final String snapshotName = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster()
            .prepareCreateSnapshot(repositoryName, snapshotName)
            .setWaitForCompletion(true)
            .setIndices(indexName)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        snapshotNames[i] = snapshotName;
    }
    logger.info("--> take another snapshot to be in-progress");
    // add documents so there are data files to block on
    for (int i = 10; i < 20; i++) {
        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    final String inProgressSnapshot = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
    snapshotNames[numSnapshots - 1] = inProgressSnapshot;
    // block a node so the create snapshot operation can remain in progress
    final String blockedNode = blockNodeWithIndex(repositoryName, indexName);
    client.admin().cluster().prepareCreateSnapshot(repositoryName, inProgressSnapshot)
        .setWaitForCompletion(false)
        .setIndices(indexName)
        .get();
    // wait for block to kick in
    waitForBlock(blockedNode, repositoryName, TimeValue.timeValueSeconds(60));
    logger.info("--> get all snapshots with a current in-progress");
    // with ignore unavailable set to true, should not throw an exception
    final List<String> snapshotsToGet = new ArrayList<>();
    if (randomBoolean()) {
        // use _current plus the individual names of the finished snapshots
        snapshotsToGet.add("_current");
        for (int i = 0; i < numSnapshots - 1; i++) {
            snapshotsToGet.add(snapshotNames[i]);
        }
    } else {
        snapshotsToGet.add("_all");
    }
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName)
        .setSnapshots(snapshotsToGet.toArray(Strings.EMPTY_ARRAY))
        .get();
    List<String> sortedNames = Arrays.asList(snapshotNames);
    Collections.sort(sortedNames);
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()),
        equalTo(sortedNames));
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots(snapshotNames).get();
    sortedNames = Arrays.asList(snapshotNames);
    Collections.sort(sortedNames);
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()),
        equalTo(sortedNames));
    logger.info("--> make sure duplicates are not returned in the response");
    String regexName = snapshotNames[randomIntBetween(0, numSnapshots - 1)];
    final int splitPos = regexName.length() / 2;
    final String firstRegex = regexName.substring(0, splitPos) + "*";
    final String secondRegex = "*" + regexName.substring(splitPos);
    getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots(repositoryName)
        .addSnapshots(snapshotNames)
        .addSnapshots(firstRegex, secondRegex)
        .get();
    assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
    assertThat(getSnapshotsResponse.getSnapshots().stream().map(s -> s.snapshotId().getName()).sorted().collect(Collectors.toList()),
        equalTo(sortedNames));
    // unblock node
    unblockNode(repositoryName, blockedNode);
    waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60));
}
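
The duplicate check at the end resolves explicit names and two overlapping wildcard patterns to a single set of snapshots. A minimal sketch of that idea, assuming a naive '*'-only wildcard and plain Java collections rather than Elasticsearch's own pattern resolution:

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

public class SnapshotNameResolution {
    // naive '*' wildcard match, standing in for Elasticsearch's pattern handling
    static boolean wildcardMatch(String pattern, String value) {
        return value.matches(pattern.replace("*", ".*"));
    }

    public static void main(String[] args) {
        List<String> existing = Arrays.asList("snap-aaaa", "snap-bbbb", "snap-cccc");
        List<String> requested = Arrays.asList("snap-aaaa", "snap-*", "*-bbbb");

        // a set keeps each snapshot once even when several patterns match it,
        // mirroring the duplicate check in the test above
        Set<String> resolved = new TreeSet<>();
        for (String request : requested) {
            for (String name : existing) {
                if (request.contains("*") ? wildcardMatch(request, name) : request.equals(name)) {
                    resolved.add(name);
                }
            }
        }
        System.out.println(resolved); // [snap-aaaa, snap-bbbb, snap-cccc] - no duplicates
    }
}
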
Use of java.nio.file.Path in project elasticsearch by elastic: class SharedClusterSnapshotRestoreIT, method testThrottling.
public void testThrottling() throws Exception {
    Client client = client();
    logger.info("--> creating repository");
    Path repositoryLocation = randomRepoPath();
    boolean throttleSnapshot = randomBoolean();
    boolean throttleRestore = randomBoolean();
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
        .setType("fs")
        .setSettings(Settings.builder()
            .put("location", repositoryLocation)
            .put("compress", randomBoolean())
            .put("chunk_size", randomIntBetween(1000, 10000), ByteSizeUnit.BYTES)
            .put("max_restore_bytes_per_sec", throttleRestore ? "10k" : "0")
            .put("max_snapshot_bytes_per_sec", throttleSnapshot ? "10k" : "0")));
    createIndex("test-idx");
    ensureGreen();
    logger.info("--> indexing some data");
    for (int i = 0; i < 100; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    logger.info("--> snapshot");
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .setIndices("test-idx")
        .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
        equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");
    logger.info("--> restore index");
    RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .execute()
        .actionGet();
    assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    long snapshotPause = 0L;
    long restorePause = 0L;
    for (RepositoriesService repositoriesService : internalCluster().getDataNodeInstances(RepositoriesService.class)) {
        snapshotPause += repositoriesService.repository("test-repo").getSnapshotThrottleTimeInNanos();
        restorePause += repositoriesService.repository("test-repo").getRestoreThrottleTimeInNanos();
    }
    if (throttleSnapshot) {
        assertThat(snapshotPause, greaterThan(0L));
    } else {
        assertThat(snapshotPause, equalTo(0L));
    }
    if (throttleRestore) {
        assertThat(restorePause, greaterThan(0L));
    } else {
        assertThat(restorePause, equalTo(0L));
    }
}
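
getSnapshotThrottleTimeInNanos() and getRestoreThrottleTimeInNanos() report how long the repository paused to honor max_snapshot_bytes_per_sec and max_restore_bytes_per_sec, which is why the pause counters are greater than zero exactly when a "10k" limit was set. A crude standalone sketch of how such a pause counter accumulates; the limiter below is an illustration, not the repository's actual rate limiter:

public class ThrottleDemo {
    // crude bytes-per-second limiter: the write itself is instantaneous here,
    // so the pause is roughly the full transfer time at the configured rate
    static long writeThrottled(long bytes, long maxBytesPerSec) throws InterruptedException {
        long expectedMillis = bytes * 1000L / maxBytesPerSec;
        long pausedNanos = 0L;
        if (expectedMillis > 0) {
            long start = System.nanoTime();
            Thread.sleep(expectedMillis);
            pausedNanos = System.nanoTime() - start;
        }
        return pausedNanos;
    }

    public static void main(String[] args) throws InterruptedException {
        long totalPauseNanos = 0L;
        // 10 chunks of 1 KB against a 10 KB/s limit, as with "max_snapshot_bytes_per_sec": "10k"
        for (int i = 0; i < 10; i++) {
            totalPauseNanos += writeThrottled(1024, 10 * 1024);
        }
        // a throttled run reports a positive pause, matching assertThat(snapshotPause, greaterThan(0L))
        System.out.println("accumulated pause (nanos): " + totalPauseNanos);
    }
}
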
Use of java.nio.file.Path in project elasticsearch by elastic: class SharedClusterSnapshotRestoreIT, method testSnapshotSucceedsAfterSnapshotFailure.
public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception {
    logger.info("--> creating repository");
    final Path repoPath = randomRepoPath();
    final Client client = client();
    assertAcked(client.admin().cluster().preparePutRepository("test-repo")
        .setType("mock")
        .setVerify(false)
        .setSettings(Settings.builder()
            .put("location", repoPath)
            .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f)
            .put("atomic_move", false)
            .put("random", randomAsciiOfLength(10))));
    logger.info("--> indexing some data");
    assertAcked(prepareCreate("test-idx").setSettings(
        // that caused problems with writing subsequent snapshots if they happened to be lingering in the repository
        Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)));
    ensureGreen();
    final int numDocs = randomIntBetween(1, 5);
    for (int i = 0; i < numDocs; i++) {
        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
    }
    refresh();
    assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));
    logger.info("--> snapshot with potential I/O failures");
    try {
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
            .setWaitForCompletion(true)
            .setIndices("test-idx")
            .get();
        if (createSnapshotResponse.getSnapshotInfo().totalShards() != createSnapshotResponse.getSnapshotInfo().successfulShards()) {
            assertThat(getFailureCount("test-repo"), greaterThan(0L));
            assertThat(createSnapshotResponse.getSnapshotInfo().shardFailures().size(), greaterThan(0));
            for (SnapshotShardFailure shardFailure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
                assertThat(shardFailure.reason(), containsString("Random IOException"));
            }
        }
    } catch (SnapshotCreationException | RepositoryException ex) {
        // sometimes, the snapshot will fail with a top level I/O exception
        assertThat(ExceptionsHelper.stackTrace(ex), containsString("Random IOException"));
    }
    logger.info("--> snapshot with no I/O failures");
    assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
        .setType("mock")
        .setSettings(Settings.builder().put("location", repoPath)));
    CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo-2", "test-snap-2")
        .setWaitForCompletion(true)
        .setIndices("test-idx")
        .get();
    assertEquals(0, createSnapshotResponse.getSnapshotInfo().failedShards());
    GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo-2")
        .addSnapshots("test-snap-2")
        .get();
    assertEquals(SnapshotState.SUCCESS, getSnapshotsResponse.getSnapshots().get(0).state());
}
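
The "mock" repository's random_control_io_exception_rate setting makes a fraction of blob writes fail with a "Random IOException", which is what the shard-failure assertions above look for. A simplified sketch of that failure-injection idea, with a hypothetical FlakyWriter class standing in for the mock repository:

import java.io.IOException;
import java.util.Random;

public class FlakyWriter {
    private final Random random;
    private final double ioExceptionRate; // e.g. randomIntBetween(5, 20) / 100f in the test above

    FlakyWriter(long seed, double ioExceptionRate) {
        this.random = new Random(seed);
        this.ioExceptionRate = ioExceptionRate;
    }

    void writeBlob(String name) throws IOException {
        // fail a configurable fraction of writes, as the "Random IOException" assertions expect
        if (random.nextDouble() < ioExceptionRate) {
            throw new IOException("Random IOException for blob [" + name + "]");
        }
        // ... otherwise the blob would be written normally ...
    }

    public static void main(String[] args) {
        FlakyWriter writer = new FlakyWriter(42L, 0.2);
        int failures = 0;
        for (int i = 0; i < 100; i++) {
            try {
                writer.writeBlob("blob-" + i);
            } catch (IOException e) {
                failures++;
            }
        }
        System.out.println("injected failures: " + failures);
    }
}
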
Use of java.nio.file.Path in project elasticsearch by elastic: class SharedClusterSnapshotRestoreIT, method testSnapshotWithMissingShardLevelIndexFile.
public void testSnapshotWithMissingShardLevelIndexFile() throws Exception {
    Path repo = randomRepoPath();
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    assertAcked(client().admin().cluster().preparePutRepository("test-repo")
        .setType("fs")
        .setSettings(Settings.builder().put("location", repo).put("compress", false)));
    createIndex("test-idx-1", "test-idx-2");
    logger.info("--> indexing some data");
    indexRandom(true,
        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
    logger.info("--> creating snapshot");
    client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
        .setWaitForCompletion(true)
        .setIndices("test-idx-*")
        .get();
    logger.info("--> deleting shard level index file");
    try (Stream<Path> files = Files.list(repo.resolve("indices"))) {
        files.forEach(indexPath -> IOUtils.deleteFilesIgnoringExceptions(indexPath.resolve("0").resolve("index-0")));
    }
    logger.info("--> creating another snapshot");
    CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2")
        .setWaitForCompletion(true)
        .setIndices("test-idx-1")
        .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertEquals(createSnapshotResponse.getSnapshotInfo().successfulShards(), createSnapshotResponse.getSnapshotInfo().totalShards());
    logger.info("--> restoring the first snapshot, the repository should not have lost any shard data despite deleting index-N, "
        + "because it should have iterated over the snap-*.data files as backup");
    client().admin().indices().prepareDelete("test-idx-1", "test-idx-2").get();
    RestoreSnapshotResponse restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-1")
        .setWaitForCompletion(true)
        .get();
    assertEquals(0, restoreSnapshotResponse.getRestoreInfo().failedShards());
}
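
The cleanup step above combines Files.list, Path.resolve, and Lucene's IOUtils.deleteFilesIgnoringExceptions. A self-contained variant using only java.nio.file (Files.deleteIfExists instead of IOUtils), with the indices/<index>/0/index-0 layout taken from the test; the directory names are illustrative:

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

public class ShardIndexFileCleaner {
    // delete the shard-level index-0 file under every index directory, ignoring missing files
    static void deleteShardIndexFiles(Path repo) throws IOException {
        try (Stream<Path> indices = Files.list(repo.resolve("indices"))) {
            indices.forEach(indexPath -> {
                Path shardIndexFile = indexPath.resolve("0").resolve("index-0");
                try {
                    Files.deleteIfExists(shardIndexFile);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            });
        }
    }

    public static void main(String[] args) throws IOException {
        Path repo = Files.createTempDirectory("test-repo");
        Path shardDir = Files.createDirectories(repo.resolve("indices").resolve("some-index-uuid").resolve("0"));
        Files.createFile(shardDir.resolve("index-0"));
        deleteShardIndexFiles(repo);
        System.out.println("index-0 still present: " + Files.exists(shardDir.resolve("index-0")));
    }
}
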