Use of org.elasticsearch.repositories.Repository in project crate by crate.
From the class BlobStoreRepositoryRestoreTests, method testSnapshotWithConflictingName.
public void testSnapshotWithConflictingName() throws IOException {
    final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
    final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
    IndexShard shard = newShard(shardId, true);
    try {
        // index documents in the shards
        final int numDocs = scaledRandomIntBetween(1, 500);
        recoverShardFromStore(shard);
        for (int i = 0; i < numDocs; i++) {
            indexDoc(shard, Integer.toString(i));
            if (rarely()) {
                flushShard(shard, false);
            }
        }
        assertDocCount(shard, numDocs);
        // snapshot the shard
        final Repository repository = createRepository();
        final Snapshot snapshot = new Snapshot(
            repository.getMetadata().name(),
            new SnapshotId(randomAlphaOfLength(10), "_uuid"));
        final String shardGen = snapshotShard(shard, snapshot, repository);
        assertNotNull(shardGen);
        final Snapshot snapshotWithSameName = new Snapshot(
            repository.getMetadata().name(),
            new SnapshotId(snapshot.getSnapshotId().getName(), "_uuid2"));
        final PlainActionFuture<SnapshotInfo> future = PlainActionFuture.newFuture();
        repository.finalizeSnapshot(
            snapshot.getSnapshotId(),
            ShardGenerations.builder().put(indexId, 0, shardGen).build(),
            0L,
            null,
            1,
            Collections.emptyList(),
            -1L,
            false,
            Metadata.builder().put(shard.indexSettings().getIndexMetadata(), false).build(),
            true,
            future);
        future.actionGet();
        IndexShardSnapshotFailedException isfe = expectThrows(
            IndexShardSnapshotFailedException.class,
            () -> snapshotShard(shard, snapshotWithSameName, repository));
        assertThat(isfe.getMessage(), containsString("Duplicate snapshot name"));
    } finally {
        if (shard != null && shard.state() != IndexShardState.CLOSED) {
            try {
                shard.close("test", false);
            } finally {
                IOUtils.close(shard.store());
            }
        }
    }
}
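The call to repository.finalizeSnapshot above hands a PlainActionFuture to a listener-based API and then blocks on it. Below is a minimal, self-contained sketch of that same bridge pattern; the awaitResult helper and the ListenerToFutureExample class are illustrative names, not part of the test above.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;

import java.util.function.Consumer;

final class ListenerToFutureExample {

    // Runs an ActionListener-based call and blocks until it completes, mirroring how the
    // test passes a PlainActionFuture to finalizeSnapshot and then calls actionGet().
    static <T> T awaitResult(Consumer<ActionListener<T>> asyncCall) {
        PlainActionFuture<T> future = PlainActionFuture.newFuture();
        asyncCall.accept(future);
        return future.actionGet();
    }
}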
Use of org.elasticsearch.repositories.Repository in project crate by crate.
From the class CorruptedBlobStoreRepositoryIT, method testFindDanglingLatestGeneration.
public void testFindDanglingLatestGeneration() throws Exception {
    Path repo = randomRepoPath();
    final String repoName = "test";
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    execute("CREATE REPOSITORY test TYPE fs with (location=?, compress=false, chunk_size=?)",
            new Object[] { repo.toAbsolutePath().toString(),
                           randomIntBetween(100, 1000) + ByteSizeUnit.BYTES.getSuffix() });
    execute("create table doc.test1(x integer)");
    execute("create table doc.test2(x integer)");
    logger.info("--> indexing some data");
    execute("insert into doc.test1 values(1),(2)");
    execute("insert into doc.test2 values(1),(2)");
    final String snapshot = "snapshot1";
    logger.info("--> creating snapshot");
    CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
        .prepareCreateSnapshot(repoName, snapshot)
        .setWaitForCompletion(true)
        .setIndices("test*")
        .get();
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
    assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
               equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    final Repository repository = internalCluster()
        .getCurrentMasterNodeInstance(RepositoriesService.class)
        .repository(repoName);
    logger.info("--> move index-N blob to next generation");
    final RepositoryData repositoryData = getRepositoryData(repository);
    final long beforeMoveGen = repositoryData.getGenId();
    Files.move(repo.resolve("index-" + beforeMoveGen), repo.resolve("index-" + (beforeMoveGen + 1)));
    logger.info("--> set next generation as pending in the cluster state");
    final PlainActionFuture<Void> csUpdateFuture = PlainActionFuture.newFuture();
    internalCluster().getCurrentMasterNodeInstance(ClusterService.class)
        .submitStateUpdateTask("set pending generation", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                return ClusterState.builder(currentState)
                    .metadata(Metadata.builder(currentState.getMetadata())
                        .putCustom(
                            RepositoriesMetadata.TYPE,
                            currentState.metadata()
                                .<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE)
                                .withUpdatedGeneration(
                                    repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1))
                        .build())
                    .build();
            }

            @Override
            public void onFailure(String source, Exception e) {
                csUpdateFuture.onFailure(e);
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                csUpdateFuture.onResponse(null);
            }
        });
    csUpdateFuture.get();
    logger.info("--> full cluster restart");
    internalCluster().fullRestart();
    ensureGreen();
    Repository repositoryAfterRestart = internalCluster()
        .getCurrentMasterNodeInstance(RepositoriesService.class)
        .repository(repoName);
    logger.info("--> verify index-N blob is found at the new location");
    assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 1));
    logger.info("--> delete snapshot");
    execute("drop snapshot test.snapshot1");
    logger.info("--> verify index-N blob is found at the expected location");
    assertThat(getRepositoryData(repositoryAfterRestart).getGenId(), is(beforeMoveGen + 2));
    logger.info("--> make sure snapshot doesn't exist");
    expectThrows(
        SnapshotMissingException.class,
        () -> client().admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get().getSnapshots());
}
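The cluster-state manipulation in the middle of this test follows a reusable submit-and-wait pattern. A minimal sketch of that pattern, assuming only that a ClusterService instance is available; the updateAndWait helper and the ClusterStateUpdateExample class are illustrative, not part of the test.

import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.service.ClusterService;

final class ClusterStateUpdateExample {

    // Submits a cluster-state update and blocks until the new state has been applied,
    // mirroring the "set pending generation" task in the test above.
    static void updateAndWait(ClusterService clusterService) {
        PlainActionFuture<Void> applied = PlainActionFuture.newFuture();
        clusterService.submitStateUpdateTask("example update", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                // A real caller would rebuild and return a modified state here.
                return currentState;
            }

            @Override
            public void onFailure(String source, Exception e) {
                applied.onFailure(e);
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                applied.onResponse(null);
            }
        });
        applied.actionGet();
    }
}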
Use of org.elasticsearch.repositories.Repository in project crate by crate.
From the class CorruptedBlobStoreRepositoryIT, method testHandlingMissingRootLevelSnapshotMetadata.
public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception {
    Path repo = randomRepoPath();
    final String repoName = "test";
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    execute("CREATE REPOSITORY test TYPE fs with (location=?, compress=false, chunk_size=?)",
            new Object[] { repo.toAbsolutePath().toString(),
                           randomIntBetween(100, 1000) + ByteSizeUnit.BYTES.getSuffix() });
    final String snapshotPrefix = "test-snap-";
    final int snapshots = randomIntBetween(1, 2);
    logger.info("--> creating [{}] snapshots", snapshots);
    for (int i = 0; i < snapshots; ++i) {
        // Workaround to simulate BwC situation: taking a snapshot without indices here so that we don't
        // create any new version shard generations (the existence of which would short-circuit checks
        // for the repo containing old version snapshots)
        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
            .prepareCreateSnapshot(repoName, snapshotPrefix + i)
            .setIndices()
            .setWaitForCompletion(true)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
                   equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    }
    final Repository repository = internalCluster()
        .getCurrentMasterNodeInstance(RepositoriesService.class)
        .repository(repoName);
    final RepositoryData repositoryData = getRepositoryData(repository);
    final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds());
    logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt);
    Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID())));
    logger.info("--> strip version information from index-N blob");
    final RepositoryData withoutVersions = new RepositoryData(
        repositoryData.getGenId(),
        repositoryData.getSnapshotIds().stream()
            .collect(Collectors.toMap(SnapshotId::getUUID, Function.identity())),
        repositoryData.getSnapshotIds().stream()
            .collect(Collectors.toMap(SnapshotId::getUUID, repositoryData::getSnapshotState)),
        Collections.emptyMap(),
        Collections.emptyMap(),
        ShardGenerations.EMPTY);
    Files.write(
        repo.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + withoutVersions.getGenId()),
        BytesReference.toBytes(BytesReference.bytes(withoutVersions.snapshotsToXContent(XContentFactory.jsonBuilder(), true))),
        StandardOpenOption.TRUNCATE_EXISTING);
    logger.info("--> verify that repo is assumed in old metadata format");
    final SnapshotsService snapshotsService = internalCluster().getCurrentMasterNodeInstance(SnapshotsService.class);
    final ThreadPool threadPool = internalCluster().getCurrentMasterNodeInstance(ThreadPool.class);
    assertThat(
        PlainActionFuture.get(f -> threadPool.generic().execute(ActionRunnable.wrap(
            f, (d) -> snapshotsService.hasOldVersionSnapshots(repoName, getRepositoryData(repository), d, null)))),
        is(true));
    logger.info("--> verify that snapshot with missing root level metadata can be deleted");
    client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get();
    logger.info("--> verify that repository is assumed in new metadata format after removing corrupted snapshot");
    assertThat(
        PlainActionFuture.get(f -> threadPool.generic().execute(ActionRunnable.wrap(
            f, (d) -> snapshotsService.hasOldVersionSnapshots(repoName, getRepositoryData(repository), d, null)))),
        is(false));
    final RepositoryData finalRepositoryData = getRepositoryData(repository);
    for (SnapshotId snapshotId : finalRepositoryData.getSnapshotIds()) {
        assertThat(finalRepositoryData.getVersion(snapshotId), is(Version.CURRENT));
    }
}
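The corruption tests above locate repository blobs on disk by name, using the public constants on BlobStoreRepository that also appear in the code. A minimal sketch of how those paths are derived for an fs repository rooted at repoPath; the RepoBlobPaths class and its method names are illustrative.

import java.nio.file.Path;
import java.util.Locale;

import org.elasticsearch.repositories.blobstore.BlobStoreRepository;

final class RepoBlobPaths {

    // Root-level snapshot metadata blob, named via SNAPSHOT_NAME_FORMAT from the snapshot UUID.
    static Path snapshotMetadataBlob(Path repoPath, String snapshotUuid) {
        return repoPath.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotUuid));
    }

    // Repository generation blob, named via INDEX_FILE_PREFIX plus the generation number.
    static Path indexGenerationBlob(Path repoPath, long generation) {
        return repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + generation);
    }
}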
Use of org.elasticsearch.repositories.Repository in project crate by crate.
From the class CorruptedBlobStoreRepositoryIT, method testCorruptedSnapshotIsIgnored.
public void testCorruptedSnapshotIsIgnored() throws Exception {
    Path repo = randomRepoPath();
    final String repoName = "test";
    logger.info("--> creating repository at {}", repo.toAbsolutePath());
    execute("CREATE REPOSITORY test TYPE fs with (location=?, compress=false, chunk_size=?)",
            new Object[] { repo.toAbsolutePath().toString(),
                           randomIntBetween(100, 1000) + ByteSizeUnit.BYTES.getSuffix() });
    final String snapshotPrefix = "test-snap-";
    final int snapshots = randomIntBetween(2, 2);
    logger.info("--> creating [{}] snapshots", snapshots);
    for (int i = 0; i < snapshots; ++i) {
        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster()
            .prepareCreateSnapshot(repoName, snapshotPrefix + i)
            .setIndices()
            .setWaitForCompletion(true)
            .get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), is(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
                   equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
    }
    final Repository repository = internalCluster()
        .getCurrentMasterNodeInstance(RepositoriesService.class)
        .repository(repoName);
    final RepositoryData repositoryData = getRepositoryData(repository);
    final SnapshotId snapshotToCorrupt = randomFrom(repositoryData.getSnapshotIds());
    logger.info("--> delete root level snapshot metadata blob for snapshot [{}]", snapshotToCorrupt);
    Files.delete(repo.resolve(String.format(Locale.ROOT, BlobStoreRepository.SNAPSHOT_NAME_FORMAT, snapshotToCorrupt.getUUID())));
    logger.info("--> ensure snapshot list can be retrieved without any error if ignoreUnavailable is set");
    var resp = client().admin().cluster()
        .prepareGetSnapshots(repoName)
        .setIgnoreUnavailable(true)
        .get();
    assertThat(resp.getSnapshots().size(), is(snapshots - 1));
    client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotToCorrupt.getName()).get();
}
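The corrupted snapshot can still be deleted by name at the end of the test because its SnapshotId is recorded in the repository-level index-N blob (RepositoryData), separately from the per-snapshot metadata blob that was removed. A short sketch of reading snapshot names straight from RepositoryData; the RepositoryDataExample class and snapshotNames helper are illustrative.

import java.util.Collection;
import java.util.stream.Collectors;

import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotId;

final class RepositoryDataExample {

    // Lists snapshot names from RepositoryData alone, without touching per-snapshot blobs.
    static Collection<String> snapshotNames(RepositoryData repositoryData) {
        return repositoryData.getSnapshotIds().stream()
            .map(SnapshotId::getName)
            .collect(Collectors.toList());
    }
}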
Use of org.elasticsearch.repositories.Repository in project crate by crate.
From the class SysSnapshotsTest, method test_current_snapshot_does_not_fail_if_get_repository_data_raises_exception.
@Test
public void test_current_snapshot_does_not_fail_if_get_repository_data_raises_exception() throws Exception {
    Repository r1 = mock(Repository.class);
    Mockito.doThrow(new IllegalStateException("some error")).when(r1).getRepositoryData(any());
    SysSnapshots sysSnapshots = new SysSnapshots(() -> List.of(r1));
    CompletableFuture<Iterable<SysSnapshot>> currentSnapshots = sysSnapshots.currentSnapshots();
    Iterable<SysSnapshot> iterable = currentSnapshots.get(5, TimeUnit.SECONDS);
    assertThat(iterable.iterator().hasNext(), is(false));
}
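The stubbing above uses Mockito's doThrow().when() form, which is the one to use for a void, listener-based method such as getRepositoryData here, since when(mock.method(...)) does not compile for void returns. A minimal, self-contained sketch; the MockRepositoryExample class and failingRepository helper are illustrative.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import org.elasticsearch.repositories.Repository;

final class MockRepositoryExample {

    // Builds a Repository mock whose getRepositoryData(...) call always fails, matching
    // the failure mode exercised by the test above.
    static Repository failingRepository() {
        Repository repo = mock(Repository.class);
        doThrow(new IllegalStateException("some error")).when(repo).getRepositoryData(any());
        return repo;
    }
}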