Use of org.elasticsearch.cluster.metadata.RepositoryMetadata in project crate by crate.
From class SnapshotRestoreAnalyzerTest, method prepare().
@Before
public void prepare() throws IOException {
    RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(
        Collections.singletonList(
            new RepositoryMetadata(
                "my_repo",
                "fs",
                Settings.builder().put("location", "/tmp/my_repo").build())));
    ClusterState clusterState = ClusterState.builder(new ClusterName("testing"))
        .metadata(Metadata.builder()
            .putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata))
        .build();
    ClusterServiceUtils.setState(clusterService, clusterState);
    e = SQLExecutor.builder(clusterService)
        .addTable(USER_TABLE_DEFINITION)
        .addTable(TEST_DOC_LOCATIONS_TABLE_DEFINITION)
        .addPartitionedTable(TEST_PARTITIONED_TABLE_DEFINITION, TEST_PARTITIONED_TABLE_PARTITIONS)
        .addBlobTable("create blob table my_blobs")
        .build();
    plannerContext = e.getPlannerContext(clusterService.state());
}
Use of org.elasticsearch.cluster.metadata.RepositoryMetadata in project crate by crate.
From class BlobStoreRepository, method writeIndexGen().
/**
* Writing a new index generation is a three-step process.
* First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its
* pending generation {@code P} while its safe generation {@code N} remains unchanged.
* Second, the updated {@link RepositoryData} is written to generation {@code P + 1}.
* Lastly, the {@link RepositoryMetadata} entry for this repository is updated to the new generation {@code P + 1} and thus
* pending and safe generation are set to the same value marking the end of the update of the repository data.
*
* @param repositoryData RepositoryData to write
* @param expectedGen expected repository generation at the start of the operation
* @param writeShardGens whether to write {@link ShardGenerations} to the new {@link RepositoryData} blob
* @param listener completion listener
*/
protected void writeIndexGen(RepositoryData repositoryData,
                             long expectedGen,
                             boolean writeShardGens,
                             ActionListener<Void> listener) {
    // cannot write to a read-only repository
    assert isReadOnly() == false;
    final long currentGen = repositoryData.getGenId();
    if (currentGen != expectedGen) {
        // the index file was updated by a concurrent operation, so we were operating on stale
        // repository data
        listener.onFailure(new RepositoryException(
            metadata.name(),
            "concurrent modification of the index-N file, expected current generation [" + expectedGen +
                "], actual current generation [" + currentGen + "]"));
        return;
    }
    // Step 1: Set repository generation state to the next possible pending generation
    final StepListener<Long> setPendingStep = new StepListener<>();
    clusterService.submitStateUpdateTask(
        "set pending repository generation [" + metadata.name() + "][" + expectedGen + "]",
        new ClusterStateUpdateTask() {
            private long newGen;

            @Override
            public ClusterState execute(ClusterState currentState) {
                final RepositoryMetadata meta = getRepoMetadata(currentState);
                final String repoName = metadata.name();
                final long genInState = meta.generation();
                final boolean uninitializedMeta = meta.generation() == RepositoryData.UNKNOWN_REPO_GEN || bestEffortConsistency;
                if (uninitializedMeta == false && meta.pendingGeneration() != genInState) {
                    LOGGER.info("Trying to write new repository data over unfinished write, repo [{}] is at " +
                                "safe generation [{}] and pending generation [{}]",
                                meta.name(), genInState, meta.pendingGeneration());
                }
                assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation() :
                    "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]";
                // If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of
                // all contents by an external process, so we reset the safe generation to the empty generation.
                final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN
                    ? RepositoryData.EMPTY_REPO_GEN
                    : (uninitializedMeta ? expectedGen : genInState);
                // Regardless of whether or not the safe generation has been reset, the pending generation always increments
                // so that even if a repository has been manually cleared of all contents we will never reuse the same
                // repository generation. This is motivated by the consistency behavior the S3-based blob repository
                // implementation has to support, which does not offer any consistency guarantees when it comes to
                // overwriting the same blob name with different content.
                final long nextPendingGen = metadata.pendingGeneration() + 1;
                newGen = uninitializedMeta ? Math.max(expectedGen + 1, nextPendingGen) : nextPendingGen;
                assert newGen > latestKnownRepoGen.get() :
                    "Attempted new generation [" + newGen + "] must be larger than latest known generation [" +
                        latestKnownRepoGen.get() + "]";
                return ClusterState.builder(currentState)
                    .metadata(Metadata.builder(currentState.getMetadata())
                        .putCustom(RepositoriesMetadata.TYPE,
                                   currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE)
                                       .withUpdatedGeneration(repoName, safeGeneration, newGen))
                        .build())
                    .build();
            }

            @Override
            public void onFailure(String source, Exception e) {
                listener.onFailure(new RepositoryException(
                    metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                setPendingStep.onResponse(newGen);
            }
        });
    final StepListener<RepositoryData> filterRepositoryDataStep = new StepListener<>();

    // Step 2: Write new index-N blob to repository and update index.latest
    setPendingStep.whenComplete(newGen -> threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> {
        // BwC logic: Load snapshot version information if any snapshot is missing a version in RepositoryData so that the new
        // RepositoryData contains a version for every snapshot
        final List<SnapshotId> snapshotIdsWithoutVersion = repositoryData.getSnapshotIds().stream()
            .filter(snapshotId -> repositoryData.getVersion(snapshotId) == null)
            .collect(Collectors.toList());
        if (snapshotIdsWithoutVersion.isEmpty() == false) {
            final Map<SnapshotId, Version> updatedVersionMap = new ConcurrentHashMap<>();
            final GroupedActionListener<Void> loadAllVersionsListener = new GroupedActionListener<>(
                ActionListener.runAfter(
                    new ActionListener<Collection<Void>>() {

                        @Override
                        public void onResponse(Collection<Void> voids) {
                            LOGGER.info("Successfully loaded all snapshots' version information for {} from snapshot metadata",
                                        AllocationService.firstListElementsToCommaDelimitedString(
                                            snapshotIdsWithoutVersion, SnapshotId::toString, LOGGER.isDebugEnabled()));
                        }

                        @Override
                        public void onFailure(Exception e) {
                            LOGGER.warn("Failure when trying to load missing version information from snapshot metadata", e);
                        }
                    },
                    () -> filterRepositoryDataStep.onResponse(repositoryData.withVersions(updatedVersionMap))),
                snapshotIdsWithoutVersion.size());
            for (SnapshotId snapshotId : snapshotIdsWithoutVersion) {
                threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(loadAllVersionsListener, () -> {
                    ActionListener<SnapshotInfo> snapshotInfoListener = ActionListener.delegateFailure(
                        loadAllVersionsListener,
                        (delegate, snapshotInfo) -> {
                            updatedVersionMap.put(snapshotId, snapshotInfo.version());
                            delegate.onResponse(null);
                        });
                    getSnapshotInfo(snapshotId, snapshotInfoListener);
                }));
            }
        } else {
            filterRepositoryDataStep.onResponse(repositoryData);
        }
    })), listener::onFailure);
    filterRepositoryDataStep.whenComplete(filteredRepositoryData -> {
        final long newGen = setPendingStep.result();
        if (latestKnownRepoGen.get() >= newGen) {
            throw new IllegalArgumentException("Tried writing generation [" + newGen +
                "] but repository is at least at generation [" + latestKnownRepoGen.get() + "] already");
        }
        // write the index file
        final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen);
        LOGGER.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
        writeAtomic(indexBlob,
                    BytesReference.bytes(filteredRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), writeShardGens)),
                    true);
        // write the current generation to the index-latest file
        final BytesReference genBytes;
        try (BytesStreamOutput bStream = new BytesStreamOutput()) {
            bStream.writeLong(newGen);
            genBytes = bStream.bytes();
        }
        LOGGER.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen);
        writeAtomic(INDEX_LATEST_BLOB, genBytes, false);
        // Step 3: Update CS to reflect new repository generation.
        clusterService.submitStateUpdateTask(
            "set safe repository generation [" + metadata.name() + "][" + newGen + "]",
            new ClusterStateUpdateTask() {

                @Override
                public ClusterState execute(ClusterState currentState) {
                    final RepositoryMetadata meta = getRepoMetadata(currentState);
                    if (meta.generation() != expectedGen) {
                        throw new IllegalStateException("Tried to update repo generation to [" + newGen +
                            "] but saw unexpected generation in state [" + meta + "]");
                    }
                    if (meta.pendingGeneration() != newGen) {
                        throw new IllegalStateException("Tried to update from unexpected pending repo generation [" +
                            meta.pendingGeneration() + "] after write to generation [" + newGen + "]");
                    }
                    return ClusterState.builder(currentState)
                        .metadata(Metadata.builder(currentState.getMetadata())
                            .putCustom(RepositoriesMetadata.TYPE,
                                       currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE)
                                           .withUpdatedGeneration(metadata.name(), newGen, newGen))
                            .build())
                        .build();
                }

                @Override
                public void onFailure(String source, Exception e) {
                    listener.onFailure(new RepositoryException(
                        metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
                }

                @Override
                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(listener, () -> {
                        // Delete all now-outdated index files up to 1000 blobs back from the new generation.
                        // If there are more than 1000 dangling index-N blobs, the cleanup functionality that runs on
                        // repository delete will take care of them.
                        // Deleting one older than the current expectedGen is done for BwC reasons, as older versions
                        // used to keep two index-N blobs around.
                        final List<String> oldIndexN = LongStream
                            .range(Math.max(Math.max(expectedGen - 1, 0), newGen - 1000), newGen)
                            .mapToObj(gen -> INDEX_FILE_PREFIX + gen)
                            .collect(Collectors.toList());
                        try {
                            blobContainer().deleteBlobsIgnoringIfNotExists(oldIndexN);
                        } catch (IOException e) {
                            LOGGER.warn("Failed to clean up old index blobs {}", oldIndexN);
                        }
                    }));
                }
            });
    }, listener::onFailure);
}
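
To make the three-step protocol from the Javadoc above concrete, here is a minimal sketch of how the safe and pending generations evolve, using the same withUpdatedGeneration call the implementation applies to the cluster state. The repository name and the starting generation value 7 are illustrative assumptions, not values taken from the source.

// Sketch only: assume repository "my_repo" is at safe generation N = 7, pending P = 7.
RepositoriesMetadata repos = currentState.metadata().custom(RepositoriesMetadata.TYPE);
// Step 1: bump the pending generation to P + 1 = 8; the safe generation stays at 7.
repos = repos.withUpdatedGeneration("my_repo", 7, 8);
// Step 2: write the updated RepositoryData to the blob "index-8" (omitted here).
// Step 3: mark generation 8 as safe, so safe == pending == 8 and the update is complete.
repos = repos.withUpdatedGeneration("my_repo", 8, 8);

As the comments in execute() note, the pending generation only ever increments, so an index-N blob name is never reused even if the repository contents were wiped externally.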
Use of org.elasticsearch.cluster.metadata.RepositoryMetadata in project crate by crate.
From class RepositoriesService, method applyClusterState().
/**
* Checks if new repositories appeared in or disappeared from the cluster metadata and updates the
* current list of repositories accordingly.
*
* @param event cluster changed event
*/
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        final ClusterState state = event.state();
        RepositoriesMetadata oldMetadata = event.previousState().getMetadata().custom(RepositoriesMetadata.TYPE);
        RepositoriesMetadata newMetadata = state.getMetadata().custom(RepositoriesMetadata.TYPE);
        // Check if repositories got changed
        if ((oldMetadata == null && newMetadata == null)
                || (oldMetadata != null && oldMetadata.equalsIgnoreGenerations(newMetadata))) {
            for (Repository repo : repositories.values()) {
                repo.updateState(state);
            }
            return;
        }
        LOGGER.trace("processing new index repositories for state version [{}]", event.state().version());
        Map<String, Repository> survivors = new HashMap<>();
        // First, remove repositories that are no longer there
        for (Map.Entry<String, Repository> entry : repositories.entrySet()) {
            if (newMetadata == null || newMetadata.repository(entry.getKey()) == null) {
                LOGGER.debug("unregistering repository [{}]", entry.getKey());
                closeRepository(entry.getValue());
            } else {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        Map<String, Repository> builder = new HashMap<>();
        if (newMetadata != null) {
            // Now go through all repositories and update existing or create missing
            for (RepositoryMetadata repositoryMetadata : newMetadata.repositories()) {
                Repository repository = survivors.get(repositoryMetadata.name());
                if (repository != null) {
                    // Found previous version of this repository
                    RepositoryMetadata previousMetadata = repository.getMetadata();
                    if (previousMetadata.type().equals(repositoryMetadata.type()) == false
                            || previousMetadata.settings().equals(repositoryMetadata.settings()) == false) {
                        // Previous version is different from the version in settings
                        LOGGER.debug("updating repository [{}]", repositoryMetadata.name());
                        closeRepository(repository);
                        repository = null;
                        try {
                            repository = createRepository(repositoryMetadata);
                        } catch (RepositoryException ex) {
                            // TODO: this catch is bogus, it means the old repo is already closed,
                            // but we have nothing to replace it
                            LOGGER.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repositoryMetadata.name()), ex);
                        }
                    }
                } else {
                    try {
                        repository = createRepository(repositoryMetadata);
                    } catch (RepositoryException ex) {
                        LOGGER.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetadata.name()), ex);
                    }
                }
                if (repository != null) {
                    LOGGER.debug("registering repository [{}]", repositoryMetadata.name());
                    builder.put(repositoryMetadata.name(), repository);
                }
            }
        }
        for (Repository repo : builder.values()) {
            repo.updateState(state);
        }
        repositories = Collections.unmodifiableMap(builder);
    } catch (Exception ex) {
        LOGGER.warn("failure updating cluster state", ex);
    }
}
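
As a complement to the listener above, here is a minimal sketch of the kind of cluster-state change it reacts to: publishing a RepositoriesMetadata custom in the metadata, mirroring the test setups shown elsewhere on this page. The repository name "backup_repo" and its location are assumptions for illustration.

// Sketch only: register a repository in cluster metadata; applyClusterState will observe
// the change and create (or update) the matching Repository instance.
RepositoriesMetadata updated = new RepositoriesMetadata(Collections.singletonList(
    new RepositoryMetadata("backup_repo", "fs",
                           Settings.builder().put("location", "/tmp/backup_repo").build())));
ClusterState newState = ClusterState.builder(currentState)
    .metadata(Metadata.builder(currentState.metadata())
        .putCustom(RepositoriesMetadata.TYPE, updated))
    .build();

Note that the equalsIgnoreGenerations check at the top of applyClusterState means pure generation bumps from writeIndexGen do not force repositories to be closed and recreated; only type or settings changes do.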
Use of org.elasticsearch.cluster.metadata.RepositoryMetadata in project crate by crate.
From class RepositoryServiceTest, method testRepositoryIsDroppedOnFailure().
@Test
public void testRepositoryIsDroppedOnFailure() throws Throwable {
    expectedException.expect(RepositoryException.class);
    // add repo to cluster service so that it exists
    RepositoriesMetadata repos = new RepositoriesMetadata(
        Collections.singletonList(new RepositoryMetadata("repo1", "fs", Settings.EMPTY)));
    ClusterState state = ClusterState.builder(new ClusterName("dummy"))
        .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, repos))
        .build();
    ClusterServiceUtils.setState(clusterService, state);
    IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver();
    final AtomicBoolean deleteRepoCalled = new AtomicBoolean(false);
    MockTransportService transportService = MockTransportService.createNewService(
        Settings.EMPTY, Version.CURRENT, THREAD_POOL, clusterService.getClusterSettings());
    TransportDeleteRepositoryAction deleteRepositoryAction = new TransportDeleteRepositoryAction(
        transportService, clusterService, mock(RepositoriesService.class), THREAD_POOL, indexNameExpressionResolver) {

        @Override
        protected void doExecute(DeleteRepositoryRequest request, ActionListener<AcknowledgedResponse> listener) {
            deleteRepoCalled.set(true);
            listener.onResponse(mock(AcknowledgedResponse.class));
        }
    };
    TransportPutRepositoryAction putRepo = new TransportPutRepositoryAction(
        transportService, clusterService, mock(RepositoriesService.class), THREAD_POOL, indexNameExpressionResolver) {

        @Override
        protected void doExecute(PutRepositoryRequest request, ActionListener<AcknowledgedResponse> listener) {
            listener.onFailure(new RepositoryException(request.name(), "failure"));
        }
    };
    RepositoryService repositoryService = new RepositoryService(clusterService, deleteRepositoryAction, putRepo);
    try {
        PutRepositoryRequest request = new PutRepositoryRequest("repo1");
        request.type("fs");
        repositoryService.execute(request).get(10, TimeUnit.SECONDS);
    } catch (ExecutionException e) {
        assertThat(deleteRepoCalled.get(), is(true));
        throw e.getCause();
    }
}
Use of org.elasticsearch.cluster.metadata.RepositoryMetadata in project crate by crate.
From class AccessControlMayExecuteTest, method setUpSQLExecutor().
@Before
public void setUpSQLExecutor() throws Exception {
    validationCallArguments = new ArrayList<>();
    RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(singletonList(
        new RepositoryMetadata("my_repo", "fs", Settings.builder().put("location", "/tmp/my_repo").build())));
    ClusterState clusterState = ClusterState.builder(clusterService.state())
        .metadata(Metadata.builder(clusterService.state().metadata())
            .putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata))
        .build();
    ClusterServiceUtils.setState(clusterService, clusterState);
    user = new User("normal", Set.of(), Set.of(), null) {

        @Override
        public boolean hasPrivilege(Privilege.Type type, Privilege.Clazz clazz, String ident, String defaultSchema) {
            validationCallArguments.add(CollectionUtils.arrayAsArrayList(type, clazz, ident, user.name()));
            return true;
        }
    };
    superUser = new User("crate", EnumSet.of(User.Role.SUPERUSER), Set.of(), null) {

        @Override
        public boolean hasPrivilege(Privilege.Type type, Privilege.Clazz clazz, @Nullable String ident, String defaultSchema) {
            validationCallArguments.add(CollectionUtils.arrayAsArrayList(type, clazz, ident, superUser.name()));
            return true;
        }
    };
    UserLookupService userLookupService = new UserLookupService(clusterService) {

        @Nullable
        @Override
        public User findUser(String userName) {
            if ("crate".equals(userName)) {
                return superUser;
            }
            return super.findUser(userName);
        }
    };
    userManager = new UserManagerService(null, null, null, null, mock(SysTableRegistry.class),
        clusterService, userLookupService, new DDLClusterStateService());
    e = SQLExecutor.builder(clusterService)
        .addBlobTable("create blob table blobs")
        .enableDefaultTables()
        .setUser(superUser)
        .addView(new RelationName("doc", "v1"), "select * from users")
        .setUserManager(userManager)
        .build();
}