Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In class RestGetTaskAction, method prepareRequest:
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
    TaskId taskId = new TaskId(request.param("taskId"));
    boolean waitForCompletion = request.paramAsBoolean("wait_for_completion", false);
    TimeValue timeout = request.paramAsTime("timeout", null);
    GetTaskRequest getTaskRequest = new GetTaskRequest();
    getTaskRequest.setTaskId(taskId);
    getTaskRequest.setWaitForCompletion(waitForCompletion);
    getTaskRequest.setTimeout(timeout);
    return channel -> client.admin().cluster().getTask(getTaskRequest, new RestToXContentListener<>(channel));
}
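The handler above relies on RestRequest.paramAsTime to turn the optional timeout query parameter into a TimeValue (or null when the parameter is absent). Below is a minimal standalone sketch of that parsing step, assuming TimeValue's static parseTimeValue helper and a hypothetical raw parameter value; it illustrates the conversion, it is not the handler's actual code path.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class TaskTimeoutParsingSketch {
    public static void main(String[] args) {
        // Hypothetical raw value as it would arrive on the URL, e.g. GET /_tasks/<taskId>?timeout=30s
        String rawTimeout = "30s";
        // paramAsTime ultimately delegates to parseTimeValue; the null default mirrors the handler above
        TimeValue timeout = TimeValue.parseTimeValue(rawTimeout, null, "timeout");
        System.out.println(timeout.millis());        // 30000
        System.out.println(timeout.getStringRep());  // 30s
    }
}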
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In class RestoreService, method restoreSnapshot:
/**
* Restores the snapshot specified in the restore request.
*
* @param request restore request
* @param listener restore listener
*/
public void restoreSnapshot(final RestoreRequest request, final ActionListener<RestoreCompletionResponse> listener) {
try {
// Read snapshot info and metadata from the repository
Repository repository = repositoriesService.repository(request.repositoryName);
final RepositoryData repositoryData = repository.getRepositoryData();
final Optional<SnapshotId> incompatibleSnapshotId = repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (incompatibleSnapshotId.isPresent()) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "cannot restore incompatible snapshot");
}
final Optional<SnapshotId> matchingSnapshotId = repositoryData.getSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
if (matchingSnapshotId.isPresent() == false) {
throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "snapshot does not exist");
}
final SnapshotId snapshotId = matchingSnapshotId.get();
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
// Make sure that we can restore from this snapshot
validateSnapshotRestorable(request.repositoryName, snapshotInfo);
// Find list of indices that we need to restore
final Map<String, String> renamedIndices = renamedIndices(request, filteredIndices);
// Now we can start the actual restore process by adding shards to be recovered in the cluster state
// and updating cluster metadata (global and index) as needed
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
RestoreInfo restoreInfo = null;
@Override
public ClusterState execute(ClusterState currentState) {
// Check if another restore process is already running - cannot run two restore processes at the
// same time
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster");
}
// Check if the snapshot to restore is currently being deleted
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
throw new ConcurrentSnapshotExecutionException(snapshot, "cannot restore a snapshot while a snapshot deletion is in-progress [" + deletionsInProgress.getEntries().get(0).getSnapshot() + "]");
}
// Updating cluster state
ClusterState.Builder builder = ClusterState.builder(currentState);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards;
Set<String> aliases = new HashSet<>();
if (!renamedIndices.isEmpty()) {
// We have some indices to restore
ImmutableOpenMap.Builder<ShardId, RestoreInProgress.ShardRestoreStatus> shardsBuilder = ImmutableOpenMap.builder();
final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData, minIndexCompatibilityVersion);
} catch (Exception ex) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
IntSet ignoreShards = new IntHashSet();
final Index renamedIndex;
if (currentIndexMetaData == null) {
// Index doesn't exist - create it and start recovery
// Make sure that the index we are about to create has a valid name
MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings());
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
// Remove all aliases - they shouldn't be restored
indexMdBuilder.removeAllAliases();
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
if (partial) {
populateIgnoredShards(index, ignoreShards);
}
rtBuilder.addAsNewRestore(updatedIndexMetaData, recoverySource, ignoreShards);
blocks.addBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
} else {
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
// Index exists and it's closed - open it in metadata and start recovery
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
if (!request.includeAliases()) {
// Remove all snapshot aliases
if (!snapshotIndexMetaData.getAliases().isEmpty()) {
indexMdBuilder.removeAllAliases();
}
// Add existing aliases
for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.getAliases().values()) {
indexMdBuilder.putAlias(alias.value);
}
} else {
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
aliases.add(alias.value);
}
}
indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
blocks.updateBlocks(updatedIndexMetaData);
mdBuilder.put(updatedIndexMetaData, true);
renamedIndex = updatedIndexMetaData.getIndex();
}
for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
if (!ignoreShards.contains(shard)) {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
} else {
shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
}
}
}
shards = shardsBuilder.build();
RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry));
} else {
shards = ImmutableOpenMap.of();
}
checkAliasNameConflicts(renamedIndices, aliases);
// Restore global state if needed
restoreGlobalStateIfRequested(mdBuilder);
if (completed(shards)) {
// We don't have any indices to restore - we are done
restoreInfo = new RestoreInfo(snapshotId.getName(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards));
}
RoutingTable rt = rtBuilder.build();
ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
return allocationService.reroute(updatedState, "restored snapshot [" + snapshot + "]");
}
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
}
}
}
private void populateIgnoredShards(String index, IntSet ignoreShards) {
for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
if (index.equals(failure.index())) {
ignoreShards.add(failure.shardId());
}
}
}
private boolean checkPartial(String index) {
// Make sure that index was fully snapshotted
if (failed(snapshotInfo, index)) {
if (request.partial()) {
return true;
} else {
throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
}
} else {
return false;
}
}
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
// Index exists - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because it's open");
}
// Index exists - checking that this is not a partial restore
if (partial) {
throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}
/**
* Optionally updates index settings in indexMetaData by removing settings listed in ignoreSettings and
* merging them with settings in changeSettings.
*/
private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings changeSettings, String[] ignoreSettings) {
if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
return indexMetaData;
}
Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
Map<String, String> settingsMap = new HashMap<>(indexMetaData.getSettings().getAsMap());
List<String> simpleMatchPatterns = new ArrayList<>();
for (String ignoredSetting : ignoreSettings) {
if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
} else {
settingsMap.remove(ignoredSetting);
}
} else {
simpleMatchPatterns.add(ignoredSetting);
}
}
if (!simpleMatchPatterns.isEmpty()) {
String[] removePatterns = simpleMatchPatterns.toArray(new String[simpleMatchPatterns.size()]);
Iterator<Map.Entry<String, String>> iterator = settingsMap.entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, String> entry = iterator.next();
if (UNREMOVABLE_SETTINGS.contains(entry.getKey()) == false) {
if (Regex.simpleMatch(removePatterns, entry.getKey())) {
iterator.remove();
}
}
}
}
for (Map.Entry<String, String> entry : normalizedChangeSettings.getAsMap().entrySet()) {
if (UNMODIFIABLE_SETTINGS.contains(entry.getKey())) {
throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + entry.getKey() + "] on restore");
} else {
settingsMap.put(entry.getKey(), entry.getValue());
}
}
return builder.settings(Settings.builder().put(settingsMap)).build();
}
private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) {
if (request.includeGlobalState()) {
if (metaData.persistentSettings() != null) {
Settings settings = metaData.persistentSettings();
clusterSettings.validateUpdate(settings);
mdBuilder.persistentSettings(settings);
}
if (metaData.templates() != null) {
// TODO: Should all existing templates be deleted first?
for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
mdBuilder.put(cursor.value);
}
}
if (metaData.customs() != null) {
for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
// Don't restore repositories while we are working with them
// TODO: Should we restore them at the end?
mdBuilder.putCustom(cursor.key, cursor.value);
}
}
}
}
}
@Override
public void onFailure(String source, Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
listener.onFailure(e);
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
}
});
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
listener.onFailure(e);
}
}
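Most of restoreSnapshot runs inside the cluster-state update task, but one self-contained piece is easy to isolate: the alias-name conflict check, which fails the restore if a renamed index would collide with an alias that is also being restored. The following is a plain-Java sketch of that check under hypothetical index and alias names, with IllegalStateException standing in for SnapshotRestoreException.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class AliasConflictCheckSketch {
    // Mirrors checkAliasNameConflicts: keys are the renamed index names, values the original snapshot index names
    static void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
        for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
            if (aliases.contains(renamedIndex.getKey())) {
                throw new IllegalStateException("cannot rename index [" + renamedIndex.getValue()
                        + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> renamedIndices = new HashMap<>();
        renamedIndices.put("restored-logs", "logs");       // hypothetical rename: logs -> restored-logs
        Set<String> aliases = new HashSet<>();
        aliases.add("restored-logs");                      // alias carrying the same name as the rename target
        checkAliasNameConflicts(renamedIndices, aliases);  // throws: conflict detected
    }
}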
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In class SnapshotsService, method createSnapshot:
/**
* Initializes the snapshotting process.
* <p>
* This method is used by clients to start a snapshot. It makes sure that no snapshots are currently running and
* creates a snapshot record in cluster state metadata.
*
* @param request snapshot request
* @param listener snapshot creation listener
*/
public void createSnapshot(final SnapshotRequest request, final CreateSnapshotListener listener) {
final String repositoryName = request.repositoryName;
final String snapshotName = request.snapshotName;
validate(repositoryName, snapshotName);
// new UUID for the snapshot
final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID());
final RepositoryData repositoryData = repositoriesService.repository(repositoryName).getRepositoryData();
clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {
private SnapshotsInProgress.Entry newSnapshot = null;
@Override
public ClusterState execute(ClusterState currentState) {
validate(request, currentState);
SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "cannot snapshot while a snapshot deletion is in-progress");
}
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
if (snapshots == null || snapshots.entries().isEmpty()) {
// Store newSnapshot here to be processed in clusterStateProcessed
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
List<IndexId> snapshotIndices = repositoryData.resolveNewIndices(indices);
newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId), request.includeGlobalState(), request.partial(), State.INIT, snapshotIndices, System.currentTimeMillis(), repositoryData.getGenId(), null);
snapshots = new SnapshotsInProgress(newSnapshot);
} else {
throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "a snapshot is already running");
}
return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build();
}
@Override
public void onFailure(String source, Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e);
newSnapshot = null;
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) {
if (newSnapshot != null) {
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> beginSnapshot(newState, newSnapshot, request.partial(), listener));
}
}
@Override
public TimeValue timeout() {
return request.masterNodeTimeout();
}
});
}
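In both cluster-state update tasks above, TimeValue appears as the task timeout: timeout() returns the request's master-node timeout, bounding how long the update may wait in the master's pending queue. A small sketch of constructing and comparing such timeouts, assuming the standard TimeValue factory helpers; the 30-second value is stated here as an assumption mirroring the usual Elasticsearch master-node default.

import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class MasterNodeTimeoutSketch {
    public static void main(String[] args) {
        // Assumed default-style 30-second master-node timeout
        TimeValue defaultTimeout = TimeValue.timeValueSeconds(30);
        // A hypothetical user-supplied timeout that exceeds the ceiling we want to allow
        TimeValue requested = TimeValue.timeValueMinutes(5);
        // TimeValue is comparable, so clamping is a one-liner
        TimeValue effective = requested.compareTo(defaultTimeout) > 0 ? defaultTimeout : requested;
        System.out.println(effective.getStringRep()); // 30s
    }
}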
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In class ScalingExecutorBuilder, method getSettings:
@Override
ScalingExecutorSettings getSettings(Settings settings) {
    final String nodeName = Node.NODE_NAME_SETTING.get(settings);
    final int coreThreads = coreSetting.get(settings);
    final int maxThreads = maxSetting.get(settings);
    final TimeValue keepAlive = keepAliveSetting.get(settings);
    return new ScalingExecutorSettings(nodeName, coreThreads, maxThreads, keepAlive);
}
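Here TimeValue carries the keep-alive for a scaling thread pool: threads above the core size are released once they have been idle for that long. A rough standalone sketch of how the three settings map onto an executor follows; the real builder uses Elasticsearch's EsExecutors and its own queue implementation, so the JDK ThreadPoolExecutor and SynchronousQueue below are stand-ins.

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class ScalingPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        int coreThreads = 1;
        int maxThreads = 4;
        TimeValue keepAlive = TimeValue.timeValueMinutes(5); // same shape as keepAliveSetting.get(settings)

        // Idle threads above the core size are reclaimed after the keep-alive elapses
        ThreadPoolExecutor executor = new ThreadPoolExecutor(
                coreThreads, maxThreads,
                keepAlive.millis(), TimeUnit.MILLISECONDS,
                new SynchronousQueue<>());

        executor.execute(() -> System.out.println("task on " + Thread.currentThread().getName()));
        executor.shutdown();
        executor.awaitTermination(10, TimeUnit.SECONDS);
    }
}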
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue in project elasticsearch by elastic.
In class AbstractAsyncBulkByScrollAction, method startNextScroll:
/**
 * Start the next scroll request.
 *
 * @param lastBatchStartTime the time at which the last batch started; together with the batch size this determines how long to
 *        throttle before the next scroll is issued
 * @param lastBatchSize the number of requests sent in the last batch. This is used to calculate the throttling values which are applied
 *        when the scroll returns
 */
void startNextScroll(TimeValue lastBatchStartTime, int lastBatchSize) {
    if (task.isCancelled()) {
        finishHim(null);
        return;
    }
    TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize);
    scrollSource.startNextScroll(extraKeepAlive, response -> {
        onScrollResponse(lastBatchStartTime, lastBatchSize, response);
    });
}
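The extra keep-alive here is the throttle delay: if the previous batch finished faster than the configured requests-per-second allows, the next scroll is delayed, and the scroll context must be kept alive for that long. The following is a simplified sketch of that computation under an assumed requestsPerSecond value; it ignores the task-level bookkeeping the real throttleWaitTime does and is not the library's implementation.

import java.util.concurrent.TimeUnit;
import org.graylog.shaded.elasticsearch7.org.elasticsearch.common.unit.TimeValue;

public class ThrottleWaitSketch {
    // Roughly: time the batch *should* have taken minus the time that has actually passed, floored at zero
    static TimeValue throttleWaitTime(TimeValue lastBatchStartTime, TimeValue now, int lastBatchSize, float requestsPerSecond) {
        long earliestNextBatchStartNanos =
                lastBatchStartTime.nanos() + (long) (lastBatchSize / requestsPerSecond * TimeUnit.SECONDS.toNanos(1));
        return TimeValue.timeValueNanos(Math.max(0, earliestNextBatchStartNanos - now.nanos()));
    }

    public static void main(String[] args) {
        // Hypothetical numbers: 500 docs in the last batch, limited to 100 requests/second,
        // and only 2 seconds have passed since the batch started -> wait roughly 3 more seconds.
        TimeValue wait = throttleWaitTime(TimeValue.timeValueSeconds(10), TimeValue.timeValueSeconds(12), 500, 100f);
        System.out.println(wait); // 3s
    }
}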