Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class BlobStoreRepository, method deleteSnapshot:
@Override
public void deleteSnapshot(SnapshotId snapshotId, long repositoryStateId) {
    if (isReadOnly()) {
        throw new RepositoryException(metadata.name(), "cannot delete snapshot from a readonly repository");
    }
    final RepositoryData repositoryData = getRepositoryData();
    List<String> indices = Collections.emptyList();
    SnapshotInfo snapshot = null;
    try {
        snapshot = getSnapshotInfo(snapshotId);
        indices = snapshot.indices();
    } catch (SnapshotMissingException ex) {
        throw ex;
    } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex);
    }
    MetaData metaData = null;
    try {
        if (snapshot != null) {
            metaData = readSnapshotMetaData(snapshotId, snapshot.version(), repositoryData.resolveIndices(indices), true);
        } else {
            metaData = readSnapshotMetaData(snapshotId, null, repositoryData.resolveIndices(indices), true);
        }
    } catch (IOException | SnapshotException ex) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read metadata for snapshot [{}]", snapshotId), ex);
    }
    try {
        // Delete snapshot from the index file, since it is the maintainer of truth of active snapshots
        final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId);
        writeIndexGen(updatedRepositoryData, repositoryStateId);
        // delete the snapshot file
        safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());
        // delete the global metadata file
        safeGlobalMetaDataBlobDelete(snapshot, snapshotId.getUUID());
        // Now delete all indices
        for (String index : indices) {
            final IndexId indexId = repositoryData.resolveIndexId(index);
            BlobPath indexPath = basePath().add("indices").add(indexId.getId());
            BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
            try {
                indexMetaDataFormat.delete(indexMetaDataBlobContainer, snapshotId.getUUID());
            } catch (IOException ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete metadata for index [{}]", snapshotId, index), ex);
            }
            if (metaData != null) {
                IndexMetaData indexMetaData = metaData.index(index);
                if (indexMetaData != null) {
                    for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
                        try {
                            delete(snapshotId, snapshot.version(), indexId, new ShardId(indexMetaData.getIndex(), shardId));
                        } catch (SnapshotException ex) {
                            final int finalShardId = shardId;
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to delete shard data for shard [{}][{}]", snapshotId, index, finalShardId), ex);
                        }
                    }
                }
            }
        }
        // cleanup indices that are no longer part of the repository
        final Collection<IndexId> indicesToCleanUp = Sets.newHashSet(repositoryData.getIndices().values());
        indicesToCleanUp.removeAll(updatedRepositoryData.getIndices().values());
        final BlobContainer indicesBlobContainer = blobStore().blobContainer(basePath().add("indices"));
        for (final IndexId indexId : indicesToCleanUp) {
            try {
                indicesBlobContainer.deleteBlob(indexId.getId());
            } catch (DirectoryNotEmptyException dnee) {
                // if the directory isn't empty for some reason, it will fail to clean up;
                // we'll ignore that and accept that cleanup didn't fully succeed.
                // since we are using UUIDs for path names, this won't be an issue for
                // snapshotting indices of the same name
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder due to the directory not being empty.", metadata.name(), indexId), dnee);
            } catch (IOException ioe) {
                // a different IOException occurred while trying to delete - will just log the issue for now
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] index [{}] no longer part of any snapshots in the repository, but failed to clean up " + "its index folder.", metadata.name(), indexId), ioe);
            }
        }
    } catch (IOException ex) {
        throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);
    }
}
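The (Supplier<?>) cast that recurs throughout these snippets selects the log4j Logger overload taking a message supplier, so the ParameterizedMessage is only built if the log level is actually enabled. Below is a minimal, self-contained sketch of that pattern; the LazyLoggingSketch class and the snap-1 id are invented for illustration and are not Elasticsearch code.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        Exception cause = new IllegalStateException("simulated failure");
        String snapshotId = "snap-1"; // hypothetical id, for illustration only
        // Without the cast, the compiler has to resolve the lambda against several
        // warn(...) overloads; the cast makes the deferred-message variant explicit,
        // so the ParameterizedMessage is constructed only when WARN is enabled.
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), cause);
    }
}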
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class BytesRestResponse, method build:
private static XContentBuilder build(RestChannel channel, RestStatus status, Exception e) throws IOException {
    ToXContent.Params params = channel.request();
    if (params.paramAsBoolean("error_trace", !REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) {
        params = new ToXContent.DelegatingMapParams(singletonMap(REST_EXCEPTION_SKIP_STACK_TRACE, "false"), params);
    } else if (e != null) {
        Supplier<?> messageSupplier = () -> new ParameterizedMessage("path: {}, params: {}", channel.request().rawPath(), channel.request().params());
        if (status.getStatus() < 500) {
            SUPPRESSED_ERROR_LOGGER.debug(messageSupplier, e);
        } else {
            SUPPRESSED_ERROR_LOGGER.warn(messageSupplier, e);
        }
    }
    XContentBuilder builder = channel.newErrorBuilder().startObject();
    ElasticsearchException.generateFailureXContent(builder, params, e, channel.detailedErrorsEnabled());
    builder.field(STATUS, status.getStatus());
    builder.endObject();
    return builder;
}
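The level selection above is worth isolating: suppressed client errors (status below 500) are logged at DEBUG to keep the logs quiet, while server errors are surfaced at WARN, and in both cases the Supplier defers message construction. A hedged, standalone sketch of that decision follows; SuppressedErrorLogging and logSuppressed are invented names, not the Elasticsearch API.

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.util.Supplier;

final class SuppressedErrorLogging {
    // 4xx: the client sent a bad request, log quietly; 5xx and up: something broke
    // server-side, make it visible. The message is only built if the level is enabled.
    static void logSuppressed(Logger logger, int httpStatus, Supplier<?> messageSupplier, Exception e) {
        if (httpStatus < 500) {
            logger.debug(messageSupplier, e);
        } else {
            logger.warn(messageSupplier, e);
        }
    }
}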
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class BlobStoreRepository, method readSnapshotMetaData:
private MetaData readSnapshotMetaData(SnapshotId snapshotId, Version snapshotVersion, List<IndexId> indices, boolean ignoreIndexErrors) throws IOException {
    MetaData metaData;
    if (snapshotVersion == null) {
        // We can try detecting the version based on the metadata file format
        assert ignoreIndexErrors;
        if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
            snapshotVersion = Version.CURRENT;
        } else {
            throw new SnapshotMissingException(metadata.name(), snapshotId);
        }
    }
    try {
        metaData = globalMetaDataFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
    } catch (NoSuchFileException ex) {
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex);
    }
    MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
    for (IndexId index : indices) {
        BlobPath indexPath = basePath().add("indices").add(index.getId());
        BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
        try {
            metaDataBuilder.put(indexMetaDataFormat.read(indexMetaDataBlobContainer, snapshotId.getUUID()), false);
        } catch (ElasticsearchParseException | IOException ex) {
            if (ignoreIndexErrors) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to read metadata for index", snapshotId, index.getName()), ex);
            } else {
                throw ex;
            }
        }
    }
    return metaDataBuilder.build();
}
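The version-probe fallback at the top of this method can be restated in isolation: when the caller does not know the snapshot's version, the presence of the current-format global metadata blob decides between "assume the current version" and "snapshot missing". The sketch below is an assumption-laden restatement, not Elasticsearch code; VersionProbeSketch, resolveSnapshotVersion, and the boolean parameter are invented for illustration.

final class VersionProbeSketch {
    static String resolveSnapshotVersion(String knownVersion, boolean currentFormatBlobExists) {
        if (knownVersion != null) {
            return knownVersion; // caller already knows the snapshot's version
        }
        if (currentFormatBlobExists) {
            return "CURRENT"; // blob is readable with the current metadata format
        }
        // stands in for SnapshotMissingException in the real method
        throw new IllegalStateException("snapshot metadata blob not found");
    }
}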
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class ZenDiscovery, method handleJoinRequest:
void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) {
    if (nodeJoinController == null) {
        throw new IllegalStateException("discovery module is not yet started");
    } else {
        // we do this in a couple of places including the cluster update thread. This one here is really just best effort
        // to ensure we fail as fast as possible.
        MembershipAction.ensureIndexCompatibility(node.getVersion().minimumIndexCompatibilityVersion(), state.getMetaData());
        // try and connect to the node, if it fails, we can raise an exception back to the client...
        transportService.connectToNode(node);
        // validate the join request, will throw a failure if it fails, which will get back to the
        // node calling the join request
        try {
            membership.sendValidateJoinRequestBlocking(node, state, joinTimeout);
        } catch (Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to validate incoming join request from node [{}]", node), e);
            callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e));
            return;
        }
        nodeJoinController.handleJoinRequest(node, callback);
    }
}
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
The class ZenDiscovery, method handleMasterGone:
private void handleMasterGone(final DiscoveryNode masterNode, final Throwable cause, final String reason) {
    if (lifecycleState() != Lifecycle.State.STARTED) {
        // not started, ignore a master failure
        return;
    }
    if (localNodeMaster()) {
        // we might get this on both a master telling us shutting down, and then the disconnect failure
        return;
    }
    logger.info((Supplier<?>) () -> new ParameterizedMessage("master_left [{}], reason [{}]", masterNode, reason), cause);
    clusterService.submitStateUpdateTask("master_failed (" + masterNode + ")", new LocalClusterUpdateTask(Priority.IMMEDIATE) {

        @Override
        public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) {
            if (!masterNode.equals(currentState.nodes().getMasterNode())) {
                // master got switched on us, no need to send anything
                return unchanged();
            }
            // flush any pending cluster states from old master, so it will not be set as master again
            publishClusterState.pendingStatesQueue().failAllStatesAndClear(new ElasticsearchException("master left [{}]", reason));
            return rejoin(currentState, "master left (reason = " + reason + ")");
        }

        @Override
        public void onFailure(String source, Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("unexpected failure during [{}]", source), e);
        }
    });
}