Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In the class Store, the method renameTempFilesSafe:
/**
* Renames all the given files from the key of the map to the
* value of the map. All successfully renamed files are removed from the map in-place.
*/
public void renameTempFilesSafe(Map<String, String> tempFileMap) throws IOException {
    // this works just like a lucene commit - we rename all temp files and once we successfully
    // renamed all the segments we rename the commit to ensure we don't leave half-baked commits behind.
    final Map.Entry<String, String>[] entries = tempFileMap.entrySet().toArray(new Map.Entry[tempFileMap.size()]);
    ArrayUtil.timSort(entries, new Comparator<Map.Entry<String, String>>() {
        @Override
        public int compare(Map.Entry<String, String> o1, Map.Entry<String, String> o2) {
            String left = o1.getValue();
            String right = o2.getValue();
            if (left.startsWith(IndexFileNames.SEGMENTS) || right.startsWith(IndexFileNames.SEGMENTS)) {
                if (left.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return -1;
                } else if (right.startsWith(IndexFileNames.SEGMENTS) == false) {
                    return 1;
                }
            }
            return left.compareTo(right);
        }
    });
    metadataLock.writeLock().lock();
    // get exceptions if files are still open.
    try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (Map.Entry<String, String> entry : entries) {
            String tempFile = entry.getKey();
            String origFile = entry.getValue();
            // first, go and delete the existing ones
            try {
                directory.deleteFile(origFile);
            } catch (FileNotFoundException | NoSuchFileException e) {
                // ignore - the target file was not there, which is what we want
            } catch (Exception ex) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", origFile), ex);
            }
            // now, rename the files... and fail if it won't work
            directory.rename(tempFile, origFile);
            final String remove = tempFileMap.remove(tempFile);
            assert remove != null;
        }
        directory.syncMetaData();
    } finally {
        metadataLock.writeLock().unlock();
    }
}
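Every snippet on this page uses the same lazy-logging idiom: casting the lambda to (Supplier<?>) selects the Logger.debug(Supplier<?>, Throwable) overload, so the ParameterizedMessage is only constructed when the debug level is actually enabled. A minimal standalone sketch (the class and file name are illustrative, not from the Elasticsearch source):

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

public class LazyLoggingExample {
    private static final Logger logger = LogManager.getLogger(LazyLoggingExample.class);

    void deleteQuietly(String file, Exception ex) {
        // the cast picks debug(Supplier<?>, Throwable); the message object is
        // built lazily, only if debug logging is enabled for this logger
        logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", file), ex);
    }
}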
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In the class Store, the method cleanupAndVerify:
/**
 * This method deletes every file in this store that is neither contained in the given source metadata
 * nor a legacy checksum file. After the deletes it pulls the latest metadata snapshot from the store and
 * compares it to the given snapshot. If the snapshots are inconsistent an IllegalStateException is thrown.
 *
 * @param reason         the reason for this cleanup operation logged for each deleted file
 * @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around.
 * @throws IOException           if an IOException occurs
 * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup.
 */
public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
    metadataLock.writeLock().lock();
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        final StoreDirectory dir = directory;
        for (String existingFile : dir.listAll()) {
            if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
                continue;
            }
            try {
                dir.deleteFile(reason, existingFile);
                // FNF should not happen since we hold a write lock?
            } catch (IOException ex) {
                if (existingFile.startsWith(IndexFileNames.SEGMENTS) || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                    // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                    throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                }
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
                // ignore, we don't really care, will get deleted later on
            }
        }
        final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
        verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
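The deletion policy above - tolerate failures for ordinary files but fail hard when a commit file cannot be removed - can be sketched in isolation. This is a hypothetical standalone version using java.nio.file rather than the Lucene Directory API; the "segments" prefix stands in for IndexFileNames.SEGMENTS:

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Set;

public class CleanupSketch {
    // delete every file in dir that is not in keep; a failed delete is fatal
    // only for commit files, since a stale commit point must not survive
    static void cleanup(Path dir, Set<String> keep) throws IOException {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir)) {
            for (Path file : stream) {
                String name = file.getFileName().toString();
                if (keep.contains(name)) {
                    continue;
                }
                try {
                    Files.delete(file);
                } catch (IOException ex) {
                    if (name.startsWith("segments")) {
                        throw new IllegalStateException("Can't delete " + name + " - cleanup failed", ex);
                    }
                    // ignore: a leftover non-commit file can be deleted later
                }
            }
        }
    }
}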
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In the class RecoveryTarget, the method closeInternal:
@Override
protected void closeInternal() {
    try {
        // clean open index outputs
        Iterator<Entry<String, IndexOutput>> iterator = openIndexOutputs.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, IndexOutput> entry = iterator.next();
            logger.trace("closing IndexOutput file [{}]", entry.getValue());
            try {
                entry.getValue().close();
            } catch (Exception e) {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("error while closing recovery output [{}]", entry.getValue()), e);
            }
            iterator.remove();
        }
        // trash temporary files
        for (String file : tempFileNames.keySet()) {
            logger.trace("cleaning temporary file [{}]", file);
            store.deleteQuiet(file);
        }
    } finally {
        // free store. increment happens in constructor
        store.decRef();
        indexShard.recoveryStats().decCurrentAsTarget();
        closedLatch.countDown();
    }
}
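The drain-and-close loop above - close each tracked resource, log but swallow individual failures, and always remove the entry - generalizes to any map of Closeables. A minimal generic sketch (the class and method names are illustrative):

import java.io.Closeable;
import java.util.Iterator;
import java.util.Map;

public class DrainCloseSketch {
    // hypothetical generic version of the loop above: close every tracked
    // resource, tolerating individual failures, and empty the map as we go
    static <C extends Closeable> void closeAll(Map<String, C> open) {
        Iterator<Map.Entry<String, C>> it = open.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, C> entry = it.next();
            try {
                entry.getValue().close();
            } catch (Exception e) {
                // swallow: one failing resource must not prevent closing the rest
            }
            it.remove();
        }
    }
}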
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In the class TransportShardMultiTermsVectorAction, the method shardOperation:
@Override
protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequest request, ShardId shardId) {
    final MultiTermVectorsShardResponse response = new MultiTermVectorsShardResponse();
    final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    final IndexShard indexShard = indexService.getShard(shardId.id());
    for (int i = 0; i < request.locations.size(); i++) {
        TermVectorsRequest termVectorsRequest = request.requests.get(i);
        try {
            TermVectorsResponse termVectorsResponse = TermVectorsService.getTermVectors(indexShard, termVectorsRequest);
            response.add(request.locations.get(i), termVectorsResponse);
        } catch (Exception t) {
            if (TransportActions.isShardNotAvailableException(t)) {
                throw (ElasticsearchException) t;
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]",
                    shardId, termVectorsRequest.type(), termVectorsRequest.id()), t);
                response.add(request.locations.get(i),
                    new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), t));
            }
        }
    }
    return response;
}
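The per-item error policy above - rethrow only shard-not-available failures so the whole request fails, and record everything else as an item-level failure - can be sketched generically. The Result type and parameter names below are hypothetical stand-ins, not Elasticsearch types:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;

public class PerItemErrorSketch {
    record Result<O>(O value, Exception failure) {}

    // hypothetical generic version of the loop above: a "fatal" exception
    // aborts the whole batch, anything else becomes a per-item failure entry
    static <I, O> List<Result<O>> executeBatch(List<I> items, Function<I, O> op, Predicate<Exception> fatal) {
        List<Result<O>> results = new ArrayList<>();
        for (I item : items) {
            try {
                results.add(new Result<>(op.apply(item), null));
            } catch (RuntimeException e) {
                if (fatal.test(e)) {
                    throw e; // abort: analogous to rethrowing the shard-not-available exception
                }
                results.add(new Result<>(null, e)); // record an item-level failure
            }
        }
        return results;
    }
}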
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In the class RepositoriesService, the method applyClusterState:
/**
 * Checks if new repositories appeared in or disappeared from cluster metadata and updates the current
 * list of repositories accordingly.
 *
 * @param event cluster changed event
 */
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        RepositoriesMetaData oldMetaData = event.previousState().getMetaData().custom(RepositoriesMetaData.TYPE);
        RepositoriesMetaData newMetaData = event.state().getMetaData().custom(RepositoriesMetaData.TYPE);
        // Check if repositories got changed
        if ((oldMetaData == null && newMetaData == null) || (oldMetaData != null && oldMetaData.equals(newMetaData))) {
            return;
        }
        logger.trace("processing new index repositories for state version [{}]", event.state().version());
        Map<String, Repository> survivors = new HashMap<>();
        // First, remove repositories that are no longer there
        for (Map.Entry<String, Repository> entry : repositories.entrySet()) {
            if (newMetaData == null || newMetaData.repository(entry.getKey()) == null) {
                logger.debug("unregistering repository [{}]", entry.getKey());
                closeRepository(entry.getValue());
            } else {
                survivors.put(entry.getKey(), entry.getValue());
            }
        }
        Map<String, Repository> builder = new HashMap<>();
        if (newMetaData != null) {
            // Now go through all repositories and update existing or create missing
            for (RepositoryMetaData repositoryMetaData : newMetaData.repositories()) {
                Repository repository = survivors.get(repositoryMetaData.name());
                if (repository != null) {
                    // Found previous version of this repository
                    RepositoryMetaData previousMetadata = repository.getMetadata();
                    if (previousMetadata.type().equals(repositoryMetaData.type()) == false
                            || previousMetadata.settings().equals(repositoryMetaData.settings()) == false) {
                        // Previous version is different from the version in settings
                        logger.debug("updating repository [{}]", repositoryMetaData.name());
                        closeRepository(repository);
                        repository = null;
                        try {
                            repository = createRepository(repositoryMetaData);
                        } catch (RepositoryException ex) {
                            // TODO: this catch is bogus, it means the old repo is already closed,
                            // but we have nothing to replace it
                            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to change repository [{}]",
                                repositoryMetaData.name()), ex);
                        }
                    }
                } else {
                    try {
                        repository = createRepository(repositoryMetaData);
                    } catch (RepositoryException ex) {
                        logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to create repository [{}]",
                            repositoryMetaData.name()), ex);
                    }
                }
                if (repository != null) {
                    logger.debug("registering repository [{}]", repositoryMetaData.name());
                    builder.put(repositoryMetaData.name(), repository);
                }
            }
        }
        repositories = Collections.unmodifiableMap(builder);
    } catch (Exception ex) {
        logger.warn("failure updating cluster state ", ex);
    }
}
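The reconcile pattern above - close what disappeared, recreate what changed, keep what is identical, and publish an immutable result - can be sketched with hypothetical stand-in types (Handle and its config() method below are illustrative, not the Repository API):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class ReconcileSketch {
    // hypothetical stand-in for Repository / RepositoryMetaData
    interface Handle {
        String config();
        void close();
    }

    // rebuild the registry so it mirrors `desired`: close removed or changed
    // handles, create missing ones, and carry unchanged ones over as-is
    static Map<String, Handle> reconcile(Map<String, Handle> current,
                                         Map<String, String> desired,
                                         Function<String, Handle> factory) {
        Map<String, Handle> next = new HashMap<>();
        for (Map.Entry<String, Handle> entry : current.entrySet()) {
            String wanted = desired.get(entry.getKey());
            if (wanted == null || wanted.equals(entry.getValue().config()) == false) {
                entry.getValue().close(); // removed or changed: close the old handle
            } else {
                next.put(entry.getKey(), entry.getValue()); // unchanged survivor
            }
        }
        for (Map.Entry<String, String> entry : desired.entrySet()) {
            if (next.containsKey(entry.getKey()) == false) {
                try {
                    next.put(entry.getKey(), factory.apply(entry.getValue()));
                } catch (RuntimeException e) {
                    // tolerate a failed creation, mirroring the RepositoryException handling above
                }
            }
        }
        return Collections.unmodifiableMap(next);
    }
}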