Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In class Translog, method recoverFromFiles.
/** recover all translog files found on disk */
private ArrayList<TranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
    boolean success = false;
    ArrayList<TranslogReader> foundTranslogs = new ArrayList<>();
    // a temp file to copy the checkpoint to - note it must be on the same FS, otherwise the atomic move won't work
    final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
    boolean tempFileRenamed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        logger.debug("open uncommitted translog checkpoint {}", checkpoint);
        final String checkpointTranslogFile = getFilename(checkpoint.generation);
        for (long i = translogGeneration.translogFileGeneration; i < checkpoint.generation; i++) {
            Path committedTranslogFile = location.resolve(getFilename(i));
            if (Files.exists(committedTranslogFile) == false) {
                throw new IllegalStateException("translog file doesn't exist with generation: " + i + " lastCommitted: " + lastCommittedTranslogFileGeneration + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
            }
            final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
            foundTranslogs.add(reader);
            logger.debug("recovered local translog from checkpoint {}", checkpoint);
        }
        foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint));
        Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
        if (Files.exists(commitCheckpoint)) {
            Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
            if (checkpoint.equals(checkpointFromDisk) == false) {
                throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
            }
        } else {
            // we first copy this into the temp file and then fsync it, followed by an atomic move into the target file;
            // that way, if we hit a disk-full here, we are still in a consistent state.
            Files.copy(location.resolve(CHECKPOINT_FILE_NAME), tempFile, StandardCopyOption.REPLACE_EXISTING);
            IOUtils.fsync(tempFile, false);
            Files.move(tempFile, commitCheckpoint, StandardCopyOption.ATOMIC_MOVE);
            tempFileRenamed = true;
            // we only fsync the directory; the tempFile was already fsynced
            IOUtils.fsync(commitCheckpoint.getParent(), true);
        }
        success = true;
    } finally {
        if (success == false) {
            IOUtils.closeWhileHandlingException(foundTranslogs);
        }
        if (tempFileRenamed == false) {
            try {
                Files.delete(tempFile);
            } catch (IOException ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
            }
        }
    }
    return foundTranslogs;
}
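The warn call in the finally block above is the lazy-logging idiom this page collects: the (Supplier<?>) cast selects Logger.warn(Supplier<?>, Throwable), so the ParameterizedMessage is only built if WARN is actually enabled. Below is a minimal self-contained sketch of that idiom; the class and helper names (LazyLoggingSketch, deleteQuietly) are illustrative and not from Elasticsearch.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;

class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    // Best-effort delete that logs a warning lazily on failure.
    static void deleteQuietly(Path tempFile) {
        try {
            Files.deleteIfExists(tempFile);
        } catch (IOException ex) {
            // the cast picks warn(Supplier<?>, Throwable) over the MessageSupplier overload,
            // which a bare lambda would make ambiguous; the message is only created when WARN is enabled
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("failed to delete temp file {}", tempFile), ex);
        }
    }
}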
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In class ClusterServiceIT, method testAckedUpdateTaskSameClusterState.
public void testAckedUpdateTaskSameClusterState() throws Exception {
    internalCluster().startNode();
    ClusterService clusterService = internalCluster().getInstance(ClusterService.class);
    final AtomicBoolean allNodesAcked = new AtomicBoolean(false);
    final AtomicBoolean ackTimeout = new AtomicBoolean(false);
    final AtomicBoolean onFailure = new AtomicBoolean(false);
    final AtomicBoolean executed = new AtomicBoolean(false);
    final CountDownLatch latch = new CountDownLatch(1);
    final CountDownLatch processedLatch = new CountDownLatch(1);
    clusterService.submitStateUpdateTask("test", new AckedClusterStateUpdateTask<Void>(null, null) {
        @Override
        protected Void newResponse(boolean acknowledged) {
            return null;
        }
        @Override
        public void onAllNodesAcked(@Nullable Exception e) {
            allNodesAcked.set(true);
            latch.countDown();
        }
        @Override
        public void onAckTimeout() {
            ackTimeout.set(true);
            latch.countDown();
        }
        @Override
        public TimeValue ackTimeout() {
            return TimeValue.timeValueSeconds(10);
        }
        @Override
        public TimeValue timeout() {
            return TimeValue.timeValueSeconds(10);
        }
        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            processedLatch.countDown();
        }
        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            executed.set(true);
            return currentState;
        }
        @Override
        public void onFailure(String source, Exception e) {
            logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to execute callback in test {}", source), e);
            onFailure.set(true);
            latch.countDown();
        }
    });
    ensureGreen();
    assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true));
    assertThat(allNodesAcked.get(), equalTo(true));
    assertThat(ackTimeout.get(), equalTo(false));
    assertThat(executed.get(), equalTo(true));
    assertThat(onFailure.get(), equalTo(false));
    assertThat(processedLatch.await(1, TimeUnit.SECONDS), equalTo(true));
}
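The test coordinates the asynchronous cluster-state task with AtomicBoolean flags and CountDownLatch instances, then asserts on them after awaiting the latch. The sketch below shows that coordination shape in isolation, with the cluster-state update replaced by a plain executor task; all names here are illustrative and none are Elasticsearch APIs.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class LatchCoordinationSketch {
    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean executed = new AtomicBoolean(false);
        final AtomicBoolean failed = new AtomicBoolean(false);
        final CountDownLatch latch = new CountDownLatch(1);
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> {
            try {
                executed.set(true);   // stands in for the execute(...) callback
            } catch (Exception e) {
                failed.set(true);     // stands in for the onFailure(...) callback
            } finally {
                latch.countDown();    // stands in for the acked / ack-timeout / failure notification
            }
        });
        // the assertions in the test correspond to these checks
        boolean completed = latch.await(1, TimeUnit.SECONDS);
        System.out.println("completed=" + completed + ", executed=" + executed.get() + ", failed=" + failed.get());
        executor.shutdown();
    }
}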
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In class RemoteClusterService, method processRemoteShards.
Function<String, Transport.Connection> processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses, List<ShardIterator> remoteShardIterators, Map<String, AliasFilter> aliasFilterMap) {
    Map<String, Supplier<Transport.Connection>> nodeToCluster = new HashMap<>();
    for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
        String clusterName = entry.getKey();
        ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
        for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
            nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterName));
        }
        Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
        for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
            // add the cluster name to the remote index names for indices disambiguation
            // this ends up in the hits returned with the search response
            ShardId shardId = clusterSearchShardsGroup.getShardId();
            Index remoteIndex = shardId.getIndex();
            Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID());
            ShardIterator shardIterator = new PlainShardIterator(new ShardId(index, shardId.getId()), Arrays.asList(clusterSearchShardsGroup.getShards()));
            remoteShardIterators.add(shardIterator);
            AliasFilter aliasFilter;
            if (indicesAndFilters == null) {
                aliasFilter = new AliasFilter(null, Strings.EMPTY_ARRAY);
            } else {
                aliasFilter = indicesAndFilters.get(shardId.getIndexName());
                assert aliasFilter != null;
            }
            // here we have to map the filters to the UUID since from now on we use the uuid for the lookup
            aliasFilterMap.put(remoteIndex.getUUID(), aliasFilter);
        }
    }
    return (nodeId) -> {
        Supplier<Transport.Connection> supplier = nodeToCluster.get(nodeId);
        if (supplier == null) {
            throw new IllegalArgumentException("unknown remote node: " + nodeId);
        }
        return supplier.get();
    };
}
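processRemoteShards ends by wrapping a map of lazy connection factories in a Function that fails fast on unknown node ids. Here is a generic sketch of that pattern, using java.util.function.Supplier rather than the log4j Supplier interface used above; the class and method names are illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;
import java.util.function.Supplier;

class LazyResolverSketch {
    // Turn a map of lazy factories into a resolver that throws on unknown keys.
    static <T> Function<String, T> resolver(Map<String, Supplier<T>> factories) {
        // defensive copy so later changes to the input map don't affect the resolver
        Map<String, Supplier<T>> byKey = new HashMap<>(factories);
        return key -> {
            Supplier<T> supplier = byKey.get(key);
            if (supplier == null) {
                throw new IllegalArgumentException("unknown remote node: " + key);
            }
            return supplier.get();   // the value is only produced on demand
        };
    }
}

Deferring supplier.get() until lookup time means a connection is only resolved when a shard on that node is actually queried, which is the point of storing suppliers instead of connections.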
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In class ClientScrollableHitSource, method clearScroll.
@Override
public void clearScroll(String scrollId, Runnable onCompletion) {
    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
    clearScrollRequest.addScrollId(scrollId);
    /*
     * Unwrap the client so we don't set our task as the parent. If we *did* set our ID then the clear scroll would be cancelled as
     * if this task is cancelled. But we want to clear the scroll regardless of whether or not the main request was cancelled.
     */
    client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
        @Override
        public void onResponse(ClearScrollResponse response) {
            logger.debug("Freed [{}] contexts", response.getNumFreed());
            onCompletion.run();
        }
        @Override
        public void onFailure(Exception e) {
            logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to clear scroll [{}]", scrollId), e);
            onCompletion.run();
        }
    });
}
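clearScroll runs onCompletion from both onResponse and onFailure, so the caller's cleanup happens no matter how the clear-scroll call ends. A generic sketch of that "always run a completion hook" wrapper follows, with a hypothetical Listener interface standing in for Elasticsearch's ActionListener.

// Illustrative stand-in for an async result listener.
interface Listener<T> {
    void onResponse(T response);
    void onFailure(Exception e);
}

class CleanupCallbackSketch {
    // Wrap a listener so onCompletion runs after either outcome.
    static <T> Listener<T> runAfter(Listener<T> delegate, Runnable onCompletion) {
        return new Listener<T>() {
            @Override
            public void onResponse(T response) {
                try {
                    delegate.onResponse(response);
                } finally {
                    onCompletion.run();   // cleanup on success...
                }
            }
            @Override
            public void onFailure(Exception e) {
                try {
                    delegate.onFailure(e);
                } finally {
                    onCompletion.run();   // ...and on failure
                }
            }
        };
    }
}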
Use of org.apache.logging.log4j.util.Supplier in project elasticsearch by elastic.
In class HunspellService, method loadDictionary.
/**
 * Loads the hunspell dictionary for the given locale.
 *
 * @param locale       The locale of the hunspell dictionary to be loaded.
 * @param nodeSettings The node level settings
 * @param env          The node environment (from which the conf path will be resolved)
 * @return The loaded Hunspell dictionary
 * @throws Exception when loading fails (due to IO errors or malformed dictionary files)
 */
private Dictionary loadDictionary(String locale, Settings nodeSettings, Environment env) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("Loading hunspell dictionary [{}]...", locale);
    }
    Path dicDir = hunspellDir.resolve(locale);
    if (FileSystemUtils.isAccessibleDirectory(dicDir, logger) == false) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale));
    }
    // merge node settings with hunspell-dictionary-specific settings
    Settings dictSettings = HUNSPELL_DICTIONARY_OPTIONS.get(nodeSettings);
    nodeSettings = loadDictionarySettings(dicDir, dictSettings.getByPrefix(locale + "."));
    boolean ignoreCase = nodeSettings.getAsBoolean("ignore_case", defaultIgnoreCase);
    Path[] affixFiles = FileSystemUtils.files(dicDir, "*.aff");
    if (affixFiles.length == 0) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale));
    }
    if (affixFiles.length != 1) {
        throw new ElasticsearchException(String.format(Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale));
    }
    InputStream affixStream = null;
    Path[] dicFiles = FileSystemUtils.files(dicDir, "*.dic");
    List<InputStream> dicStreams = new ArrayList<>(dicFiles.length);
    try {
        for (int i = 0; i < dicFiles.length; i++) {
            dicStreams.add(Files.newInputStream(dicFiles[i]));
        }
        affixStream = Files.newInputStream(affixFiles[0]);
        try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
            return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
        }
    } catch (Exception e) {
        logger.error((Supplier<?>) () -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e);
        throw e;
    } finally {
        IOUtils.close(affixStream);
        IOUtils.close(dicStreams);
    }
}
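The method opens one stream per matched dictionary file and relies on the finally block to close everything, whether the Dictionary is built successfully or the catch block rethrows. Below is a simplified sketch of that resource-handling shape in plain java.nio; the names are illustrative, and the close loop only approximates Lucene's IOUtils.close, which additionally rethrows the first close failure.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

class DictionaryStreamsSketch {
    // Open every *.dic stream in dicDir, hand them to the consumer, and close them all afterwards.
    static void withDicStreams(Path dicDir, Consumer<List<InputStream>> consumer) throws IOException {
        List<InputStream> dicStreams = new ArrayList<>();
        try (DirectoryStream<Path> dicFiles = Files.newDirectoryStream(dicDir, "*.dic")) {
            for (Path dicFile : dicFiles) {
                dicStreams.add(Files.newInputStream(dicFile));
            }
            consumer.accept(dicStreams);
        } finally {
            for (InputStream in : dicStreams) {
                try {
                    in.close();   // best-effort close of each opened stream
                } catch (IOException e) {
                    // swallowed here for simplicity; IOUtils.close would rethrow the first failure
                }
            }
        }
    }
}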