Use of java.util.function.Supplier in project elasticsearch by elastic.
The class ClusterModule, method createShardsAllocator.
private static ShardsAllocator createShardsAllocator(Settings settings, ClusterSettings clusterSettings, List<ClusterPlugin> clusterPlugins) {
    Map<String, Supplier<ShardsAllocator>> allocators = new HashMap<>();
    allocators.put(BALANCED_ALLOCATOR, () -> new BalancedShardsAllocator(settings, clusterSettings));
    for (ClusterPlugin plugin : clusterPlugins) {
        plugin.getShardsAllocators(settings, clusterSettings).forEach((k, v) -> {
            if (allocators.put(k, v) != null) {
                throw new IllegalArgumentException("ShardsAllocator [" + k + "] already defined");
            }
        });
    }
    String allocatorName = SHARDS_ALLOCATOR_TYPE_SETTING.get(settings);
    Supplier<ShardsAllocator> allocatorSupplier = allocators.get(allocatorName);
    if (allocatorSupplier == null) {
        throw new IllegalArgumentException("Unknown ShardsAllocator [" + allocatorName + "]");
    }
    return Objects.requireNonNull(allocatorSupplier.get(), "ShardsAllocator factory for [" + allocatorName + "] returned null");
}
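Each map value is a Supplier<ShardsAllocator> rather than an allocator instance, so only the allocator whose name matches SHARDS_ALLOCATOR_TYPE_SETTING is ever constructed. A minimal sketch of how a ClusterPlugin could feed entries into that map (the plugin class and the "my_allocator" key are hypothetical; only the getShardsAllocators hook exercised by createShardsAllocator above is assumed, and the sketch reuses BalancedShardsAllocator from that snippet):

import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

// Hypothetical plugin: registers the balanced allocator under a custom name.
// The Supplier defers construction until this allocator is actually selected.
public class MyAllocatorPlugin extends Plugin implements ClusterPlugin {

    @Override
    public Map<String, Supplier<ShardsAllocator>> getShardsAllocators(Settings settings, ClusterSettings clusterSettings) {
        return Collections.singletonMap("my_allocator", () -> new BalancedShardsAllocator(settings, clusterSettings));
    }
}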
Use of java.util.function.Supplier in project elasticsearch by elastic.
The class IndicesService, method processPendingDeletes.
/**
 * Processes all pending deletes for the given index. This method will acquire all locks for the given index and will
 * process all pending deletes for this index. Pending deletes might occur if the OS doesn't allow deletion of files because
 * they are used by a different process, i.e. on Windows where files might still be open by a virus scanner. On a shared
 * filesystem a replica might not have been closed when the primary is deleted, causing problems on delete calls, so we
 * schedule their deletes for later.
 * @param index the index to process the pending deletes for
 * @param indexSettings the settings of the index whose pending deletes are processed
 * @param timeout the timeout used for processing pending deletes
 */
@Override
public void processPendingDeletes(Index index, IndexSettings indexSettings, TimeValue timeout) throws IOException, InterruptedException, ShardLockObtainFailedException {
    logger.debug("{} processing pending deletes", index);
    final long startTimeNS = System.nanoTime();
    final List<ShardLock> shardLocks = nodeEnv.lockAllForIndex(index, indexSettings, timeout.millis());
    int numRemoved = 0;
    try {
        Map<ShardId, ShardLock> locks = new HashMap<>();
        for (ShardLock lock : shardLocks) {
            locks.put(lock.getShardId(), lock);
        }
        final List<PendingDelete> remove;
        synchronized (pendingDeletes) {
            remove = pendingDeletes.remove(index);
        }
        if (remove != null && remove.isEmpty() == false) {
            numRemoved = remove.size();
            // make sure we delete indices first
            CollectionUtil.timSort(remove);
            // ensure we retry after 10 sec
            final long maxSleepTimeMs = 10 * 1000;
            long sleepTime = 10;
            do {
                if (remove.isEmpty()) {
                    break;
                }
                Iterator<PendingDelete> iterator = remove.iterator();
                while (iterator.hasNext()) {
                    PendingDelete delete = iterator.next();
                    if (delete.deleteIndex) {
                        assert delete.shardId == -1;
                        logger.debug("{} deleting index store reason [{}]", index, "pending delete");
                        try {
                            nodeEnv.deleteIndexDirectoryUnderLock(index, indexSettings);
                            iterator.remove();
                        } catch (IOException ex) {
                            logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", index), ex);
                        }
                    } else {
                        assert delete.shardId != -1;
                        ShardLock shardLock = locks.get(new ShardId(delete.index, delete.shardId));
                        if (shardLock != null) {
                            try {
                                deleteShardStore("pending delete", shardLock, delete.settings);
                                iterator.remove();
                            } catch (IOException ex) {
                                logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} retry pending delete", shardLock.getShardId()), ex);
                            }
                        } else {
                            logger.warn("{} no shard lock for pending delete", delete.shardId);
                            iterator.remove();
                        }
                    }
                }
                if (remove.isEmpty() == false) {
                    logger.warn("{} still pending deletes present for shards {} - retrying", index, remove.toString());
                    Thread.sleep(sleepTime);
                    // increase the sleep time gradually
                    sleepTime = Math.min(maxSleepTimeMs, sleepTime * 2);
                    logger.debug("{} schedule pending delete retry after {} ms", index, sleepTime);
                }
            } while ((System.nanoTime() - startTimeNS) < timeout.nanos());
        }
    } finally {
        IOUtils.close(shardLocks);
        if (numRemoved > 0) {
            int remainingUncompletedDeletes = numUncompletedDeletes.addAndGet(-numRemoved);
            assert remainingUncompletedDeletes >= 0;
        }
    }
}
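In the catch blocks above, casting the lambda to Supplier<?> pins it to the logger's supplier-accepting debug overload, so the ParameterizedMessage is only constructed when debug logging is actually enabled. A standalone sketch of that deferral using plain java.util.function.Supplier (the helper class below is illustrative, not Elasticsearch code):

import java.util.function.Supplier;

class LazyLogSketch {

    static void debug(boolean debugEnabled, Supplier<String> message) {
        if (debugEnabled) {
            System.out.println(message.get()); // the message is built only here
        }
    }

    public static void main(String[] args) {
        debug(false, () -> String.format("[%s] retry pending delete", "my-index")); // never formatted
        debug(true, () -> String.format("[%s] retry pending delete", "my-index"));  // formatted and printed
    }
}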
Use of java.util.function.Supplier in project elasticsearch by elastic.
The class ActionModuleTests, method testPluginCantOverwriteBuiltinRestHandler.
public void testPluginCantOverwriteBuiltinRestHandler() throws IOException {
    ActionPlugin dupsMainAction = new ActionPlugin() {

        @Override
        public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster) {
            return singletonList(new RestMainAction(settings, restController));
        }
    };
    SettingsModule settings = new SettingsModule(Settings.EMPTY);
    ThreadPool threadPool = new TestThreadPool(getTestName());
    try {
        ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(Settings.EMPTY), settings.getIndexScopedSettings(), settings.getClusterSettings(), settings.getSettingsFilter(), threadPool, singletonList(dupsMainAction), null, null);
        Exception e = expectThrows(IllegalArgumentException.class, () -> actionModule.initRestHandlers(null));
        assertThat(e.getMessage(), startsWith("Path [/] already has a value [" + RestMainAction.class.getName()));
    } finally {
        threadPool.shutdown();
    }
}
Use of java.util.function.Supplier in project elasticsearch by elastic.
The class ActionModuleTests, method testPluginCanRegisterRestHandler.
public void testPluginCanRegisterRestHandler() {
    class FakeHandler implements RestHandler {

        FakeHandler(RestController restController) {
            restController.registerHandler(Method.GET, "/_dummy", this);
        }

        @Override
        public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
        }
    }
    ActionPlugin registersFakeHandler = new ActionPlugin() {

        @Override
        public List<RestHandler> getRestHandlers(Settings settings, RestController restController, ClusterSettings clusterSettings, IndexScopedSettings indexScopedSettings, SettingsFilter settingsFilter, IndexNameExpressionResolver indexNameExpressionResolver, Supplier<DiscoveryNodes> nodesInCluster) {
            return singletonList(new FakeHandler(restController));
        }
    };
    SettingsModule settings = new SettingsModule(Settings.EMPTY);
    ThreadPool threadPool = new TestThreadPool(getTestName());
    try {
        ActionModule actionModule = new ActionModule(false, settings.getSettings(), new IndexNameExpressionResolver(Settings.EMPTY), settings.getIndexScopedSettings(), settings.getClusterSettings(), settings.getSettingsFilter(), threadPool, singletonList(registersFakeHandler), null, null);
        actionModule.initRestHandlers(null);
        // At this point the easiest way to confirm that a handler is loaded is to try to register another one on top of it and to fail
        Exception e = expectThrows(IllegalArgumentException.class, () -> actionModule.getRestController().registerHandler(Method.GET, "/_dummy", null));
        assertThat(e.getMessage(), startsWith("Path [/_dummy] already has a value [" + FakeHandler.class.getName()));
    } finally {
        threadPool.shutdown();
    }
}
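Both tests hand getRestHandlers a Supplier<DiscoveryNodes> (nodesInCluster) rather than the nodes themselves, presumably because handlers are constructed before the cluster membership is known and that membership keeps changing. A hypothetical handler showing how such a supplier would typically be captured and resolved per request (DummyNodesHandler is invented; only the RestHandler and getRestHandlers signatures from the tests above are assumed):

// Hypothetical handler: stores the Supplier and defers DiscoveryNodes resolution
// until a request arrives, when the current cluster membership is actually needed.
class DummyNodesHandler implements RestHandler {

    private final Supplier<DiscoveryNodes> nodesInCluster;

    DummyNodesHandler(Supplier<DiscoveryNodes> nodesInCluster) {
        this.nodesInCluster = nodesInCluster;
    }

    @Override
    public void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception {
        DiscoveryNodes nodes = nodesInCluster.get(); // resolved lazily, per request
        // ... build a response from nodes ...
    }
}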
Use of java.util.function.Supplier in project OpenGrok by OpenGrok.
The class GitRepository, method getHistoryGet.
@Override
public InputStream getHistoryGet(String parent, String basename, String rev) {
    String fullpath;
    try {
        fullpath = new File(parent, basename).getCanonicalPath();
    } catch (IOException exp) {
        LOGGER.log(Level.SEVERE, exp, new Supplier<String>() {

            @Override
            public String get() {
                return String.format("Failed to get canonical path: %s/%s", parent, basename);
            }
        });
        return null;
    }
    InputStream ret = getHistoryRev(fullpath, rev);
    if (ret == null) {
        /*
         * If we failed to get the contents it might be that the file was
         * renamed so we need to find its original name in that revision
         * and retry with the original name.
         */
        String origpath;
        try {
            origpath = findOriginalName(fullpath, rev);
        } catch (IOException exp) {
            LOGGER.log(Level.SEVERE, exp, new Supplier<String>() {

                @Override
                public String get() {
                    return String.format("Failed to get original revision: %s/%s (revision %s)", parent, basename, rev);
                }
            });
            return null;
        }
        if (origpath != null) {
            ret = getHistoryRev(origpath, rev);
        }
    }
    return ret;
}
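Since java.util.logging's Logger.log(Level, Throwable, Supplier<String>) accepts a java.util.function.Supplier, the anonymous classes above can be collapsed into lambdas; either way, String.format only runs if the SEVERE level is loggable. A minimal equivalent sketch (the wrapper class and method are illustrative):

import java.util.logging.Level;
import java.util.logging.Logger;

class LambdaLoggingSketch {

    private static final Logger LOGGER = Logger.getLogger(LambdaLoggingSketch.class.getName());

    static void logCanonicalPathFailure(String parent, String basename, Exception exp) {
        // Same call as in getHistoryGet, written as a lambda; the message is built lazily.
        LOGGER.log(Level.SEVERE, exp, () -> String.format("Failed to get canonical path: %s/%s", parent, basename));
    }
}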