Use of java.util.function.Consumer in project vert.x by eclipse.
The class NetTest, method setHandlers:
void setHandlers(NetSocket sock) {
    // Resume the paused socket whenever a message arrives on "client_resume"
    Handler<Message<Buffer>> resumeHandler = m -> sock.resume();
    MessageConsumer<Buffer> reg = vertx.eventBus().<Buffer>consumer("client_resume").handler(resumeHandler);
    // Clean up the event-bus registration when the socket closes
    sock.closeHandler(v -> reg.unregister());
}
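The registration above is only half of the exchange: some peer has to publish to the "client_resume" address once it wants the socket reading again. A minimal sketch of that counterpart, assuming the standard Vert.x core API; the applyBackpressure method and its trigger are illustrative, not part of the original test:

    // Hypothetical counterpart to setHandlers(): pause the socket under
    // back-pressure, then signal the registered handler to resume it.
    void applyBackpressure(Vertx vertx, NetSocket sock) {
        sock.pause(); // stop delivering data events for now
        // ... drain whatever queue caused the back-pressure ...
        // The payload does not matter; resumeHandler only reacts to the event itself.
        vertx.eventBus().send("client_resume", Buffer.buffer());
    }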
Use of java.util.function.Consumer in project java-design-patterns by iluwatar.
The class DbCustomerDao, method getAll:
/**
 * @return a lazily populated stream of customers. Note that the returned stream must be closed
 *         to free all acquired resources: it keeps an open connection to the database until it
 *         is exhausted or closed manually.
 */
@Override
public Stream<Customer> getAll() throws Exception {
    Connection connection;
    try {
        connection = getConnection();
        PreparedStatement statement = connection.prepareStatement("SELECT * FROM CUSTOMERS");
        ResultSet resultSet = statement.executeQuery();
        // Wrap the ResultSet in a Spliterator so each tryAdvance() pulls one row lazily
        return StreamSupport.stream(
                new Spliterators.AbstractSpliterator<Customer>(Long.MAX_VALUE, Spliterator.ORDERED) {
                    @Override
                    public boolean tryAdvance(Consumer<? super Customer> action) {
                        try {
                            if (!resultSet.next()) {
                                return false;
                            }
                            action.accept(createCustomer(resultSet));
                            return true;
                        } catch (SQLException e) {
                            throw new RuntimeException(e);
                        }
                    }
                }, false).onClose(() -> mutedClose(connection));
    } catch (SQLException e) {
        throw new Exception(e.getMessage(), e);
    }
}
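As the Javadoc stresses, the stream holds a live JDBC connection until onClose() runs, so callers should close it, most simply with try-with-resources. A minimal usage sketch; how the dao instance is constructed is assumed for illustration:

    // Hypothetical caller; Stream implements AutoCloseable, so the
    // try-with-resources block triggers onClose() and frees the connection.
    void printAllCustomers(DbCustomerDao dao) throws Exception {
        try (Stream<Customer> customers = dao.getAll()) {
            customers.map(Customer::toString).forEach(System.out::println);
        }
    }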
Use of java.util.function.Consumer in project neo4j by neo4j.
The class DelayedBufferTest, method shouldHandleTheWholeWorkloadShebang:
@Test
public void shouldHandleTheWholeWorkloadShebang() throws Throwable {
    // GIVEN
    final int size = 1_000;
    final long bufferTime = 3;
    VerifyingConsumer consumer = new VerifyingConsumer(size);
    final Clock clock = Clocks.systemClock();
    Supplier<Long> chunkThreshold = clock::millis;
    Predicate<Long> safeThreshold = time -> clock.millis() - bufferTime >= time;
    final DelayedBuffer<Long> buffer = new DelayedBuffer<>(chunkThreshold, safeThreshold, 10, consumer);
    MaintenanceThread maintenance = new MaintenanceThread(buffer, 5);
    Race adders = new Race();
    final int numberOfAdders = 20;
    final byte[] offeredIds = new byte[size];
    for (int i = 0; i < numberOfAdders; i++) {
        final int finalI = i;
        adders.addContestant(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < size; j++) {
                    if (j % numberOfAdders == finalI) {
                        buffer.offer(j);
                        offeredIds[j] = 1;
                        parkNanos(MILLISECONDS.toNanos(current().nextInt(2)));
                    }
                }
            }
        });
    }

    // WHEN (multi-threaded) offering of ids
    adders.go();
    // ... ensuring the test is sane itself (did we really offer all these IDs?)
    for (int i = 0; i < size; i++) {
        assertEquals("ID " + i, (byte) 1, offeredIds[i]);
    }
    maintenance.halt();
    buffer.close();

    // THEN
    consumer.assertHaveOnlySeenRange(0, size - 1);
}
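VerifyingConsumer is the Consumer under test, but its source is not shown here. A plausible stand-in, assuming the buffer flushes long[] chunks to its consumer as the constructor wiring suggests; the field layout and assertion messages are illustrative:

    import java.util.function.Consumer;

    import static org.junit.Assert.assertFalse;
    import static org.junit.Assert.assertTrue;

    // Illustrative stand-in: records every id flushed in a chunk and can
    // assert afterwards that exactly the ids in [low, high] were seen once.
    class VerifyingConsumer implements Consumer<long[]> {
        private final boolean[] seen;

        VerifyingConsumer(int size) {
            this.seen = new boolean[size];
        }

        @Override
        public synchronized void accept(long[] chunk) {
            for (long id : chunk) {
                assertFalse("Saw id " + id + " more than once", seen[(int) id]);
                seen[(int) id] = true;
            }
        }

        synchronized void assertHaveOnlySeenRange(int low, int high) {
            for (int id = low; id <= high; id++) {
                assertTrue("Never saw id " + id, seen[id]);
            }
        }
    }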
Use of java.util.function.Consumer in project neo4j by neo4j.
The class RecoveryTest, method shouldSeeThatACleanDatabaseShouldNotRequireRecovery:
@Test
public void shouldSeeThatACleanDatabaseShouldNotRequireRecovery() throws Exception {
    final PhysicalLogFiles logFiles = new PhysicalLogFiles(directory.directory(), "log", fileSystemRule.get());
    File file = logFiles.getLogFileForVersion(logVersion);
    writeSomeData(file, new Visitor<Pair<LogEntryWriter, Consumer<LogPositionMarker>>, IOException>() {
        @Override
        public boolean visit(Pair<LogEntryWriter, Consumer<LogPositionMarker>> pair) throws IOException {
            LogEntryWriter writer = pair.first();
            Consumer<LogPositionMarker> consumer = pair.other();
            LogPositionMarker marker = new LogPositionMarker();
            // last committed tx
            consumer.accept(marker);
            writer.writeStartEntry(0, 1, 2L, 3L, new byte[0]);
            writer.writeCommitEntry(4L, 5L);
            // check point
            consumer.accept(marker);
            writer.writeCheckPointEntry(marker.newPosition());
            return true;
        }
    });
    LifeSupport life = new LifeSupport();
    Recovery.Monitor monitor = mock(Recovery.Monitor.class);
    try {
        StorageEngine storageEngine = mock(StorageEngine.class);
        final LogEntryReader<ReadableClosablePositionAwareChannel> reader = new VersionAwareLogEntryReader<>();
        LatestCheckPointFinder finder = new LatestCheckPointFinder(logFiles, fileSystemRule.get(), reader);
        TransactionMetadataCache metadataCache = new TransactionMetadataCache(100);
        LogHeaderCache logHeaderCache = new LogHeaderCache(10);
        LogFile logFile = life.add(new PhysicalLogFile(fileSystemRule.get(), logFiles, 50,
                () -> transactionIdStore.getLastCommittedTransactionId(), logVersionRepository,
                mock(PhysicalLogFile.Monitor.class), logHeaderCache));
        LogicalTransactionStore txStore = new PhysicalLogicalTransactionStore(logFile, metadataCache, reader);
        life.add(new Recovery(new DefaultRecoverySPI(storageEngine, logFiles, fileSystemRule.get(),
                logVersionRepository, finder, transactionIdStore, txStore, NO_MONITOR) {
            @Override
            public Visitor<CommittedTransactionRepresentation, Exception> startRecovery() {
                fail("Recovery should not be required");
                return null; // <-- to satisfy the compiler
            }
        }, monitor));
        life.start();
        verifyZeroInteractions(monitor);
    } finally {
        life.shutdown();
    }
}
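Here the Consumer<LogPositionMarker> is the hook through which the visitor asks the underlying channel for its current position before writing each entry. A plausible shape for that plumbing inside writeSomeData(), assuming a position-aware channel; openLogChannel() is a hypothetical helper and the exact wiring in the test may differ:

    // Illustrative plumbing: pair a LogEntryWriter with a Consumer that
    // captures the channel's current position into the supplied marker.
    void writeSomeData(File file,
            Visitor<Pair<LogEntryWriter, Consumer<LogPositionMarker>>, IOException> visitor) throws IOException {
        try (PhysicalFlushableChannel channel = openLogChannel(file)) { // hypothetical helper
            LogEntryWriter writer = new LogEntryWriter(channel);
            Consumer<LogPositionMarker> positionCapture = marker -> {
                try {
                    channel.getCurrentPosition(marker);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            };
            visitor.visit(Pair.of(writer, positionCapture));
        }
    }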
Use of java.util.function.Consumer in project presto by prestodb.
The class ClusterMemoryManager, method updatePools:
private synchronized void updatePools(Map<MemoryPoolId, Integer> queryCounts) {
    // Update view of cluster memory and pools
    List<MemoryInfo> nodeMemoryInfos = nodes.values().stream()
            .map(RemoteNodeMemory::getInfo)
            .filter(Optional::isPresent)
            .map(Optional::get)
            .collect(toImmutableList());
    long totalClusterMemory = nodeMemoryInfos.stream()
            .map(MemoryInfo::getTotalNodeMemory)
            .mapToLong(DataSize::toBytes)
            .sum();
    clusterMemoryBytes.set(totalClusterMemory);
    Set<MemoryPoolId> activePoolIds = nodeMemoryInfos.stream()
            .flatMap(info -> info.getPools().keySet().stream())
            .collect(toImmutableSet());
    // Make a copy to materialize the set difference
    Set<MemoryPoolId> removedPools = ImmutableSet.copyOf(difference(pools.keySet(), activePoolIds));
    for (MemoryPoolId removed : removedPools) {
        unexport(pools.get(removed));
        pools.remove(removed);
        if (changeListeners.containsKey(removed)) {
            // Tell listeners the pool is gone by handing them an empty MemoryPoolInfo
            for (Consumer<MemoryPoolInfo> listener : changeListeners.get(removed)) {
                listenerExecutor.execute(() -> listener.accept(new MemoryPoolInfo(0, 0, ImmutableMap.of())));
            }
        }
    }
    for (MemoryPoolId id : activePoolIds) {
        ClusterMemoryPool pool = pools.computeIfAbsent(id, poolId -> {
            ClusterMemoryPool newPool = new ClusterMemoryPool(poolId);
            String objectName = ObjectNames.builder(ClusterMemoryPool.class, newPool.getId().toString()).build();
            try {
                exporter.export(objectName, newPool);
            } catch (JmxException e) {
                log.error(e, "Error exporting memory pool %s", poolId);
            }
            return newPool;
        });
        pool.update(nodeMemoryInfos, queryCounts.getOrDefault(pool.getId(), 0));
        if (changeListeners.containsKey(id)) {
            MemoryPoolInfo info = pool.getInfo();
            for (Consumer<MemoryPoolInfo> listener : changeListeners.get(id)) {
                listenerExecutor.execute(() -> listener.accept(info));
            }
        }
    }
}
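updatePools() only fires the registered listeners; registration happens elsewhere in the class. A minimal sketch of what such a hook can look like, assuming changeListeners is a Map<MemoryPoolId, List<Consumer<MemoryPoolInfo>>> guarded by the same lock; the method shape mirrors the pattern above but is not copied from the Presto source:

    // Hypothetical registration hook for the Consumer<MemoryPoolInfo>
    // listeners that updatePools() notifies on the listenerExecutor.
    public synchronized void addChangeListener(MemoryPoolId poolId, Consumer<MemoryPoolInfo> listener) {
        changeListeners.computeIfAbsent(poolId, id -> new ArrayList<>()).add(listener);
    }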