use of java.util.function.Supplier in project spring-framework by spring-projects.
the class BodyExtractors method readWithMessageReaders.
private static <T, S extends Publisher<T>> S readWithMessageReaders(ReactiveHttpInputMessage inputMessage,
        BodyExtractor.Context context, ResolvableType elementType,
        Function<HttpMessageReader<T>, S> readerFunction, Function<Throwable, S> unsupportedError) {
    MediaType contentType = contentType(inputMessage);
    Supplier<Stream<HttpMessageReader<?>>> messageReaders = context.messageReaders();
    return messageReaders.get()
            .filter(r -> r.canRead(elementType, contentType))
            .findFirst()
            .map(BodyExtractors::<T>cast)
            .map(readerFunction)
            .orElseGet(() -> {
                List<MediaType> supportedMediaTypes = messageReaders.get()
                        .flatMap(reader -> reader.getReadableMediaTypes().stream())
                        .collect(Collectors.toList());
                UnsupportedMediaTypeException error = new UnsupportedMediaTypeException(contentType, supportedMediaTypes);
                return unsupportedError.apply(error);
            });
}
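The Supplier here matters because a Stream can only be traversed once: context.messageReaders() returns a Supplier<Stream<HttpMessageReader<?>>> so the readers can be streamed again when building the list of supported media types for the error. A minimal standalone sketch of that pattern, using plain strings instead of Spring's reader types:

import java.util.List;
import java.util.function.Supplier;
import java.util.stream.Stream;

// Sketch only, not Spring code: a Supplier<Stream<T>> lets callers re-run the pipeline,
// because a Stream instance can be consumed only once.
public class StreamSupplierExample {

    public static void main(String[] args) {
        List<String> readers = List.of("json", "xml", "text");
        Supplier<Stream<String>> messageReaders = readers::stream;

        // First traversal: look for a matching reader.
        String match = messageReaders.get()
                .filter("xml"::equals)
                .findFirst()
                .orElse("none");

        // Second traversal: a fresh Stream, so no IllegalStateException.
        long count = messageReaders.get().count();

        System.out.println(match + ", " + count + " readers");
    }
}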
use of java.util.function.Supplier in project presto by prestodb.
the class RcFileFileWriterFactory method createFileWriter.
@Override
public Optional<HiveFileWriter> createFileWriter(Path path, List<String> inputColumnNames, StorageFormat storageFormat,
        Properties schema, JobConf configuration, ConnectorSession session) {
    if (!HiveSessionProperties.isRcfileOptimizedWriterEnabled(session)) {
        return Optional.empty();
    }
    if (!RCFileOutputFormat.class.getName().equals(storageFormat.getOutputFormat())) {
        return Optional.empty();
    }
    RcFileEncoding rcFileEncoding;
    if (LazyBinaryColumnarSerDe.class.getName().equals(storageFormat.getSerDe())) {
        rcFileEncoding = new BinaryRcFileEncoding();
    } else if (ColumnarSerDe.class.getName().equals(storageFormat.getSerDe())) {
        rcFileEncoding = createTextVectorEncoding(schema, hiveStorageTimeZone);
    } else {
        return Optional.empty();
    }
    Optional<String> codecName = Optional.ofNullable(configuration.get(FileOutputFormat.COMPRESS_CODEC));
    // existing tables and partitions may have columns in a different order than the writer is providing, so build
    // an index to rearrange columns in the proper order
    List<String> fileColumnNames = Splitter.on(',').trimResults().omitEmptyStrings()
            .splitToList(schema.getProperty(META_TABLE_COLUMNS, ""));
    List<Type> fileColumnTypes = toHiveTypes(schema.getProperty(META_TABLE_COLUMN_TYPES, "")).stream()
            .map(hiveType -> hiveType.getType(typeManager))
            .collect(toList());
    int[] fileInputColumnIndexes = fileColumnNames.stream().mapToInt(inputColumnNames::indexOf).toArray();
    try {
        FileSystem fileSystem = hdfsEnvironment.getFileSystem(session.getUser(), path, configuration);
        OutputStream outputStream = fileSystem.create(path);
        Optional<Supplier<RcFileDataSource>> validationInputFactory = Optional.empty();
        if (HiveSessionProperties.isRcfileOptimizedWriterValidate(session)) {
            validationInputFactory = Optional.of(() -> {
                try {
                    return new HdfsRcFileDataSource(path.toString(), fileSystem.open(path), fileSystem.getFileStatus(path).getLen());
                } catch (IOException e) {
                    throw Throwables.propagate(e);
                }
            });
        }
        return Optional.of(new RcFileFileWriter(
                outputStream,
                rcFileEncoding,
                fileColumnTypes,
                codecName,
                fileInputColumnIndexes,
                ImmutableMap.<String, String>builder()
                        .put(HiveMetadata.PRESTO_VERSION_NAME, nodeVersion.toString())
                        .put(HiveMetadata.PRESTO_QUERY_ID_NAME, session.getQueryId())
                        .build(),
                validationInputFactory));
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}
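The interesting Supplier here is validationInputFactory: wrapping the HdfsRcFileDataSource construction in a Supplier defers reopening the file that was just written until validation actually asks for it. A minimal standalone sketch of that deferred-open pattern, using java.nio in place of Hadoop's FileSystem (names are illustrative, not Presto's API):

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Optional;
import java.util.function.Supplier;

// Sketch only: an Optional<Supplier<T>> defers opening the just-written file until
// validation runs; if validation is disabled, nothing is ever opened.
public class DeferredValidationExample {

    static Optional<Supplier<byte[]>> validationInputFactory(Path path, boolean validate) {
        if (!validate) {
            return Optional.empty();
        }
        return Optional.of(() -> {
            try {
                return Files.readAllBytes(path); // opened lazily, only when get() is called
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
        });
    }

    public static void main(String[] args) throws IOException {
        Path path = Files.createTempFile("data", ".rc");
        Files.write(path, new byte[] {1, 2, 3});
        validationInputFactory(path, true)
                .ifPresent(factory -> System.out.println(factory.get().length + " bytes re-read"));
    }
}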
use of java.util.function.Supplier in project java.webdriver by sayems.
the class MultiSelect method selectFirstElementInTheDropDown.
public void selectFirstElementInTheDropDown(Supplier<By> by) {
    final Element element = untilFound(by);
    Select dropdown = new Select(element);
    String textOption = dropdown.getOptions().parallelStream()
            .filter(el -> !el.getText().equals(""))
            .findFirst().get().getText();
    dropdown.selectByVisibleText(textOption);
}
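Accepting a Supplier<By> rather than a By means the locator is only produced when untilFound actually performs the lookup, which suits retrying waits. A small standalone sketch of the idea, with plain strings standing in for WebElements (this is not the java.webdriver API):

import java.util.List;
import java.util.function.Supplier;

// Sketch only: the options are obtained through a Supplier, so they are produced when the
// selection runs, and the stream picks the first option with non-empty text, mirroring
// selectFirstElementInTheDropDown.
public class FirstNonEmptyOptionExample {

    public static void main(String[] args) {
        Supplier<List<String>> options = () -> List.of("", "First option", "Second option");

        String textOption = options.get().stream()
                .filter(text -> !text.equals(""))
                .findFirst()
                .orElse("");

        System.out.println("would select: " + textOption);
    }
}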
use of java.util.function.Supplier in project neo4j by neo4j.
the class IndexBatchTransactionApplierTest method shouldProvideLabelScanStoreUpdatesSortedByNodeId.
@Test
public void shouldProvideLabelScanStoreUpdatesSortedByNodeId() throws Exception {
    // GIVEN
    IndexingService indexing = mock(IndexingService.class);
    LabelScanWriter writer = new OrderVerifyingLabelScanWriter(10, 15, 20);
    WorkSync<Supplier<LabelScanWriter>, LabelUpdateWork> labelScanSync = spy(new WorkSync<>(singletonProvider(writer)));
    WorkSync<IndexingService, IndexUpdatesWork> indexUpdatesSync = new WorkSync<>(indexing);
    TransactionToApply tx = mock(TransactionToApply.class);
    PropertyStore propertyStore = mock(PropertyStore.class);
    try (IndexBatchTransactionApplier applier = new IndexBatchTransactionApplier(indexing, labelScanSync, indexUpdatesSync,
            mock(NodeStore.class), mock(PropertyLoader.class), new PropertyPhysicalToLogicalConverter(propertyStore),
            TransactionApplicationMode.INTERNAL)) {
        try (TransactionApplier txApplier = applier.startTx(tx)) {
            // WHEN
            txApplier.visitNodeCommand(node(15));
            txApplier.visitNodeCommand(node(20));
            txApplier.visitNodeCommand(node(10));
        }
    }
    // THEN all assertions happen inside the LabelScanWriter#write and #close
    verify(labelScanSync).apply(any());
}
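singletonProvider(writer) wraps the single LabelScanWriter in a Supplier, which is the shape WorkSync expects: it asks the Supplier for a writer when applying a batch of work, and the test always hands back the same instance so ordering can be verified. A minimal standalone sketch of such a provider (not neo4j's WorkSync):

import java.util.function.Supplier;

// Sketch only: a "singleton provider" is just a Supplier that always returns the same instance,
// so every consumer that asks for a writer ends up writing to the one shared object.
public class SingletonProviderExample {

    static <T> Supplier<T> singletonProvider(T instance) {
        return () -> instance;
    }

    public static void main(String[] args) {
        StringBuilder writer = new StringBuilder();
        Supplier<StringBuilder> provider = singletonProvider(writer);

        provider.get().append("node 10, ");
        provider.get().append("node 15");

        // Both calls wrote to the same underlying writer.
        System.out.println(provider.get());
    }
}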
use of java.util.function.Supplier in project neo4j by neo4j.
the class LatestCheckPointFinderTest method logFile.
private LogCreator logFile(Entry... entries) {
    return (logVersion, positions) -> {
        try {
            AtomicLong lastTxId = new AtomicLong();
            Supplier<Long> lastTxIdSupplier = () -> lastTxId.get();
            LogVersionRepository logVersionRepository = new DeadSimpleLogVersionRepository(logVersion);
            LifeSupport life = new LifeSupport();
            life.start();
            PhysicalLogFile logFile = life.add(new PhysicalLogFile(fsRule.get(), logFiles, mebiBytes(1),
                    lastTxIdSupplier, logVersionRepository, NO_MONITOR, new LogHeaderCache(10)));
            try {
                FlushablePositionAwareChannel writeChannel = logFile.getWriter();
                LogPositionMarker positionMarker = new LogPositionMarker();
                LogEntryWriter writer = new LogEntryWriter(writeChannel);
                for (Entry entry : entries) {
                    LogPosition currentPosition = writeChannel.getCurrentPosition(positionMarker).newPosition();
                    positions.put(entry, currentPosition);
                    if (entry instanceof StartEntry) {
                        writer.writeStartEntry(0, 0, 0, 0, new byte[0]);
                    } else if (entry instanceof CommitEntry) {
                        CommitEntry commitEntry = (CommitEntry) entry;
                        writer.writeCommitEntry(commitEntry.txId, 0);
                        lastTxId.set(commitEntry.txId);
                    } else if (entry instanceof CheckPointEntry) {
                        CheckPointEntry checkPointEntry = (CheckPointEntry) entry;
                        Entry target = checkPointEntry.withPositionOfEntry;
                        LogPosition logPosition = target != null ? positions.get(target) : currentPosition;
                        assert logPosition != null : "No registered log position for " + target;
                        writer.writeCheckPointEntry(logPosition);
                    } else if (entry instanceof PositionEntry) {
                        // Don't write anything, this entry is just for registering a position so that
                        // another CheckPointEntry can refer to it
                    } else {
                        throw new IllegalArgumentException("Unknown entry " + entry);
                    }
                }
            } finally {
                life.shutdown();
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    };
}
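lastTxIdSupplier is a Supplier<Long> backed by an AtomicLong that the loop updates whenever a commit entry is written, so PhysicalLogFile always sees the latest committed transaction id rather than a fixed snapshot. A minimal standalone sketch of that pattern (not neo4j's PhysicalLogFile):

import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Supplier;

// Sketch only: the Supplier hands out the current value of the AtomicLong each time it is asked,
// so a consumer holding the Supplier observes updates made after it was constructed.
public class LastTxIdSupplierExample {

    public static void main(String[] args) {
        AtomicLong lastTxId = new AtomicLong();
        Supplier<Long> lastTxIdSupplier = lastTxId::get;

        System.out.println("before commits: " + lastTxIdSupplier.get()); // 0

        lastTxId.set(42); // as if a commit entry for tx 42 had just been written
        lastTxId.set(43);

        System.out.println("after commits: " + lastTxIdSupplier.get()); // 43
    }
}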