Use of java.util.function.Supplier in project metron by apache.
The class ParserTopologyCLITest, method testSpoutConfig.
@Test
public void testSpoutConfig() throws Exception {
    File extraConfig = File.createTempFile("spoutConfig", "json");
    extraConfig.deleteOnExit();
    writeMap(extraConfig, new HashMap<String, Object>() {
        {
            put("extra_config", "from_file");
        }
    });
    EnumMap<ParserTopologyCLI.ParserOptions, String> cliOptions = new EnumMap<ParserTopologyCLI.ParserOptions, String>(ParserTopologyCLI.ParserOptions.class) {
        {
            put(ParserTopologyCLI.ParserOptions.SPOUT_CONFIG, extraConfig.getAbsolutePath());
        }
    };
    Predicate<ParserInput> cliOverrideExpected = input -> input.getSpoutConfig().get("extra_config").equals("from_file");
    Predicate<ParserInput> configOverrideExpected = input -> input.getSpoutConfig().get("extra_config").equals("from_zk");
    Supplier<SensorParserConfig> configSupplier = () -> {
        SensorParserConfig config = getBaseConfig();
        config.setSpoutConfig(new HashMap<String, Object>() {
            {
                put("extra_config", "from_zk");
            }
        });
        return config;
    };
    testConfigOption(cliOptions, cliOverrideExpected, configSupplier, configOverrideExpected);
}
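The writeMap helper the test calls is not shown in this snippet. A minimal sketch of what such a helper might look like, assuming Jackson's ObjectMapper is on the classpath (the actual Metron helper may differ):

    // Hypothetical sketch of the writeMap helper used above; not the real Metron code.
    private void writeMap(File outFile, Map<String, Object> config) throws IOException {
        // Serialize the map as JSON so ParserOptions.SPOUT_CONFIG can point at it on disk.
        new com.fasterxml.jackson.databind.ObjectMapper().writeValue(outFile, config);
    }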
Use of java.util.function.Supplier in project cayenne by apache.
The class BatchQueryRow, method getValue.
/**
 * Used by subclasses to resolve deferred values on demand. This is useful
 * when a certain value comes from a generated key of another master object.
 */
protected Object getValue(Map<String, Object> valueMap, DbAttribute attribute) {
    Object value = valueMap.get(attribute.getName());
    // slight chance that a normal value will implement the Supplier interface...
    if (value instanceof Supplier) {
        value = ((Supplier) value).get();
        valueMap.put(attribute.getName(), value);
        // update the replacement id
        if (attribute.isPrimaryKey()) {
            // sanity check
            if (value == null) {
                String name = attribute.getEntity() != null ? attribute.getEntity().getName() : "<null>";
                throw new CayenneRuntimeException("Failed to generate PK: %s.%s", name, attribute.getName());
            }
            ObjectId id = getObjectId();
            if (id != null) {
                // always override with the fresh value, as this is what's in the DB
                id.getReplacementIdMap().put(attribute.getName(), value);
            }
        }
    }
    return value;
}
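The method relies on callers having stored a Supplier in the value map wherever a value is not yet known (for example, a primary key that another object's insert will generate). A minimal, self-contained sketch of this resolve-and-cache pattern, independent of Cayenne's types:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.Supplier;

    public class DeferredValueDemo {
        public static void main(String[] args) {
            Map<String, Object> valueMap = new HashMap<>();
            // The value is not known yet; store a Supplier that produces it on demand.
            valueMap.put("ID", (Supplier<Object>) () -> 42L);
            // First access: resolve the Supplier and cache the concrete value in its place.
            Object value = valueMap.get("ID");
            if (value instanceof Supplier) {
                value = ((Supplier<?>) value).get();
                valueMap.put("ID", value);
            }
            System.out.println(value); // 42
            // Subsequent accesses see the plain value, not the Supplier.
            System.out.println(valueMap.get("ID") instanceof Supplier); // false
        }
    }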
Use of java.util.function.Supplier in project pravega by pravega.
The class StreamSegmentMapperTests, method testGetOrAssignStreamSegmentId.
/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final long minSegmentLength = 1;
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId : currentSegmentId.get();
    Function<String, Long> getSegmentLength = segmentName -> minSegmentLength + (long) MathHelpers.abs(segmentName.hashCode());
    Function<String, Long> getSegmentStartOffset = segmentName -> getSegmentLength.apply(segmentName) / 2;
    @Cleanup
    TestContext context = new TestContext();
    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setSavedState(segmentName, nextSegmentId.get(), getSegmentStartOffset.apply(segmentName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());
            storageSegments.add(transactionName);
            setSavedState(transactionName, nextSegmentId.get(), getSegmentStartOffset.apply(transactionName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        }
    }
    // We set up all the necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    setupStorageGetHandler(context, storageSegments, segmentName -> StreamSegmentInformation.builder().name(segmentName).length(getSegmentLength.apply(segmentName)).sealed(isSealed.test(segmentName)).build());
    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for StreamSegment " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
            long expectedStartOffset = segmentState == null ? 0 : segmentState.getStartOffset();
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, expectedStartOffset, sm.getStartOffset());
        }
    }
    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for Transaction " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);
            // For Transactions we do not expect to see any non-zero start offsets.
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, 0, sm.getStartOffset());
            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId, sm.getParentId());
        }
    }
}
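The nextSegmentId supplier above is a small trick worth noting: decrementAndGet() both advances the counter and decides the branch, so roughly every other call reports "no id yet" while the rest return the freshly decremented value. A minimal, Pravega-free sketch of the same idea (NO_ID here is a hypothetical stand-in for ContainerMetadata.NO_STREAM_SEGMENT_ID):

    import java.util.concurrent.atomic.AtomicLong;
    import java.util.function.Supplier;

    public class AlternatingIdDemo {
        static final long NO_ID = Long.MIN_VALUE; // stand-in for the "no segment id" sentinel

        public static void main(String[] args) {
            AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
            // decrementAndGet() advances the counter; its parity decides whether an id is returned.
            Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? NO_ID : currentSegmentId.get();
            for (int i = 0; i < 4; i++) {
                long id = nextSegmentId.get();
                System.out.println(id == NO_ID ? "NO_ID" : Long.toString(id));
            }
            // Prints: NO_ID, 2147483645, NO_ID, 2147483643
        }
    }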
Use of java.util.function.Supplier in project pravega by pravega.
The class StorageWriterTests, method testWithStorageCorruptionErrors.
/**
 * Tests the StorageWriter in a scenario where the Storage component throws data corruption exceptions (i.e., badOffset,
 * and after reconciliation, the data is still corrupt).
 */
@Test
public void testWithStorageCorruptionErrors() throws Exception {
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);
    // Create a bunch of segments and Transactions.
    ArrayList<Long> segmentIds = createSegments(context);
    // Append data.
    HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
    appendDataBreadthFirst(segmentIds, segmentContents, context);
    // Corrupt (one segment should suffice).
    byte[] corruptionData = "foo".getBytes();
    SegmentHandle corruptedSegmentHandle = InMemoryStorage.newHandle(context.metadata.getStreamSegmentMetadata(segmentIds.get(0)).getName(), false);
    Supplier<Exception> exceptionSupplier = () -> {
        // Corrupt data. We use an internal method (append) to atomically write data at the end of the segment.
        // GetLength+Write would not work well because there may be concurrent writes that modify the data between
        // requesting the length and attempting to write, thus causing the corruption attempt to fail.
        // NOTE: this is a synchronous call, but append() is also a sync method. If append() were to become async,
        // care must be taken not to block a thread while waiting for it.
        context.storage.append(corruptedSegmentHandle, new ByteArrayInputStream(corruptionData), corruptionData.length);
        // Return some other kind of exception.
        return new TimeoutException("Intentional");
    };
    // We only try to corrupt data once.
    AtomicBoolean corruptionHappened = new AtomicBoolean();
    context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(c -> !corruptionHappened.getAndSet(true), exceptionSupplier));
    AssertExtensions.assertThrows("StorageWriter did not fail when a fatal data corruption error occurred.", () -> {
        // The corruption may happen early enough that "awaitRunning" isn't complete yet. In that case,
        // the writer will never reach its 'Running' state. As such, we need to make sure at least one of these
        // will throw (either start or, if the failure happened after start, make sure it eventually fails and shuts down).
        context.writer.startAsync().awaitRunning();
        ServiceListeners.awaitShutdown(context.writer, TIMEOUT, true);
    }, ex -> ex instanceof IllegalStateException);
    ServiceListeners.awaitShutdown(context.writer, TIMEOUT, false);
    Assert.assertTrue("Unexpected failure cause for StorageWriter.", Exceptions.unwrap(context.writer.failureCause()) instanceof ReconciliationFailureException);
}
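The corruptionHappened flag illustrates a common one-shot injection idiom: AtomicBoolean.getAndSet(true) returns false exactly once, even under concurrency, so the injector fires on the first write only. A minimal sketch of that idiom without Pravega's ErrorInjector type (names here are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;
    import java.util.function.Predicate;
    import java.util.function.Supplier;

    public class OneShotInjectionDemo {
        public static void main(String[] args) {
            AtomicBoolean fired = new AtomicBoolean();
            // getAndSet(true) returns false only on the very first call, even if called concurrently.
            Predicate<Object> shouldInject = ignored -> !fired.getAndSet(true);
            Supplier<Exception> exceptionSupplier = () -> new RuntimeException("Intentional");
            for (int i = 0; i < 3; i++) {
                if (shouldInject.test(null)) {
                    System.out.println("injecting: " + exceptionSupplier.get().getMessage());
                } else {
                    System.out.println("pass-through");
                }
            }
            // Prints: injecting: Intentional, pass-through, pass-through
        }
    }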
Use of java.util.function.Supplier in project pravega by pravega.
The class AsyncReadResultProcessorTests, method testFutureReads.
/**
 * Tests the AsyncReadResultProcessor on Future Reads (that are not yet available in memory, but soon will be).
 */
@Test
public void testFutureReads() throws Exception {
    // Pre-generate some entries.
    ArrayList<byte[]> entries = new ArrayList<>();
    int totalLength = generateEntries(entries);
    // Set up an entry provider supplier.
    AtomicInteger currentIndex = new AtomicInteger();
    StreamSegmentReadResult.NextEntrySupplier supplier = (offset, length) -> {
        int idx = currentIndex.getAndIncrement();
        if (idx >= entries.size()) {
            return null;
        }
        Supplier<ReadResultEntryContents> entryContentsSupplier = () -> new ReadResultEntryContents(new ByteArrayInputStream(entries.get(idx)), entries.get(idx).length);
        return new TestFutureReadResultEntry(offset, length, entryContentsSupplier, executorService());
    };
    // Start an AsyncReadResultProcessor.
    @Cleanup
    StreamSegmentReadResult rr = new StreamSegmentReadResult(0, totalLength, supplier, "");
    TestReadResultHandler testReadResultHandler = new TestReadResultHandler(entries);
    try (AsyncReadResultProcessor rp = AsyncReadResultProcessor.process(rr, testReadResultHandler, executorService())) {
        // Wait for it to complete, and then verify that no errors have been recorded via the callbacks.
        testReadResultHandler.completed.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        if (testReadResultHandler.error.get() != null) {
            Assert.fail("Read failure: " + testReadResultHandler.error.toString());
        }
        Assert.assertEquals("Unexpected number of reads processed.", entries.size(), testReadResultHandler.readCount.get());
    }
    Assert.assertTrue("ReadResult was not closed when the AsyncReadResultProcessor was closed.", rr.isClosed());
}
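The entryContentsSupplier above defers building the ReadResultEntryContents until the "future" read is actually served. The same deferral pattern exists in the JDK itself: CompletableFuture.supplyAsync takes a Supplier and runs it only when the executor schedules the task. A minimal sketch of that pattern:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.function.Supplier;

    public class DeferredContentsDemo {
        public static void main(String[] args) throws Exception {
            ExecutorService executor = Executors.newSingleThreadExecutor();
            // The Supplier body does not run at construction time; it runs when the executor
            // picks up the task, mirroring how a future read produces its contents only once
            // the data becomes available.
            Supplier<byte[]> contentsSupplier = () -> "entry-data".getBytes();
            CompletableFuture<byte[]> futureRead = CompletableFuture.supplyAsync(contentsSupplier, executor);
            System.out.println(new String(futureRead.get())); // entry-data
            executor.shutdown();
        }
    }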