Use of org.junit.rules.Timeout in project evosuite by EvoSuite.
The class Scaffolding, method generateTimeoutRule.
/**
 * Hanging tests have a very, very high negative impact: they can mess up
 * everything (e.g., when running "mvn test"). As such, we should always have
 * timeouts. Adding timeouts only under certain conditions is too risky.
 *
 * @param bd the StringBuilder receiving the generated scaffolding code
 */
private void generateTimeoutRule(StringBuilder bd) {
    bd.append(METHOD_SPACE);
    bd.append("@org.junit.Rule \n");
    bd.append(METHOD_SPACE);
    int timeout = Properties.TIMEOUT + 1000;
    bd.append("public " + Timeout.class.getName() + " globalTimeout = new "
            + Timeout.class.getName() + "(" + timeout);
    boolean useNew = false;
    try {
        // FIXME: this check does not seem to work properly :(
        Class<?> timeoutOfSUTJunit = TestGenerationContext.getInstance()
                .getClassLoaderForSUT().loadClass(Timeout.class.getName());
        Constructor<?> c = timeoutOfSUTJunit.getDeclaredConstructor(Long.TYPE, TimeUnit.class);
        useNew = true;
    } catch (ClassNotFoundException e) {
        logger.error("Failed to load Timeout rule from SUT classloader: {}", e.getMessage(), e);
    } catch (NoSuchMethodException e) {
        logger.warn("SUT is using an old version of JUnit");
        useNew = false;
    }
    if (useNew) {
        // TODO: put back once the above check works
        // bd.append(", " + TimeUnit.class.getName() + ".MILLISECONDS");
    }
    bd.append("); \n");
    bd.append("\n");
}
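For reference, a minimal sketch of the scaffolding this method emits, assuming Properties.TIMEOUT is 4000 ms (so the rule gets 5000 ms); the class and test names are hypothetical. On JUnit 4.12+ the non-deprecated equivalent would be Timeout.millis(5000).

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

public class Foo_ESTest_scaffolding {

    // Generated by generateTimeoutRule(): Properties.TIMEOUT + 1000 = 5000 ms.
    // The generator currently always emits the single-argument (milliseconds)
    // constructor; the TimeUnit variant stays commented out pending the FIXME above.
    @Rule
    public Timeout globalTimeout = new Timeout(5000);

    @Test
    public void test0() {
        // generated test body would go here
    }
}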
Use of org.junit.rules.Timeout in project flink by apache.
The class AsyncWaitOperatorTest, method testRestartWithFullQueue.
/**
* Tests that the AsyncWaitOperator can restart if the checkpointed queue was full.
*
* <p>See FLINK-7949
*/
@Test(timeout = 10000)
public void testRestartWithFullQueue() throws Exception {
    final int capacity = 10;

    // 1. create the snapshot which contains capacity + 1 elements
    final CompletableFuture<Void> trigger = new CompletableFuture<>();
    final OneInputStreamOperatorTestHarness<Integer, Integer> snapshotHarness = createTestHarness(
            new ControllableAsyncFunction<>(trigger), // the NoOpAsyncFunction is like a blocking function
            1000L,
            capacity,
            AsyncDataStream.OutputMode.ORDERED);
    snapshotHarness.open();

    final OperatorSubtaskState snapshot;
    final ArrayList<Integer> expectedOutput = new ArrayList<>(capacity);
    try {
        synchronized (snapshotHarness.getCheckpointLock()) {
            for (int i = 0; i < capacity; i++) {
                snapshotHarness.processElement(i, 0L);
                expectedOutput.add(i);
            }
        }
        synchronized (snapshotHarness.getCheckpointLock()) {
            // execute the snapshot within the checkpoint lock, because then it is
            // guaranteed that the lastElementWriter has written the exceeding element
            snapshot = snapshotHarness.snapshot(0L, 0L);
        }
        // trigger the computation to make the close call finish
        trigger.complete(null);
    } finally {
        synchronized (snapshotHarness.getCheckpointLock()) {
            snapshotHarness.close();
        }
    }

    // 2. restore the snapshot and check that we complete
    final OneInputStreamOperatorTestHarness<Integer, Integer> recoverHarness = createTestHarness(
            new ControllableAsyncFunction<>(CompletableFuture.completedFuture(null)),
            1000L,
            capacity,
            AsyncDataStream.OutputMode.ORDERED);
    recoverHarness.initializeState(snapshot);
    synchronized (recoverHarness.getCheckpointLock()) {
        recoverHarness.open();
    }
    synchronized (recoverHarness.getCheckpointLock()) {
        recoverHarness.endInput();
        recoverHarness.close();
    }

    final ConcurrentLinkedQueue<Object> output = recoverHarness.getOutput();
    final List<Integer> outputElements = output.stream()
            .map(r -> ((StreamRecord<Integer>) r).getValue())
            .collect(Collectors.toList());
    assertThat(outputElements, Matchers.equalTo(expectedOutput));
}
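The test hinges on the ControllableAsyncFunction helper, which holds back every asyncInvoke result until an external future completes, so the operator's queue is still full when the snapshot is taken. A hedged sketch of such a helper; the name comes from the test above, but the structure is an assumption based on how the test uses it, not the exact Flink source:

private static class ControllableAsyncFunction<IN> implements AsyncFunction<IN, IN> {

    // Note: a real Flink job would need this function to be serializable;
    // this harness-only sketch ignores that concern.
    private final CompletableFuture<Void> trigger;

    ControllableAsyncFunction(CompletableFuture<Void> trigger) {
        this.trigger = Objects.requireNonNull(trigger);
    }

    @Override
    public void asyncInvoke(IN input, ResultFuture<IN> resultFuture) {
        // emit the input only once the external trigger fires, keeping the
        // operator's queue occupied until then
        trigger.thenRun(() -> resultFuture.complete(Collections.singleton(input)));
    }
}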
Use of org.junit.rules.Timeout in project flink by apache.
The class ThreadInfoRequestCoordinatorTest, method createMockTaskManagerGateway.
private static CompletableFuture<TaskExecutorThreadInfoGateway> createMockTaskManagerGateway(
        CompletionType completionType) {
    final CompletableFuture<TaskThreadInfoResponse> responseFuture = new CompletableFuture<>();
    switch (completionType) {
        case SUCCESSFULLY:
            ThreadInfoSample sample =
                    JvmUtils.createThreadInfoSample(Thread.currentThread().getId(), 100).get();
            responseFuture.complete(new TaskThreadInfoResponse(Collections.singletonList(sample)));
            break;
        case EXCEPTIONALLY:
            responseFuture.completeExceptionally(new RuntimeException("Request failed."));
            break;
        case TIMEOUT:
            executorService.schedule(
                    () -> responseFuture.completeExceptionally(
                            new TimeoutException(REQUEST_TIMEOUT_MESSAGE)),
                    REQUEST_TIMEOUT.toMillis(),
                    TimeUnit.MILLISECONDS);
            break;
        case NEVER_COMPLETE:
            // do nothing
            break;
        default:
            throw new RuntimeException("Unknown completion type.");
    }
    final TaskExecutorThreadInfoGateway executorGateway =
            (taskExecutionAttemptId, requestParams, timeout) -> responseFuture;
    return CompletableFuture.completedFuture(executorGateway);
}
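The CompletionType values the factory switches over describe the four ways the mocked gateway's future can behave. A plain sketch of such an enum, with the constant names taken directly from the switch cases above (the comments summarize each branch):

private enum CompletionType {
    SUCCESSFULLY,    // complete the future with a real ThreadInfoSample
    EXCEPTIONALLY,   // complete the future with a RuntimeException
    TIMEOUT,         // fail with a TimeoutException after REQUEST_TIMEOUT elapses
    NEVER_COMPLETE   // leave the future pending forever
}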
Use of org.junit.rules.Timeout in project pravega by pravega.
The class BookKeeperLogTests, method testRemoveEmptyLedgers.
/**
* Tests the ability of BookKeeperLog to automatically remove empty ledgers during initialization.
*/
@Test
public void testRemoveEmptyLedgers() throws Exception {
    final int count = 100;
    final int writeEvery = count / 10;
    final Predicate<Integer> shouldAppendAnything = i -> i % writeEvery == 0;
    val allLedgers = new ArrayList<Map.Entry<Long, LedgerMetadata.Status>>();
    final Predicate<Integer> shouldExist = index ->
            (index >= allLedgers.size() - Ledgers.MIN_FENCE_LEDGER_COUNT)
                    || (allLedgers.get(index).getValue() != LedgerMetadata.Status.Empty);
    for (int i = 0; i < count; i++) {
        try (BookKeeperLog log = (BookKeeperLog) createDurableDataLog()) {
            log.initialize(TIMEOUT);
            boolean shouldAppend = shouldAppendAnything.test(i);
            val currentMetadata = log.loadMetadata();
            val lastLedger = currentMetadata.getLedgers().get(currentMetadata.getLedgers().size() - 1);
            allLedgers.add(new AbstractMap.SimpleImmutableEntry<>(
                    lastLedger.getLedgerId(),
                    shouldAppend ? LedgerMetadata.Status.NotEmpty : LedgerMetadata.Status.Empty));
            val metadataLedgers = currentMetadata.getLedgers().stream()
                    .map(LedgerMetadata::getLedgerId)
                    .collect(Collectors.toSet());

            // Verify Log Metadata does not contain old empty ledgers.
            for (int j = 0; j < allLedgers.size(); j++) {
                val e = allLedgers.get(j);
                val expectedExist = shouldExist.test(j);
                Assert.assertEquals(
                        "Unexpected state for metadata. AllLedgerCount=" + allLedgers.size()
                                + ", LedgerIndex=" + j + ", LedgerStatus=" + e.getValue(),
                        expectedExist,
                        metadataLedgers.contains(e.getKey()));
            }

            // Append some data to this Ledger, if needed.
            if (shouldAppend) {
                log.append(new CompositeByteArraySegment(getWriteData()), TIMEOUT)
                        .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            }
        }
    }

    // Verify that these ledgers have also been deleted from BookKeeper.
    for (int i = 0; i < allLedgers.size(); i++) {
        val e = allLedgers.get(i);
        if (shouldExist.test(i)) {
            // This should not throw any exceptions.
            Ledgers.openFence(e.getKey(), this.factory.get().getBookKeeperClient(), this.config.get());
        } else {
            AssertExtensions.assertThrows(
                    "Ledger not deleted from BookKeeper.",
                    () -> Ledgers.openFence(e.getKey(), this.factory.get().getBookKeeperClient(), this.config.get()),
                    ex -> true);
        }
    }
}
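The shouldExist predicate encodes the retention rule under test: the trailing Ledgers.MIN_FENCE_LEDGER_COUNT ledgers are always kept (presumably because fencing needs them), while older ledgers survive only if data was written to them. A self-contained sketch of that rule using plain booleans, assuming a fence count of 2 for illustration (the real constant lives in Pravega's Ledgers class):

import java.util.List;

public class RetentionRuleDemo {

    // Assumed value for illustration; see Ledgers.MIN_FENCE_LEDGER_COUNT in Pravega.
    static final int MIN_FENCE_LEDGER_COUNT = 2;

    static boolean shouldExist(List<Boolean> isEmpty, int index) {
        // Keep the trailing fence ledgers unconditionally; otherwise keep only non-empty ones.
        return index >= isEmpty.size() - MIN_FENCE_LEDGER_COUNT || !isEmpty.get(index);
    }

    public static void main(String[] args) {
        // true = the ledger stayed empty; false = data was appended to it.
        List<Boolean> isEmpty = List.of(false, true, true, false, true, true);
        for (int i = 0; i < isEmpty.size(); i++) {
            System.out.println("ledger " + i + " retained: " + shouldExist(isEmpty, i));
        }
    }
}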
Use of org.junit.rules.Timeout in project pravega by pravega.
The class StreamSegmentContainerTests, method testAttributes.
/**
 * Tests the ability to set attributes (via append() or updateAttributes()), then fetch them back using getAttributes(),
 * with emphasis on Extended Attributes that are dumped into Storage and cleared from memory.
 */
@Test
public void testAttributes() throws Exception {
    final AttributeId coreAttribute = Attributes.EVENT_COUNT;
    final int variableAttributeIdLength = 4;
    final List<AttributeId> extendedAttributesUUID =
            Arrays.asList(AttributeId.randomUUID(), AttributeId.randomUUID());
    final List<AttributeId> extendedAttributesVariable =
            Arrays.asList(AttributeId.random(variableAttributeIdLength), AttributeId.random(variableAttributeIdLength));
    final List<AttributeId> allAttributesWithUUID =
            Stream.concat(extendedAttributesUUID.stream(), Stream.of(coreAttribute)).collect(Collectors.toList());
    final List<AttributeId> allAttributesWithVariable =
            Stream.concat(extendedAttributesVariable.stream(), Stream.of(coreAttribute)).collect(Collectors.toList());
    final AttributeId segmentLengthAttributeUUID = AttributeId.randomUUID();
    final AttributeId segmentLengthAttributeVariable = AttributeId.random(variableAttributeIdLength);
    final long expectedAttributeValue = APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT;
    final TestContainerConfig containerConfig = new TestContainerConfig();
    containerConfig.setSegmentMetadataExpiration(Duration.ofMillis(EVICTION_SEGMENT_EXPIRATION_MILLIS_SHORT));
    containerConfig.setMaxCachedExtendedAttributeCount(SEGMENT_COUNT * allAttributesWithUUID.size());
    @Cleanup
    TestContext context = createContext();
    OperationLogFactory localDurableLogFactory =
            new DurableLogFactory(FREQUENT_TRUNCATIONS_DURABLE_LOG_CONFIG, context.dataLogFactory, executorService());
    @Cleanup
    MetadataCleanupContainer localContainer = new MetadataCleanupContainer(
            CONTAINER_ID, containerConfig, localDurableLogFactory, context.readIndexFactory,
            context.attributeIndexFactory, context.writerFactory, context.storageFactory,
            context.getDefaultExtensions(), executorService());
    localContainer.startAsync().awaitRunning();

    // 1. Create the StreamSegments.
    val segmentNames = IntStream.range(0, SEGMENT_COUNT).boxed()
            .collect(Collectors.toMap(StreamSegmentContainerTests::getSegmentName,
                    i -> i % 2 == 0 ? variableAttributeIdLength : 0));
    ArrayList<CompletableFuture<Void>> opFutures = new ArrayList<>();
    for (val sn : segmentNames.entrySet()) {
        opFutures.add(localContainer.createStreamSegment(
                sn.getKey(),
                SegmentType.STREAM_SEGMENT,
                AttributeUpdateCollection.from(
                        new AttributeUpdate(Attributes.ATTRIBUTE_ID_LENGTH, AttributeUpdateType.None, sn.getValue())),
                TIMEOUT));
    }
    Futures.allOf(opFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Predicate<Map.Entry<String, Integer>> isUUIDOnly = e -> e.getValue() == 0;

    // 2. Add some appends.
    for (val sn : segmentNames.entrySet()) {
        boolean isUUID = isUUIDOnly.test(sn);
        for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
            AttributeUpdateCollection attributeUpdates = (isUUID ? allAttributesWithUUID : allAttributesWithVariable)
                    .stream()
                    .map(attributeId -> new AttributeUpdate(attributeId, AttributeUpdateType.Accumulate, 1))
                    .collect(Collectors.toCollection(AttributeUpdateCollection::new));
            opFutures.add(Futures.toVoid(localContainer.append(sn.getKey(), getAppendData(sn.getKey(), i), attributeUpdates, TIMEOUT)));
        }
    }

    // 2.1 Update some of the attributes.
    for (val sn : segmentNames.entrySet()) {
        boolean isUUID = isUUIDOnly.test(sn);
        for (int i = 0; i < ATTRIBUTE_UPDATES_PER_SEGMENT; i++) {
            AttributeUpdateCollection attributeUpdates = (isUUID ? allAttributesWithUUID : allAttributesWithVariable)
                    .stream()
                    .map(attributeId -> new AttributeUpdate(attributeId, AttributeUpdateType.Accumulate, 1))
                    .collect(Collectors.toCollection(AttributeUpdateCollection::new));
            opFutures.add(localContainer.updateAttributes(sn.getKey(), attributeUpdates, TIMEOUT));
        }
        // Verify that we are not allowed to update attributes of the wrong type.
        val badUpdate = new AttributeUpdate(
                isUUID ? AttributeId.random(variableAttributeIdLength) : AttributeId.randomUUID(),
                AttributeUpdateType.Accumulate, 1);
        AssertExtensions.assertSuppliedFutureThrows(
                "updateAttributes allowed updating attributes with wrong type and/or length.",
                () -> localContainer.updateAttributes(sn.getKey(), AttributeUpdateCollection.from(badUpdate), TIMEOUT),
                ex -> ex instanceof AttributeIdLengthMismatchException);
    }
    Futures.allOf(opFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);

    // 2.2 Dynamic attributes.
    for (val sn : segmentNames.entrySet()) {
        boolean isUUID = isUUIDOnly.test(sn);
        val dynamicId = isUUID ? segmentLengthAttributeUUID : segmentLengthAttributeVariable;
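        // The dynamic value is resolved when the append is processed: per the assertion below,
        // it evaluates to the append's starting offset (lastOffset - appendData.getLength())
        // plus the supplied delta of 10.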
        val dynamicAttributes = AttributeUpdateCollection.from(
                new DynamicAttributeUpdate(dynamicId, AttributeUpdateType.Replace, DynamicAttributeValue.segmentLength(10)));
        val appendData = getAppendData(sn.getKey(), 1000);
        val lastOffset = localContainer.append(sn.getKey(), appendData, dynamicAttributes, TIMEOUT)
                .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        val expectedValue = lastOffset - appendData.getLength() + 10;
        Assert.assertEquals(expectedValue, (long) localContainer
                .getAttributes(sn.getKey(), Collections.singleton(dynamicId), false, TIMEOUT)
                .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)
                .get(dynamicId));
    }

    // 3. getSegmentInfo
    for (val sn : segmentNames.entrySet()) {
        val segmentName = sn.getKey();
        val allAttributes = isUUIDOnly.test(sn) ? allAttributesWithUUID : allAttributesWithVariable;
        val allAttributeValues = localContainer.getAttributes(segmentName, allAttributes, false, TIMEOUT).join();
        Assert.assertEquals("Unexpected number of attributes retrieved via getAttributes().",
                allAttributes.size(), allAttributeValues.size());

        // Verify all attribute values.
        SegmentProperties sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).join();
        for (val attributeId : allAttributes) {
            Assert.assertEquals("Unexpected value for attribute " + attributeId + " via getInfo() for segment " + segmentName,
                    expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
            Assert.assertEquals("Unexpected value for attribute " + attributeId + " via getAttributes() for segment " + segmentName,
                    expectedAttributeValue, (long) allAttributeValues.getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
        }

        // Verify we can't request wrong lengths/types.
        val badId = isUUIDOnly.test(sn) ? AttributeId.random(variableAttributeIdLength) : AttributeId.randomUUID();
        AssertExtensions.assertSuppliedFutureThrows(
                "getAttributes allowed getting attributes with wrong type and/or length.",
                () -> localContainer.getAttributes(segmentName, Collections.singleton(badId), true, TIMEOUT),
                ex -> ex instanceof IllegalArgumentException);
    }

    // Force these segments out of memory, so that we may verify that extended attributes are still recoverable.
    localContainer.triggerMetadataCleanup(segmentNames.keySet()).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    for (val sn : segmentNames.entrySet()) {
        val segmentName = sn.getKey();
        val allAttributes = isUUIDOnly.test(sn) ? allAttributesWithUUID : allAttributesWithVariable;
        val allAttributeValues = localContainer.getAttributes(segmentName, allAttributes, false, TIMEOUT).join();
        Assert.assertEquals("Unexpected number of attributes retrieved via getAttributes() after recovery for segment " + segmentName,
                allAttributes.size(), allAttributeValues.size());

        // Verify all attribute values. Core attributes should still be loaded in memory, while extended attributes can
        // only be fetched via their special API.
        SegmentProperties sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).join();
        for (val attributeId : allAttributes) {
            Assert.assertEquals("Unexpected value for attribute " + attributeId + " via getAttributes() after recovery for segment " + segmentName,
                    expectedAttributeValue, (long) allAttributeValues.getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
            if (Attributes.isCoreAttribute(attributeId)) {
                Assert.assertEquals("Expecting core attribute to be loaded in memory.",
                        expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
            } else {
                Assert.assertEquals("Not expecting extended attribute to be loaded in memory.",
                        Attributes.NULL_ATTRIBUTE_VALUE, (long) sp.getAttributes().getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
            }
        }

        // Now instruct the Container to cache missing values (do it a few times so we make sure it's idempotent).
        // Also introduce some random new attribute to fetch. We want to make sure we can properly handle caching
        // missing attribute values.
        val missingAttributeId = isUUIDOnly.test(sn) ? AttributeId.randomUUID() : AttributeId.random(variableAttributeIdLength);
        val attributesToCache = new ArrayList<>(allAttributes);
        attributesToCache.add(missingAttributeId);
        val attributesToCacheValues = new HashMap<>(allAttributeValues);
        attributesToCacheValues.put(missingAttributeId, Attributes.NULL_ATTRIBUTE_VALUE);
        Map<AttributeId, Long> allAttributeValuesWithCache;
        for (int i = 0; i < 2; i++) {
            allAttributeValuesWithCache = localContainer.getAttributes(segmentName, attributesToCache, true, TIMEOUT).join();
            AssertExtensions.assertMapEquals("Inconsistent results from getAttributes(cache=true, attempt=" + i + ").",
                    attributesToCacheValues, allAttributeValuesWithCache);
            sp = localContainer.getStreamSegmentInfo(segmentName, TIMEOUT).join();
            for (val attributeId : allAttributes) {
                Assert.assertEquals("Expecting all attributes to be loaded in memory.",
                        expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeId, Attributes.NULL_ATTRIBUTE_VALUE));
            }
            Assert.assertEquals("Unexpected value for missing Attribute Id",
                    Attributes.NULL_ATTRIBUTE_VALUE, (long) sp.getAttributes().get(missingAttributeId));
        }
    }

    // 4. Make an update, then immediately seal the segment, then verify the update updated the root pointer.
    AttributeId attr = Attributes.ATTRIBUTE_SEGMENT_ROOT_POINTER;
    val oldRootPointers = new HashMap<String, Long>();
    for (val sn : segmentNames.entrySet()) {
        val segmentName = sn.getKey();
        val newAttributeId = isUUIDOnly.test(sn) ? AttributeId.randomUUID() : AttributeId.random(variableAttributeIdLength);
        // Get the old root pointer, then make a random attribute update, then immediately seal the segment.
        localContainer.getAttributes(segmentName, Collections.singleton(attr), false, TIMEOUT).thenCompose(values -> {
            oldRootPointers.put(segmentName, values.get(attr));
            return CompletableFuture.allOf(
                    localContainer.updateAttributes(segmentName,
                            AttributeUpdateCollection.from(new AttributeUpdate(newAttributeId, AttributeUpdateType.Replace, 1L)), TIMEOUT),
                    localContainer.sealStreamSegment(segmentName, TIMEOUT));
        }).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    }
    // Check that the root pointers have been updated: the new value must be greater than the old one,
    // which indicates the StorageWriter was able to successfully record it after its final Attribute Index update.
    for (String segmentName : segmentNames.keySet()) {
        Long oldValue = oldRootPointers.get(segmentName);
        TestUtils.await(() -> {
            val newVal = localContainer.getAttributes(segmentName, Collections.singleton(attr), false, TIMEOUT)
                    .join().get(attr);
            return oldValue < newVal;
        }, 10, TIMEOUT.toMillis());
    }
    waitForSegmentsInStorage(segmentNames.keySet(), localContainer, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    localContainer.stopAsync().awaitTerminated();
}
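Each append in step 2 and each updateAttributes call in step 2.1 applies an AttributeUpdateType.Accumulate update of +1 to the same attribute ids, which is why the test expects a final value of APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT. A minimal, self-contained sketch of Accumulate semantics, independent of the Pravega types (constant values here are stand-ins, not the test's real configuration):

import java.util.HashMap;
import java.util.Map;

public class AccumulateDemo {

    // Mirrors the idea behind AttributeUpdateType.Accumulate: add the delta to the
    // current value, treating a missing attribute as 0. (Simplified; the real type
    // also coexists with Replace, None, and other update types.)
    static void accumulate(Map<String, Long> attributes, String id, long delta) {
        attributes.merge(id, delta, Long::sum);
    }

    public static void main(String[] args) {
        final int appends = 10;          // stand-in for APPENDS_PER_SEGMENT
        final int attributeUpdates = 10; // stand-in for ATTRIBUTE_UPDATES_PER_SEGMENT
        Map<String, Long> attributes = new HashMap<>();
        for (int i = 0; i < appends + attributeUpdates; i++) {
            accumulate(attributes, "eventCount", 1);
        }
        // Prints 20: the analogue of expectedAttributeValue in the test above.
        System.out.println(attributes.get("eventCount"));
    }
}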