use of org.junit.rules.Timeout in project pravega by pravega.
the class StreamSegmentMapperTests method testGetOrAssignStreamSegmentId.
/**
* Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
* retrieving existing attributes.
*/
@Test
public void testGetOrAssignStreamSegmentId() {
    final long minSegmentLength = 1;
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId : currentSegmentId.get();
    Function<String, Long> getSegmentLength = segmentName -> minSegmentLength + (long) MathHelpers.abs(segmentName.hashCode());
    Function<String, Long> getSegmentStartOffset = segmentName -> getSegmentLength.apply(segmentName) / 2;
    @Cleanup
    TestContext context = new TestContext();
    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setSavedState(segmentName, nextSegmentId.get(), getSegmentStartOffset.apply(segmentName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName, UUID.randomUUID());
            storageSegments.add(transactionName);
            setSavedState(transactionName, nextSegmentId.get(), getSegmentStartOffset.apply(transactionName), storageSegments.size() % ATTRIBUTE_COUNT, context);
        }
    }

    // We set up all necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    setupStorageGetHandler(context, storageSegments,
            segmentName -> StreamSegmentInformation.builder()
                                                   .name(segmentName)
                                                   .length(getSegmentLength.apply(segmentName))
                                                   .sealed(isSealed.test(segmentName))
                                                   .build());

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for StreamSegment " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
            long expectedStartOffset = segmentState == null ? 0 : segmentState.getStartOffset();
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, expectedStartOffset, sm.getStartOffset());
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getSegmentLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name, expectedLength, sm.getLength());
            Assert.assertEquals("Metadata does not have the expected value for isSealed for Transaction " + name, expectedSeal, sm.isSealed());
            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes("Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);
            // For Transactions we do not expect to see any non-zero start offsets.
            Assert.assertEquals("Unexpected StartOffset in metadata for " + name, 0, sm.getStartOffset());
            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name, ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId, sm.getParentId());
        }
    }
}
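To put the assertions above in context, the contract under test is, roughly: return the already-mapped id if the segment is known to the container metadata, otherwise look the segment up in Storage (and its attributes/start offset in the state store) and record a new mapping. The sketch below captures that flow under stated assumptions; the field and helper names (containerMetadata, storage, assignNewId) are illustrative, not Pravega's actual implementation.

    // A minimal sketch of the lookup order the test exercises. All names here
    // (containerMetadata, storage, assignNewId) are illustrative assumptions.
    CompletableFuture<Long> getOrAssignStreamSegmentId(String segmentName, Duration timeout) {
        long existingId = containerMetadata.getStreamSegmentId(segmentName, false);
        if (existingId != ContainerMetadata.NO_STREAM_SEGMENT_ID) {
            // Already mapped in this container: nothing else to do.
            return CompletableFuture.completedFuture(existingId);
        }
        // Not mapped yet: fetch the SegmentInfo from Storage, merge in any saved
        // attributes/start offset from the state store, then persist a new mapping.
        return storage.getStreamSegmentInfo(segmentName, timeout)
                      .thenCompose(info -> assignNewId(segmentName, info, timeout));
    }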
use of org.junit.rules.Timeout in project pravega by pravega.
the class SequentialAsyncProcessorTests method testRunAsync.
/**
* Tests the runAsync() method.
*/
@Test(timeout = TIMEOUT_MILLIS)
public void testRunAsync() throws Exception {
    final int invocationCount = 10;
    val count = new AtomicInteger();
    val wasInvoked = new Semaphore(0);
    val waitOn = new CompletableFuture<Void>();
    val retry = Retry.withExpBackoff(1, 2, 3).retryWhen(t -> true).throwingOn(Exception.class);
    val error = new AtomicReference<Throwable>();
    val p = new SequentialAsyncProcessor(() -> {
        count.incrementAndGet();
        wasInvoked.release();
        waitOn.join();
    }, retry, error::set, executorService());

    // Invoke it a number of times.
    for (int i = 0; i < invocationCount; i++) {
        p.runAsync();
    }

    // Wait for at least one invocation to happen.
    wasInvoked.acquire();
    Assert.assertEquals("Task seems to have been executed concurrently.", 1, count.get());

    // Now complete the first task and ensure the subsequent requests result in only one extra invocation.
    waitOn.complete(null);
    wasInvoked.acquire();
    Assert.assertEquals("Unexpected number of final invocations.", 2, count.get());
}
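The two assertions encode SequentialAsyncProcessor's coalescing guarantee: calls made while the task is running collapse into at most one follow-up execution, which is why ten runAsync() calls yield exactly two invocations. Below is a minimal sketch of that pattern; it is not the actual Pravega class, and the three-state encoding is an assumption.

    // Sketch of coalescing concurrent runAsync() calls (illustrative only).
    // 0 = idle, 1 = running, 2 = running with one queued re-run.
    private final AtomicInteger state = new AtomicInteger();
    private final Runnable runnable;  // the task to execute
    private final Executor executor; // where to execute it

    void runAsync() {
        // Cap the state at 2 so any number of calls during a run queue exactly one re-run.
        if (state.getAndUpdate(s -> Math.min(s + 1, 2)) == 0) {
            executor.execute(this::runInternal);
        }
    }

    private void runInternal() {
        runnable.run();
        // If requests arrived while running, schedule exactly one more execution.
        if (state.updateAndGet(s -> s == 2 ? 1 : 0) == 1) {
            executor.execute(this::runInternal);
        }
    }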
use of org.junit.rules.Timeout in project hono by eclipse.
the class AbstractRequestResponseClientTest method testCancelRequestFailsResponseHandler.
/**
* Verifies that the client cancels and fails a request for which no response
* has been received after a certain amount of time. The request is then
* failed with a {@link ServerErrorException}.
*
* @param ctx The vert.x test context.
*/
@SuppressWarnings("unchecked")
@Test
public void testCancelRequestFailsResponseHandler(final TestContext ctx) {
    // GIVEN a request-response client which times out requests after 200 ms
    client.setRequestTimeout(200);

    // WHEN no response is received for a request sent to the peer
    doAnswer(invocation -> {
        // do not wait 200 ms before running the timeout task but instead run it immediately
        Handler<Long> task = invocation.getArgument(1);
        task.handle(1L);
        return null;
    }).when(vertx).setTimer(anyLong(), any(Handler.class));
    final Async requestFailure = ctx.async();
    client.createAndSendRequest("request", null, (JsonObject) null, ctx.asyncAssertFailure(t -> {
        ctx.assertTrue(ServerErrorException.class.isInstance(t));
        requestFailure.complete();
    }));

    // THEN the request handler is failed
    requestFailure.await();
}
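The mocked setTimer call shows what the client does under the hood: it arms a vert.x timer per request and fails the pending response handler if the timer fires before a response arrives. The sketch below illustrates that pattern; pendingRequests and requestTimeoutMillis are assumed names, and only vertx.setTimer and the ServerErrorException status-code constructor are taken from the test itself.

    // Sketch: arm a one-shot timer when a request is sent. pendingRequests and
    // requestTimeoutMillis are assumptions for illustration, not Hono's fields.
    private void armRequestTimeout(final Object correlationId) {
        vertx.setTimer(requestTimeoutMillis, timerId -> {
            // Only fails the request if no response has removed the handler yet.
            final Handler<AsyncResult<SimpleRequestResponseResult>> pending = pendingRequests.remove(correlationId);
            if (pending != null) {
                pending.handle(Future.failedFuture(
                        new ServerErrorException(HttpURLConnection.HTTP_UNAVAILABLE, "request timed out")));
            }
        });
    }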
use of org.junit.rules.Timeout in project hono by eclipse.
the class AbstractRequestResponseClientTest method testCreateAndSendRequestAddsResponseToCache.
/**
* Verifies that the adapter puts the response from the service to the cache
* using the default cache timeout if the response does not contain a
* <em>no-cache</em> cache directive.
*
* @param ctx The vert.x test context.
*/
@SuppressWarnings("unchecked")
@Test
public void testCreateAndSendRequestAddsResponseToCache(final TestContext ctx) {
    // GIVEN an adapter with an empty cache
    client.setResponseCache(cache);

    // WHEN sending a request
    client.createAndSendRequest("get", (JsonObject) null, ctx.asyncAssertSuccess(result -> {
        // THEN the response has been put to the cache
        verify(cache).put(eq("cacheKey"), any(SimpleRequestResponseResult.class),
                eq(Duration.ofSeconds(RequestResponseClientConfigProperties.DEFAULT_RESPONSE_CACHE_TIMEOUT)));
    }), "cacheKey");

    // Simulate the peer's response so the success handler (and assertion) above runs.
    final ArgumentCaptor<Message> messageCaptor = ArgumentCaptor.forClass(Message.class);
    verify(sender).send(messageCaptor.capture(), any(Handler.class));
    final Message response = ProtonHelper.message("result");
    MessageHelper.addProperty(response, MessageHelper.APP_PROPERTY_STATUS, HttpURLConnection.HTTP_OK);
    response.setCorrelationId(messageCaptor.getValue().getMessageId());
    final ProtonDelivery delivery = mock(ProtonDelivery.class);
    client.handleResponse(delivery, response);
}
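The verification pins down the caching rule: responses are stored under the request's cache key for the default timeout unless they opt out via a no-cache directive. A sketch of that rule follows; the getCacheDirective() accessor and the shape of the check are assumptions for illustration, not necessarily Hono's actual code.

    // Sketch of the caching rule: cache the response unless it carries a
    // no-cache directive. getCacheDirective() is an assumed accessor.
    private void addResponseToCache(final String key, final SimpleRequestResponseResult response) {
        final CacheDirective directive = response.getCacheDirective();
        if (directive == null || directive.isCachingAllowed()) {
            cache.put(key, response,
                    Duration.ofSeconds(RequestResponseClientConfigProperties.DEFAULT_RESPONSE_CACHE_TIMEOUT));
        }
    }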
use of org.junit.rules.Timeout in project pravega by pravega.
the class HDFSStorageTest method testZombieFencing.
// region Fencing tests
/**
* A special test case of fencing to verify the behavior of HDFSStorage in the presence of an instance that has
* been fenced out. This case verifies that any ongoing writes properly fail upon fencing. Specifically, we have a
* fenced-out instance that keeps writing and we verify that the write fails once the ownership changes.
* In this case, HDFS behaves such that ongoing writes which execute before the rename
* complete successfully.
*/
@Test(timeout = 60000)
public void testZombieFencing() throws Exception {
    final long epochCount = 30;
    final int writeSize = 1000;
    final String segmentName = "Segment";
    @Cleanup
    val writtenData = new ByteBufferOutputStream();
    final Random rnd = new Random(0);
    int currentEpoch = 1;

    // Create initial adapter.
    val currentStorage = new AtomicReference<Storage>();
    currentStorage.set(createStorage());
    currentStorage.get().initialize(currentEpoch);

    // Create the Segment and open it for the first time.
    val currentHandle = new AtomicReference<SegmentHandle>(
            currentStorage.get().create(segmentName, TIMEOUT)
                          .thenCompose(v -> currentStorage.get().openWrite(segmentName))
                          .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));

    // Run a number of epochs.
    while (currentEpoch <= epochCount) {
        val oldStorage = currentStorage.get();
        val handle = currentHandle.get();
        val writeBuffer = new byte[writeSize];
        val appends = Futures.loop(() -> true, () -> {
            rnd.nextBytes(writeBuffer);
            return oldStorage.write(handle, writtenData.size(), new ByteArrayInputStream(writeBuffer), writeBuffer.length, TIMEOUT)
                             .thenRun(() -> writtenData.write(writeBuffer));
        }, executorService());

        // Create a new Storage adapter with a new epoch and open-write the Segment, remembering its handle.
        val newStorage = createStorage();
        try {
            newStorage.initialize(++currentEpoch);
            currentHandle.set(newStorage.openWrite(segmentName).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS));
        } catch (Exception ex) {
            newStorage.close();
            throw ex;
        }

        currentStorage.set(newStorage);
        try {
            appends.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            Assert.fail("Continuous appends on older epoch Adapter did not fail.");
        } catch (Exception ex) {
            val cause = Exceptions.unwrap(ex);
            if (!(cause instanceof StorageNotPrimaryException
                    || cause instanceof StreamSegmentSealedException
                    || cause instanceof StreamSegmentNotExistsException)) {
                // We only expect the appends to fail because they were fenced out or the Segment was sealed.
                Assert.fail("Unexpected exception " + cause);
            }
        } finally {
            oldStorage.close();
        }
    }

    // Verify the final contents with a fresh adapter at a higher epoch.
    byte[] expectedData = writtenData.getData().getCopy();
    byte[] readData = new byte[expectedData.length];
    @Cleanup
    val readStorage = createStorage();
    readStorage.initialize(++currentEpoch);
    int bytesRead = readStorage.openRead(segmentName)
                               .thenCompose(handle -> readStorage.read(handle, 0, readData, 0, readData.length, TIMEOUT))
                               .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes read.", readData.length, bytesRead);
    Assert.assertArrayEquals("Unexpected data read back.", expectedData, readData);
}
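The test leans on one property of the adapter: a writer initialized with an older epoch must fail with StorageNotPrimaryException (or a sealed/not-exists error) once a higher epoch takes ownership. The sketch below shows the kind of check that provides this, with assumed helper names; the real HDFSStorage mechanism (epoch-stamped file names changed via rename) is more involved, which is why a stale writer can also fail mid-append.

    // Sketch of an epoch-based fencing check. getOwnerEpoch() is an assumed
    // helper; in HDFSStorage the owner epoch is encoded in the file name and
    // changed via rename, so a stale writer may also fail during the append.
    void write(SegmentHandle handle, long offset, InputStream data, int length) throws StorageNotPrimaryException {
        long ownerEpoch = getOwnerEpoch(handle.getSegmentName());
        if (this.epoch < ownerEpoch) {
            // A newer instance has taken over: refuse the append.
            throw new StorageNotPrimaryException(handle.getSegmentName());
        }
        // ... append to the epoch-stamped file ...
    }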