
Example 16 with SegmentProperties

Use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

From the class SegmentStoreAdapter, method attemptReconcile:

@SneakyThrows
private Void attemptReconcile(Throwable ex, String segmentName, Duration timeout) {
    ex = Exceptions.unwrap(ex);
    boolean reconciled = false;
    if (isPossibleEndOfSegment(ex)) {
        // If we get a Sealed/Merged/NotExists exception, verify that the segment really is in that state.
        try {
            SegmentProperties sp = this.streamSegmentStore.getStreamSegmentInfo(segmentName, false, timeout).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
            reconciled = sp.isSealed() || sp.isDeleted();
        } catch (Throwable ex2) {
            reconciled = isPossibleEndOfSegment(Exceptions.unwrap(ex2));
        }
    }
    if (reconciled) {
        return null;
    } else {
        throw ex;
    }
}
Also used: SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties), SneakyThrows (lombok.SneakyThrows)
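
For context, a minimal sketch (not part of the adapter code shown above) of how attemptReconcile is typically wired into an asynchronous call: it is attached as the exceptionally handler of a store operation, so a benign end-of-segment failure resolves the future while any other error is rethrown. The appendWithReconcile name and the byte[] payload are illustrative assumptions.

// Illustrative sketch only: attach attemptReconcile to an append's error path.
// appendWithReconcile is a hypothetical helper; streamSegmentStore is the adapter's store field.
private CompletableFuture<Void> appendWithReconcile(String segmentName, byte[] data, Duration timeout) {
    return this.streamSegmentStore
            .append(segmentName, data, null, timeout)
            .exceptionally(ex -> attemptReconcile(ex, segmentName, timeout));
}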

Example 17 with SegmentProperties

Use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

From the class GetInfoOperationTests, method testGetInfo:

/**
 * Tests general GetInfoOperation behavior.
 */
@Test
public void testGetInfo() throws Exception {
    @Cleanup val fs = new MockFileSystem();
    long expectedLength = 0;
    new CreateOperation(SEGMENT_NAME, newContext(0, fs)).call();
    for (int i = 0; i < WRITE_COUNT; i++) {
        val context = newContext(i, fs);
        val handle = new OpenWriteOperation(SEGMENT_NAME, context).call();
        byte[] data = new byte[i + 1];
        new WriteOperation(handle, expectedLength, new ByteArrayInputStream(data), data.length, context).run();
        expectedLength += data.length;
    }
    val getInfoContext = newContext(WRITE_COUNT, fs);
    SegmentProperties result = new GetInfoOperation(SEGMENT_NAME, getInfoContext).call();
    checkResult("pre-seal", result, expectedLength, false);
    // Seal.
    val sealHandle = new OpenWriteOperation(SEGMENT_NAME, getInfoContext).call();
    new SealOperation(sealHandle, getInfoContext).run();
    result = new GetInfoOperation(SEGMENT_NAME, getInfoContext).call();
    checkResult("post-seal", result, expectedLength, true);
    // Nonexistent segment.
    fs.clear();
    AssertExtensions.assertThrows("GetInfo succeeded on missing segment.", new GetInfoOperation(SEGMENT_NAME, getInfoContext)::call, ex -> ex instanceof FileNotFoundException);
}
Also used: lombok.val (lombok.val), FileNotFoundException (java.io.FileNotFoundException), Cleanup (lombok.Cleanup), ByteArrayInputStream (java.io.ByteArrayInputStream), SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties), Test (org.junit.Test)
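
The checkResult helper is not reproduced on this page. A plausible sketch of what it verifies, based on how it is called above (stage label, SegmentProperties, expected length, expected sealed flag), is shown below; the exact assertions in GetInfoOperationTests may differ.

// Hypothetical sketch of the checkResult helper invoked above.
private void checkResult(String stage, SegmentProperties sp, long expectedLength, boolean expectSealed) {
    Assert.assertEquals("Unexpected name (" + stage + ").", SEGMENT_NAME, sp.getName());
    Assert.assertEquals("Unexpected length (" + stage + ").", expectedLength, sp.getLength());
    Assert.assertEquals("Unexpected sealed status (" + stage + ").", expectSealed, sp.isSealed());
    Assert.assertFalse("Unexpected deleted status (" + stage + ").", sp.isDeleted());
}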

Example 18 with SegmentProperties

Use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

From the class StorageTestBase, method testConcat:

/**
 * Tests the concat() method.
 *
 * @throws Exception if an unexpected error occurred.
 */
@Test
public void testConcat() throws Exception {
    final String context = "Concat";
    try (Storage s = createStorage()) {
        s.initialize(DEFAULT_EPOCH);
        HashMap<String, ByteArrayOutputStream> appendData = populate(s, context);
        // Check invalid segment name.
        val firstSegmentName = getSegmentName(0, context);
        val firstSegmentHandle = s.openWrite(firstSegmentName).join();
        val sealedSegmentName = "SealedSegment";
        createSegment(sealedSegmentName, s);
        val sealedSegmentHandle = s.openWrite(sealedSegmentName).join();
        s.write(sealedSegmentHandle, 0, new ByteArrayInputStream(new byte[1]), 1, TIMEOUT).join();
        s.seal(sealedSegmentHandle, TIMEOUT).join();
        AtomicLong firstSegmentLength = new AtomicLong(s.getStreamSegmentInfo(firstSegmentName, TIMEOUT).join().getLength());
        assertThrows("concat() did not throw for non-existent target segment name.", () -> s.concat(createInexistentSegmentHandle(s, false), 0, sealedSegmentName, TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        assertThrows("concat() did not throw for invalid source StreamSegment name.", () -> s.concat(firstSegmentHandle, firstSegmentLength.get(), "foo2", TIMEOUT), ex -> ex instanceof StreamSegmentNotExistsException);
        ArrayList<String> concatOrder = new ArrayList<>();
        concatOrder.add(firstSegmentName);
        for (String sourceSegment : appendData.keySet()) {
            if (sourceSegment.equals(firstSegmentName)) {
                // FirstSegment is where we'll be concatenating to.
                continue;
            }
            assertThrows("Concat allowed when source segment is not sealed.", () -> s.concat(firstSegmentHandle, firstSegmentLength.get(), sourceSegment, TIMEOUT), ex -> ex instanceof IllegalStateException);
            // Seal the source segment and then re-try the concat
            val sourceWriteHandle = s.openWrite(sourceSegment).join();
            s.seal(sourceWriteHandle, TIMEOUT).join();
            SegmentProperties preConcatTargetProps = s.getStreamSegmentInfo(firstSegmentName, TIMEOUT).join();
            SegmentProperties sourceProps = s.getStreamSegmentInfo(sourceSegment, TIMEOUT).join();
            s.concat(firstSegmentHandle, firstSegmentLength.get(), sourceSegment, TIMEOUT).join();
            concatOrder.add(sourceSegment);
            SegmentProperties postConcatTargetProps = s.getStreamSegmentInfo(firstSegmentName, TIMEOUT).join();
            Assert.assertFalse("concat() did not delete source segment", s.exists(sourceSegment, TIMEOUT).join());
            // Only check lengths here; we'll check the contents at the end.
            Assert.assertEquals("Unexpected target StreamSegment.length after concatenation.", preConcatTargetProps.getLength() + sourceProps.getLength(), postConcatTargetProps.getLength());
            firstSegmentLength.set(postConcatTargetProps.getLength());
        }
        // Check the contents of the first StreamSegment. We already validated that the length is correct.
        SegmentProperties segmentProperties = s.getStreamSegmentInfo(firstSegmentName, TIMEOUT).join();
        byte[] readBuffer = new byte[(int) segmentProperties.getLength()];
        // Read the entire StreamSegment.
        int bytesRead = s.read(firstSegmentHandle, 0, readBuffer, 0, readBuffer.length, TIMEOUT).join();
        Assert.assertEquals("Unexpected number of bytes read.", readBuffer.length, bytesRead);
        // Check, concat-by-concat, that the final data is correct.
        int offset = 0;
        for (String segmentName : concatOrder) {
            byte[] concatData = appendData.get(segmentName).toByteArray();
            AssertExtensions.assertArrayEquals("Unexpected concat data.", concatData, 0, readBuffer, offset, concatData.length);
            offset += concatData.length;
        }
        Assert.assertEquals("Concat included more bytes than expected.", offset, readBuffer.length);
    }
}
Also used: lombok.val (lombok.val), ArrayList (java.util.ArrayList), ByteArrayOutputStream (java.io.ByteArrayOutputStream), StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException), AtomicLong (java.util.concurrent.atomic.AtomicLong), ByteArrayInputStream (java.io.ByteArrayInputStream), SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties), Test (org.junit.Test)
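
The populate helper used at the top of testConcat is defined elsewhere in StorageTestBase. A simplified sketch of its assumed contract: create several segments, write data to each, and mirror every write into a ByteArrayOutputStream keyed by segment name so the concatenated contents can be verified later. The segment count and payloads below are illustrative, and the real helper writes more data per segment.

// Simplified sketch of the assumed populate(...) contract.
private HashMap<String, ByteArrayOutputStream> populateSketch(Storage s, String context) throws Exception {
    HashMap<String, ByteArrayOutputStream> appendData = new HashMap<>();
    for (int segmentId = 0; segmentId < 3; segmentId++) {
        String segmentName = getSegmentName(segmentId, context);
        createSegment(segmentName, s);
        val handle = s.openWrite(segmentName).join();
        byte[] data = String.format("%s_%d", context, segmentId).getBytes();
        s.write(handle, 0, new ByteArrayInputStream(data), data.length, TIMEOUT).join();
        ByteArrayOutputStream expectedContents = new ByteArrayOutputStream();
        expectedContents.write(data, 0, data.length);
        appendData.put(segmentName, expectedContents);
    }
    return appendData;
}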

Example 19 with SegmentProperties

Use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

From the class StreamSegmentContainerTests, method testMetadataCleanup:

/**
 * Tests the ability to clean up SegmentMetadata for those segments which have not been used recently.
 * This test does the following:
 * 1. Sets up a custom SegmentContainer with a hook into the metadataCleanup task
 * 2. Creates a segment and appends something to it, each time updating attributes (and verifies they were updated correctly).
 * 3. Waits for the segment to be forgotten (evicted).
 * 4. Requests info on the segment, validates it, then makes another append, seals it, at each step verifying it was done
 * correctly (checking Metadata, Attributes and Storage).
 * 5. Deletes the segment, waits for metadata to be cleared (via forcing another log truncation), re-creates the
 * same segment and validates that the old attributes did not "bleed in".
 */
@Test
public void testMetadataCleanup() throws Exception {
    final String segmentName = "segment";
    final UUID[] attributes = new UUID[] { Attributes.CREATION_TIME, UUID.randomUUID(), UUID.randomUUID(), UUID.randomUUID() };
    final byte[] appendData = "hello".getBytes();
    final Map<UUID, Long> expectedAttributes = new HashMap<>();
    // We need a special DL config so that we can force truncations after every operation - this will speed up metadata
    // eviction eligibility.
    final DurableLogConfig durableLogConfig = DurableLogConfig.builder().with(DurableLogConfig.CHECKPOINT_MIN_COMMIT_COUNT, 1).with(DurableLogConfig.CHECKPOINT_COMMIT_COUNT, 5).with(DurableLogConfig.CHECKPOINT_TOTAL_COMMIT_LENGTH, 10 * 1024 * 1024L).build();
    final TestContainerConfig containerConfig = new TestContainerConfig();
    containerConfig.setSegmentMetadataExpiration(Duration.ofMillis(250));
    @Cleanup TestContext context = new TestContext(containerConfig);
    OperationLogFactory localDurableLogFactory = new DurableLogFactory(durableLogConfig, context.dataLogFactory, executorService());
    @Cleanup MetadataCleanupContainer localContainer = new MetadataCleanupContainer(CONTAINER_ID, containerConfig, localDurableLogFactory, context.readIndexFactory, context.writerFactory, context.storageFactory, executorService());
    localContainer.startAsync().awaitRunning();
    // Create segment with initial attributes and verify they were set correctly.
    val initialAttributes = createAttributeUpdates(attributes);
    applyAttributes(initialAttributes, expectedAttributes);
    localContainer.createStreamSegment(segmentName, initialAttributes, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentProperties sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after segment creation.", expectedAttributes, sp);
    // Add one append with some attribute changes and verify they were set correctly.
    val appendAttributes = createAttributeUpdates(attributes);
    applyAttributes(appendAttributes, expectedAttributes);
    localContainer.append(segmentName, appendData, appendAttributes, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after append.", expectedAttributes, sp);
    // Wait until the segment is forgotten.
    localContainer.triggerMetadataCleanup(Collections.singleton(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Now get attributes again and verify them.
    sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after eviction & resurrection.", expectedAttributes, sp);
    // Append again, and make sure we can append at the right offset.
    val secondAppendAttributes = createAttributeUpdates(attributes);
    applyAttributes(secondAppendAttributes, expectedAttributes);
    localContainer.append(segmentName, appendData.length, appendData, secondAppendAttributes, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected length from segment after eviction & resurrection.", 2 * appendData.length, sp.getLength());
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after eviction & resurrection.", expectedAttributes, sp);
    // Seal (this should clear out non-dynamic attributes).
    expectedAttributes.keySet().removeIf(Attributes::isDynamic);
    localContainer.sealStreamSegment(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after seal.", expectedAttributes, sp);
    // Verify the segment actually made to Storage in one piece.
    waitForSegmentInStorage(sp, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    val storageInfo = context.storage.getStreamSegmentInfo(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected length in storage for segment.", sp.getLength(), storageInfo.getLength());
    // Delete the segment; its metadata must be evicted again (via another forced metadata cleanup below)
    // before we re-create it and verify that the old attributes do not linger.
    localContainer.deleteStreamSegment(segmentName, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Wait for the segment to be forgotten again.
    localContainer.triggerMetadataCleanup(Collections.singleton(segmentName)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Now Create the Segment again and verify the old attributes were not "remembered".
    val newAttributes = createAttributeUpdates(attributes);
    applyAttributes(newAttributes, expectedAttributes);
    localContainer.createStreamSegment(segmentName, newAttributes, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    sp = localContainer.getStreamSegmentInfo(segmentName, true, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes after deletion and re-creation.", expectedAttributes, sp);
}
Also used: lombok.val (lombok.val), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), Attributes (io.pravega.segmentstore.contracts.Attributes), Cleanup (lombok.Cleanup), OperationLogFactory (io.pravega.segmentstore.server.OperationLogFactory), DurableLogFactory (io.pravega.segmentstore.server.logs.DurableLogFactory), DurableLogConfig (io.pravega.segmentstore.server.logs.DurableLogConfig), AtomicLong (java.util.concurrent.atomic.AtomicLong), SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties), UUID (java.util.UUID), Test (org.junit.Test)
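
createAttributeUpdates and applyAttributes are helpers of StreamSegmentContainerTests that are not shown on this page. A hedged sketch consistent with the calls above (generate one update per attribute id, then mirror the resulting values into the expected map) could look like the following; the real implementations may use different update types and values.

// Hypothetical sketches of the helpers used in testMetadataCleanup.
private Collection<AttributeUpdate> createAttributeUpdates(UUID[] attributeIds) {
    Collection<AttributeUpdate> result = new ArrayList<>();
    for (UUID attributeId : attributeIds) {
        result.add(new AttributeUpdate(attributeId, AttributeUpdateType.Replace, System.nanoTime()));
    }
    return result;
}

private void applyAttributes(Collection<AttributeUpdate> updates, Map<UUID, Long> target) {
    updates.forEach(au -> target.put(au.getAttributeId(), au.getValue()));
}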

Example 20 with SegmentProperties

Use of io.pravega.segmentstore.contracts.SegmentProperties in project pravega by pravega.

From the class StreamSegmentContainerTests, method testConcurrentSegmentActivation:

/**
 * Tests the ability of the StreamSegmentContainer to handle concurrent actions on a Segment that it does not yet
 * know anything about, and to handle the resulting concurrency correctly.
 * Note: this is tested with a single segment. It could be tested with multiple segments, but different segments
 * are mostly independent of each other, so we would not be gaining much by doing so.
 */
@Test
public void testConcurrentSegmentActivation() throws Exception {
    final UUID attributeAccumulate = UUID.randomUUID();
    final long expectedAttributeValue = APPENDS_PER_SEGMENT + ATTRIBUTE_UPDATES_PER_SEGMENT;
    final int appendLength = 10;
    @Cleanup TestContext context = new TestContext();
    context.container.startAsync().awaitRunning();
    // 1. Create the StreamSegments.
    String segmentName = createSegments(context).get(0);
    // 2. Add some appends.
    List<CompletableFuture<Void>> opFutures = Collections.synchronizedList(new ArrayList<>());
    AtomicLong expectedLength = new AtomicLong();
    @Cleanup("shutdown") ExecutorService testExecutor = newScheduledThreadPool(Math.min(20, APPENDS_PER_SEGMENT), "testConcurrentSegmentActivation");
    val submitFutures = new ArrayList<Future<?>>();
    for (int i = 0; i < APPENDS_PER_SEGMENT; i++) {
        final byte fillValue = (byte) i;
        submitFutures.add(testExecutor.submit(() -> {
            Collection<AttributeUpdate> attributeUpdates = Collections.singleton(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            byte[] appendData = new byte[appendLength];
            Arrays.fill(appendData, (byte) (fillValue + 1));
            opFutures.add(context.container.append(segmentName, appendData, attributeUpdates, TIMEOUT));
            expectedLength.addAndGet(appendData.length);
        }));
    }
    // 2.1 Update the attribute.
    for (int i = 0; i < ATTRIBUTE_UPDATES_PER_SEGMENT; i++) {
        submitFutures.add(testExecutor.submit(() -> {
            Collection<AttributeUpdate> attributeUpdates = new ArrayList<>();
            attributeUpdates.add(new AttributeUpdate(attributeAccumulate, AttributeUpdateType.Accumulate, 1));
            opFutures.add(context.container.updateAttributes(segmentName, attributeUpdates, TIMEOUT));
        }));
    }
    // Wait for the submittal of tasks to complete.
    submitFutures.forEach(this::await);
    // Now wait for all the appends to finish.
    Futures.allOf(opFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // 3. getSegmentInfo: verify final state of the attribute.
    SegmentProperties sp = context.container.getStreamSegmentInfo(segmentName, false, TIMEOUT).join();
    Assert.assertEquals("Unexpected length for segment " + segmentName, expectedLength.get(), sp.getLength());
    Assert.assertFalse("Unexpected value for isDeleted for segment " + segmentName, sp.isDeleted());
    Assert.assertFalse("Unexpected value for isSealed for segment " + segmentName, sp.isDeleted());
    // Verify all attribute values.
    Assert.assertEquals("Unexpected value for attribute " + attributeAccumulate + " for segment " + segmentName, expectedAttributeValue, (long) sp.getAttributes().getOrDefault(attributeAccumulate, SegmentMetadata.NULL_ATTRIBUTE_VALUE));
    checkActiveSegments(context.container, 1);
    // 4. Written data.
    waitForOperationsInReadIndex(context.container);
    byte[] actualData = new byte[(int) expectedLength.get()];
    int offset = 0;
    @Cleanup ReadResult readResult = context.container.read(segmentName, 0, actualData.length, TIMEOUT).join();
    while (readResult.hasNext()) {
        ReadResultEntry readEntry = readResult.next();
        ReadResultEntryContents readEntryContents = readEntry.getContent().join();
        AssertExtensions.assertLessThanOrEqual("Too much to read.", actualData.length, offset + readEntryContents.getLength());
        StreamHelpers.readAll(readEntryContents.getData(), actualData, offset, readEntryContents.getLength());
        offset += readEntryContents.getLength();
    }
    Assert.assertEquals("Unexpected number of bytes read.", actualData.length, offset);
    Assert.assertTrue("Unexpected number of bytes read (multiple of appendLength).", actualData.length % appendLength == 0);
    boolean[] observedValues = new boolean[APPENDS_PER_SEGMENT + 1];
    for (int i = 0; i < actualData.length; i += appendLength) {
        byte value = actualData[i];
        Assert.assertFalse("Append with value " + value + " was written multiple times.", observedValues[value]);
        observedValues[value] = true;
        for (int j = 1; j < appendLength; j++) {
            Assert.assertEquals("Append was not written atomically at offset " + (i + j), value, actualData[i + j]);
        }
    }
    // Verify all the appends made it (we purposefully did not write 0, since that's the default fill value in an array).
    Assert.assertFalse("Not expecting 0 as a value.", observedValues[0]);
    for (int i = 1; i < observedValues.length; i++) {
        Assert.assertTrue("Append with value " + i + " was not written.", observedValues[i]);
    }
    context.container.stopAsync().awaitTerminated();
}
Also used: lombok.val (lombok.val), AttributeUpdate (io.pravega.segmentstore.contracts.AttributeUpdate), ReadResultEntryContents (io.pravega.segmentstore.contracts.ReadResultEntryContents), ArrayList (java.util.ArrayList), ReadResult (io.pravega.segmentstore.contracts.ReadResult), Cleanup (lombok.Cleanup), CompletableFuture (java.util.concurrent.CompletableFuture), AtomicLong (java.util.concurrent.atomic.AtomicLong), ReadResultEntry (io.pravega.segmentstore.contracts.ReadResultEntry), ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService), ExecutorService (java.util.concurrent.ExecutorService), Collection (java.util.Collection), SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties), UUID (java.util.UUID), Test (org.junit.Test)
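
The await helper referenced via submitFutures.forEach(this::await) is not shown here. A plausible minimal form (an assumption, not necessarily the project's exact code) simply blocks on each submission future and propagates failures unchecked:

// Plausible minimal form of the await(...) helper referenced above.
@SneakyThrows
private void await(Future<?> f) {
    f.get();
}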

Aggregations

SegmentProperties (io.pravega.segmentstore.contracts.SegmentProperties): 43 uses
Test (org.junit.Test): 24 uses
Cleanup (lombok.Cleanup): 22 uses
AtomicLong (java.util.concurrent.atomic.AtomicLong): 19 uses
StreamSegmentNotExistsException (io.pravega.segmentstore.contracts.StreamSegmentNotExistsException): 18 uses
lombok.val (lombok.val): 18 uses
HashMap (java.util.HashMap): 17 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 17 uses
ArrayList (java.util.ArrayList): 16 uses
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 15 uses
SegmentMetadata (io.pravega.segmentstore.server.SegmentMetadata): 13 uses
BadOffsetException (io.pravega.segmentstore.contracts.BadOffsetException): 11 uses
UUID (java.util.UUID): 11 uses
Exceptions (io.pravega.common.Exceptions): 10 uses
Duration (java.time.Duration): 10 uses
Map (java.util.Map): 10 uses
AtomicReference (java.util.concurrent.atomic.AtomicReference): 10 uses
StorageOperation (io.pravega.segmentstore.server.logs.operations.StorageOperation): 9 uses
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 9 uses
Futures (io.pravega.common.concurrent.Futures): 8 uses
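
For quick reference, the SegmentProperties surface actually exercised by the examples above reduces to a few read-only accessors. The sketch below is inferred from those call sites and is not the complete contract of io.pravega.segmentstore.contracts.SegmentProperties.

// Accessors of SegmentProperties used in the examples above (inferred; not the full interface).
public interface SegmentPropertiesUsedHere {
    long getLength();
    boolean isSealed();
    boolean isDeleted();
    Map<UUID, Long> getAttributes();
}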