
Example 1 with EntryImpl

Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project pulsar by apache.

From the class PersistentStickyKeyDispatcherMultipleConsumersTest, method testMessageRedelivery:

@Test(timeOut = 30000)
public void testMessageRedelivery() throws Exception {
    final Queue<Position> actualEntriesToConsumer1 = new ConcurrentLinkedQueue<>();
    final Queue<Position> actualEntriesToConsumer2 = new ConcurrentLinkedQueue<>();
    final Queue<Position> expectedEntriesToConsumer1 = new ConcurrentLinkedQueue<>();
    expectedEntriesToConsumer1.add(PositionImpl.get(1, 1));
    final Queue<Position> expectedEntriesToConsumer2 = new ConcurrentLinkedQueue<>();
    expectedEntriesToConsumer2.add(PositionImpl.get(1, 2));
    expectedEntriesToConsumer2.add(PositionImpl.get(1, 3));
    final AtomicInteger remainingEntriesNum = new AtomicInteger(expectedEntriesToConsumer1.size() + expectedEntriesToConsumer2.size());
    // Messages with key1 are routed to consumer1 and messages with key2 are routed to consumer2
    final List<Entry> allEntries = new ArrayList<>();
    allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key2")));
    allEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1")));
    allEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key1")));
    allEntries.forEach(entry -> ((EntryImpl) entry).retain());
    final List<Entry> redeliverEntries = new ArrayList<>();
    // message1
    redeliverEntries.add(allEntries.get(0));
    final List<Entry> readEntries = new ArrayList<>();
    // message3
    readEntries.add(allEntries.get(2));
    final Consumer consumer1 = mock(Consumer.class);
    doReturn("consumer1").when(consumer1).consumerName();
    // Change availablePermits of consumer1 to 0 and then back to normal
    when(consumer1.getAvailablePermits()).thenReturn(0).thenReturn(10);
    doReturn(true).when(consumer1).isWritable();
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked") List<Entry> entries = (List<Entry>) invocationOnMock.getArgument(0);
        for (Entry entry : entries) {
            remainingEntriesNum.decrementAndGet();
            actualEntriesToConsumer1.add(entry.getPosition());
        }
        return channelMock;
    }).when(consumer1).sendMessages(anyList(), any(EntryBatchSizes.class), any(EntryBatchIndexesAcks.class), anyInt(), anyLong(), anyLong(), any(RedeliveryTracker.class));
    final Consumer consumer2 = mock(Consumer.class);
    doReturn("consumer2").when(consumer2).consumerName();
    when(consumer2.getAvailablePermits()).thenReturn(10);
    doReturn(true).when(consumer2).isWritable();
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked") List<Entry> entries = (List<Entry>) invocationOnMock.getArgument(0);
        for (Entry entry : entries) {
            remainingEntriesNum.decrementAndGet();
            actualEntriesToConsumer2.add(entry.getPosition());
        }
        return channelMock;
    }).when(consumer2).sendMessages(anyList(), any(EntryBatchSizes.class), any(EntryBatchIndexesAcks.class), anyInt(), anyLong(), anyLong(), any(RedeliveryTracker.class));
    persistentDispatcher.addConsumer(consumer1);
    persistentDispatcher.addConsumer(consumer2);
    final Field totalAvailablePermitsField = PersistentDispatcherMultipleConsumers.class.getDeclaredField("totalAvailablePermits");
    totalAvailablePermitsField.setAccessible(true);
    totalAvailablePermitsField.set(persistentDispatcher, 1000);
    final Field redeliveryMessagesField = PersistentDispatcherMultipleConsumers.class.getDeclaredField("redeliveryMessages");
    redeliveryMessagesField.setAccessible(true);
    MessageRedeliveryController redeliveryMessages = (MessageRedeliveryController) redeliveryMessagesField.get(persistentDispatcher);
    redeliveryMessages.add(allEntries.get(0).getLedgerId(), allEntries.get(0).getEntryId(),
            getStickyKeyHash(allEntries.get(0))); // message1
    redeliveryMessages.add(allEntries.get(1).getLedgerId(), allEntries.get(1).getEntryId(),
            getStickyKeyHash(allEntries.get(1))); // message2
    // Mock Cursor#asyncReplayEntries
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked") Set<Position> positions = (Set<Position>) invocationOnMock.getArgument(0);
        List<Entry> entries = allEntries.stream().filter(entry -> positions.contains(entry.getPosition())).collect(Collectors.toList());
        if (!entries.isEmpty()) {
            ((PersistentStickyKeyDispatcherMultipleConsumers) invocationOnMock.getArgument(1)).readEntriesComplete(entries, PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay);
        }
        return Collections.emptySet();
    }).when(cursorMock).asyncReplayEntries(anySet(), any(PersistentStickyKeyDispatcherMultipleConsumers.class), eq(PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay), anyBoolean());
    // Mock Cursor#asyncReadEntriesOrWait
    doAnswer(invocationOnMock -> {
        ((PersistentStickyKeyDispatcherMultipleConsumers) invocationOnMock.getArgument(2)).readEntriesComplete(readEntries, PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Normal);
        return null;
    }).when(cursorMock).asyncReadEntriesOrWait(anyInt(), anyLong(), any(PersistentStickyKeyDispatcherMultipleConsumers.class), eq(PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Normal), any());
    // (1) Run sendMessagesToConsumers
    // (2) Attempt to send message1 to consumer1; it is skipped because availablePermits is 0
    // (3) Change availablePermits of consumer1 to 10
    // (4) Run readMoreEntries internally
    // (5) Run sendMessagesToConsumers internally
    // (6) Attempt to send message3 to consumer2; it is skipped because redeliveryMessages contains message2
    persistentDispatcher.sendMessagesToConsumers(PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay, redeliverEntries);
    while (remainingEntriesNum.get() > 0) {
        // (7) Run readMoreEntries and resend message1 to consumer1 and message2-3 to consumer2
        persistentDispatcher.readMoreEntries();
    }
    assertEquals(actualEntriesToConsumer1, expectedEntriesToConsumer1);
    assertEquals(actualEntriesToConsumer2, expectedEntriesToConsumer2);
    allEntries.forEach(entry -> entry.release());
}
Also used : Test(org.testng.annotations.Test) Commands.serializeMetadataAndPayload(org.apache.pulsar.common.protocol.Commands.serializeMetadataAndPayload) Mockito.argThat(org.mockito.Mockito.argThat) Unpooled(io.netty.buffer.Unpooled) ChannelPromise(io.netty.channel.ChannelPromise) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Mockito.anyBoolean(org.mockito.Mockito.anyBoolean) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Mockito.doReturn(org.mockito.Mockito.doReturn) PositionImpl(org.apache.bookkeeper.mledger.impl.PositionImpl) BeforeMethod(org.testng.annotations.BeforeMethod) Set(java.util.Set) Position(org.apache.bookkeeper.mledger.Position) BrokerService(org.apache.pulsar.broker.service.BrokerService) Collectors(java.util.stream.Collectors) HierarchyTopicPolicies(org.apache.pulsar.common.policies.data.HierarchyTopicPolicies) ManagedCursorImpl(org.apache.bookkeeper.mledger.impl.ManagedCursorImpl) List(java.util.List) MockedStatic(org.mockito.MockedStatic) MessageMetadata(org.apache.pulsar.common.api.proto.MessageMetadata) EntryBatchSizes(org.apache.pulsar.broker.service.EntryBatchSizes) Mockito.anyList(org.mockito.Mockito.anyList) Optional(java.util.Optional) Queue(java.util.Queue) Mockito.any(org.mockito.Mockito.any) StickyKeyConsumerSelector(org.apache.pulsar.broker.service.StickyKeyConsumerSelector) Mockito.eq(org.mockito.Mockito.eq) Mockito.mock(org.mockito.Mockito.mock) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Mockito.mockStatic(org.mockito.Mockito.mockStatic) Entry(org.apache.bookkeeper.mledger.Entry) EntryImpl(org.apache.bookkeeper.mledger.impl.EntryImpl) Assert.assertEquals(org.testng.Assert.assertEquals) Consumer(org.apache.pulsar.broker.service.Consumer) KeySharedMeta(org.apache.pulsar.common.api.proto.KeySharedMeta) ArrayList(java.util.ArrayList) Commands(org.apache.pulsar.common.protocol.Commands) ArgumentCaptor(org.mockito.ArgumentCaptor) ByteBuf(io.netty.buffer.ByteBuf) Assert(org.testng.Assert) Mockito.anyLong(org.mockito.Mockito.anyLong) Mockito.anyString(org.mockito.Mockito.anyString) EventLoopGroup(io.netty.channel.EventLoopGroup) UTF_8(java.nio.charset.StandardCharsets.UTF_8) ServiceConfiguration(org.apache.pulsar.broker.ServiceConfiguration) EntryBatchIndexesAcks(org.apache.pulsar.broker.service.EntryBatchIndexesAcks) Assert.fail(org.testng.Assert.fail) Mockito.times(org.mockito.Mockito.times) Mockito.when(org.mockito.Mockito.when) Field(java.lang.reflect.Field) PulsarService(org.apache.pulsar.broker.PulsarService) Mockito.verify(org.mockito.Mockito.verify) RedeliveryTracker(org.apache.pulsar.broker.service.RedeliveryTracker) KeySharedMode(org.apache.pulsar.common.api.proto.KeySharedMode) Mockito.anySet(org.mockito.Mockito.anySet) Mockito.anyInt(org.mockito.Mockito.anyInt) Collections(java.util.Collections) Markers(org.apache.pulsar.common.protocol.Markers)
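
For orientation, here is a minimal sketch (not taken from the Pulsar code base) of the EntryImpl reference-counting pattern the test above relies on. It assumes only what the test shows: EntryImpl.create(ledgerId, entryId, ByteBuf) returns a reference-counted entry with a single reference, and each retain() must later be balanced by a release().

// Minimal sketch of the create/retain/release life cycle used by testMessageRedelivery.
private static void entryLifeCycleSketch() {
    ByteBuf payload = Unpooled.wrappedBuffer("message1".getBytes(UTF_8));
    EntryImpl entry = EntryImpl.create(1, 1, payload); // assumed: entry is created with one reference
    entry.retain();                                    // extra reference while the entry is queued for redelivery
    // ... dispatch the entry to a consumer ...
    entry.release();                                   // matches retain()
    entry.release();                                   // matches create()
}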

Example 2 with EntryImpl

Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project pulsar by apache.

From the class BlobStoreManagedLedgerOffloaderStreamingTest, method testInvalidEntryIds:

@Test
public void testInvalidEntryIds() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // The segment should close because the size-in-bytes limit is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid.getLeastSignificantBits()).setUidMsb(uuid.getMostSignificantBits()).setComplete(true).setEndEntryId(9).build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    try {
        readHandle.read(-1, -1);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: negative entry ids are invalid
    }
    try {
        readHandle.read(0, 20);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: the range extends past the last offloaded entry (id 9)
    }
}
Also used : HashMap(java.util.HashMap) ManagedLedger(org.apache.bookkeeper.mledger.ManagedLedger) EntryImpl(org.apache.bookkeeper.mledger.impl.EntryImpl) LinkedList(java.util.LinkedList) OffloadContext(org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext) IOException(java.io.IOException) ReadHandle(org.apache.bookkeeper.client.api.ReadHandle) Entry(org.apache.bookkeeper.mledger.Entry) LedgerEntry(org.apache.bookkeeper.client.api.LedgerEntry) LedgerOffloader(org.apache.bookkeeper.mledger.LedgerOffloader) OffloadHandle(org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle) UUID(java.util.UUID) Test(org.testng.annotations.Test)
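
As a side note, the two try/catch/fail blocks above could also be expressed with org.testng.Assert.expectThrows. This is only a sketch, under the assumption that reads outside the offloaded range 0..9 fail exactly as the test demonstrates:

// Sketch: equivalent assertions using TestNG's expectThrows.
Assert.expectThrows(Exception.class, () -> readHandle.read(-1, -1));
Assert.expectThrows(Exception.class, () -> readHandle.read(0, 20));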

Example 3 with EntryImpl

Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project pulsar by apache.

From the class BlobStoreManagedLedgerOffloaderStreamingTest, method testRandomRead:

@Test
public void testRandomRead() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    LedgerOffloader offloader2 = getOffloader(new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // The segment should close because the size-in-bytes limit is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    // The second segment should also close because the size-in-bytes limit is reached
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid.getLeastSignificantBits()).setUidMsb(uuid.getMostSignificantBits()).setComplete(true).setEndEntryId(9).build()).addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid2.getLeastSignificantBits()).setUidMsb(uuid2.getMostSignificantBits()).setComplete(true).setEndEntryId(19).build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    for (int i = 0; i <= 19; i++) {
        Random seed = new Random(0);
        int begin = seed.nextInt(20);
        int end = seed.nextInt(20);
        if (begin >= end) {
            int temp = begin;
            begin = end;
            end = temp;
        }
        final LedgerEntries ledgerEntries = readHandle.readAsync(begin, end).get();
        for (LedgerEntry ledgerEntry : ledgerEntries) {
            final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
            final byte[] storedData = storedEntry.getData();
            final byte[] entryBytes = ledgerEntry.getEntryBytes();
            assertEquals(storedData, entryBytes);
        }
    }
}
Also used : HashMap(java.util.HashMap) ManagedLedger(org.apache.bookkeeper.mledger.ManagedLedger) EntryImpl(org.apache.bookkeeper.mledger.impl.EntryImpl) LinkedList(java.util.LinkedList) OffloadContext(org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext) ReadHandle(org.apache.bookkeeper.client.api.ReadHandle) Entry(org.apache.bookkeeper.mledger.Entry) LedgerEntry(org.apache.bookkeeper.client.api.LedgerEntry) Random(java.util.Random) LedgerOffloader(org.apache.bookkeeper.mledger.LedgerOffloader) LedgerEntries(org.apache.bookkeeper.client.api.LedgerEntries) LedgerEntry(org.apache.bookkeeper.client.api.LedgerEntry) OffloadHandle(org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle) UUID(java.util.UUID) Test(org.testng.annotations.Test)
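
Since org.apache.bookkeeper.client.api.LedgerEntries is AutoCloseable (closing it releases the underlying entry buffers), the verification loop above could also use try-with-resources. The following is only a sketch under that assumption, reusing the readHandle, begin, end, and entries variables from the test; it is not a change to the original code:

// Sketch: same verification as above, but releasing the LedgerEntries explicitly.
try (LedgerEntries batch = readHandle.readAsync(begin, end).get()) {
    for (LedgerEntry ledgerEntry : batch) {
        EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        assertEquals(storedEntry.getData(), ledgerEntry.getEntryBytes());
    }
}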

Example 4 with EntryImpl

Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project pulsar by apache.

From the class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWrite:

@Test
public void testReadAndWrite() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {

        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // The segment should close because the size-in-bytes limit is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid.getLeastSignificantBits()).setUidMsb(uuid.getMostSignificantBits()).setComplete(true).setEndEntryId(9).build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 9).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        final byte[] storedData = storedEntry.getData();
        final byte[] entryBytes = ledgerEntry.getEntryBytes();
        assertEquals(storedData, entryBytes);
    }
}
Also used : HashMap(java.util.HashMap) ManagedLedger(org.apache.bookkeeper.mledger.ManagedLedger) EntryImpl(org.apache.bookkeeper.mledger.impl.EntryImpl) LinkedList(java.util.LinkedList) OffloadContext(org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext) ReadHandle(org.apache.bookkeeper.client.api.ReadHandle) Entry(org.apache.bookkeeper.mledger.Entry) LedgerEntry(org.apache.bookkeeper.client.api.LedgerEntry) LedgerOffloader(org.apache.bookkeeper.mledger.LedgerOffloader) LedgerEntries(org.apache.bookkeeper.client.api.LedgerEntries) LedgerEntry(org.apache.bookkeeper.client.api.LedgerEntry) OffloadHandle(org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle) UUID(java.util.UUID) Test(org.testng.annotations.Test)
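
Examples 2-4 build the same offloader configuration map inline. A small helper such as the hypothetical buildStreamingOffloadConfig below would remove that repetition; it is not part of the Pulsar test class and assumes the config field and the getOffloader(Map<String, String>) signature seen in the tests above:

// Hypothetical helper collecting the configuration repeated in the streaming offload tests.
private Map<String, String> buildStreamingOffloadConfig() {
    Map<String, String> conf = new HashMap<>();
    conf.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
    conf.put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
    conf.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
    return conf;
}

Each test could then call getOffloader(buildStreamingOffloadConfig()) instead of repeating the anonymous HashMap initializer.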

Example 5 with EntryImpl

Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project pulsar by apache.

From the class BufferedOffloadStreamTest, method testWithPadding:

private void testWithPadding(int paddingLen) throws Exception {
    int blockSize = StreamingDataBlockHeaderImpl.getDataStartOffset();
    List<Entry> entryBuffer = new LinkedList<>();
    final UUID uuid = UUID.randomUUID();
    OffloadSegmentInfoImpl segmentInfo = new OffloadSegmentInfoImpl(uuid, 0, 0, "", new HashMap<>());
    final int entryCount = 10;
    List<Entry> entries = new ArrayList<>();
    for (int i = 0; i < entryCount; i++) {
        final byte[] bytes = new byte[random.nextInt(10)];
        final EntryImpl entry = EntryImpl.create(0, i, bytes);
        entries.add(entry);
        entry.retain();
        entryBuffer.add(entry);
        blockSize += BufferedOffloadStream.ENTRY_HEADER_SIZE + entry.getLength();
    }
    segmentInfo.closeSegment(0, 9);
    blockSize += paddingLen;
    final BufferedOffloadStream inputStream = new BufferedOffloadStream(blockSize, entryBuffer, segmentInfo.beginLedgerId, segmentInfo.beginEntryId);
    Assert.assertEquals(inputStream.getLedgerId(), 0);
    Assert.assertEquals(inputStream.getBeginEntryId(), 0);
    Assert.assertEquals(inputStream.getBlockSize(), blockSize);
    byte[] headerB = new byte[DataBlockHeaderImpl.getDataStartOffset()];
    ByteStreams.readFully(inputStream, headerB);
    StreamingDataBlockHeaderImpl headerRead = StreamingDataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB));
    assertEquals(headerRead.getBlockLength(), blockSize);
    assertEquals(headerRead.getFirstEntryId(), 0);
    int left = blockSize - DataBlockHeaderImpl.getDataStartOffset();
    for (int i = 0; i < entryCount; i++) {
        byte[] lengthBuf = new byte[4];
        byte[] entryIdBuf = new byte[8];
        byte[] content = new byte[entries.get(i).getLength()];
        left -= lengthBuf.length + entryIdBuf.length + content.length;
        inputStream.read(lengthBuf);
        inputStream.read(entryIdBuf);
        inputStream.read(content);
        assertEquals(entries.get(i).getLength(), Ints.fromByteArray(lengthBuf));
        assertEquals(i, Longs.fromByteArray(entryIdBuf));
        assertEquals(entries.get(i).getData(), content);
    }
    Assert.assertEquals(left, paddingLen);
    byte[] padding = new byte[left];
    inputStream.read(padding);
    ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding);
    for (int i = 0; i < paddingBuf.capacity() / 4; i++) {
        assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD));
    }
    // Reached the end of the stream.
    assertEquals(inputStream.read(), -1);
    assertEquals(inputStream.read(), -1);
    inputStream.close();
}
Also used : ArrayList(java.util.ArrayList) EntryImpl(org.apache.bookkeeper.mledger.impl.EntryImpl) OffloadSegmentInfoImpl(org.apache.bookkeeper.mledger.impl.OffloadSegmentInfoImpl) ByteBuf(io.netty.buffer.ByteBuf) LinkedList(java.util.LinkedList) Entry(org.apache.bookkeeper.mledger.Entry) ByteArrayInputStream(java.io.ByteArrayInputStream) UUID(java.util.UUID)
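
The manual header parsing above (a 4-byte length, an 8-byte entry id, then the payload, all decoded with Guava's big-endian Ints/Longs.fromByteArray) can also be done with java.io.DataInputStream, which reads big-endian as well. This is only a sketch of that alternative against the test's inputStream, not the code used by BufferedOffloadStreamTest:

// Sketch: decode one [int length][long entryId][payload] record from the offload stream.
DataInputStream in = new DataInputStream(inputStream);
int length = in.readInt();
long entryId = in.readLong();
byte[] content = new byte[length];
in.readFully(content);
assertEquals(entries.get((int) entryId).getLength(), length);
assertEquals(entries.get((int) entryId).getData(), content);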

Aggregations

EntryImpl (org.apache.bookkeeper.mledger.impl.EntryImpl) 25
Entry (org.apache.bookkeeper.mledger.Entry) 24
Test (org.testng.annotations.Test) 22
LinkedList (java.util.LinkedList) 21
UUID (java.util.UUID) 21
HashMap (java.util.HashMap) 16
LedgerEntry (org.apache.bookkeeper.client.api.LedgerEntry) 15
ReadHandle (org.apache.bookkeeper.client.api.ReadHandle) 15
LedgerOffloader (org.apache.bookkeeper.mledger.LedgerOffloader) 15
OffloadHandle (org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle) 15
ManagedLedger (org.apache.bookkeeper.mledger.ManagedLedger) 15
OffloadContext (org.apache.bookkeeper.mledger.proto.MLDataFormats.OffloadContext) 15
LedgerEntries (org.apache.bookkeeper.client.api.LedgerEntries) 12
ByteBuf (io.netty.buffer.ByteBuf) 10
ArrayList (java.util.ArrayList) 9
ByteArrayInputStream (java.io.ByteArrayInputStream) 6
OffloadSegmentInfoImpl (org.apache.bookkeeper.mledger.impl.OffloadSegmentInfoImpl) 6
Unpooled (io.netty.buffer.Unpooled) 3
ChannelPromise (io.netty.channel.ChannelPromise) 3
EventLoopGroup (io.netty.channel.EventLoopGroup) 3