Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project Pulsar by Apache.
The class PersistentStickyKeyDispatcherMultipleConsumersTest, method testMessageRedelivery.
@Test(timeOut = 30000)
public void testMessageRedelivery() throws Exception {
    final Queue<Position> actualEntriesToConsumer1 = new ConcurrentLinkedQueue<>();
    final Queue<Position> actualEntriesToConsumer2 = new ConcurrentLinkedQueue<>();

    final Queue<Position> expectedEntriesToConsumer1 = new ConcurrentLinkedQueue<>();
    expectedEntriesToConsumer1.add(PositionImpl.get(1, 1));
    final Queue<Position> expectedEntriesToConsumer2 = new ConcurrentLinkedQueue<>();
    expectedEntriesToConsumer2.add(PositionImpl.get(1, 2));
    expectedEntriesToConsumer2.add(PositionImpl.get(1, 3));

    final AtomicInteger remainingEntriesNum = new AtomicInteger(
            expectedEntriesToConsumer1.size() + expectedEntriesToConsumer2.size());

    // Messages with key2 are routed to consumer1 and messages with key1 are routed to consumer2
    final List<Entry> allEntries = new ArrayList<>();
    allEntries.add(EntryImpl.create(1, 1, createMessage("message1", 1, "key2")));
    allEntries.add(EntryImpl.create(1, 2, createMessage("message2", 2, "key1")));
    allEntries.add(EntryImpl.create(1, 3, createMessage("message3", 3, "key1")));
    allEntries.forEach(entry -> ((EntryImpl) entry).retain());

    final List<Entry> redeliverEntries = new ArrayList<>();
    // message1
    redeliverEntries.add(allEntries.get(0));
    final List<Entry> readEntries = new ArrayList<>();
    // message3
    readEntries.add(allEntries.get(2));

    final Consumer consumer1 = mock(Consumer.class);
    doReturn("consumer1").when(consumer1).consumerName();
    // Change availablePermits of consumer1 to 0 and then back to normal
    when(consumer1.getAvailablePermits()).thenReturn(0).thenReturn(10);
    doReturn(true).when(consumer1).isWritable();
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked")
        List<Entry> entries = (List<Entry>) invocationOnMock.getArgument(0);
        for (Entry entry : entries) {
            remainingEntriesNum.decrementAndGet();
            actualEntriesToConsumer1.add(entry.getPosition());
        }
        return channelMock;
    }).when(consumer1).sendMessages(anyList(), any(EntryBatchSizes.class), any(EntryBatchIndexesAcks.class),
            anyInt(), anyLong(), anyLong(), any(RedeliveryTracker.class));

    final Consumer consumer2 = mock(Consumer.class);
    doReturn("consumer2").when(consumer2).consumerName();
    when(consumer2.getAvailablePermits()).thenReturn(10);
    doReturn(true).when(consumer2).isWritable();
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked")
        List<Entry> entries = (List<Entry>) invocationOnMock.getArgument(0);
        for (Entry entry : entries) {
            remainingEntriesNum.decrementAndGet();
            actualEntriesToConsumer2.add(entry.getPosition());
        }
        return channelMock;
    }).when(consumer2).sendMessages(anyList(), any(EntryBatchSizes.class), any(EntryBatchIndexesAcks.class),
            anyInt(), anyLong(), anyLong(), any(RedeliveryTracker.class));

    persistentDispatcher.addConsumer(consumer1);
    persistentDispatcher.addConsumer(consumer2);

    final Field totalAvailablePermitsField =
            PersistentDispatcherMultipleConsumers.class.getDeclaredField("totalAvailablePermits");
    totalAvailablePermitsField.setAccessible(true);
    totalAvailablePermitsField.set(persistentDispatcher, 1000);

    final Field redeliveryMessagesField =
            PersistentDispatcherMultipleConsumers.class.getDeclaredField("redeliveryMessages");
    redeliveryMessagesField.setAccessible(true);
    MessageRedeliveryController redeliveryMessages =
            (MessageRedeliveryController) redeliveryMessagesField.get(persistentDispatcher);
    // message1
    redeliveryMessages.add(allEntries.get(0).getLedgerId(), allEntries.get(0).getEntryId(),
            getStickyKeyHash(allEntries.get(0)));
    // message2
    redeliveryMessages.add(allEntries.get(1).getLedgerId(), allEntries.get(1).getEntryId(),
            getStickyKeyHash(allEntries.get(1)));

    // Mock Cursor#asyncReplayEntries
    doAnswer(invocationOnMock -> {
        @SuppressWarnings("unchecked")
        Set<Position> positions = (Set<Position>) invocationOnMock.getArgument(0);
        List<Entry> entries = allEntries.stream()
                .filter(entry -> positions.contains(entry.getPosition()))
                .collect(Collectors.toList());
        if (!entries.isEmpty()) {
            ((PersistentStickyKeyDispatcherMultipleConsumers) invocationOnMock.getArgument(1))
                    .readEntriesComplete(entries, PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay);
        }
        return Collections.emptySet();
    }).when(cursorMock).asyncReplayEntries(anySet(), any(PersistentStickyKeyDispatcherMultipleConsumers.class),
            eq(PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay), anyBoolean());

    // Mock Cursor#asyncReadEntriesOrWait
    doAnswer(invocationOnMock -> {
        ((PersistentStickyKeyDispatcherMultipleConsumers) invocationOnMock.getArgument(2))
                .readEntriesComplete(readEntries, PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Normal);
        return null;
    }).when(cursorMock).asyncReadEntriesOrWait(anyInt(), anyLong(),
            any(PersistentStickyKeyDispatcherMultipleConsumers.class),
            eq(PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Normal), any());

    // (1) Run sendMessagesToConsumers
    // (2) Attempt to send message1 to consumer1; skipped because availablePermits is 0
    // (3) Change availablePermits of consumer1 to 10
    // (4) Run readMoreEntries internally
    // (5) Run sendMessagesToConsumers internally
    // (6) Attempt to send message3 to consumer2; skipped because redeliveryMessages contains message2
    persistentDispatcher.sendMessagesToConsumers(
            PersistentStickyKeyDispatcherMultipleConsumers.ReadType.Replay, redeliverEntries);
    while (remainingEntriesNum.get() > 0) {
        // (7) Run readMoreEntries, resending message1 to consumer1 and message2-3 to consumer2
        persistentDispatcher.readMoreEntries();
    }

    assertEquals(actualEntriesToConsumer1, expectedEntriesToConsumer1);
    assertEquals(actualEntriesToConsumer2, expectedEntriesToConsumer2);

    allEntries.forEach(entry -> entry.release());
}
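The createMessage helper is referenced above but not included in this snippet. A minimal sketch of what it plausibly looks like, assuming Pulsar's MessageMetadata builder and Commands.serializeMetadataAndPayload; the partition key is what drives the sticky-key routing asserted by the test:

// Hypothetical reconstruction of the createMessage helper; the real helper
// in the test class may use different metadata fields or a different producer name.
private ByteBuf createMessage(String message, int sequenceId, String partitionKey) {
    MessageMetadata messageMetadata = new MessageMetadata()
            .setSequenceId(sequenceId)
            .setProducerName("testProducer")
            .setPartitionKey(partitionKey)
            .setPublishTime(System.currentTimeMillis());
    return Commands.serializeMetadataAndPayload(Commands.ChecksumType.Crc32c, messageMetadata,
            Unpooled.copiedBuffer(message.getBytes(StandardCharsets.UTF_8)));
}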
Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project Pulsar by Apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testInvalidEntryIds.
@Test
public void testInvalidEntryIds() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();

    // The segment should be closed once its size limit (in bytes) is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    try {
        readHandle.read(-1, -1);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: negative entry ids are invalid
    }
    try {
        readHandle.read(0, 20);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: the range extends past the last offloaded entry (9)
    }
}
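The try/fail/catch pattern above can be written more compactly with TestNG's expectThrows. A sketch of the equivalent assertions, assuming any Exception subtype is acceptable:

// Compact equivalent of the negative-read checks above (sketch).
Assert.expectThrows(Exception.class, () -> readHandle.read(-1, -1));
Assert.expectThrows(Exception.class, () -> readHandle.read(0, 20));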
Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project Pulsar by Apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testRandomRead.
@Test
public void testRandomRead() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    LedgerOffloader offloader2 = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();

    // The first segment should be closed once its size limit (in bytes) is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    // The second segment should likewise be closed once its size limit is reached
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);

    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder
            .addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
                    .setUidLsb(uuid.getLeastSignificantBits())
                    .setUidMsb(uuid.getMostSignificantBits())
                    .setComplete(true)
                    .setEndEntryId(9)
                    .build())
            .addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
                    .setUidLsb(uuid2.getLeastSignificantBits())
                    .setUidMsb(uuid2.getMostSignificantBits())
                    .setComplete(true)
                    .setEndEntryId(19)
                    .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();

    // Seed the RNG once, outside the loop, so each iteration reads a different random range
    Random seed = new Random(0);
    for (int i = 0; i <= 19; i++) {
        int begin = seed.nextInt(20);
        int end = seed.nextInt(20);
        if (begin >= end) {
            int temp = begin;
            begin = end;
            end = temp;
        }
        final LedgerEntries ledgerEntries = readHandle.readAsync(begin, end).get();
        for (LedgerEntry ledgerEntry : ledgerEntries) {
            final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
            final byte[] storedData = storedEntry.getData();
            final byte[] entryBytes = ledgerEntry.getEntryBytes();
            assertEquals(storedData, entryBytes);
        }
    }
}
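EntryImpl is reference-counted (it wraps a Netty ByteBuf), so a test that creates entries with EntryImpl.create ideally releases them once the assertions pass. A sketch of the cleanup this test could add at the end, assuming nothing else still holds a reference:

// Release each refcounted entry after the read-back assertions are done.
entries.forEach(Entry::release);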
Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project Pulsar by Apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWrite.
@Test
public void testReadAndWrite() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();

    // The segment should be closed once its size limit (in bytes) is reached
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 9).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        final byte[] storedData = storedEntry.getData();
        final byte[] entryBytes = ledgerEntry.getEntryBytes();
        assertEquals(storedData, entryBytes);
    }
}
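The createMockManagedLedger helper is not shown in this snippet. A plausible sketch, assuming the streaming offloader only needs the ledger's name from the mock; the name used here is illustrative:

// Hypothetical reconstruction of the createMockManagedLedger helper.
private ManagedLedger createMockManagedLedger() {
    ManagedLedger ml = mock(ManagedLedger.class);
    when(ml.getName()).thenReturn("public/default/persistent/testledger");
    return ml;
}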
Use of org.apache.bookkeeper.mledger.impl.EntryImpl in project Pulsar by Apache.
The class BufferedOffloadStreamTest, method testWithPadding.
private void testWithPadding(int paddingLen) throws Exception {
    int blockSize = StreamingDataBlockHeaderImpl.getDataStartOffset();
    List<Entry> entryBuffer = new LinkedList<>();
    final UUID uuid = UUID.randomUUID();
    OffloadSegmentInfoImpl segmentInfo = new OffloadSegmentInfoImpl(uuid, 0, 0, "", new HashMap<>());
    final int entryCount = 10;
    List<Entry> entries = new ArrayList<>();
    for (int i = 0; i < entryCount; i++) {
        final byte[] bytes = new byte[random.nextInt(10)];
        final EntryImpl entry = EntryImpl.create(0, i, bytes);
        entries.add(entry);
        entry.retain();
        entryBuffer.add(entry);
        blockSize += BufferedOffloadStream.ENTRY_HEADER_SIZE + entry.getLength();
    }
    segmentInfo.closeSegment(0, 9);
    blockSize += paddingLen;

    final BufferedOffloadStream inputStream = new BufferedOffloadStream(blockSize, entryBuffer,
            segmentInfo.beginLedgerId, segmentInfo.beginEntryId);
    Assert.assertEquals(inputStream.getLedgerId(), 0);
    Assert.assertEquals(inputStream.getBeginEntryId(), 0);
    Assert.assertEquals(inputStream.getBlockSize(), blockSize);

    // 1. Read and verify the block header.
    byte[] headerB = new byte[DataBlockHeaderImpl.getDataStartOffset()];
    ByteStreams.readFully(inputStream, headerB);
    StreamingDataBlockHeaderImpl headerRead =
            StreamingDataBlockHeaderImpl.fromStream(new ByteArrayInputStream(headerB));
    assertEquals(headerRead.getBlockLength(), blockSize);
    assertEquals(headerRead.getFirstEntryId(), 0);

    // 2. Read and verify each entry: [length (4 bytes)][entryId (8 bytes)][payload].
    int left = blockSize - DataBlockHeaderImpl.getDataStartOffset();
    for (int i = 0; i < entryCount; i++) {
        byte[] lengthBuf = new byte[4];
        byte[] entryIdBuf = new byte[8];
        byte[] content = new byte[entries.get(i).getLength()];
        left -= lengthBuf.length + entryIdBuf.length + content.length;
        // readFully guards against short reads, unlike a bare InputStream#read
        ByteStreams.readFully(inputStream, lengthBuf);
        ByteStreams.readFully(inputStream, entryIdBuf);
        ByteStreams.readFully(inputStream, content);
        assertEquals(entries.get(i).getLength(), Ints.fromByteArray(lengthBuf));
        assertEquals(i, Longs.fromByteArray(entryIdBuf));
        assertEquals(entries.get(i).getData(), content);
    }

    // 3. The remainder should be padding, filled with the 0xFEDCDEAD marker.
    Assert.assertEquals(left, paddingLen);
    byte[] padding = new byte[left];
    ByteStreams.readFully(inputStream, padding);
    ByteBuf paddingBuf = Unpooled.wrappedBuffer(padding);
    for (int i = 0; i < paddingBuf.capacity() / 4; i++) {
        assertEquals(Integer.toHexString(paddingBuf.readInt()), Integer.toHexString(0xFEDCDEAD));
    }

    // 4. Reaching the end of the stream returns -1.
    assertEquals(inputStream.read(), -1);
    assertEquals(inputStream.read(), -1);
    inputStream.close();
}
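Because testWithPadding is a private helper, it needs @Test entry points to run. A sketch of plausible callers; the method names and padding lengths below are illustrative assumptions, not taken from the source:

// Hypothetical @Test entry points exercising the helper with and without padding.
@Test
public void testBufferedOffloadStreamWithoutPadding() throws Exception {
    testWithPadding(0);
}

@Test
public void testBufferedOffloadStreamWithPadding() throws Exception {
    testWithPadding(10);
}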