Use of org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle in project pulsar by apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testInvalidEntryIds.
@Test
public void testInvalidEntryIds() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // Offer enough entries to fill the segment (10 x 100 bytes reaches the 1000-byte limit), so it should close.
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    try {
        readHandle.read(-1, -1);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: negative entry ids are invalid
    }
    try {
        readHandle.read(0, 20);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception e) {
        // expected: the range extends past the last offloaded entry (9)
    }
}
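The offer loop in this test (build an EntryImpl, offer it to the handle, remember it for later verification) reappears in every method of this class. A small helper along the following lines could capture the pattern; it is a hypothetical sketch that reuses the test class's random field and the EntryImpl/OffloadHandle calls already shown above, not part of the upstream test:

private List<Entry> offerRandomEntries(OffloadHandle handle, long ledgerId, long firstEntryId,
                                       int count, int entrySize) throws Exception {
    // Hypothetical helper: offer `count` entries of `entrySize` random bytes, starting at
    // `firstEntryId`, and return them so the caller can compare against later reads.
    final List<Entry> offered = new ArrayList<>();
    for (int i = 0; i < count; i++) {
        final byte[] data = new byte[entrySize];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(ledgerId, firstEntryId + i, data);
        handle.offerEntry(entry);
        offered.add(entry);
    }
    return offered;
}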
Use of org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle in project pulsar by apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testRandomRead.
@Test
public void testRandomRead() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    LedgerOffloader offloader2 = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // Offer enough entries to fill the first segment (10 x 100 bytes reaches the 1000-byte limit), so it should close.
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    // Start a second segment at entry 10 and fill it the same way; it should also close once the size limit is reached.
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build())
        .addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid2.getLeastSignificantBits())
            .setUidMsb(uuid2.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(19)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    // Read 20 randomly chosen [begin, end] ranges (one seeded Random shared across iterations,
    // so the ranges differ) and verify the returned bytes against the entries written above.
    Random seed = new Random(0);
    for (int i = 0; i <= 19; i++) {
        int begin = seed.nextInt(20);
        int end = seed.nextInt(20);
        if (begin >= end) {
            int temp = begin;
            begin = end;
            end = temp;
        }
        final LedgerEntries ledgerEntries = readHandle.readAsync(begin, end).get();
        for (LedgerEntry ledgerEntry : ledgerEntries) {
            final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
            final byte[] storedData = storedEntry.getData();
            final byte[] entryBytes = ledgerEntry.getEntryBytes();
            assertEquals(storedData, entryBytes);
        }
    }
}
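The read-and-compare loop at the end of this test is repeated in testReadAndWrite and testReadAndWriteAcrossSegment below. A hypothetical helper such as the following (using only the ReadHandle, LedgerEntries, and EntryImpl calls already present in these tests, and assuming entry ids start at 0 so they can index the written list) would factor it out:

private void assertRangeMatches(ReadHandle readHandle, List<Entry> written,
                                long firstEntry, long lastEntry) throws Exception {
    // Hypothetical helper: read [firstEntry, lastEntry] from the offloaded ReadHandle and
    // assert that every returned payload matches the entry that was originally offered.
    final LedgerEntries ledgerEntries = readHandle.readAsync(firstEntry, lastEntry).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        final EntryImpl storedEntry = (EntryImpl) written.get((int) ledgerEntry.getEntryId());
        assertEquals(storedEntry.getData(), ledgerEntry.getEntryBytes());
    }
}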
Use of org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle in project pulsar by apache.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWrite.
@Test
public void testReadAndWrite() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // Offer enough entries to fill the segment (10 x 100 bytes reaches the 1000-byte limit), so it should close.
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 9).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        final byte[] storedData = storedEntry.getData();
        final byte[] entryBytes = ledgerEntry.getEntryBytes();
        assertEquals(storedData, entryBytes);
    }
}
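The OffloadSegment builder chain used above to assemble the OffloadContext is another fragment that repeats across these tests. A hypothetical helper mirroring exactly the builder calls used in these snippets could look like this:

private MLDataFormats.OffloadSegment completedSegment(UUID uuid, long endEntryId) {
    // Hypothetical helper: describe one completed offload segment, identified by its UUID,
    // covering entries up to and including endEntryId.
    return MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(endEntryId)
            .build();
}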
Use of org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle in project pulsar by yahoo.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWrite; the snippet is identical to the apache version shown above.
Use of org.apache.bookkeeper.mledger.LedgerOffloader.OffloadHandle in project pulsar by yahoo.
The class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWriteAcrossSegment.
@Test
public void testReadAndWriteAcrossSegment() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    LedgerOffloader offloader2 = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    // Offer enough entries to fill the first segment (10 x 100 bytes reaches the 1000-byte limit), so it should close.
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    // Start a second segment at entry 10 and fill it the same way; it should also close once the size limit is reached.
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid.getLeastSignificantBits())
            .setUidMsb(uuid.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(9)
            .build())
        .addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder()
            .setUidLsb(uuid2.getLeastSignificantBits())
            .setUidMsb(uuid2.getMostSignificantBits())
            .setComplete(true)
            .setEndEntryId(19)
            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 19).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        final byte[] storedData = storedEntry.getData();
        final byte[] entryBytes = ledgerEntry.getEntryBytes();
        assertEquals(storedData, entryBytes);
    }
}
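Using the hypothetical helpers sketched earlier (offerRandomEntries, assertRangeMatches, completedSegment), the across-segment scenario above condenses to roughly the following; this is a paraphrase of the test for readability, not the upstream source:

// Write the first segment (entries 0-9) through the first offloader.
OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
List<Entry> entries = new ArrayList<>(offerRandomEntries(offloadHandle, 0, 0, 10, 100));
assertEquals(offloadHandle.getOffloadResultAsync().get().endEntry, 9);

// Write the second segment (entries 10-19) through a second offloader.
OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
entries.addAll(offerRandomEntries(offloadHandle2, 0, 10, 10, 100));
assertEquals(offloadHandle2.getOffloadResultAsync().get().endEntry, 19);

// Stitch both segments back together for the read path and verify every entry.
OffloadContext context = OffloadContext.newBuilder()
        .addOffloadSegment(completedSegment(uuid, 9))
        .addOffloadSegment(completedSegment(uuid2, 19))
        .build();
ReadHandle readHandle = offloader.readOffloaded(0, context, driverMeta).get();
assertRangeMatches(readHandle, entries, 0, 19);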