Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Yahoo fork of the Apache Pulsar project:
class AdminApiOffloadTest, method testOffload(boolean).
/**
 * Verifies the full lifecycle of a topic-level offload policy against both a
 * partitioned (3 partitions) and a non-partitioned topic:
 *   1. create the topic;
 *   2. with no topic-level policy, the namespace default "NullLedgerOffloader" is in effect;
 *   3-5. set a topic-level policy backed by a mocked "mock" driver and confirm every
 *        partition's managed ledger picks it up;
 *   6. remove the topic-level policy, confirm the topic-level offloader is closed
 *      (once per partition for partitioned topics) and the topic falls back to the
 *      namespace-level "s3" offloader.
 *
 * @param isPartitioned whether to run the scenario against a partitioned topic
 * @throws Exception on any admin/client failure (test fails)
 */
private void testOffload(boolean isPartitioned) throws Exception {
String topicName = testTopic + UUID.randomUUID().toString();
int partitionNum = 3;
// 1 create the topic under test (partitioned with partitionNum partitions, or non-partitioned)
if (isPartitioned) {
admin.topics().createPartitionedTopic(topicName, partitionNum);
} else {
admin.topics().createNonPartitionedTopic(topicName);
}
// Creating (and immediately closing) a producer forces the broker to load the topic
// so its managed-ledger config is available below.
pulsarClient.newProducer().topic(topicName).enableBatching(false).create().close();
// 2 with no topic-level policy, the namespace-level default NullLedgerOffloader applies
if (isPartitioned) {
for (int i = 0; i < partitionNum; i++) {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(TopicName.get(topicName).getPartition(i).toString()).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "NullLedgerOffloader");
}
} else {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "NullLedgerOffloader");
}
// 3 construct a topic-level offload policy using the mocked "mock" driver
OffloadPoliciesImpl offloadPolicies = new OffloadPoliciesImpl();
offloadPolicies.setOffloadersDirectory(".");
offloadPolicies.setManagedLedgerOffloadDriver("mock");
offloadPolicies.setManagedLedgerOffloadPrefetchRounds(10);
offloadPolicies.setManagedLedgerOffloadThresholdInBytes(1024L);
LedgerOffloader topicOffloader = mock(LedgerOffloader.class);
when(topicOffloader.getOffloadDriverName()).thenReturn("mock");
// Any offloader the broker builds for a topic-level policy is replaced by our mock.
doReturn(topicOffloader).when(pulsar).createManagedLedgerOffloader(any());
// 4 set the topic-level offload policy and wait until it is visible via the admin API
admin.topics().setOffloadPolicies(topicName, offloadPolicies);
Awaitility.await().untilAsserted(() -> assertNotNull(admin.topics().getOffloadPolicies(topicName)));
// 5 every partition's managed ledger should now report the "mock" driver
if (isPartitioned) {
for (int i = 0; i < partitionNum; i++) {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(TopicName.get(topicName).getPartition(i).toString(), false).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "mock");
}
} else {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "mock");
}
// 6 remove the topic-level policy: the topic should fall back to the namespace-level
// offloader (mocked here as an "s3" driver registered in the broker's offloader map)
LedgerOffloader namespaceOffloader = mock(LedgerOffloader.class);
when(namespaceOffloader.getOffloadDriverName()).thenReturn("s3");
Map<NamespaceName, LedgerOffloader> map = new HashMap<>();
map.put(TopicName.get(topicName).getNamespaceObject(), namespaceOffloader);
doReturn(map).when(pulsar).getLedgerOffloaderMap();
doReturn(namespaceOffloader).when(pulsar).getManagedLedgerOffloader(TopicName.get(topicName).getNamespaceObject(), null);
admin.topics().removeOffloadPolicies(topicName);
Awaitility.await().untilAsserted(() -> assertNull(admin.topics().getOffloadPolicies(topicName)));
// The now-unused topic-level offloader must be closed — once per partition when partitioned.
if (isPartitioned) {
verify(topicOffloader, times(partitionNum)).close();
} else {
verify(topicOffloader).close();
}
if (isPartitioned) {
for (int i = 0; i < partitionNum; i++) {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopicIfExists(TopicName.get(topicName).getPartition(i).toString()).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "s3");
}
} else {
PersistentTopic topic = (PersistentTopic) pulsar.getBrokerService().getTopic(topicName, false).get().get();
assertNotNull(topic.getManagedLedger().getConfig().getLedgerOffloader());
assertEquals(topic.getManagedLedger().getConfig().getLedgerOffloader().getOffloadDriverName(), "s3");
}
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Yahoo fork of the Apache Pulsar project:
class AdminApiOffloadTest, method testOffload(String, String).
/**
 * Exercises the manually triggered offload flow end to end: publish enough data to
 * roll a ledger, trigger an offload whose completion the test controls through a
 * hand-made future, verify the RUNNING / ERROR / SUCCESS status transitions (including
 * the conflict on a duplicate trigger), and check that the first unoffloaded message
 * is the first entry of the still-open ledger.
 *
 * @param topicName fully qualified topic to offload
 * @param mlName    managed-ledger name backing that topic
 * @throws Exception on any admin/client failure (test fails)
 */
private void testOffload(String topicName, String mlName) throws Exception {
    // The broker hands out our mock for every namespace-level offloader lookup.
    LedgerOffloader mockOffloader = mock(LedgerOffloader.class);
    when(mockOffloader.getOffloadDriverName()).thenReturn("mock");
    doReturn(mockOffloader).when(pulsar).getManagedLedgerOffloader(any(), any());

    // A future the test completes by hand, so we decide exactly when — and how — the
    // first offload attempt finishes.
    CompletableFuture<Void> firstAttempt = new CompletableFuture<>();
    doReturn(firstAttempt).when(mockOffloader).offload(any(), any(), any());

    // Publish 15 messages; enough to roll over so the ledger list has two entries.
    MessageId lastPublished = MessageId.latest;
    try (Producer<byte[]> producer = pulsarClient.newProducer().topic(topicName).enableBatching(false).create()) {
        for (int i = 0; i < 15; i++) {
            lastPublished = producer.send("Foobar".getBytes());
        }
    }
    ManagedLedgerInfo ledgerInfo = pulsar.getManagedLedgerFactory().getManagedLedgerInfo(mlName);
    assertEquals(ledgerInfo.ledgers.size(), 2);

    // Nothing has been triggered yet.
    assertEquals(admin.topics().offloadStatus(topicName).getStatus(), LongRunningProcessStatus.Status.NOT_RUN);

    // First trigger: reported RUNNING while our future is still pending.
    admin.topics().triggerOffload(topicName, lastPublished);
    assertEquals(admin.topics().offloadStatus(topicName).getStatus(), LongRunningProcessStatus.Status.RUNNING);

    // A second trigger while one is in flight must be rejected.
    try {
        admin.topics().triggerOffload(topicName, lastPublished);
        Assert.fail("Should have failed");
    } catch (ConflictException e) {
        // expected
    }

    // Fail the first attempt and confirm the error surfaces through the status API.
    firstAttempt.completeExceptionally(new Exception("Some random failure"));
    assertEquals(admin.topics().offloadStatus(topicName).getStatus(), LongRunningProcessStatus.Status.ERROR);
    Assert.assertTrue(admin.topics().offloadStatus(topicName).getLastError().contains("Some random failure"));

    // Retry with an immediately successful offload.
    doReturn(CompletableFuture.completedFuture(null)).when(mockOffloader).offload(any(), any(), any());
    admin.topics().triggerOffload(topicName, lastPublished);
    Awaitility.await().untilAsserted(() ->
            assertEquals(admin.topics().offloadStatus(topicName).getStatus(), LongRunningProcessStatus.Status.SUCCESS));

    // The first unoffloaded message must be entry 0 of the second (current) ledger.
    MessageId firstUnoffloaded = admin.topics().offloadStatus(topicName).getFirstUnoffloadedMessage();
    assertTrue(firstUnoffloaded instanceof MessageIdImpl);
    MessageIdImpl firstUnoffloadedId = (MessageIdImpl) firstUnoffloaded;
    assertEquals(firstUnoffloadedId.getLedgerId(), ledgerInfo.ledgers.get(1).ledgerId);
    assertEquals(firstUnoffloadedId.getEntryId(), 0);

    // One failed attempt plus one successful attempt.
    verify(mockOffloader, times(2)).offload(any(), any(), any());
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Yahoo fork of the Apache Pulsar project:
class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWrite.
/**
 * Streams 10 x 100-byte entries into a single offload segment (segment size capped at
 * 1000 bytes so it fills and closes), then reads the segment back through
 * {@code readOffloaded} and verifies every entry round-trips byte-for-byte.
 *
 * @throws Exception on any offloader/read failure (test fails)
 */
@Test
public void testReadAndWrite() throws Exception {
    // Plain map instead of double-brace initialization: the anonymous HashMap
    // subclass created by {{ ... }} pins the enclosing test instance and is a
    // well-known Java anti-pattern.
    HashMap<String, String> offloaderConfig = new HashMap<>();
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
    offloaderConfig.put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
    LedgerOffloader offloader = getOffloader(offloaderConfig);

    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;

    Map<String, String> driverMeta = new HashMap<>();
    driverMeta.put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);

    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();

    // 10 entries of 100 bytes each: the 1000-byte segment cap is reached and the
    // segment closes on its own.
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    // Describe the completed segment and read it back.
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(
            MLDataFormats.OffloadSegment.newBuilder()
                    .setUidLsb(uuid.getLeastSignificantBits())
                    .setUidMsb(uuid.getMostSignificantBits())
                    .setComplete(true)
                    .setEndEntryId(9)
                    .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 9).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        // Compare each read-back entry against the original payload (TestNG's
        // assertEquals compares byte[] contents).
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        assertEquals(storedEntry.getData(), ledgerEntry.getEntryBytes());
    }
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Yahoo fork of the Apache Pulsar project:
class BlobStoreManagedLedgerOffloaderStreamingTest, method testReadAndWriteAcrossSegment.
/**
 * Streams two 10-entry segments (entries 0-9 and 10-19) through two independent
 * offloaders, then reads the full range 0-19 back through a single read handle built
 * from both segments, verifying each entry round-trips byte-for-byte across the
 * segment boundary.
 *
 * @throws Exception on any offloader/read failure (test fails)
 */
@Test
public void testReadAndWriteAcrossSegment() throws Exception {
    // Both offloaders use the identical configuration: build it once and copy it,
    // instead of repeating a double-brace-initialized anonymous HashMap (which pins
    // the enclosing test instance).
    HashMap<String, String> offloaderConfig = new HashMap<>();
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
    offloaderConfig.put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
    LedgerOffloader offloader = getOffloader(offloaderConfig);
    // Defensive copy in case getOffloader mutates the map it is given.
    LedgerOffloader offloader2 = getOffloader(new HashMap<>(offloaderConfig));

    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;

    Map<String, String> driverMeta = new HashMap<>();
    driverMeta.put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);

    // First segment: entries 0-9 fill the 1000-byte cap and close the segment.
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    // Second segment: entries 10-19, starting where the first segment ended.
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);

    // Build a read context covering both segments and read the whole range back.
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder
            .addOffloadSegment(
                    MLDataFormats.OffloadSegment.newBuilder()
                            .setUidLsb(uuid.getLeastSignificantBits())
                            .setUidMsb(uuid.getMostSignificantBits())
                            .setComplete(true)
                            .setEndEntryId(9)
                            .build())
            .addOffloadSegment(
                    MLDataFormats.OffloadSegment.newBuilder()
                            .setUidLsb(uuid2.getLeastSignificantBits())
                            .setUidMsb(uuid2.getMostSignificantBits())
                            .setComplete(true)
                            .setEndEntryId(19)
                            .build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    final LedgerEntries ledgerEntries = readHandle.readAsync(0, 19).get();
    for (LedgerEntry ledgerEntry : ledgerEntries) {
        // TestNG's assertEquals compares byte[] contents.
        final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
        assertEquals(storedEntry.getData(), ledgerEntry.getEntryBytes());
    }
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Yahoo fork of the Apache Pulsar project:
class BlobStoreManagedLedgerOffloaderStreamingTest, method testHappyCase.
/**
 * Smoke test of the streaming-offload happy path: offer 10 x 100-byte entries so the
 * 1000-byte segment cap closes the segment, and confirm the offload result future
 * completes. Fixes: informational messages were logged at error level, the "result"
 * log message had a typo ("reasult"), and the offloader config used double-brace
 * initialization.
 *
 * @throws Exception on any offloader failure (test fails)
 */
@Test
public void testHappyCase() throws Exception {
    // Plain map instead of a double-brace-initialized anonymous HashMap subclass.
    HashMap<String, String> offloaderConfig = new HashMap<>();
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
    offloaderConfig.put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
    LedgerOffloader offloader = getOffloader(offloaderConfig);

    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;

    // Informational progress message — was logged at error level.
    log.info("try begin offload");
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, new HashMap<>()).get();

    // 10 entries of 100 bytes fill the 1000-byte segment, closing it.
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final OffloadHandle.OfferEntryResult offerEntryResult = offloadHandle.offerEntry(EntryImpl.create(0, i, data));
        log.info("offer result: {}", offerEntryResult);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    // Typo fixed: "reasult" -> "result".
    log.info("Offload result: {}", offloadResult);
}
Aggregations