Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Apache Pulsar project,
from the class AdminApiOffloadTest, method testOffloadPoliciesAppliedApi.
@Test
public void testOffloadPoliciesAppliedApi() throws Exception {
    // Create a partitioned topic and touch it with a producer so it is loaded.
    final String topic = testTopic + UUID.randomUUID().toString();
    admin.topics().createPartitionedTopic(topic, 3);
    pulsarClient.newProducer().topic(topic).create().close();

    // With no overrides configured, the applied policies must equal the broker defaults.
    OffloadPoliciesImpl appliedPolicies = (OffloadPoliciesImpl) admin.topics().getOffloadPolicies(topic, true);
    OffloadPoliciesImpl brokerDefaults =
            OffloadPoliciesImpl.mergeConfiguration(null, null, pulsar.getConfiguration().getProperties());
    assertEquals(appliedPolicies, brokerDefaults);

    // No real offloader is configured in this test; stub one so broker code paths don't throw.
    LedgerOffloader stubOffloader = mock(LedgerOffloader.class);
    when(stubOffloader.getOffloadDriverName()).thenReturn("mock");
    doReturn(stubOffloader).when(pulsar).createManagedLedgerOffloader(any());

    // Namespace-level policies take precedence over the broker defaults.
    OffloadPoliciesImpl nsPolicies = new OffloadPoliciesImpl();
    nsPolicies.setManagedLedgerOffloadThresholdInBytes(100L);
    nsPolicies.setManagedLedgerOffloadDeletionLagInMillis(200L);
    nsPolicies.setManagedLedgerOffloadDriver("s3");
    nsPolicies.setManagedLedgerOffloadBucket("buck");
    admin.namespaces().setOffloadPolicies(myNamespace, nsPolicies);
    Awaitility.await().untilAsserted(
            () -> assertEquals(admin.namespaces().getOffloadPolicies(myNamespace), nsPolicies));
    assertEquals(admin.topics().getOffloadPolicies(topic, true), nsPolicies);

    // Topic-level policies take precedence over the namespace-level ones.
    OffloadPoliciesImpl topicLevel = new OffloadPoliciesImpl();
    topicLevel.setManagedLedgerOffloadThresholdInBytes(200L);
    topicLevel.setManagedLedgerOffloadDeletionLagInMillis(400L);
    topicLevel.setManagedLedgerOffloadDriver("s3");
    topicLevel.setManagedLedgerOffloadBucket("buck2");
    admin.topics().setOffloadPolicies(topic, topicLevel);
    Awaitility.await().untilAsserted(
            () -> assertEquals(admin.topics().getOffloadPolicies(topic, true), topicLevel));

    // Removing each override falls back to the next level: namespace, then broker defaults.
    admin.topics().removeOffloadPolicies(topic);
    Awaitility.await().untilAsserted(
            () -> assertEquals(admin.topics().getOffloadPolicies(topic, true), nsPolicies));
    admin.namespaces().removeOffloadPolicies(myNamespace);
    Awaitility.await().untilAsserted(
            () -> assertEquals(admin.topics().getOffloadPolicies(topic, true), brokerDefaults));
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Apache Pulsar project,
from the class PersistentTopicsBase, method internalUpdateOffloadPolicies.
/**
 * Propagates a topic-level offload-policies change onto the topic's live
 * managed ledger so the new settings take effect without reloading the topic.
 *
 * @param offloadPolicies the new topic-level policies, or {@code null} when the
 *        topic-level override was removed (revert to the namespace offloader)
 * @param topicName the topic whose managed-ledger config should be updated
 * @return a future that completes once the config is updated; it is a no-op
 *         when the topic is not loaded or is not persistent
 */
private CompletableFuture<Void> internalUpdateOffloadPolicies(OffloadPoliciesImpl offloadPolicies, TopicName topicName) {
return pulsar().getBrokerService().getTopicIfExists(topicName.toString()).thenAccept(optionalTopic -> {
try {
// Nothing to do if the topic isn't currently loaded or isn't persistent.
if (!optionalTopic.isPresent() || !topicName.isPersistent()) {
return;
}
PersistentTopic persistentTopic = (PersistentTopic) optionalTopic.get();
ManagedLedgerConfig managedLedgerConfig = persistentTopic.getManagedLedger().getConfig();
if (offloadPolicies == null) {
// Override removed: fall back to the namespace-level offloader.
LedgerOffloader namespaceOffloader = pulsar().getLedgerOffloaderMap().get(topicName.getNamespaceObject());
LedgerOffloader topicOffloader = managedLedgerConfig.getLedgerOffloader();
// Close the topic-specific offloader, but never the shared namespace
// instance (identity check guards against closing it).
if (topicOffloader != null && topicOffloader != namespaceOffloader) {
topicOffloader.close();
}
managedLedgerConfig.setLedgerOffloader(namespaceOffloader);
} else {
// New/changed override: build a fresh offloader from the given policies.
managedLedgerConfig.setLedgerOffloader(pulsar().createManagedLedgerOffloader(offloadPolicies));
}
persistentTopic.getManagedLedger().setConfig(managedLedgerConfig);
} catch (PulsarServerException e) {
// Surface broker-side failures to the admin caller as a REST error.
throw new RestException(e);
}
});
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Apache Pulsar project,
from the class PulsarConnectorCache, method getManagedLedgerConfig.
/**
 * Builds a managed-ledger config whose offloader matches the given policies:
 * the shared default offloader when no policies are supplied, otherwise a
 * per-namespace cached offloader that is rebuilt whenever the policies change.
 */
public ManagedLedgerConfig getManagedLedgerConfig(NamespaceName namespaceName, OffloadPoliciesImpl offloadPolicies, PulsarConnectorConfig pulsarConnectorConfig) {
    ManagedLedgerConfig config = new ManagedLedgerConfig();
    if (offloadPolicies == null) {
        // No namespace-specific policies: use the shared default offloader.
        config.setLedgerOffloader(this.defaultOffloader);
        return config;
    }
    LedgerOffloader cached = offloaderMap.compute(namespaceName, (ns, existing) -> {
        if (existing != null) {
            // Reuse the cached offloader only while its policies are unchanged.
            if (Objects.equals(existing.getOffloadPolicies(), offloadPolicies)) {
                return existing;
            }
            // Policies changed: release the stale offloader before replacing it.
            existing.close();
        }
        return initManagedLedgerOffloader(offloadPolicies, pulsarConnectorConfig);
    });
    config.setLedgerOffloader(cached);
    return config;
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Apache Pulsar project,
from the class BlobStoreManagedLedgerOffloaderStreamingTest, method testInvalidEntryIds.
@Test
public void testInvalidEntryIds() throws Exception {
    // Offloader configured so the segment rolls on size (1000 bytes) well
    // before the 600-second time limit.
    HashMap<String, String> offloaderConfig = new HashMap<>();
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
    offloaderConfig.put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
    offloaderConfig.put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
    LedgerOffloader offloader = getOffloader(offloaderConfig);

    ManagedLedger ml = createMockManagedLedger();
    UUID segmentUuid = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    HashMap<String, String> driverMeta = new HashMap<>();
    driverMeta.put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);

    OffloadHandle offloadHandle = offloader.streamingOffload(ml, segmentUuid, beginLedger, beginEntry, driverMeta).get();
    // Offer ten 100-byte entries; the segment should close once the
    // size-in-bytes limit fills up.
    final LinkedList<Entry> offeredEntries = new LinkedList<>();
    for (int entryId = 0; entryId < 10; entryId++) {
        final byte[] payload = new byte[100];
        random.nextBytes(payload);
        final EntryImpl entry = EntryImpl.create(0, entryId, payload);
        offloadHandle.offerEntry(entry);
        offeredEntries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);

    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(segmentUuid.getLeastSignificantBits()).setUidMsb(segmentUuid.getMostSignificantBits()).setComplete(true).setEndEntryId(9).build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();

    // Negative entry ids must be rejected.
    try {
        readHandle.read(-1, -1);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception expected) {
        // expected: invalid range
    }
    // A range past the last offloaded entry (9) must be rejected too.
    try {
        readHandle.read(0, 20);
        Assert.fail("Shouldn't be able to read anything");
    } catch (Exception expected) {
        // expected: invalid range
    }
}
Example usage of org.apache.bookkeeper.mledger.LedgerOffloader in the Apache Pulsar project,
from the class BlobStoreManagedLedgerOffloaderStreamingTest, method testRandomRead.
/**
 * Offloads two consecutive 10-entry segments (entries 0-9 and 10-19) with two
 * independent offloaders, then reads 20 random sub-ranges back through a single
 * ReadHandle spanning both segments and verifies each entry round-trips.
 */
@Test
public void testRandomRead() throws Exception {
    LedgerOffloader offloader = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    LedgerOffloader offloader2 = getOffloader(new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_SIZE_IN_BYTES, "1000");
            put(config.getKeys(TieredStorageConfiguration.METADATA_FIELD_MAX_BLOCK_SIZE).get(0), "5242880");
            put(TieredStorageConfiguration.MAX_OFFLOAD_SEGMENT_ROLLOVER_TIME_SEC, "600");
        }
    });
    ManagedLedger ml = createMockManagedLedger();
    UUID uuid = UUID.randomUUID();
    UUID uuid2 = UUID.randomUUID();
    long beginLedger = 0;
    long beginEntry = 0;
    Map<String, String> driverMeta = new HashMap<String, String>() {
        {
            put(TieredStorageConfiguration.METADATA_FIELD_BUCKET, BUCKET);
        }
    };
    // First segment: entries 0-9. The segment should close once the
    // size-in-bytes limit is reached.
    OffloadHandle offloadHandle = offloader.streamingOffload(ml, uuid, beginLedger, beginEntry, driverMeta).get();
    final LinkedList<Entry> entries = new LinkedList<>();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i, data);
        offloadHandle.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult = offloadHandle.getOffloadResultAsync().get();
    assertEquals(offloadResult.endLedger, 0);
    assertEquals(offloadResult.endEntry, 9);
    // Second segment: entries 10-19, written by an independent offloader.
    OffloadHandle offloadHandle2 = offloader2.streamingOffload(ml, uuid2, beginLedger, 10, driverMeta).get();
    for (int i = 0; i < 10; i++) {
        final byte[] data = new byte[100];
        random.nextBytes(data);
        final EntryImpl entry = EntryImpl.create(0, i + 10, data);
        offloadHandle2.offerEntry(entry);
        entries.add(entry);
    }
    final LedgerOffloader.OffloadResult offloadResult2 = offloadHandle2.getOffloadResultAsync().get();
    assertEquals(offloadResult2.endLedger, 0);
    assertEquals(offloadResult2.endEntry, 19);
    // Build a read context covering both segments.
    final OffloadContext.Builder contextBuilder = OffloadContext.newBuilder();
    contextBuilder.addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid.getLeastSignificantBits()).setUidMsb(uuid.getMostSignificantBits()).setComplete(true).setEndEntryId(9).build()).addOffloadSegment(MLDataFormats.OffloadSegment.newBuilder().setUidLsb(uuid2.getLeastSignificantBits()).setUidMsb(uuid2.getMostSignificantBits()).setComplete(true).setEndEntryId(19).build());
    final ReadHandle readHandle = offloader.readOffloaded(0, contextBuilder.build(), driverMeta).get();
    // FIX: the Random must be created ONCE, outside the loop. The original
    // re-created `new Random(0)` on every iteration, so all 20 iterations drew
    // the identical [begin, end] pair and only a single range was ever tested.
    Random seed = new Random(0);
    for (int i = 0; i <= 19; i++) {
        int begin = seed.nextInt(20);
        int end = seed.nextInt(20);
        if (begin >= end) {
            int temp = begin;
            begin = end;
            end = temp;
        }
        final LedgerEntries ledgerEntries = readHandle.readAsync(begin, end).get();
        for (LedgerEntry ledgerEntry : ledgerEntries) {
            final EntryImpl storedEntry = (EntryImpl) entries.get((int) ledgerEntry.getEntryId());
            final byte[] storedData = storedEntry.getData();
            final byte[] entryBytes = ledgerEntry.getEntryBytes();
            assertEquals(storedData, entryBytes);
        }
    }
}
Aggregations