Use of io.pravega.common.util.CompositeByteArraySegment in project pravega by pravega.
In class DataRecoveryTest, method testForceMetadataOverWrite.
@Test
public void testForceMetadataOverWrite() throws Exception {
    int instanceId = 0;
    int bookieCount = 3;
    int containerCount = 1;
    @Cleanup
    TestUtils.PravegaRunner pravegaRunner = new TestUtils.PravegaRunner(bookieCount, containerCount);
    pravegaRunner.startBookKeeperRunner(instanceId);
    val bkConfig = BookKeeperConfig.builder()
            .with(BookKeeperConfig.ZK_ADDRESS, "localhost:" + pravegaRunner.getBookKeeperRunner().getBkPort())
            .with(BookKeeperConfig.BK_LEDGER_PATH, pravegaRunner.getBookKeeperRunner().getLedgerPath())
            .with(BookKeeperConfig.ZK_METADATA_PATH, pravegaRunner.getBookKeeperRunner().getLogMetaNamespace())
            .with(BookKeeperConfig.BK_ENSEMBLE_SIZE, 1)
            .with(BookKeeperConfig.BK_WRITE_QUORUM_SIZE, 1)
            .with(BookKeeperConfig.BK_ACK_QUORUM_SIZE, 1)
            .build();
    this.factory = new BookKeeperLogFactory(bkConfig, pravegaRunner.getBookKeeperRunner().getZkClient().get(), this.executorService());
    pravegaRunner.startControllerAndSegmentStore(this.storageFactory, this.factory);
    String streamName = "testDataRecoveryCommand";
    TestUtils.createScopeStream(pravegaRunner.getControllerRunner().getController(), SCOPE, streamName, config);
    try (val clientRunner = new TestUtils.ClientRunner(pravegaRunner.getControllerRunner(), SCOPE)) {
        // Write events to the streams.
        TestUtils.writeEvents(streamName, clientRunner.getClientFactory());
    }
    // Shut down services; this test assumes the cluster is in very bad shape.
    pravegaRunner.shutDownControllerRunner();
    pravegaRunner.shutDownSegmentStoreRunner();
    // Set Pravega properties for the test.
    STATE.set(new AdminCommandState());
    Properties pravegaProperties = new Properties();
    pravegaProperties.setProperty("pravegaservice.container.count", "1");
    pravegaProperties.setProperty("pravegaservice.storage.impl.name", "FILESYSTEM");
    pravegaProperties.setProperty("pravegaservice.storage.layout", "ROLLING_STORAGE");
    pravegaProperties.setProperty("pravegaservice.zk.connect.uri", "localhost:" + pravegaRunner.getBookKeeperRunner().getBkPort());
    pravegaProperties.setProperty("bookkeeper.ledger.path", pravegaRunner.getBookKeeperRunner().getLedgerPath());
    pravegaProperties.setProperty("bookkeeper.zk.metadata.path", pravegaRunner.getBookKeeperRunner().getLogMetaNamespace());
    pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
    pravegaProperties.setProperty("filesystem.root", this.baseDir.getAbsolutePath());
    STATE.get().getConfigBuilder().include(pravegaProperties);
    // Execute basic command workflow for repairing DurableLog.
    CommandArgs args = new CommandArgs(List.of("0"), STATE.get());
    DurableDataLogRepairCommand command = Mockito.spy(new DurableDataLogRepairCommand(args));
    // Test the DurableLogWrapper options to get, overwrite and destroy logs.
    @Cleanup
    val newFactory = new BookKeeperLogFactory(bkConfig, pravegaRunner.getBookKeeperRunner().getZkClient().get(), this.executorService());
    newFactory.initialize();
    @Cleanup
    DebugBookKeeperLogWrapper debugLogWrapper0 = newFactory.createDebugLogWrapper(0);
    int container0LogEntries = command.readDurableDataLogWithCustomCallback((a, b) -> { }, 0, debugLogWrapper0.asReadOnly());
    Assert.assertTrue(container0LogEntries > 0);
    ReadOnlyBookkeeperLogMetadata metadata0 = debugLogWrapper0.fetchMetadata();
    Assert.assertNotNull(metadata0);
    // Create a Repair log with some random content.
    @Cleanup
    DurableDataLog repairLog = newFactory.createDurableDataLog(this.factory.getRepairLogId());
    repairLog.initialize(TIMEOUT);
    repairLog.append(new CompositeByteArraySegment(new byte[0]), TIMEOUT).join();
    @Cleanup
    DebugBookKeeperLogWrapper debugLogWrapperRepair = newFactory.createDebugLogWrapper(0);
    // Overwrite metadata of repair container with metadata of container 0.
    debugLogWrapperRepair.forceMetadataOverWrite(metadata0);
    // Now the amount of log entries read should be equal to the ones of container 0.
    int newContainerRepairLogEntries = command.readDurableDataLogWithCustomCallback((a, b) -> { }, this.factory.getRepairLogId(), debugLogWrapperRepair.asReadOnly());
    ReadOnlyBookkeeperLogMetadata newMetadata1 = debugLogWrapperRepair.fetchMetadata();
    Assert.assertEquals(container0LogEntries, newContainerRepairLogEntries);
    Assert.assertEquals(metadata0.getLedgers(), newMetadata1.getLedgers());
    // Destroy contents of Container 0.
    debugLogWrapper0.deleteDurableLogMetadata();
    Assert.assertNull(debugLogWrapper0.fetchMetadata());
}
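For orientation, the DebugBookKeeperLogWrapper calls above follow a simple cycle: fetch the metadata of a healthy log, force-write it over another log, verify the result, and finally destroy it. Below is a condensed, hypothetical sketch of that cycle only, reusing newFactory, metadata0 and the assertion style from the test above.

// Hypothetical condensed sketch of the overwrite/verify/destroy cycle shown in the test.
@Cleanup
DebugBookKeeperLogWrapper target = newFactory.createDebugLogWrapper(0);
// Replace the target log's metadata with the metadata captured from container 0.
target.forceMetadataOverWrite(metadata0);
Assert.assertNotNull(target.fetchMetadata());
// Destroy the metadata again and confirm there is nothing left to read.
target.deleteDurableLogMetadata();
Assert.assertNull(target.fetchMetadata());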
Use of io.pravega.common.util.CompositeByteArraySegment in project pravega by pravega.
In class BookKeeperLogTests, method testAutoCloseOnBookieFailure.
/**
* Tests the ability to auto-close upon a permanent write failure caused by BookKeeper.
*
* @throws Exception If one got thrown.
*/
@Test
public void testAutoCloseOnBookieFailure() throws Exception {
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        try {
            // Suspend a bookie (this will trigger write errors).
            stopFirstBookie();
            // First write should fail. Either a DataLogNotAvailableException (insufficient bookies) or
            // WriteFailureException (general unable to write) should be thrown.
            AssertExtensions.assertSuppliedFutureThrows("First write did not fail with the appropriate exception.",
                    () -> log.append(new CompositeByteArraySegment(getWriteData()), TIMEOUT),
                    ex -> ex instanceof RetriesExhaustedException
                            && (ex.getCause() instanceof DataLogNotAvailableException || isLedgerClosedException(ex.getCause()))
                            || ex instanceof ObjectClosedException
                            || ex instanceof CancellationException);
            // Subsequent writes should be rejected since the BookKeeperLog is now closed.
            AssertExtensions.assertSuppliedFutureThrows("Second write did not fail with the appropriate exception.",
                    () -> log.append(new CompositeByteArraySegment(getWriteData()), TIMEOUT),
                    ex -> ex instanceof ObjectClosedException || ex instanceof CancellationException);
        } finally {
            // Don't forget to resume the bookie.
            restartFirstBookie();
        }
    }
}
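The exception predicate on the first append above is dense; extracted into a named helper it is easier to read. The following is a sketch only: the helper name isExpectedFirstWriteFailure is hypothetical, while the exception types are the ones already used in the test.

// Hypothetical helper equivalent to the first-write predicate in the test above.
private boolean isExpectedFirstWriteFailure(Throwable ex) {
    // Retries exhausted because bookies were unavailable or the ledger was closed underneath the writer...
    boolean retriesExhausted = ex instanceof RetriesExhaustedException
            && (ex.getCause() instanceof DataLogNotAvailableException || isLedgerClosedException(ex.getCause()));
    // ...or the log auto-closed (and possibly cancelled the append) before the write completed.
    return retriesExhausted || ex instanceof ObjectClosedException || ex instanceof CancellationException;
}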
Use of io.pravega.common.util.CompositeByteArraySegment in project pravega by pravega.
In class BookKeeperLogTests, method testRemoveEmptyLedgers.
/**
* Tests the ability of BookKeeperLog to automatically remove empty ledgers during initialization.
*/
@Test
public void testRemoveEmptyLedgers() throws Exception {
    final int count = 100;
    final int writeEvery = count / 10;
    final Predicate<Integer> shouldAppendAnything = i -> i % writeEvery == 0;
    val allLedgers = new ArrayList<Map.Entry<Long, LedgerMetadata.Status>>();
    final Predicate<Integer> shouldExist = index -> (index >= allLedgers.size() - Ledgers.MIN_FENCE_LEDGER_COUNT)
            || (allLedgers.get(index).getValue() != LedgerMetadata.Status.Empty);
    for (int i = 0; i < count; i++) {
        try (BookKeeperLog log = (BookKeeperLog) createDurableDataLog()) {
            log.initialize(TIMEOUT);
            boolean shouldAppend = shouldAppendAnything.test(i);
            val currentMetadata = log.loadMetadata();
            val lastLedger = currentMetadata.getLedgers().get(currentMetadata.getLedgers().size() - 1);
            allLedgers.add(new AbstractMap.SimpleImmutableEntry<>(lastLedger.getLedgerId(),
                    shouldAppend ? LedgerMetadata.Status.NotEmpty : LedgerMetadata.Status.Empty));
            val metadataLedgers = currentMetadata.getLedgers().stream().map(LedgerMetadata::getLedgerId).collect(Collectors.toSet());
            // Verify Log Metadata does not contain old empty ledgers.
            for (int j = 0; j < allLedgers.size(); j++) {
                val e = allLedgers.get(j);
                val expectedExist = shouldExist.test(j);
                Assert.assertEquals("Unexpected state for metadata. AllLedgerCount=" + allLedgers.size()
                                + ", LedgerIndex=" + j + ", LedgerStatus=" + e.getValue(),
                        expectedExist, metadataLedgers.contains(e.getKey()));
            }
            // Append some data to this Ledger, if needed.
            if (shouldAppend) {
                log.append(new CompositeByteArraySegment(getWriteData()), TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
            }
        }
    }
    // Verify that these ledgers have also been deleted from BookKeeper.
    for (int i = 0; i < allLedgers.size(); i++) {
        val e = allLedgers.get(i);
        if (shouldExist.test(i)) {
            // This should not throw any exceptions.
            Ledgers.openFence(e.getKey(), this.factory.get().getBookKeeperClient(), this.config.get());
        } else {
            AssertExtensions.assertThrows("Ledger not deleted from BookKeeper.",
                    () -> Ledgers.openFence(e.getKey(), this.factory.get().getBookKeeperClient(), this.config.get()),
                    ex -> true);
        }
    }
}
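The test above relies on each initialize() call rolling over to a fresh ledger and on loadMetadata() returning the ordered ledger list, so the last entry is always the ledger just created. A minimal sketch of that relationship, using only calls already shown above (the print statement is illustrative):

try (BookKeeperLog log = (BookKeeperLog) createDurableDataLog()) {
    log.initialize(TIMEOUT);                          // rolls over to a new (still empty) ledger
    val metadata = log.loadMetadata();
    val ledgers = metadata.getLedgers();
    val newest = ledgers.get(ledgers.size() - 1);     // the ledger initialize() just added
    System.out.println("Newest ledger id: " + newest.getLedgerId());
}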
Use of io.pravega.common.util.CompositeByteArraySegment in project pravega by pravega.
In class WriteQueueTests, method testRemoveFinishedWrites.
/**
* Tests the removeFinishedWrites() method.
*/
@Test
public void testRemoveFinishedWrites() {
    // Just over 1ms.
    final int timeIncrement = 1234 * 1000;
    AtomicLong time = new AtomicLong();
    val q = new WriteQueue(time::get);
    val writes = new ArrayDeque<Write>();
    for (int i = 0; i < ITEM_COUNT; i++) {
        time.addAndGet(timeIncrement);
        val w = new Write(new CompositeByteArraySegment(i), new TestWriteLedger(i), new CompletableFuture<>());
        if (i % 2 == 0) {
            // Complete one out of every two writes.
            w.setEntryId(i);
            w.complete();
        }
        q.add(w);
        writes.addLast(w);
    }
    while (!writes.isEmpty()) {
        val write = writes.pollFirst();
        if (!write.isDone()) {
            val result1 = q.removeFinishedWrites();
            Assert.assertEquals("Unexpected value from removeFinishedWrites when there were writes left in the queue.",
                    WriteQueue.CleanupStatus.QueueNotEmpty, result1.getStatus());
            val stats1 = q.getStatistics();
            Assert.assertEquals("Unexpected size after removeFinishedWrites with no effect.", writes.size() + 1, stats1.getSize());
            // Complete this write.
            write.setEntryId(time.get());
            write.complete();
        }
        // Estimate the expected elapsed time based on the removals.
        long expectedElapsed = write.getQueueAddedTimestamp();
        int removed = 1;
        while (!writes.isEmpty() && writes.peekFirst().isDone()) {
            expectedElapsed += writes.pollFirst().getQueueAddedTimestamp();
            removed++;
        }
        expectedElapsed = (time.get() * removed - expectedElapsed) / AbstractTimer.NANOS_TO_MILLIS / removed;
        val result2 = q.removeFinishedWrites();
        val expectedResult = writes.isEmpty() ? WriteQueue.CleanupStatus.QueueEmpty : WriteQueue.CleanupStatus.QueueNotEmpty;
        Assert.assertEquals("Unexpected result from removeFinishedWrites.", expectedResult, result2.getStatus());
        val stats2 = q.getStatistics();
        Assert.assertEquals("Unexpected size after removeFinishedWrites.", writes.size(), stats2.getSize());
        Assert.assertEquals("Unexpected getExpectedProcessingTimeMillis after clear.", expectedElapsed, stats2.getExpectedProcessingTimeMillis());
    }
    // Verify that it does report failed writes when encountered.
    val w3 = new Write(new CompositeByteArraySegment(1), new TestWriteLedger(0), new CompletableFuture<>());
    q.add(w3);
    w3.fail(new IntentionalException(), true);
    val result3 = q.removeFinishedWrites();
    Assert.assertEquals("Unexpected value from removeFinishedWrites when there were failed writes.",
            WriteQueue.CleanupStatus.WriteFailed, result3.getStatus());
}
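The expectedElapsed arithmetic above is simply the average time the removed writes spent in the queue, converted from nanoseconds to milliseconds. Written out more explicitly it looks like the sketch below; removedWrites and removed are hypothetical stand-ins for the writes removed in this round and their count.

long now = time.get();
long totalElapsedNanos = 0;
for (Write removedWrite : removedWrites) {
    // Each write's queue latency is the current (mock) time minus the time it was added to the queue.
    totalElapsedNanos += now - removedWrite.getQueueAddedTimestamp();
}
// Same result as (time.get() * removed - sum of timestamps) / NANOS_TO_MILLIS / removed in the test.
long expectedElapsedMillis = totalElapsedNanos / AbstractTimer.NANOS_TO_MILLIS / removed;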
Use of io.pravega.common.util.CompositeByteArraySegment in project pravega by pravega.
In class DurableDataLogTestBase, method populate.
protected TreeMap<LogAddress, byte[]> populate(DurableDataLog log, int writeCount) {
    TreeMap<LogAddress, byte[]> writtenData = new TreeMap<>(Comparator.comparingLong(LogAddress::getSequence));
    val data = new ArrayList<byte[]>();
    val futures = new ArrayList<CompletableFuture<LogAddress>>();
    for (int i = 0; i < writeCount; i++) {
        byte[] writeData = getWriteData();
        futures.add(log.append(new CompositeByteArraySegment(writeData), TIMEOUT));
        data.add(writeData);
    }
    val addresses = Futures.allOfWithResults(futures).join();
    for (int i = 0; i < data.size(); i++) {
        writtenData.put(addresses.get(i), data.get(i));
    }
    return writtenData;
}
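A minimal sketch of how a subclass test might call populate(); the size assertion is illustrative only, and concrete read-back verification lives in the individual tests.

// Illustrative usage of populate(): write a batch, then inspect the returned address -> payload map.
try (DurableDataLog log = createDurableDataLog()) {
    log.initialize(TIMEOUT);
    // Keys are the LogAddresses returned by append(), ordered by sequence number.
    TreeMap<LogAddress, byte[]> writtenData = populate(log, 10);
    Assert.assertEquals(10, writtenData.size());
}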