Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class AuditorPeriodicBookieCheckTest, method testPeriodicBookieCheckInterval:
/**
 * Test that the periodic bookie checker works.
 */
@Test
public void testPeriodicBookieCheckInterval() throws Exception {
    bsConfs.get(0).setZkServers(zkUtil.getZooKeeperConnectString());
    runFunctionWithLedgerManagerFactory(bsConfs.get(0), mFactory -> {
        try (LedgerManager ledgerManager = mFactory.newLedgerManager()) {
            @Cleanup
            final LedgerUnderreplicationManager underReplicationManager =
                    mFactory.newLedgerUnderreplicationManager();
            LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes());
            LedgerMetadata md = LedgerHandleAdapter.getLedgerMetadata(lh);
            List<BookieSocketAddress> ensemble = md.getEnsembles().get(0L);
            ensemble.set(0, new BookieSocketAddress("1.1.1.1", 1000));
            TestCallbacks.GenericCallbackFuture<Void> cb =
                    new TestCallbacks.GenericCallbackFuture<Void>();
            ledgerManager.writeLedgerMetadata(lh.getId(), md, cb);
            cb.get();
            long underReplicatedLedger = -1;
            for (int i = 0; i < 10; i++) {
                underReplicatedLedger = underReplicationManager.pollLedgerToRereplicate();
                if (underReplicatedLedger != -1) {
                    break;
                }
                Thread.sleep(CHECK_INTERVAL * 1000);
            }
            assertEquals("Ledger should be under replicated", lh.getId(), underReplicatedLedger);
        } catch (Exception e) {
            throw new UncheckedExecutionException(e.getMessage(), e);
        }
        return null;
    });
}
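The test above relies on the basic LedgerHandle lifecycle before it tampers with the ledger's metadata. As a point of reference, here is a minimal sketch of that synchronous lifecycle (create, append, reopen, read, close); it assumes a reachable BookKeeper/ZooKeeper deployment, and the connect string, quorum sizes and password are placeholders.

// Minimal sketch: synchronous LedgerHandle lifecycle (create, append, reopen, read, close).
// Assumes a running BookKeeper cluster; "zk1:2181" and "passwd" are placeholders.
import java.util.Enumeration;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;

public class LedgerHandleBasics {
    public static void main(String[] args) throws Exception {
        BookKeeper bk = new BookKeeper("zk1:2181");
        // Ensemble of 3 bookies, write/ack quorum of 2, CRC32 digest.
        LedgerHandle lh = bk.createLedger(3, 2, 2, DigestType.CRC32, "passwd".getBytes());
        long ledgerId = lh.getId();
        for (int i = 0; i < 10; i++) {
            lh.addEntry(("entry-" + i).getBytes());
        }
        lh.close();

        // Reopen the sealed ledger and read everything back.
        LedgerHandle readLh = bk.openLedger(ledgerId, DigestType.CRC32, "passwd".getBytes());
        Enumeration<LedgerEntry> entries = readLh.readEntries(0, readLh.getLastAddConfirmed());
        while (entries.hasMoreElements()) {
            System.out.println(new String(entries.nextElement().getEntry()));
        }
        readLh.close();
        bk.close();
    }
}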
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class AuditorRollingRestartTest, method testAuditingDuringRollingRestart:
private void testAuditingDuringRollingRestart(LedgerManagerFactory mFactory) throws Exception {
    final LedgerUnderreplicationManager underReplicationManager =
            mFactory.newLedgerUnderreplicationManager();
    LedgerHandle lh = bkc.createLedger(3, 3, DigestType.CRC32, "passwd".getBytes());
    for (int i = 0; i < 10; i++) {
        lh.asyncAddEntry("foobar".getBytes(), new TestCallbacks.AddCallbackFuture(i), null);
    }
    lh.addEntry("foobar".getBytes());
    lh.close();
    assertEquals("shouldn't be anything under replicated",
            -1, underReplicationManager.pollLedgerToRereplicate());
    underReplicationManager.disableLedgerReplication();
    BookieSocketAddress auditor = AuditorElector.getCurrentAuditor(baseConf, zkc);
    ServerConfiguration conf = killBookie(auditor);
    Thread.sleep(2000);
    startBookie(conf);
    // give the auditor time to run
    Thread.sleep(2000);
    assertEquals("shouldn't be anything under replicated",
            -1, underReplicationManager.pollLedgerToRereplicate());
}
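The restart test appends asynchronously with TestCallbacks.AddCallbackFuture and then issues one synchronous addEntry to let the pipeline drain. A minimal sketch of the same asynchronous add path using a plain AddCallback and a CountDownLatch (the latch and method names are illustrative):

// Minimal sketch: asynchronous appends on a LedgerHandle, waiting for completion
// with a CountDownLatch. The helper name and latch are illustrative.
import java.util.concurrent.CountDownLatch;
import org.apache.bookkeeper.client.AsyncCallback.AddCallback;
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.LedgerHandle;

public class AsyncAddExample {
    static void addEntriesAsync(LedgerHandle lh, int numEntries) throws InterruptedException {
        final CountDownLatch pendingAdds = new CountDownLatch(numEntries);
        AddCallback cb = new AddCallback() {
            @Override
            public void addComplete(int rc, LedgerHandle handle, long entryId, Object ctx) {
                if (rc != BKException.Code.OK) {
                    System.err.println("add failed for entry " + ctx + ": " + BKException.getMessage(rc));
                }
                pendingAdds.countDown();
            }
        };
        for (int i = 0; i < numEntries; i++) {
            lh.asyncAddEntry(("entry-" + i).getBytes(), cb, i);
        }
        pendingAdds.await();
    }
}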
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class Auditor, method checkAllLedgers:
/**
 * List all the ledgers and check them individually. This should not
 * be run very often.
 */
void checkAllLedgers() throws BKAuditException, BKException, IOException, InterruptedException, KeeperException {
    ZooKeeper newzk = ZooKeeperClient.newBuilder()
            .connectString(conf.getZkServers())
            .sessionTimeoutMs(conf.getZkTimeout())
            .build();
    final BookKeeper client = new BookKeeper(new ClientConfiguration(conf), newzk);
    final BookKeeperAdmin admin = new BookKeeperAdmin(client, statsLogger);
    try {
        final LedgerChecker checker = new LedgerChecker(client);
        final AtomicInteger returnCode = new AtomicInteger(BKException.Code.OK);
        final CountDownLatch processDone = new CountDownLatch(1);
        Processor<Long> checkLedgersProcessor = new Processor<Long>() {

            @Override
            public void process(final Long ledgerId, final AsyncCallback.VoidCallback callback) {
                try {
                    if (!ledgerUnderreplicationManager.isLedgerReplicationEnabled()) {
                        LOG.info("Ledger rereplication has been disabled, aborting periodic check");
                        processDone.countDown();
                        return;
                    }
                } catch (ReplicationException.UnavailableException ue) {
                    LOG.error("Underreplication manager unavailable running periodic check", ue);
                    processDone.countDown();
                    return;
                }
                LedgerHandle lh = null;
                try {
                    lh = admin.openLedgerNoRecovery(ledgerId);
                    checker.checkLedger(lh, new ProcessLostFragmentsCb(lh, callback),
                            conf.getAuditorLedgerVerificationPercentage());
                    // we collect the following stats to get a measure of the
                    // distribution of a single ledger within the bk cluster
                    // the higher the number of fragments/bookies, the more distributed it is
                    numFragmentsPerLedger.registerSuccessfulValue(lh.getNumFragments());
                    numBookiesPerLedger.registerSuccessfulValue(lh.getNumBookies());
                    numLedgersChecked.inc();
                } catch (BKException.BKNoSuchLedgerExistsException bknsle) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Ledger was deleted before we could check it", bknsle);
                    }
                    callback.processResult(BKException.Code.OK, null, null);
                    return;
                } catch (BKException bke) {
                    LOG.error("Couldn't open ledger " + ledgerId, bke);
                    callback.processResult(BKException.Code.BookieHandleNotAvailableException, null, null);
                    return;
                } catch (InterruptedException ie) {
                    LOG.error("Interrupted opening ledger", ie);
                    Thread.currentThread().interrupt();
                    callback.processResult(BKException.Code.InterruptedException, null, null);
                    return;
                } finally {
                    if (lh != null) {
                        try {
                            lh.close();
                        } catch (BKException bke) {
                            LOG.warn("Couldn't close ledger " + ledgerId, bke);
                        } catch (InterruptedException ie) {
                            LOG.warn("Interrupted closing ledger " + ledgerId, ie);
                            Thread.currentThread().interrupt();
                        }
                    }
                }
            }
        };
        ledgerManager.asyncProcessLedgers(checkLedgersProcessor, new AsyncCallback.VoidCallback() {

            @Override
            public void processResult(int rc, String s, Object obj) {
                returnCode.set(rc);
                processDone.countDown();
            }
        }, null, BKException.Code.OK, BKException.Code.ReadException);
        try {
            processDone.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new BKAuditException("Exception while checking ledgers", e);
        }
        if (returnCode.get() != BKException.Code.OK) {
            throw BKException.create(returnCode.get());
        }
    } finally {
        admin.close();
        client.close();
        newzk.close();
    }
}
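checkAllLedgers opens each ledger with openLedgerNoRecovery so that auditing never fences an active writer, and treats a missing ledger as success. A stripped-down sketch of that open/inspect/close pattern, assuming a configured ClientConfiguration and a known ledger id (both are placeholders here):

// Minimal sketch: inspect a ledger without fencing it, as the auditor does.
// openLedgerNoRecovery leaves the active writer undisturbed; a ledger that has
// already been deleted is treated as "nothing to check".
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeperAdmin;
import org.apache.bookkeeper.client.LedgerHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;

public class InspectLedgerExample {
    static void inspect(ClientConfiguration conf, long ledgerId) throws Exception {
        BookKeeperAdmin admin = new BookKeeperAdmin(conf);
        LedgerHandle lh = null;
        try {
            lh = admin.openLedgerNoRecovery(ledgerId);
            System.out.println("ledger " + ledgerId
                    + " lastAddConfirmed=" + lh.getLastAddConfirmed());
        } catch (BKException.BKNoSuchLedgerExistsException e) {
            System.out.println("ledger " + ledgerId + " was deleted before it could be checked");
        } finally {
            if (lh != null) {
                lh.close();
            }
            admin.close();
        }
    }
}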
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class TestRollLogSegments, method testCaughtUpReaderOnLogSegmentRolling:
@FlakyTest
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testCaughtUpReaderOnLogSegmentRolling() throws Exception {
    String name = "distrlog-caughtup-reader-on-logsegment-rolling";
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.loadConf(conf);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(0);
    confLocal.setImmediateFlushEnabled(false);
    confLocal.setOutputBufferSize(4 * 1024 * 1024);
    confLocal.setTraceReadAheadMetadataChanges(true);
    confLocal.setEnsembleSize(1);
    confLocal.setWriteQuorumSize(1);
    confLocal.setAckQuorumSize(1);
    confLocal.setReadLACLongPollTimeout(99999999);
    confLocal.setReaderIdleWarnThresholdMillis(2 * 99999999 + 1);
    confLocal.setBKClientReadTimeout(99999999 + 1);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKSyncLogWriter writer = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    // 1) writer adds 5 entries.
    final int numEntries = 5;
    for (int i = 1; i <= numEntries; i++) {
        writer.write(DLMTestUtil.getLogRecordInstance(i));
        writer.flush();
        writer.commit();
    }
    BKDistributedLogManager readDLM = (BKDistributedLogManager) createNewDLM(confLocal, name);
    final BKAsyncLogReader reader = (BKAsyncLogReader) readDLM.getAsyncLogReader(DLSN.InitialDLSN);
    // 2) reader should be able to read 5 entries.
    for (long i = 1; i <= numEntries; i++) {
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
        assertEquals(record.getTransactionId() - 1, record.getSequenceId());
    }
    BKLogSegmentWriter perStreamWriter = writer.segmentWriter;
    BookKeeperClient bkc = DLMTestUtil.getBookKeeperClient(readDLM);
    LedgerHandle readLh = bkc.get().openLedgerNoRecovery(getLedgerHandle(perStreamWriter).getId(),
            BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes(UTF_8));
    // Writer moved to lac = 9, while reader knows lac = 8 and is moving to wait on 9
    checkAndWaitWriterReaderPosition(perStreamWriter, 9, reader, 9, readLh, 8);
    // write the 6th record
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 1));
    writer.flush();
    // Writer moved to lac = 10, while reader knows lac = 9 and is moving to wait on 10
    checkAndWaitWriterReaderPosition(perStreamWriter, 10, reader, 10, readLh, 9);
    // write records without commit to simulate similar failure cases
    writer.write(DLMTestUtil.getLogRecordInstance(numEntries + 2));
    writer.flush();
    // Writer moved to lac = 11, while reader knows lac = 10 and is moving to wait on 11
    checkAndWaitWriterReaderPosition(perStreamWriter, 11, reader, 11, readLh, 10);
    while (true) {
        BKLogSegmentEntryReader entryReader = (BKLogSegmentEntryReader)
                reader.getReadAheadReader().getCurrentSegmentReader().getEntryReader();
        if (null != entryReader && null != entryReader.getOutstandingLongPoll()) {
            break;
        }
        Thread.sleep(1000);
    }
    logger.info("Waiting for long poll getting interrupted with metadata changed");
    // simulate a recovery without closing the ledger, causing a wrong last dlsn to be recorded
    BKLogWriteHandler writeHandler = writer.getCachedWriteHandler();
    writeHandler.completeAndCloseLogSegment(
            writeHandler.inprogressZNodeName(perStreamWriter.getLogSegmentId(),
                    perStreamWriter.getStartTxId(), perStreamWriter.getLogSegmentSequenceNumber()),
            perStreamWriter.getLogSegmentSequenceNumber(), perStreamWriter.getLogSegmentId(),
            perStreamWriter.getStartTxId(), perStreamWriter.getLastTxId(),
            perStreamWriter.getPositionWithinLogSegment() - 1, 9, 0);
    BKSyncLogWriter anotherWriter = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    anotherWriter.write(DLMTestUtil.getLogRecordInstance(numEntries + 3));
    anotherWriter.flush();
    anotherWriter.commit();
    anotherWriter.closeAndComplete();
    for (long i = numEntries + 1; i <= numEntries + 3; i++) {
        LogRecordWithDLSN record = Utils.ioResult(reader.readNext());
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(i, record.getTransactionId());
    }
    Utils.close(reader);
    readDLM.close();
}
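In this test, readLh is a LedgerHandle opened without recovery on the live log segment's ledger, and checkAndWaitWriterReaderPosition uses it to observe how far the last-add-confirmed has advanced. A minimal sketch of tailing a still-open ledger the same way; the client, ledger id, password and polling loop are illustrative:

// Minimal sketch: tailing a ledger that is still being written, via a no-recovery handle.
// The BookKeeper client, ledgerId, password and the fixed polling loop are placeholders.
import java.util.Enumeration;
import org.apache.bookkeeper.client.BookKeeper;
import org.apache.bookkeeper.client.BookKeeper.DigestType;
import org.apache.bookkeeper.client.LedgerEntry;
import org.apache.bookkeeper.client.LedgerHandle;

public class TailLedgerExample {
    static void tail(BookKeeper bk, long ledgerId, byte[] passwd) throws Exception {
        // No-recovery open: does not fence the active writer.
        LedgerHandle readLh = bk.openLedgerNoRecovery(ledgerId, DigestType.CRC32, passwd);
        long nextEntry = 0;
        for (int attempt = 0; attempt < 10; attempt++) {
            // Ask the bookies for the current last-add-confirmed.
            long lac = readLh.readLastConfirmed();
            if (lac >= nextEntry) {
                // readUnconfirmedEntries can read past the handle's locally cached LAC.
                Enumeration<LedgerEntry> entries = readLh.readUnconfirmedEntries(nextEntry, lac);
                while (entries.hasMoreElements()) {
                    System.out.println(new String(entries.nextElement().getEntry()));
                }
                nextEntry = lac + 1;
            }
            Thread.sleep(100);
        }
        readLh.close();
    }
}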
Use of org.apache.bookkeeper.client.LedgerHandle in project bookkeeper by apache.
The class DLMTestUtil, method injectLogSegmentWithGivenLogSegmentSeqNo:
public static void injectLogSegmentWithGivenLogSegmentSeqNo(DistributedLogManager manager,
        DistributedLogConfiguration conf, long logSegmentSeqNo, long startTxID,
        boolean writeEntries, long segmentSize, boolean completeLogSegment) throws Exception {
    BKDistributedLogManager dlm = (BKDistributedLogManager) manager;
    BKLogWriteHandler writeHandler = dlm.createWriteHandler(false);
    Utils.ioResult(writeHandler.lockHandler());
    // Start a log segment with a given ledger seq number.
    BookKeeperClient bkc = getBookKeeperClient(dlm);
    LedgerHandle lh = bkc.get().createLedger(conf.getEnsembleSize(), conf.getWriteQuorumSize(),
            conf.getAckQuorumSize(), BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes());
    String inprogressZnodeName = writeHandler.inprogressZNodeName(lh.getId(), startTxID, logSegmentSeqNo);
    String znodePath = writeHandler.inprogressZNode(lh.getId(), startTxID, logSegmentSeqNo);
    int logSegmentMetadataVersion = conf.getDLLedgerMetadataLayoutVersion();
    LogSegmentMetadata l = new LogSegmentMetadata.LogSegmentMetadataBuilder(znodePath,
            logSegmentMetadataVersion, lh.getId(), startTxID)
            .setLogSegmentSequenceNo(logSegmentSeqNo)
            .setEnvelopeEntries(LogSegmentMetadata.supportsEnvelopedEntries(logSegmentMetadataVersion))
            .build();
    l.write(getZooKeeperClient(dlm));
    writeHandler.maxTxId.update(Version.ANY, startTxID);
    writeHandler.addLogSegmentToCache(inprogressZnodeName, l);
    BKLogSegmentWriter writer = new BKLogSegmentWriter(writeHandler.getFullyQualifiedName(),
            inprogressZnodeName, conf, conf.getDLLedgerMetadataLayoutVersion(),
            new BKLogSegmentEntryWriter(lh), writeHandler.lock, startTxID, logSegmentSeqNo,
            writeHandler.scheduler, writeHandler.statsLogger, writeHandler.statsLogger,
            writeHandler.alertStatsLogger, PermitLimiter.NULL_PERMIT_LIMITER,
            new SettableFeatureProvider("", 0), ConfUtils.getConstDynConf(conf));
    if (writeEntries) {
        long txid = startTxID;
        for (long j = 1; j <= segmentSize; j++) {
            writer.write(DLMTestUtil.getLogRecordInstance(txid++));
        }
        Utils.ioResult(writer.flushAndCommit());
    }
    if (completeLogSegment) {
        Utils.ioResult(writeHandler.completeAndCloseLogSegment(writer));
    }
    Utils.ioResult(writeHandler.unlockHandler());
}
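The helper creates a raw ledger directly through the BookKeeper client and wraps it in a BKLogSegmentEntryWriter. If a test needs to dispose of such a ledger explicitly, a hypothetical cleanup sketch using BookKeeper.deleteLedger could look like this (the client and ledger id are placeholders):

// Minimal, hypothetical sketch: removing a manually created ledger after a test.
// deleteLedger removes the ledger's metadata and the data stored on the bookies.
import org.apache.bookkeeper.client.BKException;
import org.apache.bookkeeper.client.BookKeeper;

public class LedgerCleanupExample {
    static void cleanup(BookKeeper bk, long ledgerId) throws InterruptedException {
        try {
            bk.deleteLedger(ledgerId);
        } catch (BKException.BKNoSuchLedgerExistsException e) {
            // already gone, nothing to do
        } catch (BKException e) {
            System.err.println("failed to delete ledger " + ledgerId + ": " + e.getMessage());
        }
    }
}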