Use of com.twitter.distributedlog.LogSegmentMetadata in project distributedlog by twitter.
The class TestLogSegmentCache, method testSameLogSegment.
@Test(timeout = 60000)
public void testSameLogSegment() throws Exception {
    LogSegmentCache cache = new LogSegmentCache("test-same-log-segment");
    List<LogSegmentMetadata> expectedList = Lists.newArrayListWithExpectedSize(2);
    LogSegmentMetadata inprogress = DLMTestUtil.inprogressLogSegment("/inprogress-1", 1L, 1L, 1L);
    expectedList.add(inprogress);
    cache.add(DLMTestUtil.inprogressZNodeName(1L), inprogress);
    LogSegmentMetadata completed = DLMTestUtil.completedLogSegment("/segment-1", 1L, 1L, 100L, 100, 1L, 99L, 0L);
    expectedList.add(completed);
    cache.add(DLMTestUtil.completedLedgerZNodeNameWithLogSegmentSequenceNumber(1L), completed);
    List<LogSegmentMetadata> retrievedList = cache.getLogSegments(LogSegmentMetadata.COMPARATOR);
    assertEquals("Should get both log segments in ascending order",
            expectedList.size(), retrievedList.size());
    for (int i = 0; i < expectedList.size(); i++) {
        assertEqualsWithoutSequenceId(expectedList.get(i), retrievedList.get(i));
    }
    assertEquals("inprogress log segment should see start sequence id : 0",
            0L, retrievedList.get(0).getStartSequenceId());
    Collections.reverse(expectedList);
    retrievedList = cache.getLogSegments(LogSegmentMetadata.DESC_COMPARATOR);
    assertEquals("Should get both log segments in descending order",
            expectedList.size(), retrievedList.size());
    for (int i = 0; i < expectedList.size(); i++) {
        assertEqualsWithoutSequenceId(expectedList.get(i), retrievedList.get(i));
    }
    assertEquals("inprogress log segment should see start sequence id : 0",
            0L, retrievedList.get(1).getStartSequenceId());
}
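For reference, a minimal sketch (not part of the test) of how metadata returned by the cache can be inspected; it relies only on LogSegmentCache.getLogSegments and the LogSegmentMetadata getters that already appear in the snippets on this page.

List<LogSegmentMetadata> segments = cache.getLogSegments(LogSegmentMetadata.COMPARATOR);
for (LogSegmentMetadata segment : segments) {
    // print the key fields exercised by the test above
    System.out.println("segment " + segment.getLogSegmentSequenceNumber()
            + " inprogress=" + segment.isInProgress()
            + " startSequenceId=" + segment.getStartSequenceId());
}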
Use of com.twitter.distributedlog.LogSegmentMetadata in project distributedlog by twitter.
The class TestLogSegmentCache, method testDiff.
@Test(timeout = 60000)
public void testDiff() {
    LogSegmentCache cache = new LogSegmentCache("test-diff");
    // add 5 completed log segments
    for (int i = 1; i <= 5; i++) {
        LogSegmentMetadata metadata = DLMTestUtil.completedLogSegment("/segment" + i, i, i, i * 100L, 100, i, 99L, 0L);
        String name = DLMTestUtil.completedLedgerZNodeNameWithLogSegmentSequenceNumber(i);
        cache.add(name, metadata);
    }
    // add one inprogress log segment
    LogSegmentMetadata inprogress = DLMTestUtil.inprogressLogSegment("/inprogress-6", 6, 600L, 6);
    String name = DLMTestUtil.inprogressZNodeName(6);
    cache.add(name, inprogress);
    // simulate a new view where the first 2 completed log segments were deleted
    // and the inprogress segment was completed
    Set<String> segmentRemoved = Sets.newHashSet();
    for (int i = 1; i <= 2; i++) {
        segmentRemoved.add(DLMTestUtil.completedLedgerZNodeNameWithLogSegmentSequenceNumber(i));
    }
    segmentRemoved.add(DLMTestUtil.inprogressZNodeName(6));
    Set<String> segmentReceived = Sets.newHashSet();
    Set<String> segmentAdded = Sets.newHashSet();
    for (int i = 3; i <= 6; i++) {
        segmentReceived.add(DLMTestUtil.completedLedgerZNodeNameWithLogSegmentSequenceNumber(i));
        if (i == 6) {
            segmentAdded.add(DLMTestUtil.completedLedgerZNodeNameWithLogSegmentSequenceNumber(i));
        }
    }
    Pair<Set<String>, Set<String>> segmentChanges = cache.diff(segmentReceived);
    assertTrue("Should remove " + segmentRemoved + ", but removed " + segmentChanges.getRight(),
            Sets.difference(segmentRemoved, segmentChanges.getRight()).isEmpty());
    assertTrue("Should add " + segmentAdded + ", but added " + segmentChanges.getLeft(),
            Sets.difference(segmentAdded, segmentChanges.getLeft()).isEmpty());
}
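A minimal sketch of how the diff result is typically interpreted, based only on the assertions above: the left set of the returned pair holds segment znode names to add, and the right set holds names to remove from the cache.

Pair<Set<String>, Set<String>> changes = cache.diff(segmentReceived);
Set<String> namesToAdd = changes.getLeft();      // in the received view but not yet in the cache
Set<String> namesToRemove = changes.getRight();  // in the cache but missing from the received view
System.out.println("add " + namesToAdd + ", remove " + namesToRemove);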
Use of com.twitter.distributedlog.LogSegmentMetadata in project distributedlog by twitter.
The class DistributedLogAdmin, method fixInprogressSegmentWithLowerSequenceNumber.
/**
 * Fix inprogress segment with lower ledger sequence number.
 *
 * @param factory
 *          dlm factory.
 * @param metadataUpdater
 *          metadata updater.
 * @param streamName
 *          stream name.
 * @param verbose
 *          print verbose messages.
 * @param interactive
 *          is confirmation needed before executing actual action.
 * @throws IOException
 */
public static void fixInprogressSegmentWithLowerSequenceNumber(final com.twitter.distributedlog.DistributedLogManagerFactory factory,
                                                               final MetadataUpdater metadataUpdater,
                                                               final String streamName,
                                                               final boolean verbose,
                                                               final boolean interactive) throws IOException {
    DistributedLogManager dlm = factory.createDistributedLogManagerWithSharedClients(streamName);
    try {
        List<LogSegmentMetadata> segments = dlm.getLogSegments();
        if (verbose) {
            System.out.println("LogSegments for " + streamName + " : ");
            for (LogSegmentMetadata segment : segments) {
                System.out.println(segment.getLogSegmentSequenceNumber() + "\t: " + segment);
            }
        }
        LOG.info("Get log segments for {} : {}", streamName, segments);
        // validate log segments
        long maxCompletedLogSegmentSequenceNumber = -1L;
        LogSegmentMetadata inprogressSegment = null;
        for (LogSegmentMetadata segment : segments) {
            if (!segment.isInProgress()) {
                maxCompletedLogSegmentSequenceNumber =
                        Math.max(maxCompletedLogSegmentSequenceNumber, segment.getLogSegmentSequenceNumber());
            } else {
                // we already found an inprogress segment
                if (null != inprogressSegment) {
                    throw new DLIllegalStateException("Multiple inprogress segments found for stream "
                            + streamName + " : " + segments);
                }
                inprogressSegment = segment;
            }
        }
        if (null == inprogressSegment
                || inprogressSegment.getLogSegmentSequenceNumber() > maxCompletedLogSegmentSequenceNumber) {
            // nothing to fix
            return;
        }
        final long newLogSegmentSequenceNumber = maxCompletedLogSegmentSequenceNumber + 1;
        if (interactive && !IOUtils.confirmPrompt("Confirm to fix (Y/N), Ctrl+C to break : ")) {
            return;
        }
        final LogSegmentMetadata newSegment =
                FutureUtils.result(metadataUpdater.changeSequenceNumber(inprogressSegment, newLogSegmentSequenceNumber));
        LOG.info("Fixed {} : {} -> {} ", new Object[] { streamName, inprogressSegment, newSegment });
        if (verbose) {
            System.out.println("Fixed " + streamName + " : " + inprogressSegment.getZNodeName()
                    + " -> " + newSegment.getZNodeName());
            System.out.println("\t old: " + inprogressSegment);
            System.out.println("\t new: " + newSegment);
            System.out.println();
        }
    } finally {
        dlm.close();
    }
}
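A hypothetical helper (not part of DistributedLogAdmin) that reproduces only the validation step of the method above, assuming an already opened DistributedLogManager. It returns true when the stream has an inprogress segment whose sequence number does not exceed the highest completed one, i.e. when the fix above would actually rewrite metadata.

static boolean needsSequenceNumberFix(DistributedLogManager dlm) throws IOException {
    long maxCompletedLogSegmentSequenceNumber = -1L;
    LogSegmentMetadata inprogressSegment = null;
    for (LogSegmentMetadata segment : dlm.getLogSegments()) {
        if (!segment.isInProgress()) {
            maxCompletedLogSegmentSequenceNumber =
                    Math.max(maxCompletedLogSegmentSequenceNumber, segment.getLogSegmentSequenceNumber());
        } else {
            // same assumption as the admin method: at most one inprogress segment per stream
            inprogressSegment = segment;
        }
    }
    return null != inprogressSegment
            && inprogressSegment.getLogSegmentSequenceNumber() <= maxCompletedLogSegmentSequenceNumber;
}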
Use of com.twitter.distributedlog.LogSegmentMetadata in project distributedlog by twitter.
The class LedgerReadBenchmark, method benchmark.
@Override
protected void benchmark(DistributedLogNamespace namespace, String logName, StatsLogger statsLogger) {
    DistributedLogManager dlm = null;
    while (null == dlm) {
        try {
            dlm = namespace.openLog(streamName);
        } catch (IOException ioe) {
            logger.warn("Failed to create dlm for stream {} : ", streamName, ioe);
        }
        if (null == dlm) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry
            }
        }
    }
    logger.info("Created dlm for stream {}.", streamName);

    List<LogSegmentMetadata> segments = null;
    while (null == segments) {
        try {
            segments = dlm.getLogSegments();
        } catch (IOException ioe) {
            logger.warn("Failed to get log segments for stream {} : ", streamName, ioe);
        }
        if (null == segments) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry
            }
        }
    }

    final Counter readCounter = statsLogger.getCounter("reads");
    logger.info("Reading from log segments : {}", segments);

    ZooKeeperClient zkc = ZooKeeperClientBuilder.newBuilder()
            .uri(uri)
            .name("benchmark-zkc")
            .sessionTimeoutMs(conf.getZKSessionTimeoutMilliseconds())
            .zkAclId(null)
            .build();
    BKDLConfig bkdlConfig;
    try {
        bkdlConfig = BKDLConfig.resolveDLConfig(zkc, uri);
    } catch (IOException e) {
        return;
    }
    BookKeeper bk;
    try {
        bk = BookKeeperClientBuilder.newBuilder()
                .name("benchmark-bkc")
                .dlConfig(conf)
                .zkServers(bkdlConfig.getBkZkServersForReader())
                .ledgersPath(bkdlConfig.getBkLedgersPath())
                .build()
                .get();
    } catch (IOException e) {
        return;
    }

    final int readConcurrency = conf.getInt("ledger_read_concurrency", 1000);
    boolean streamRead = conf.getBoolean("ledger_stream_read", true);
    try {
        for (LogSegmentMetadata segment : segments) {
            Stopwatch stopwatch = Stopwatch.createStarted();
            long lid = segment.getLedgerId();
            LedgerHandle lh = bk.openLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32, conf.getBKDigestPW().getBytes(UTF_8));
            logger.info("It took {} ms to open log segment ({} entries) : {}",
                    new Object[] { stopwatch.elapsed(TimeUnit.MILLISECONDS), (lh.getLastAddConfirmed() + 1), segment });
            stopwatch.reset().start();
            Runnable reader;
            if (streamRead) {
                reader = new LedgerStreamReader(lh, new BookkeeperInternalCallbacks.ReadEntryListener() {
                    @Override
                    public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
                        readCounter.inc();
                    }
                }, readConcurrency);
            } else {
                reader = new LedgerStreamReader(lh, new BookkeeperInternalCallbacks.ReadEntryListener() {
                    @Override
                    public void onEntryComplete(int rc, LedgerHandle lh, LedgerEntry entry, Object ctx) {
                        readCounter.inc();
                    }
                }, readConcurrency);
            }
            reader.run();
            logger.info("It took {} ms to complete reading {} entries from log segment {}",
                    new Object[] { stopwatch.elapsed(TimeUnit.MILLISECONDS), (lh.getLastAddConfirmed() + 1), segment });
        }
    } catch (Exception e) {
        logger.error("Error on reading bk ", e);
    }
}
Use of com.twitter.distributedlog.LogSegmentMetadata in project distributedlog by twitter.
The class DistributedLogInputFormat, method getSplits.
@Override
public List<InputSplit> getSplits(JobContext jobContext) throws IOException, InterruptedException {
    List<LogSegmentMetadata> segments = dlm.getLogSegments();
    List<InputSplit> inputSplits = Lists.newArrayListWithCapacity(segments.size());
    BookKeeper bk = namespace.getReaderBKC().get();
    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bk);
    final AtomicInteger rcHolder = new AtomicInteger(0);
    final AtomicReference<LedgerMetadata> metadataHolder = new AtomicReference<LedgerMetadata>(null);
    for (LogSegmentMetadata segment : segments) {
        final CountDownLatch latch = new CountDownLatch(1);
        lm.readLedgerMetadata(segment.getLedgerId(), new BookkeeperInternalCallbacks.GenericCallback<LedgerMetadata>() {
            @Override
            public void operationComplete(int rc, LedgerMetadata ledgerMetadata) {
                metadataHolder.set(ledgerMetadata);
                rcHolder.set(rc);
                latch.countDown();
            }
        });
        latch.await();
        if (BKException.Code.OK != rcHolder.get()) {
            throw new IOException("Failed to get log segment metadata for " + segment
                    + " : " + BKException.getMessage(rcHolder.get()));
        }
        inputSplits.add(new LogSegmentSplit(segment, metadataHolder.get()));
    }
    return inputSplits;
}