Use of com.twitter.distributedlog.LogRecord in project distributedlog by Twitter.
The class BulkWriteOp, method asRecordList.
private List<LogRecord> asRecordList(List<ByteBuffer> buffers, Sequencer sequencer) {
    List<LogRecord> records = new ArrayList<LogRecord>(buffers.size());
    for (ByteBuffer buffer : buffers) {
        byte[] payload = new byte[buffer.remaining()];
        buffer.get(payload);
        records.add(new LogRecord(sequencer.nextId(), payload));
    }
    return records;
}
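For context, a record built this way exposes its transaction id and payload through getTransactionId() and getPayload(), which the tests below rely on. A minimal round-trip sketch (class name and payload are illustrative, not from the project):

import com.twitter.distributedlog.LogRecord;

public class LogRecordRoundTrip {
    public static void main(String[] args) {
        // Build a record from a transaction id and an opaque payload,
        // mirroring what asRecordList does per buffer.
        byte[] payload = "hello".getBytes();
        LogRecord record = new LogRecord(1L, payload);
        // Read the same fields back.
        System.out.println(record.getTransactionId());        // 1
        System.out.println(new String(record.getPayload()));  // hello
    }
}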
Use of com.twitter.distributedlog.LogRecord in project distributedlog by Twitter.
The class TestDistributedLogAdmin, method testChangeSequenceNumber.
/**
 * {@link https://issues.apache.org/jira/browse/DL-44}
 */
@DistributedLogAnnotations.FlakyTest
@Ignore
@Test(timeout = 60000)
@SuppressWarnings("deprecation")
public void testChangeSequenceNumber() throws Exception {
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setLogSegmentSequenceNumberValidationEnabled(false);
    URI uri = createDLMURI("/change-sequence-number");
    zooKeeperClient.get().create(uri.getPath(), new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    com.twitter.distributedlog.DistributedLogManagerFactory factory =
            new com.twitter.distributedlog.DistributedLogManagerFactory(confLocal, uri);
    String streamName = "change-sequence-number";
    // create completed log segments
    DistributedLogManager dlm = factory.createDistributedLogManagerWithSharedClients(streamName);
    DLMTestUtil.generateCompletedLogSegments(dlm, confLocal, 4, 10);
    DLMTestUtil.injectLogSegmentWithGivenLogSegmentSeqNo(dlm, confLocal, 5, 41, false, 10, true);
    dlm.close();
    // create a reader
    DistributedLogManager readDLM = factory.createDistributedLogManagerWithSharedClients(streamName);
    AsyncLogReader reader = readDLM.getAsyncLogReader(DLSN.InitialDLSN);
    // read the records
    long expectedTxId = 1L;
    for (int i = 0; i < 4 * 10; i++) {
        LogRecord record = Await.result(reader.readNext());
        assertNotNull(record);
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(expectedTxId, record.getTransactionId());
        expectedTxId++;
    }
    dlm = factory.createDistributedLogManagerWithSharedClients(streamName);
    DLMTestUtil.injectLogSegmentWithGivenLogSegmentSeqNo(dlm, confLocal, 3L, 5 * 10 + 1, true, 10, false);
    // Wait for reader to be aware of new log segments
    TimeUnit.SECONDS.sleep(2);
    DLSN dlsn = readDLM.getLastDLSN();
    assertTrue(dlsn.compareTo(new DLSN(5, Long.MIN_VALUE, Long.MIN_VALUE)) < 0);
    assertTrue(dlsn.compareTo(new DLSN(4, -1, Long.MIN_VALUE)) > 0);
    // no records should be readable yet
    Future<LogRecordWithDLSN> readFuture = reader.readNext();
    try {
        Await.result(readFuture, Duration.fromMilliseconds(1000));
        fail("Should fail reading next when there is a corrupted log segment");
    } catch (TimeoutException te) {
        // expected
    }
    // Dryrun
    DistributedLogAdmin.fixInprogressSegmentWithLowerSequenceNumber(factory,
            new DryrunLogSegmentMetadataStoreUpdater(confLocal, getLogSegmentMetadataStore(factory)),
            streamName, false, false);
    // Wait for reader to be aware of new log segments
    TimeUnit.SECONDS.sleep(2);
    dlsn = readDLM.getLastDLSN();
    assertTrue(dlsn.compareTo(new DLSN(5, Long.MIN_VALUE, Long.MIN_VALUE)) < 0);
    assertTrue(dlsn.compareTo(new DLSN(4, -1, Long.MIN_VALUE)) > 0);
    // still no records should be readable
    try {
        Await.result(readFuture, Duration.fromMilliseconds(1000));
        fail("Should fail reading next when there is a corrupted log segment");
    } catch (TimeoutException te) {
        // expected
    }
    // Actual run
    DistributedLogAdmin.fixInprogressSegmentWithLowerSequenceNumber(factory,
            LogSegmentMetadataStoreUpdater.createMetadataUpdater(confLocal, getLogSegmentMetadataStore(factory)),
            streamName, false, false);
    // Wait for reader to be aware of new log segments
    TimeUnit.SECONDS.sleep(2);
    expectedTxId = 51L;
    LogRecord record = Await.result(readFuture);
    assertNotNull(record);
    DLMTestUtil.verifyLogRecord(record);
    assertEquals(expectedTxId, record.getTransactionId());
    expectedTxId++;
    for (int i = 1; i < 10; i++) {
        record = Await.result(reader.readNext());
        assertNotNull(record);
        DLMTestUtil.verifyLogRecord(record);
        assertEquals(expectedTxId, record.getTransactionId());
        expectedTxId++;
    }
    dlsn = readDLM.getLastDLSN();
    LOG.info("LastDLSN after fix inprogress segment : {}", dlsn);
    assertTrue(dlsn.compareTo(new DLSN(7, Long.MIN_VALUE, Long.MIN_VALUE)) < 0);
    assertTrue(dlsn.compareTo(new DLSN(6, -1, Long.MIN_VALUE)) > 0);
    Utils.close(reader);
    readDLM.close();
    dlm.close();
    factory.close();
}
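The bounds checks above rely on DLSN ordering, which appears to compare the log segment sequence number first, then the entry id, then the slot id, so any position in segment 4 sorts before the very first position of segment 5. A minimal sketch, assuming the three-argument DLSN(logSegmentSequenceNo, entryId, slotId) constructor used in the test:

import com.twitter.distributedlog.DLSN;

public class DLSNOrderingSketch {
    public static void main(String[] args) {
        // A position inside log segment 4 sorts before the start of segment 5.
        DLSN inSegment4 = new DLSN(4, 100, 0);
        DLSN startOfSegment5 = new DLSN(5, Long.MIN_VALUE, Long.MIN_VALUE);
        System.out.println(inSegment4.compareTo(startOfSegment5) < 0); // true
    }
}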
Use of com.twitter.distributedlog.LogRecord in project distributedlog by Twitter.
The class SyncReaderBenchmark, method benchmark.
@Override
protected void benchmark(DistributedLogNamespace namespace, String streamName, StatsLogger statsLogger) {
    DistributedLogManager dlm = null;
    while (null == dlm) {
        try {
            dlm = namespace.openLog(streamName);
        } catch (IOException ioe) {
            logger.warn("Failed to create dlm for stream {} : ", streamName, ioe);
        }
        if (null == dlm) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry
            }
        }
    }
    OpStatsLogger openReaderStats = statsLogger.getOpStatsLogger("open_reader");
    OpStatsLogger nonBlockingReadStats = statsLogger.getOpStatsLogger("non_blocking_read");
    OpStatsLogger blockingReadStats = statsLogger.getOpStatsLogger("blocking_read");
    Counter nullReadCounter = statsLogger.getCounter("null_read");
    logger.info("Created dlm for stream {}.", streamName);
    LogReader reader = null;
    Long lastTxId = null;
    while (null == reader) {
        // initialize the last txid
        if (null == lastTxId) {
            switch (readMode) {
                case OLDEST:
                    lastTxId = 0L;
                    break;
                case LATEST:
                    try {
                        lastTxId = dlm.getLastTxId();
                    } catch (IOException ioe) {
                        continue;
                    }
                    break;
                case REWIND:
                    lastTxId = System.currentTimeMillis() - rewindMs;
                    break;
                case POSITION:
                    lastTxId = fromTxId;
                    break;
                default:
                    logger.warn("Unsupported mode {}", readMode);
                    printUsage();
                    System.exit(0);
                    break;
            }
            logger.info("Reading from transaction id {}", lastTxId);
        }
        // Open the reader
        Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            reader = dlm.getInputStream(lastTxId);
            long elapsedMicros = stopwatch.elapsed(TimeUnit.MICROSECONDS);
            openReaderStats.registerSuccessfulEvent(elapsedMicros);
            logger.info("It took {} microseconds to position the reader to transaction id {}", elapsedMicros, lastTxId);
        } catch (IOException ioe) {
            openReaderStats.registerFailedEvent(stopwatch.elapsed(TimeUnit.MICROSECONDS));
            logger.warn("Failed to create reader for stream {} reading from {}.", streamName, lastTxId);
        }
        if (null == reader) {
            try {
                TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
            } catch (InterruptedException e) {
                // ignore and retry
            }
            continue;
        }
        // read loop
        LogRecord record;
        boolean nonBlocking = false;
        stopwatch = Stopwatch.createUnstarted();
        while (true) {
            try {
                stopwatch.start();
                record = reader.readNext(nonBlocking);
                if (null != record) {
                    long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                    if (nonBlocking) {
                        nonBlockingReadStats.registerSuccessfulEvent(elapsedMicros);
                    } else {
                        blockingReadStats.registerSuccessfulEvent(elapsedMicros);
                    }
                    lastTxId = record.getTransactionId();
                } else {
                    nullReadCounter.inc();
                }
                // switch to non-blocking reads once a blocking read returns null
                if (null == record && !nonBlocking) {
                    nonBlocking = true;
                }
                stopwatch.reset();
            } catch (IOException e) {
                logger.warn("Encountered reading record from stream {} : ", streamName, e);
                reader = null;
                break;
            }
        }
        try {
            TimeUnit.MILLISECONDS.sleep(conf.getZKSessionTimeoutMilliseconds());
        } catch (InterruptedException e) {
            // ignore and retry
        }
    }
}
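Stripped of the statistics and retry handling, the core of the benchmark is an ordinary synchronous read loop. A condensed sketch using only the calls shown above (the stream name and starting transaction id are illustrative):

import java.io.IOException;

import com.twitter.distributedlog.DistributedLogManager;
import com.twitter.distributedlog.LogReader;
import com.twitter.distributedlog.LogRecord;
import com.twitter.distributedlog.namespace.DistributedLogNamespace;

public class SyncReadSketch {
    // Read everything currently in the stream, starting from transaction id 0.
    static void readAll(DistributedLogNamespace namespace) throws IOException {
        DistributedLogManager dlm = namespace.openLog("messaging-stream-1");
        LogReader reader = dlm.getInputStream(0L);
        try {
            // readNext(false) is a blocking read; as in the tests, a null return
            // is treated as "nothing more to read".
            LogRecord record = reader.readNext(false);
            while (null != record) {
                System.out.println("txid=" + record.getTransactionId()
                        + " payloadBytes=" + record.getPayload().length);
                record = reader.readNext(false);
            }
        } finally {
            reader.close();
            dlm.close();
        }
    }
}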
Use of com.twitter.distributedlog.LogRecord in project distributedlog by Twitter.
The class TestDistributedLogServer, method runSimpleBulkWriteTest.
private void runSimpleBulkWriteTest(int writeCount) throws Exception {
    String name = String.format("dlserver-bulk-write-%d", writeCount);
    dlClient.routingService.addHost(name, dlServer.getAddress());
    List<ByteBuffer> writes = new ArrayList<ByteBuffer>(writeCount);
    for (long i = 1; i <= writeCount; i++) {
        writes.add(ByteBuffer.wrap(("" + i).getBytes()));
    }
    logger.debug("Write {} entries to stream {}.", writeCount, name);
    List<Future<DLSN>> futures = dlClient.dlClient.writeBulk(name, writes);
    assertEquals(futures.size(), writeCount);
    for (Future<DLSN> future : futures) {
        // No throw == pass.
        DLSN dlsn = Await.result(future, Duration.fromSeconds(10));
    }
    DistributedLogManager dlm = DLMTestUtil.createNewDLM(name, conf, getUri());
    LogReader reader = dlm.getInputStream(1);
    int numRead = 0;
    LogRecord r = reader.readNext(false);
    while (null != r) {
        int i = Integer.parseInt(new String(r.getPayload()));
        assertEquals(numRead + 1, i);
        ++numRead;
        r = reader.readNext(false);
    }
    assertEquals(writeCount, numRead);
    reader.close();
    dlm.close();
}
Use of com.twitter.distributedlog.LogRecord in project distributedlog by Twitter.
The class BulkWriteOp, method executeOp.
@Override
protected Future<BulkWriteResponse> executeOp(AsyncLogWriter writer, Sequencer sequencer, Object txnLock) {
    // Need to convert input buffers to LogRecords.
    List<LogRecord> records;
    Future<List<Future<DLSN>>> futureList;
    synchronized (txnLock) {
        records = asRecordList(buffers, sequencer);
        futureList = writer.writeBulk(records);
    }
    // Collect into a list of tries to make it easier to extract exception or DLSN.
    Future<List<Try<DLSN>>> writes = asTryList(futureList);
    Future<BulkWriteResponse> response = writes.flatMap(new AbstractFunction1<List<Try<DLSN>>, Future<BulkWriteResponse>>() {

        @Override
        public Future<BulkWriteResponse> apply(List<Try<DLSN>> results) {
            // Considered a success at the batch level even if no individual writes succeed.
            // The reason is that it's impossible to make an appropriate decision regarding retries
            // without individual buffer failure reasons.
            List<WriteResponse> writeResponses = new ArrayList<WriteResponse>(results.size());
            BulkWriteResponse bulkWriteResponse = ResponseUtils.bulkWriteSuccess().setWriteResponses(writeResponses);
            // However, if the first write is a definite failure, fail the whole operation with that result.
            if (results.size() > 0) {
                Try<DLSN> firstResult = results.get(0);
                if (isDefiniteFailure(firstResult)) {
                    return new ConstFuture(firstResult);
                }
            }
            // Translate all futures to write responses.
            Iterator<Try<DLSN>> iterator = results.iterator();
            while (iterator.hasNext()) {
                Try<DLSN> completedFuture = iterator.next();
                try {
                    DLSN dlsn = completedFuture.get();
                    WriteResponse writeResponse = ResponseUtils.writeSuccess().setDlsn(dlsn.serialize());
                    writeResponses.add(writeResponse);
                    successRecordCounter.inc();
                } catch (Exception ioe) {
                    WriteResponse writeResponse = ResponseUtils.write(ResponseUtils.exceptionToHeader(ioe));
                    writeResponses.add(writeResponse);
                    if (StatusCode.FOUND == writeResponse.getHeader().getCode()) {
                        redirectRecordCounter.inc();
                    } else {
                        failureRecordCounter.inc();
                    }
                }
            }
            return Future.value(bulkWriteResponse);
        }
    });
    return response;
}