use of org.apache.distributedlog.api.DistributedLogManager in project incubator-heron by apache.
the class DLDownloaderTest method testDownload.
@Test
public void testDownload() throws Exception {
    String logName = "test-download";
    URI uri = URI.create("distributedlog://127.0.0.1/test/distributedlog/" + logName);
    File tempFile = File.createTempFile("test", "download");
    // make sure it is deleted when the test completes
    tempFile.deleteOnExit();
    Path path = Paths.get(tempFile.toURI());
    Namespace ns = mock(Namespace.class);
    DistributedLogManager dlm = mock(DistributedLogManager.class);
    LogReader reader = mock(LogReader.class);
    when(ns.openLog(anyString())).thenReturn(dlm);
    when(dlm.getInputStream(eq(DLSN.InitialDLSN))).thenReturn(reader);
    // end the mocked stream immediately so the download loop terminates
    when(reader.readNext(anyBoolean())).thenThrow(new EndOfStreamException("eos"));
    NamespaceBuilder nsBuilder = mock(NamespaceBuilder.class);
    when(nsBuilder.clientId(anyString())).thenReturn(nsBuilder);
    when(nsBuilder.conf(any(DistributedLogConfiguration.class))).thenReturn(nsBuilder);
    when(nsBuilder.uri(any(URI.class))).thenReturn(nsBuilder);
    when(nsBuilder.build()).thenReturn(ns);
    // stub the static Extractor.extract(...) call so nothing is actually unpacked
    PowerMockito.mockStatic(Extractor.class);
    PowerMockito.doNothing().when(Extractor.class, "extract", any(InputStream.class), any(Path.class));
    DLDownloader downloader = new DLDownloader(() -> nsBuilder);
    downloader.download(uri, path);
    // the namespace should be built against the parent of the log URI
    URI parentUri = URI.create("distributedlog://127.0.0.1/test/distributedlog");
    verify(nsBuilder, times(1)).clientId(eq("heron-downloader"));
    verify(nsBuilder, times(1)).conf(eq(CONF));
    verify(nsBuilder, times(1)).uri(parentUri);
    PowerMockito.verifyStatic(times(1));
    Extractor.extract(any(InputStream.class), eq(path));
    verify(ns, times(1)).openLog(eq(logName));
    verify(ns, times(1)).close();
}
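The test above exercises DLDownloader entirely against mocks. For context, the read path those mocks stand in for looks roughly like the sketch below. This is not code from the Heron project: it assumes the standard NamespaceBuilder.newBuilder() factory and the org.apache.distributedlog 0.5.x package layout, and the namespace URI and log name are illustrative.

import java.net.URI;
import org.apache.distributedlog.DLSN;
import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.LogReader;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;

public class DLReadPathSketch {
    public static void main(String[] args) throws Exception {
        // resolve the namespace (the parent of the per-log URI), then open the named log
        Namespace ns = NamespaceBuilder.newBuilder()
            .clientId("heron-downloader")
            .conf(new DistributedLogConfiguration())
            .uri(URI.create("distributedlog://127.0.0.1/test/distributedlog"))
            .build();
        DistributedLogManager dlm = ns.openLog("test-download");
        LogReader reader = dlm.getInputStream(DLSN.InitialDLSN);
        try {
            // DLDownloader presumably wraps a reader like this in an InputStream
            // and passes it to Extractor.extract(...), per the verifications above
        } finally {
            reader.close();
            dlm.close();
            ns.close();
        }
    }
}

The test's verifications (clientId, conf, uri, openLog, close) map onto exactly these calls.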
use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
the class BKDistributedLogNamespace method deleteLog.
@Override
public void deleteLog(String logName) throws InvalidStreamNameException, LogNotFoundException, IOException {
    checkState();
    logName = validateAndNormalizeName(logName);
    com.google.common.base.Optional<URI> uri = Utils.ioResult(driver.getLogMetadataStore().getLogLocation(logName));
    if (!uri.isPresent()) {
        throw new LogNotFoundException("Log " + logName + " isn't found.");
    }
    DistributedLogManager dlm = openLogInternal(uri.get(), logName, Optional.empty(), Optional.empty());
    dlm.delete();
}
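Callers reach this implementation through the org.apache.distributedlog.api.namespace.Namespace interface rather than through BKDistributedLogNamespace directly. A hypothetical caller-side sketch, again assuming the 0.5.x package layout; the URI and log name are illustrative:

import java.net.URI;
import org.apache.distributedlog.DistributedLogConfiguration;
import org.apache.distributedlog.api.namespace.Namespace;
import org.apache.distributedlog.api.namespace.NamespaceBuilder;
import org.apache.distributedlog.exceptions.LogNotFoundException;

public class DeleteLogSketch {
    public static void main(String[] args) throws Exception {
        Namespace namespace = NamespaceBuilder.newBuilder()
            .conf(new DistributedLogConfiguration())
            .uri(URI.create("distributedlog://127.0.0.1/messaging/distributedlog"))
            .build();
        try {
            // resolves the log's location, opens it internally and deletes it, as shown above
            namespace.deleteLog("orders-stream");
        } catch (LogNotFoundException lnfe) {
            // the log was never created, or has already been removed
        } finally {
            namespace.close();
        }
    }
}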
use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
the class TestBKSyncLogReader method testReadRecordsWhenReadAheadCatchingUp.
@Test(timeout = 60000)
public void testReadRecordsWhenReadAheadCatchingUp() throws Exception {
    String name = testName.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(Integer.MAX_VALUE);
    confLocal.setReadAheadMaxRecords(1);
    confLocal.setReadAheadBatchSize(1);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    for (long i = 1L; i <= 10L; i++) {
        LogRecord record = DLMTestUtil.getLogRecordInstance(i);
        out.write(record);
    }
    out.flush();
    out.commit();
    logger.info("Write first 10 records");
    // open a reader to read
    BKSyncLogReader reader = (BKSyncLogReader) dlm.getInputStream(1L);
    // read from the sync reader; it should be able to read all 10 records
    // and then return null to signal that it has caught up
    LogRecord record = reader.readNext(false);
    int numReads = 0;
    long expectedTxId = 1L;
    while (null != record) {
        ++numReads;
        assertEquals(expectedTxId, record.getTransactionId());
        DLMTestUtil.verifyLogRecord(record);
        ++expectedTxId;
        record = reader.readNext(false);
    }
    assertEquals(10, numReads);
    out.close();
    reader.close();
    dlm.close();
}
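What makes this test interesting is the configuration, which deliberately keeps the readahead cache so small that the sync reader starts while readahead is still catching up. A hedged reading of the knobs used above; the comments reflect the test's apparent intent rather than authoritative documentation of each setting:

import org.apache.distributedlog.DistributedLogConfiguration;

public class CatchingUpConfSketch {
    static DistributedLogConfiguration build(DistributedLogConfiguration base) {
        DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
        confLocal.addConfiguration(base);                                    // inherit the shared test configuration
        confLocal.setOutputBufferSize(0);                                    // write records through without client-side buffering
        confLocal.setPeriodicFlushFrequencyMilliSeconds(Integer.MAX_VALUE);  // disable periodic flushes; the test flushes and commits explicitly
        confLocal.setReadAheadMaxRecords(1);                                 // keep the readahead cache to a single record ...
        confLocal.setReadAheadBatchSize(1);                                  // ... fetched one entry at a time, so readahead lags the reader
        return confLocal;
    }
}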
use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
the class TestBKSyncLogReader method testCreateReaderBeyondLastTransactionId.
@Test(timeout = 60000)
public void testCreateReaderBeyondLastTransactionId() throws Exception {
    String name = testName.getMethodName();
    DistributedLogManager dlm = createNewDLM(conf, name);
    BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    for (long i = 1; i < 10; i++) {
        LogRecord op = DLMTestUtil.getLogRecordInstance(i);
        out.write(op);
    }
    out.closeAndComplete();
    // the reader is positioned beyond the last transaction id, so there is nothing to read yet
    LogReader reader = dlm.getInputStream(20L);
    assertNull(reader.readNext(false));
    // write another 20 records
    out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    for (long i = 10; i < 30; i++) {
        LogRecord op = DLMTestUtil.getLogRecordInstance(i);
        out.write(op);
    }
    out.closeAndComplete();
    // the reader should now pick up from transaction id 20 onwards
    for (int i = 0; i < 10; i++) {
        LogRecord record = waitForNextRecord(reader);
        assertEquals(20L + i, record.getTransactionId());
    }
    assertNull(reader.readNext(false));
}
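waitForNextRecord is a helper defined in the test class but not shown on this page. A minimal sketch of what such a helper might look like, modeled on the non-blocking retry loop in the next example; the class name and the 20ms poll interval are illustrative:

import java.util.concurrent.TimeUnit;
import org.apache.distributedlog.LogRecordWithDLSN;
import org.apache.distributedlog.api.LogReader;

public class WaitForNextRecordSketch {
    // hypothetical helper: poll with non-blocking reads until a record shows up
    static LogRecordWithDLSN waitForNextRecord(LogReader reader) throws Exception {
        LogRecordWithDLSN record = reader.readNext(false);
        while (null == record) {
            TimeUnit.MILLISECONDS.sleep(20);
            record = reader.readNext(false);
        }
        return record;
    }
}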
use of org.apache.distributedlog.api.DistributedLogManager in project bookkeeper by apache.
the class TestBKSyncLogReader method testReadRecordsAfterReadAheadCaughtUp.
@Test(timeout = 60000)
public void testReadRecordsAfterReadAheadCaughtUp() throws Exception {
    String name = testName.getMethodName();
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.addConfiguration(conf);
    confLocal.setOutputBufferSize(0);
    confLocal.setPeriodicFlushFrequencyMilliSeconds(Integer.MAX_VALUE);
    DistributedLogManager dlm = createNewDLM(confLocal, name);
    BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
    for (long i = 1L; i <= 10L; i++) {
        LogRecord record = DLMTestUtil.getLogRecordInstance(i);
        out.write(record);
    }
    out.flush();
    out.commit();
    logger.info("Write first 10 records");
    // all 10 records have been added to the stream,
    // so open a reader to read them
    BKSyncLogReader reader = (BKSyncLogReader) dlm.getInputStream(1L);
    // wait until readahead has caught up
    while (!reader.getReadAheadReader().isReadAheadCaughtUp()) {
        TimeUnit.MILLISECONDS.sleep(20);
    }
    logger.info("ReadAhead is caught up with first 10 records");
    for (long i = 11L; i <= 20L; i++) {
        LogRecord record = DLMTestUtil.getLogRecordInstance(i);
        out.write(record);
    }
    out.flush();
    out.commit();
    logger.info("Write another 10 records");
    // resume reading from the sync reader until it consumes all 20 records
    long expectedTxId = 1L;
    for (int i = 0; i < 20; i++) {
        LogRecord record = reader.readNext(false);
        while (null == record) {
            record = reader.readNext(false);
        }
        assertEquals(expectedTxId, record.getTransactionId());
        DLMTestUtil.verifyLogRecord(record);
        ++expectedTxId;
    }
    // after reading 20 records, the reader should return null
    assertNull(reader.readNext(false));
    out.close();
    reader.close();
    dlm.close();
}
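Stripped of the implementation-specific casts (BKSyncLogWriter, BKSyncLogReader) and test helpers, the write-then-read round trip these tests exercise can be sketched against the public API alone. Assumptions worth flagging: the namespace is built as in the first example, the log name and payloads are illustrative, and flush()/commit() are taken to be available on the writer returned by startLogSegmentNonPartitioned(), as they are on the BKSyncLogWriter used above.

import java.nio.charset.StandardCharsets;
import org.apache.distributedlog.LogRecord;
import org.apache.distributedlog.api.DistributedLogManager;
import org.apache.distributedlog.api.LogReader;
import org.apache.distributedlog.api.LogWriter;
import org.apache.distributedlog.api.namespace.Namespace;

public class WriteThenReadSketch {
    static void writeThenRead(Namespace namespace) throws Exception {
        DistributedLogManager dlm = namespace.openLog("example-log");
        LogWriter writer = dlm.startLogSegmentNonPartitioned();
        for (long txid = 1L; txid <= 10L; txid++) {
            // plain LogRecord with an increasing transaction id and an illustrative payload
            writer.write(new LogRecord(txid, ("record-" + txid).getBytes(StandardCharsets.UTF_8)));
        }
        writer.flush();
        writer.commit();
        // read back from transaction id 1 until the non-blocking reader reports it has caught up
        LogReader reader = dlm.getInputStream(1L);
        LogRecord record = reader.readNext(false);
        while (null != record) {
            System.out.println("read txid " + record.getTransactionId());
            record = reader.readNext(false);
        }
        writer.close();
        reader.close();
        dlm.close();
    }
}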