Use of org.apache.distributedlog.exceptions.DLInterruptedException in project bookkeeper by apache.
The class BookKeeperClient, method initialize.
private synchronized void initialize() throws IOException {
    if (null != this.bkc) {
        return;
    }
    if (null == this.zkc) {
        int zkSessionTimeout = conf.getBKClientZKSessionTimeoutMilliSeconds();
        RetryPolicy retryPolicy = new BoundExponentialBackoffRetryPolicy(
                conf.getBKClientZKRetryBackoffStartMillis(),
                conf.getBKClientZKRetryBackoffMaxMillis(),
                conf.getBKClientZKNumRetries());
        Credentials credentials = Credentials.NONE;
        if (conf.getZkAclId() != null) {
            credentials = new DigestCredentials(conf.getZkAclId(), conf.getZkAclId());
        }
        this.zkc = new ZooKeeperClient(name + ":zk", zkSessionTimeout, 2 * zkSessionTimeout, zkServers,
                retryPolicy, statsLogger.scope("bkc_zkc"), conf.getZKClientNumberRetryThreads(),
                conf.getBKClientZKRequestRateLimit(), credentials);
    }
    try {
        commonInitialization(conf, ledgersPath, eventLoopGroup, statsLogger, requestTimer);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on creating bookkeeper client " + name + " : ", e);
    }
    if (ownZK) {
        LOG.info("BookKeeper Client created {} with its own ZK Client : ledgersPath = {}, numRetries = {}, "
                + "sessionTimeout = {}, backoff = {}, maxBackoff = {}, dnsResolver = {}",
                new Object[] { name, ledgersPath, conf.getBKClientZKNumRetries(),
                        conf.getBKClientZKSessionTimeoutMilliSeconds(), conf.getBKClientZKRetryBackoffStartMillis(),
                        conf.getBKClientZKRetryBackoffMaxMillis(), conf.getBkDNSResolverOverrides() });
    } else {
        LOG.info("BookKeeper Client created {} with shared zookeeper client : ledgersPath = {}, numRetries = {}, "
                + "sessionTimeout = {}, backoff = {}, maxBackoff = {}, dnsResolver = {}",
                new Object[] { name, ledgersPath, conf.getZKNumRetries(), conf.getZKSessionTimeoutMilliseconds(),
                        conf.getZKRetryBackoffStartMillis(), conf.getZKRetryBackoffMaxMillis(),
                        conf.getBkDNSResolverOverrides() });
    }
}
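The idiom above recurs throughout the project: a blocking call that throws InterruptedException first restores the thread's interrupt flag and then rethrows the interruption as DLInterruptedException, which fits the method's throws IOException signature. A minimal sketch of that idiom, using a hypothetical connectBlocking() helper standing in for commonInitialization(...):

import java.io.IOException;

import org.apache.distributedlog.exceptions.DLInterruptedException;

class InitializeInterruptSketch {

    // Hypothetical blocking call standing in for commonInitialization(...).
    private void connectBlocking() throws InterruptedException {
        // ...
    }

    void initializeOrThrow(String name) throws IOException {
        try {
            connectBlocking();
        } catch (InterruptedException e) {
            // Restore the interrupt flag for callers, then surface the interruption
            // as an IOException-compatible DLInterruptedException.
            Thread.currentThread().interrupt();
            throw new DLInterruptedException("Interrupted on creating bookkeeper client " + name, e);
        }
    }
}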
Use of org.apache.distributedlog.exceptions.DLInterruptedException in project bookkeeper by apache.
The class TestNonBlockingReadsMultiReader, method testMultiReaders.
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
    String name = "distrlog-multireaders";
    final RateLimiter limiter = RateLimiter.create(1000);
    DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
    confLocal.setOutputBufferSize(0);
    confLocal.setImmediateFlushEnabled(true);
    DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
    final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(0)));
    Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(1)));
    final AtomicInteger writeCount = new AtomicInteger(2);
    DistributedLogManager dlmread = createNewDLM(conf, name);
    BKSyncLogReader reader0 = (BKSyncLogReader) dlmread.getInputStream(0);
    try {
        ReaderThread[] readerThreads = new ReaderThread[1];
        readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
        // readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
        final AtomicBoolean running = new AtomicBoolean(true);
        Thread writerThread = new Thread("WriteThread") {

            @Override
            public void run() {
                try {
                    long txid = 2;
                    DLSN dlsn = DLSN.InvalidDLSN;
                    while (running.get()) {
                        limiter.acquire();
                        long curTxId = txid++;
                        dlsn = Utils.ioResult(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
                        writeCount.incrementAndGet();
                        if (curTxId % 1000 == 0) {
                            LOG.info("writer write {}", curTxId);
                        }
                    }
                    LOG.info("Completed writing record at {}", dlsn);
                    Utils.close(writer);
                } catch (DLInterruptedException die) {
                    Thread.currentThread().interrupt();
                } catch (Exception e) {
                }
            }
        };
        for (ReaderThread rt : readerThreads) {
            rt.start();
        }
        writerThread.start();
        TimeUnit.SECONDS.sleep(5);
        LOG.info("Stopping writer");
        running.set(false);
        writerThread.join();
        LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
        while (writeCount.get() > (readerThreads[0].getReadCount())) {
            LOG.info("Write Count = {}, Read Count = {}",
                    new Object[] { writeCount.get(), readerThreads[0].getReadCount() });
            TimeUnit.MILLISECONDS.sleep(100);
        }
        assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));
        for (ReaderThread readerThread : readerThreads) {
            readerThread.stopReading();
        }
    } finally {
        dlmwrite.close();
        reader0.close();
        dlmread.close();
    }
}
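In the anonymous writer thread above, DLInterruptedException acts as a shutdown signal: the write loop exits and the interrupt flag is restored. A minimal sketch of that consumer-side handling, with a hypothetical writeOnce() standing in for writer.write(...):

import org.apache.distributedlog.exceptions.DLInterruptedException;

class WriterLoopSketch implements Runnable {

    private volatile boolean running = true;

    // Hypothetical write call standing in for writer.write(...).
    private void writeOnce() throws Exception {
        // ...
    }

    @Override
    public void run() {
        while (running) {
            try {
                writeOnce();
            } catch (DLInterruptedException die) {
                // Treat interruption as shutdown: restore the flag and exit the loop.
                Thread.currentThread().interrupt();
                return;
            } catch (Exception e) {
                // The test swallows other failures; a production writer would log or rethrow.
            }
        }
    }

    void stop() {
        running = false;
    }
}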
Use of org.apache.distributedlog.exceptions.DLInterruptedException in project bookkeeper by apache.
The class ZKSubscriptionStateStore, method getLastCommitPositionFromZK.
CompletableFuture<DLSN> getLastCommitPositionFromZK() {
    final CompletableFuture<DLSN> result = new CompletableFuture<DLSN>();
    try {
        logger.debug("Reading last commit position from path {}", zkPath);
        zooKeeperClient.get().getData(zkPath, false, new AsyncCallback.DataCallback() {

            @Override
            public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
                logger.debug("Read last commit position from path {}: rc = {}", zkPath, rc);
                if (KeeperException.Code.NONODE.intValue() == rc) {
                    result.complete(DLSN.NonInclusiveLowerBound);
                } else if (KeeperException.Code.OK.intValue() != rc) {
                    result.completeExceptionally(KeeperException.create(KeeperException.Code.get(rc), path));
                } else {
                    try {
                        DLSN dlsn = DLSN.deserialize(new String(data, Charsets.UTF_8));
                        result.complete(dlsn);
                    } catch (Exception t) {
                        logger.warn("Invalid last commit position found from path {}", zkPath, t);
                        // invalid dlsn recorded in subscription state store
                        result.complete(DLSN.NonInclusiveLowerBound);
                    }
                }
            }
        }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException zkce) {
        result.completeExceptionally(zkce);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        result.completeExceptionally(new DLInterruptedException("getLastCommitPosition was interrupted", ie));
    }
    return result;
}
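For asynchronous APIs the same interruption is surfaced by failing the returned future rather than throwing. A minimal sketch of that variant, with a hypothetical blocking fetch() standing in for the ZooKeeper getData call:

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.exceptions.DLInterruptedException;

class AsyncInterruptSketch {

    // Hypothetical blocking read standing in for zooKeeperClient.get().getData(...).
    private String fetch() throws InterruptedException {
        return "";
    }

    CompletableFuture<String> fetchAsync() {
        CompletableFuture<String> result = new CompletableFuture<String>();
        try {
            result.complete(fetch());
        } catch (InterruptedException ie) {
            // Keep the interrupt flag set and fail the future with an IOException-compatible cause.
            Thread.currentThread().interrupt();
            result.completeExceptionally(new DLInterruptedException("fetch was interrupted", ie));
        }
        return result;
    }
}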
Use of org.apache.distributedlog.exceptions.DLInterruptedException in project bookkeeper by apache.
The class BKAsyncLogReader, method safeRun.
@Override
public void safeRun() {
    synchronized (scheduleLock) {
        if (scheduleDelayStopwatch.isRunning()) {
            scheduleLatency.registerSuccessfulEvent(
                    scheduleDelayStopwatch.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
        }
        Stopwatch runTime = Stopwatch.createStarted();
        int iterations = 0;
        long scheduleCountLocal = scheduleCountUpdater.get(this);
        LOG.debug("{}: Scheduled Background Reader", readHandler.getFullyQualifiedName());
        while (true) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("{}: Executing Iteration: {}", readHandler.getFullyQualifiedName(), iterations++);
            }
            PendingReadRequest nextRequest = null;
            synchronized (this) {
                nextRequest = pendingRequests.peek();
                // Queue is empty, nothing to read, return
                if (null == nextRequest) {
                    LOG.trace("{}: Queue Empty waiting for Input", readHandler.getFullyQualifiedName());
                    scheduleCountUpdater.set(this, 0);
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    return;
                }
                if (disableProcessingReadRequests) {
                    LOG.info("Reader of {} is forced to stop processing read requests",
                            readHandler.getFullyQualifiedName());
                    return;
                }
            }
            lastProcessTime.reset().start();
            // If the oldest pending promise is cancelled, mark the reader in error,
            // since we don't know the last consumed read
            if (null == lastExceptionUpdater.get(this)) {
                if (nextRequest.getPromise().isCancelled()) {
                    setLastException(new DLInterruptedException("Interrupted on reading "
                            + readHandler.getFullyQualifiedName()));
                }
            }
            if (checkClosedOrInError("readNext")) {
                Throwable lastException = lastExceptionUpdater.get(this);
                if (lastException != null && !(lastException.getCause() instanceof LogNotFoundException)) {
                    LOG.warn("{}: Exception", readHandler.getFullyQualifiedName(), lastException);
                }
                backgroundReaderRunTime.registerFailedEvent(
                        runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                return;
            }
            try {
                // Fail 10% of the requests when asked to simulate errors
                if (bkDistributedLogManager.getFailureInjector().shouldInjectErrors()) {
                    throw new IOException("Reader Simulated Exception");
                }
                LogRecordWithDLSN record;
                while (!nextRequest.hasReadEnoughRecords()) {
                    // read single record
                    do {
                        record = readNextRecord();
                    } while (null != record
                            && (record.isControl() || (record.getDlsn().compareTo(getStartDLSN()) < 0)));
                    if (null == record) {
                        break;
                    } else {
                        if (record.isEndOfStream() && !returnEndOfStreamRecord) {
                            setLastException(new EndOfStreamException("End of Stream Reached for "
                                    + readHandler.getFullyQualifiedName()));
                            break;
                        }
                        // gap detection
                        if (recordPositionsContainsGap(record, lastPosition)) {
                            bkDistributedLogManager.raiseAlert("Gap detected between records at record = {}", record);
                            if (positionGapDetectionEnabled) {
                                throw new DLIllegalStateException("Gap detected between records at record = " + record);
                            }
                        }
                        lastPosition = record.getLastPositionWithinLogSegment();
                        nextRequest.addRecord(record);
                    }
                }
            } catch (IOException exc) {
                setLastException(exc);
                if (!(exc instanceof LogNotFoundException)) {
                    LOG.warn("{} : read with skip Exception", readHandler.getFullyQualifiedName(),
                            lastExceptionUpdater.get(this));
                }
                continue;
            }
            if (nextRequest.hasReadRecords()) {
                long remainingWaitTime = nextRequest.getRemainingWaitTime();
                if (remainingWaitTime > 0 && !nextRequest.hasReadEnoughRecords()) {
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    scheduleDelayStopwatch.reset().start();
                    scheduleCountUpdater.set(this, 0);
                    // the request could still wait for more records
                    backgroundScheduleTask = scheduler.scheduleOrdered(streamName, BACKGROUND_READ_SCHEDULER,
                            remainingWaitTime, nextRequest.deadlineTimeUnit);
                    return;
                }
                PendingReadRequest request = pendingRequests.poll();
                if (null != request && nextRequest == request) {
                    request.complete();
                    if (null != backgroundScheduleTask) {
                        backgroundScheduleTask.cancel(true);
                        backgroundScheduleTask = null;
                    }
                } else {
                    DLIllegalStateException ise = new DLIllegalStateException(
                            "Unexpected condition at dlsn = " + nextRequest.records.get(0).getDlsn());
                    nextRequest.completeExceptionally(ise);
                    if (null != request) {
                        request.completeExceptionally(ise);
                    }
                    // We should never get here as we should have exited the loop if
                    // pendingRequests were empty
                    bkDistributedLogManager.raiseAlert("Unexpected condition at dlsn = {}",
                            nextRequest.records.get(0).getDlsn());
                    setLastException(ise);
                }
            } else {
                if (0 == scheduleCountLocal) {
                    LOG.trace("Schedule count dropping to zero", lastExceptionUpdater.get(this));
                    backgroundReaderRunTime.registerSuccessfulEvent(
                            runTime.stop().elapsed(TimeUnit.MICROSECONDS), TimeUnit.MICROSECONDS);
                    return;
                }
                scheduleCountLocal = scheduleCountUpdater.decrementAndGet(this);
            }
        }
    }
}
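Here DLInterruptedException is constructed without an underlying InterruptedException: cancellation of the oldest pending read promise is recorded as the reader's last exception so later iterations fail fast. A minimal sketch of that check, with a hypothetical PendingRead type standing in for PendingReadRequest:

import java.util.concurrent.CompletableFuture;

import org.apache.distributedlog.exceptions.DLInterruptedException;

class CancellationCheckSketch {

    // Hypothetical pending request wrapping a promise, standing in for PendingReadRequest.
    static class PendingRead {
        final CompletableFuture<String> promise = new CompletableFuture<String>();
    }

    private volatile Throwable lastException;

    void checkCancelled(PendingRead request, String streamName) {
        if (null == lastException && request.promise.isCancelled()) {
            // Map the caller's cancellation onto the reader's error state.
            lastException = new DLInterruptedException("Interrupted on reading " + streamName);
        }
    }
}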
Use of org.apache.distributedlog.exceptions.DLInterruptedException in project bookkeeper by apache.
The class BKLogSegmentEntryReader, method safeRun.
/**
 * The core function to propagate fetched entries to read requests.
 */
@Override
public void safeRun() {
    long scheduleCountLocal = scheduleCountUpdater.get(this);
    while (true) {
        PendingReadRequest nextRequest = null;
        synchronized (readQueue) {
            nextRequest = readQueue.peek();
        }
        // if read queue is empty, nothing to read, return
        if (null == nextRequest) {
            scheduleCountUpdater.set(this, 0L);
            return;
        }
        // if the oldest pending promise is cancelled, fail the read,
        // since we don't know the last consumed read
        if (null == lastExceptionUpdater.get(this)) {
            if (nextRequest.getPromise().isCancelled()) {
                completeExceptionally(new DLInterruptedException("Interrupted on reading log segment "
                        + getSegment() + " : " + nextRequest.getPromise().isCancelled()), false);
            }
        }
        // if the reader is in error state, stop read
        if (checkClosedOrInError()) {
            return;
        }
        // read entries from readahead cache to satisfy next read request
        readEntriesFromReadAheadCache(nextRequest);
        // check if we can satisfy the read request
        if (nextRequest.hasReadEntries()) {
            PendingReadRequest request;
            synchronized (readQueue) {
                request = readQueue.poll();
            }
            if (null != request && nextRequest == request) {
                request.complete();
            } else {
                DLIllegalStateException ise = new DLIllegalStateException(
                        "Unexpected condition at reading from " + getSegment());
                nextRequest.completeExceptionally(ise);
                if (null != request) {
                    request.completeExceptionally(ise);
                }
                completeExceptionally(ise, false);
            }
        } else {
            if (0 == scheduleCountLocal) {
                return;
            }
            scheduleCountLocal = scheduleCountUpdater.decrementAndGet(this);
        }
    }
}