Use of com.twitter.distributedlog.exceptions.UnexpectedException in project distributedlog by twitter.
The class BKLogHandler, method asyncGetLedgerListInternal.
private void asyncGetLedgerListInternal(final Comparator<LogSegmentMetadata> comparator,
                                        final LogSegmentFilter segmentFilter,
                                        final Watcher watcher,
                                        final GenericCallback<List<LogSegmentMetadata>> finalCallback,
                                        final AtomicInteger numAttemptsLeft,
                                        final AtomicLong backoffMillis) {
    final Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        if (LOG.isTraceEnabled()) {
            LOG.trace("Async getting ledger list for {}.", getFullyQualifiedName());
        }
        final GenericCallback<List<LogSegmentMetadata>> callback = new GenericCallback<List<LogSegmentMetadata>>() {
            @Override
            public void operationComplete(int rc, List<LogSegmentMetadata> result) {
                long elapsedMicros = stopwatch.stop().elapsed(TimeUnit.MICROSECONDS);
                if (KeeperException.Code.OK.intValue() != rc) {
                    getListStat.registerFailedEvent(elapsedMicros);
                } else {
                    if (LogSegmentFilter.DEFAULT_FILTER == segmentFilter) {
                        isFullListFetched.set(true);
                    }
                    getListStat.registerSuccessfulEvent(elapsedMicros);
                }
                finalCallback.operationComplete(rc, result);
            }
        };
        zooKeeperClient.get().getChildren(logMetadata.getLogSegmentsPath(), watcher, new AsyncCallback.Children2Callback() {
            @Override
            public void processResult(final int rc, final String path, final Object ctx,
                                      final List<String> children, final Stat stat) {
                if (KeeperException.Code.OK.intValue() != rc) {
                    if ((KeeperException.Code.CONNECTIONLOSS.intValue() == rc
                            || KeeperException.Code.SESSIONEXPIRED.intValue() == rc
                            || KeeperException.Code.SESSIONMOVED.intValue() == rc)
                            && numAttemptsLeft.decrementAndGet() > 0) {
                        long backoffMs = backoffMillis.get();
                        backoffMillis.set(Math.min(conf.getZKRetryBackoffMaxMillis(), 2 * backoffMs));
                        scheduler.schedule(new Runnable() {
                            @Override
                            public void run() {
                                asyncGetLedgerListInternal(comparator, segmentFilter, watcher,
                                        finalCallback, numAttemptsLeft, backoffMillis);
                            }
                        }, backoffMs, TimeUnit.MILLISECONDS);
                        return;
                    }
                    callback.operationComplete(rc, null);
                    return;
                }
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Got ledger list from {} : {}", logMetadata.getLogSegmentsPath(), children);
                }
                ledgerListWatchSet.set(true);
                Set<String> segmentsReceived = new HashSet<String>();
                segmentsReceived.addAll(segmentFilter.filter(children));
                Set<String> segmentsAdded;
                final Set<String> removedSegments = Collections.synchronizedSet(new HashSet<String>());
                final Map<String, LogSegmentMetadata> addedSegments =
                        Collections.synchronizedMap(new HashMap<String, LogSegmentMetadata>());
                Pair<Set<String>, Set<String>> segmentChanges = logSegmentCache.diff(segmentsReceived);
                segmentsAdded = segmentChanges.getLeft();
                removedSegments.addAll(segmentChanges.getRight());
                if (segmentsAdded.isEmpty()) {
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("No segments added for {}.", getFullyQualifiedName());
                    }
                    // update the cache before fetch
                    logSegmentCache.update(removedSegments, addedSegments);
                    List<LogSegmentMetadata> segmentList;
                    try {
                        segmentList = getCachedLogSegments(comparator);
                    } catch (UnexpectedException e) {
                        callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(), null);
                        return;
                    }
                    callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                    notifyUpdatedLogSegments(segmentList);
                    if (!removedSegments.isEmpty()) {
                        notifyOnOperationComplete();
                    }
                    return;
                }
                final AtomicInteger numChildren = new AtomicInteger(segmentsAdded.size());
                final AtomicInteger numFailures = new AtomicInteger(0);
                for (final String segment : segmentsAdded) {
                    metadataStore.getLogSegment(logMetadata.getLogSegmentPath(segment))
                            .addEventListener(new FutureEventListener<LogSegmentMetadata>() {
                        @Override
                        public void onSuccess(LogSegmentMetadata result) {
                            addedSegments.put(segment, result);
                            complete();
                        }

                        @Override
                        public void onFailure(Throwable cause) {
                            // the in-progress segment has already been completed, so its inprogress znode no longer exists
                            if (cause instanceof KeeperException
                                    && KeeperException.Code.NONODE == ((KeeperException) cause).code()) {
                                removedSegments.add(segment);
                                complete();
                            } else {
                                // fail fast
                                if (1 == numFailures.incrementAndGet()) {
                                    int rcToReturn = KeeperException.Code.SYSTEMERROR.intValue();
                                    if (cause instanceof KeeperException) {
                                        rcToReturn = ((KeeperException) cause).code().intValue();
                                    } else if (cause instanceof ZKException) {
                                        rcToReturn = ((ZKException) cause).getKeeperExceptionCode().intValue();
                                    }
                                    // ideally we need a dlog-specific response code here
                                    callback.operationComplete(rcToReturn, null);
                                    return;
                                }
                            }
                        }

                        private void complete() {
                            if (0 == numChildren.decrementAndGet() && numFailures.get() == 0) {
                                // update the cache only when fetch completed
                                logSegmentCache.update(removedSegments, addedSegments);
                                List<LogSegmentMetadata> segmentList;
                                try {
                                    segmentList = getCachedLogSegments(comparator);
                                } catch (UnexpectedException e) {
                                    callback.operationComplete(KeeperException.Code.DATAINCONSISTENCY.intValue(), null);
                                    return;
                                }
                                callback.operationComplete(KeeperException.Code.OK.intValue(), segmentList);
                                notifyUpdatedLogSegments(segmentList);
                                notifyOnOperationComplete();
                            }
                        }
                    });
                }
            }
        }, null);
    } catch (ZooKeeperClient.ZooKeeperConnectionException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    } catch (InterruptedException e) {
        getListStat.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS));
        finalCallback.operationComplete(KeeperException.Code.CONNECTIONLOSS.intValue(), null);
    }
}
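The error path above retries only on transient ZooKeeper errors, doubling backoffMillis (capped at conf.getZKRetryBackoffMaxMillis()) before rescheduling itself. Below is a minimal, self-contained sketch of that doubling-backoff retry pattern; the names (BackoffRetrySketch, runWithRetry, MAX_BACKOFF_MS) and the use of RuntimeException as the transient failure are illustrative, not part of the DistributedLog API.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class BackoffRetrySketch {

    // stand-in for conf.getZKRetryBackoffMaxMillis()
    private static final long MAX_BACKOFF_MS = 5000L;

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();

    // Run a task; on a transient failure, double the backoff (up to the cap) and reschedule.
    void runWithRetry(final Runnable task, final AtomicInteger attemptsLeft, final AtomicLong backoffMillis) {
        try {
            task.run();
        } catch (RuntimeException transientFailure) {
            if (attemptsLeft.decrementAndGet() <= 0) {
                throw transientFailure; // out of attempts: surface the failure to the caller
            }
            final long backoffMs = backoffMillis.get();
            // double the backoff for the next attempt, capped at the maximum
            backoffMillis.set(Math.min(MAX_BACKOFF_MS, 2 * backoffMs));
            scheduler.schedule(new Runnable() {
                @Override
                public void run() {
                    runWithRetry(task, attemptsLeft, backoffMillis);
                }
            }, backoffMs, TimeUnit.MILLISECONDS);
        }
    }
}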
Use of com.twitter.distributedlog.exceptions.UnexpectedException in project distributedlog by twitter.
The class TestLogSegmentsZK, method testCreateLogSegmentMissingMaxSequenceNumber.
/**
 * Create a log segment when no max sequence number is recorded in /ledgers (e.g. written by an older version).
 */
@Test(timeout = 60000)
public void testCreateLogSegmentMissingMaxSequenceNumber() throws Exception {
    URI uri = createURI();
    String streamName = testName.getMethodName();
    DistributedLogConfiguration conf = new DistributedLogConfiguration()
            .setLockTimeout(99999)
            .setOutputBufferSize(0)
            .setImmediateFlushEnabled(true)
            .setEnableLedgerAllocatorPool(true)
            .setLedgerAllocatorPoolName("test");
    BKDistributedLogNamespace namespace = BKDistributedLogNamespace.newBuilder().conf(conf).uri(uri).build();
    namespace.createLog(streamName);
    MaxLogSegmentSequenceNo max1 = getMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf);
    assertEquals(DistributedLogConstants.UNASSIGNED_LOGSEGMENT_SEQNO, max1.getSequenceNumber());
    DistributedLogManager dlm = namespace.openLog(streamName);
    final int numSegments = 3;
    for (int i = 0; i < numSegments; i++) {
        BKSyncLogWriter out = (BKSyncLogWriter) dlm.startLogSegmentNonPartitioned();
        out.write(DLMTestUtil.getLogRecordInstance(i));
        out.closeAndComplete();
    }
    MaxLogSegmentSequenceNo max2 = getMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf);
    assertEquals(3, max2.getSequenceNumber());
    // nuke the max ledger sequence number
    updateMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf, new byte[0]);
    DistributedLogManager dlm1 = namespace.openLog(streamName);
    try {
        dlm1.startLogSegmentNonPartitioned();
        fail("Should fail with unexpected exceptions");
    } catch (UnexpectedException ue) {
        // expected
    } finally {
        dlm1.close();
    }
    // invalid max ledger sequence number
    updateMaxLogSegmentSequenceNo(namespace.getSharedWriterZKCForDL(), uri, streamName, conf, "invalid-max".getBytes(UTF_8));
    DistributedLogManager dlm2 = namespace.openLog(streamName);
    try {
        dlm2.startLogSegmentNonPartitioned();
        fail("Should fail with unexpected exceptions");
    } catch (UnexpectedException ue) {
        // expected
    } finally {
        dlm2.close();
    }
    dlm.close();
    namespace.close();
}
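Both failure cases in this test corrupt the max log segment sequence number znode (first with empty bytes, then with a non-numeric string), so the writer cannot parse it and surfaces an UnexpectedException, matching the NumberFormatException handling in ZKLogMetadataForWriter below. The following standalone sketch only illustrates why both payloads fail to parse; parseMaxSequenceNumber is an illustrative stand-in, assuming the value is stored as a UTF-8 decimal string, which is what the test's "invalid-max".getBytes(UTF_8) update implies.

import static java.nio.charset.StandardCharsets.UTF_8;

public class MaxSequenceNumberParseSketch {

    // Illustrative stand-in for the real deserialization; assumes a UTF-8 decimal string payload.
    static long parseMaxSequenceNumber(byte[] data) {
        return Long.parseLong(new String(data, UTF_8));
    }

    public static void main(String[] args) {
        byte[][] payloads = { new byte[0], "invalid-max".getBytes(UTF_8), "3".getBytes(UTF_8) };
        for (byte[] payload : payloads) {
            try {
                System.out.println("parsed max sequence number: " + parseMaxSequenceNumber(payload));
            } catch (NumberFormatException nfe) {
                // the empty and non-numeric payloads end up here and would be reported as an UnexpectedException
                System.out.println("rejected payload: \"" + new String(payload, UTF_8) + "\"");
            }
        }
    }
}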
Use of com.twitter.distributedlog.exceptions.UnexpectedException in project distributedlog by twitter.
The class ZKLogMetadataForWriter, method processLogMetadatas.
static ZKLogMetadataForWriter processLogMetadatas(URI uri,
                                                  String logName,
                                                  String logIdentifier,
                                                  List<Versioned<byte[]>> metadatas,
                                                  boolean ownAllocator) throws UnexpectedException {
    try {
        // max id
        Versioned<byte[]> maxTxnIdData = metadatas.get(MetadataIndex.MAX_TXID);
        ensureMetadataExist(maxTxnIdData);
        // version
        Versioned<byte[]> versionData = metadatas.get(MetadataIndex.VERSION);
        ensureMetadataExist(versionData);
        Preconditions.checkArgument(LAYOUT_VERSION == bytesToInt(versionData.getValue()));
        // lock path
        ensureMetadataExist(metadatas.get(MetadataIndex.LOCK));
        // read lock path
        ensureMetadataExist(metadatas.get(MetadataIndex.READ_LOCK));
        // max lssn
        Versioned<byte[]> maxLSSNData = metadatas.get(MetadataIndex.LOGSEGMENTS);
        ensureMetadataExist(maxLSSNData);
        try {
            DLUtils.deserializeLogSegmentSequenceNumber(maxLSSNData.getValue());
        } catch (NumberFormatException nfe) {
            throw new UnexpectedException("Invalid max sequence number found in log " + logName, nfe);
        }
        // allocation path
        Versioned<byte[]> allocationData;
        if (ownAllocator) {
            allocationData = metadatas.get(MetadataIndex.ALLOCATION);
            ensureMetadataExist(allocationData);
        } else {
            allocationData = new Versioned<byte[]>(null, null);
        }
        return new ZKLogMetadataForWriter(uri, logName, logIdentifier, maxLSSNData, maxTxnIdData, allocationData);
    } catch (IllegalArgumentException iae) {
        throw new UnexpectedException("Invalid log " + logName, iae);
    } catch (NullPointerException npe) {
        throw new UnexpectedException("Invalid log " + logName, npe);
    }
}
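processLogMetadatas leans on ensureMetadataExist and Preconditions.checkArgument to throw NullPointerException or IllegalArgumentException, then collapses either into a single UnexpectedException for the caller. A minimal sketch of that validate-and-wrap pattern follows; InvalidLogException and validate are hypothetical stand-ins, not the real DistributedLog helpers.

import com.google.common.base.Preconditions;

public class ValidateAndWrapSketch {

    // Hypothetical domain exception standing in for UnexpectedException.
    static class InvalidLogException extends Exception {
        InvalidLogException(String message, Throwable cause) {
            super(message, cause);
        }
    }

    static void validate(String logName, Integer layoutVersion, int expectedLayoutVersion)
            throws InvalidLogException {
        try {
            // missing metadata surfaces as a NullPointerException
            Preconditions.checkNotNull(layoutVersion, "version metadata is missing");
            // a layout-version mismatch surfaces as an IllegalArgumentException
            Preconditions.checkArgument(expectedLayoutVersion == layoutVersion.intValue(), "unexpected layout version");
        } catch (IllegalArgumentException cause) {
            throw new InvalidLogException("Invalid log " + logName, cause);
        } catch (NullPointerException cause) {
            throw new InvalidLogException("Invalid log " + logName, cause);
        }
    }
}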
Use of com.twitter.distributedlog.exceptions.UnexpectedException in project distributedlog by twitter.
The class LogSegmentCache, method getLogSegments.
/**
 * Retrieve log segments from the cache.
 *
 * - first sort the log segments in ascending order
 * - validate the segments and assign corresponding sequence ids
 * - apply the comparator after validation
 *
 * @param comparator
 *          comparator to sort the returned log segments.
 * @return list of sorted and filtered log segments.
 * @throws UnexpectedException if an unexpected condition is detected (e.g. a ledger sequence number gap)
 */
public List<LogSegmentMetadata> getLogSegments(Comparator<LogSegmentMetadata> comparator)
        throws UnexpectedException {
    List<LogSegmentMetadata> segmentsToReturn;
    synchronized (logSegments) {
        segmentsToReturn = new ArrayList<LogSegmentMetadata>(logSegments.size());
        segmentsToReturn.addAll(logSegments.values());
    }
    Collections.sort(segmentsToReturn, LogSegmentMetadata.COMPARATOR);
    long startSequenceId = DistributedLogConstants.UNASSIGNED_SEQUENCE_ID;
    LogSegmentMetadata prevSegment = null;
    for (int i = 0; i < segmentsToReturn.size(); i++) {
        LogSegmentMetadata segment = segmentsToReturn.get(i);
        // the same log segment sequence number may appear twice in different
        // states (inprogress vs completed); it could happen during completing a log segment without a transaction.
        // any other non-consecutive sequence number is an unexpected gap.
        if (null != prevSegment
                && prevSegment.getVersion() >= LogSegmentMetadata.LogSegmentMetadataVersion.VERSION_V2_LEDGER_SEQNO.value
                && segment.getVersion() >= LogSegmentMetadata.LogSegmentMetadataVersion.VERSION_V2_LEDGER_SEQNO.value
                && prevSegment.getLogSegmentSequenceNumber() != segment.getLogSegmentSequenceNumber()
                && prevSegment.getLogSegmentSequenceNumber() + 1 != segment.getLogSegmentSequenceNumber()) {
            LOG.error("{} found ledger sequence number gap between log segment {} and {}",
                    new Object[] { streamName, prevSegment, segment });
            throw new UnexpectedException(streamName + " found ledger sequence number gap between log segment "
                    + prevSegment.getLogSegmentSequenceNumber() + " and " + segment.getLogSegmentSequenceNumber());
        }
        // assign sequence id
        if (!segment.isInProgress()) {
            if (segment.supportsSequenceId()) {
                startSequenceId = segment.getStartSequenceId() + segment.getRecordCount();
                if (null != prevSegment && prevSegment.supportsSequenceId()
                        && prevSegment.getStartSequenceId() > segment.getStartSequenceId()) {
                    LOG.warn("{} found decreasing start sequence id in log segment {}, previous is {}",
                            new Object[] { streamName, segment, prevSegment });
                }
            } else {
                startSequenceId = DistributedLogConstants.UNASSIGNED_SEQUENCE_ID;
            }
        } else {
            if (segment.supportsSequenceId()) {
                LogSegmentMetadata newSegment = segment.mutator()
                        .setStartSequenceId(startSequenceId == DistributedLogConstants.UNASSIGNED_SEQUENCE_ID
                                ? 0L : startSequenceId)
                        .build();
                segmentsToReturn.set(i, newSegment);
            }
            break;
        }
        prevSegment = segment;
    }
    if (comparator != LogSegmentMetadata.COMPARATOR) {
        Collections.sort(segmentsToReturn, comparator);
    }
    return segmentsToReturn;
}
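The loop above only assigns a start sequence id to the first in-progress segment: completed segments already carry one, and the running value is advanced past each of them by its record count. Below is a simplified, self-contained sketch of that assignment step; Segment is an illustrative stand-in for LogSegmentMetadata, and the UNASSIGNED sentinel is folded into 0 for brevity.

import java.util.Arrays;
import java.util.List;

public class SequenceIdAssignmentSketch {

    // Minimal stand-in for LogSegmentMetadata: just the fields the assignment needs.
    static class Segment {
        final boolean inProgress;
        final long recordCount;
        long startSequenceId;

        Segment(boolean inProgress, long startSequenceId, long recordCount) {
            this.inProgress = inProgress;
            this.startSequenceId = startSequenceId;
            this.recordCount = recordCount;
        }
    }

    // Advance past every completed segment, then hand the accumulated value to the first in-progress one.
    static void assignInProgressStartSequenceId(List<Segment> sortedSegments) {
        long nextStartSequenceId = 0L;
        for (Segment segment : sortedSegments) {
            if (!segment.inProgress) {
                nextStartSequenceId = segment.startSequenceId + segment.recordCount;
            } else {
                segment.startSequenceId = nextStartSequenceId;
                break; // only the first in-progress segment is assigned
            }
        }
    }

    public static void main(String[] args) {
        List<Segment> segments = Arrays.asList(
                new Segment(false, 0L, 10L),   // completed, records [0, 10)
                new Segment(false, 10L, 5L),   // completed, records [10, 15)
                new Segment(true, -1L, 0L));   // in-progress, start not yet assigned
        assignInProgressStartSequenceId(segments);
        System.out.println("in-progress segment starts at sequence id "
                + segments.get(2).startSequenceId); // prints 15
    }
}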
Use of com.twitter.distributedlog.exceptions.UnexpectedException in project distributedlog by twitter.
The class StreamManagerImpl, method doDeleteAndRemoveAsync.
private Future<Void> doDeleteAndRemoveAsync(final String streamName) {
    Stream stream = streams.get(streamName);
    if (null == stream) {
        logger.warn("No stream {} to delete.", streamName);
        return Future.exception(new UnexpectedException("No stream " + streamName + " to delete."));
    } else {
        Future<Void> result;
        logger.info("Deleting stream {}, {}", streamName, stream);
        try {
            stream.delete();
            result = stream.requestClose("Stream Deleted");
        } catch (IOException e) {
            logger.error("Failed on removing stream {} : ", streamName, e);
            result = Future.exception(e);
        }
        return result;
    }
}
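A caller of doDeleteAndRemoveAsync receives either a completed Future or one failed with the UnexpectedException (unknown stream) or an IOException from the delete itself. The sketch below shows one way of consuming that result with the same FutureEventListener pattern used in BKLogHandler above; deleteAndRemove is an illustrative wrapper, not the StreamManager API.

import com.twitter.distributedlog.exceptions.UnexpectedException;
import com.twitter.util.Future;
import com.twitter.util.FutureEventListener;

public class DeleteStreamCallerSketch {

    // Illustrative wrapper; imagine it delegating to StreamManagerImpl#doDeleteAndRemoveAsync.
    static Future<Void> deleteAndRemove(String streamName) {
        return Future.exception(new UnexpectedException("No stream " + streamName + " to delete."));
    }

    public static void main(String[] args) {
        deleteAndRemove("mystream").addEventListener(new FutureEventListener<Void>() {
            @Override
            public void onSuccess(Void value) {
                System.out.println("stream deleted and removed");
            }

            @Override
            public void onFailure(Throwable cause) {
                // an UnexpectedException here means the proxy is not tracking the stream
                System.err.println("delete failed: " + cause.getMessage());
            }
        });
    }
}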