Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FileWriteAheadLogManager, method initNextWriteHandle.
/**
 * Fills the file header for a new segment. Calling this method signals that we are done with the current segment
 * and it can be archived. If we don't have a prepared file yet and the archiver is busy, this method blocks.
 *
 * @param cur Current file write handle released by the WAL writer.
 * @return Initialized file handle.
 * @throws IgniteCheckedException If an exception occurred.
 */
private FileWriteHandle initNextWriteHandle(FileWriteHandle cur) throws IgniteCheckedException {
    IgniteCheckedException error = null;

    try {
        File nextFile = pollNextFile(cur.getSegmentId());

        if (log.isDebugEnabled())
            log.debug("Switching to a new WAL segment: " + nextFile.getAbsolutePath());

        SegmentIO fileIO = null;

        FileWriteHandle hnd;

        boolean interrupted = false;

        if (switchSegmentRecordOffset != null)
            switchSegmentRecordOffset.set((int)((cur.getSegmentId() + 1) % dsCfg.getWalSegments()), 0);

        while (true) {
            try {
                fileIO = new SegmentIO(cur.getSegmentId() + 1, ioFactory.create(nextFile));

                IgniteInClosure<FileIO> lsnr = createWalFileListener;

                if (lsnr != null)
                    lsnr.apply(fileIO);

                hnd = fileHandleManager.nextHandle(fileIO, serializer);

                if (interrupted)
                    Thread.currentThread().interrupt();

                break;
            }
            catch (ClosedByInterruptException ignore) {
                interrupted = true;

                Thread.interrupted();

                if (fileIO != null) {
                    try {
                        fileIO.close();
                    }
                    catch (IOException ignored) {
                        // No-op.
                    }

                    fileIO = null;
                }
            }
        }

        hnd.writeHeader();

        return hnd;
    }
    catch (IgniteCheckedException e) {
        throw error = e;
    }
    catch (IOException e) {
        throw error = new StorageException("Unable to initialize WAL segment", e);
    }
    finally {
        if (error != null)
            cctx.kernalContext().failure().process(new FailureContext(CRITICAL_ERROR, error));
    }
}
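The retry loop above guards against ClosedByInterruptException: if the calling thread is interrupted during I/O, NIO closes the channel as a side effect, so the handle must be re-created and the interrupt flag restored afterwards. Below is a minimal, self-contained sketch of that pattern using plain java.nio; the class InterruptSafeSegmentOpen, the method openAndWriteHeader, and the header buffer are hypothetical illustrations, not Ignite API.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class InterruptSafeSegmentOpen {
    /**
     * Opens a segment file and writes its header, retrying if the calling thread
     * is interrupted mid-write. ClosedByInterruptException closes the channel as
     * a side effect, so we clear and remember the interrupt flag, reopen, and
     * restore the flag before returning, mirroring what initNextWriteHandle does
     * with its SegmentIO.
     */
    public static FileChannel openAndWriteHeader(Path path, ByteBuffer hdr) throws IOException {
        boolean interrupted = false;

        try {
            while (true) {
                FileChannel ch = FileChannel.open(path, StandardOpenOption.CREATE, StandardOpenOption.WRITE);

                try {
                    ch.write(hdr.duplicate()); // May throw ClosedByInterruptException.

                    return ch;
                }
                catch (ClosedByInterruptException e) {
                    // The channel is already closed; remember the interrupt and retry.
                    interrupted = true;

                    Thread.interrupted();
                }
                catch (IOException e) {
                    ch.close(); // Don't leak the channel on unrelated I/O failures.

                    throw e;
                }
            }
        }
        finally {
            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }
}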
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FileWriteAheadLogManager, method restoreWriteHandle.
/**
* @param lastReadPtr Last read WAL file pointer.
* @return Initialized file write handle.
* @throws StorageException If failed to initialize WAL write handle.
*/
private FileWriteHandle restoreWriteHandle(@Nullable WALPointer lastReadPtr) throws StorageException {
    long absIdx = lastReadPtr == null ? 0 : lastReadPtr.index();

    @Nullable FileArchiver archiver0 = archiver;

    long segNo = archiver0 == null ? absIdx : absIdx % dsCfg.getWalSegments();

    File curFile = new File(walWorkDir, fileName(segNo));

    int off = lastReadPtr == null ? 0 : lastReadPtr.fileOffset();
    int len = lastReadPtr == null ? 0 : lastReadPtr.length();

    try {
        SegmentIO fileIO = new SegmentIO(absIdx, ioFactory.create(curFile));

        IgniteInClosure<FileIO> lsnr = createWalFileListener;

        if (lsnr != null)
            lsnr.apply(fileIO);

        try {
            int serVer = serializerVer;

            // If we have an existing segment, try to read the serializer version from it.
            if (lastReadPtr != null) {
                try {
                    serVer = readSegmentHeader(fileIO, segmentFileInputFactory).getSerializerVersion();
                }
                catch (SegmentEofException | EOFException ignore) {
                    serVer = serializerVer;
                }
            }

            RecordSerializer ser = new RecordSerializerFactoryImpl(cctx).createSerializer(serVer);

            if (log.isInfoEnabled()) {
                log.info("Resuming logging to WAL segment [file=" + curFile.getAbsolutePath() +
                    ", offset=" + off + ", ver=" + serVer + ']');
            }

            FileWriteHandle hnd = fileHandleManager.initHandle(fileIO, off + len, ser);

            segmentAware.curAbsWalIdx(absIdx);

            FileDescriptor[] walArchiveFiles = walArchiveFiles();

            segmentAware.minReserveIndex(F.isEmpty(walArchiveFiles) ? -1 : walArchiveFiles[0].idx - 1);
            segmentAware.lastTruncatedArchiveIdx(F.isEmpty(walArchiveFiles) ? -1 : walArchiveFiles[0].idx - 1);

            if (archiver0 == null)
                segmentAware.setLastArchivedAbsoluteIndex(absIdx - 1);

            // Getting segment sizes.
            F.asList(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER)).stream()
                .map(FileDescriptor::new)
                .forEach(fd -> {
                    if (fd.isCompressed())
                        segmentSize.put(fd.idx(), fd.file().length());
                    else
                        segmentSize.putIfAbsent(fd.idx(), fd.file().length());
                });

            // Size of the 8th segment will be set in #resumeLogging.
            if (archiver0 != null) {
                for (long i = absIdx - (absIdx % dsCfg.getWalSegments()); i < absIdx; i++)
                    segmentSize.putIfAbsent(i, maxWalSegmentSize);
            }

            return hnd;
        }
        catch (IgniteCheckedException | IOException e) {
            try {
                fileIO.close();
            }
            catch (IOException suppressed) {
                e.addSuppressed(suppressed);
            }

            if (e instanceof StorageException)
                throw (StorageException)e;

            throw e instanceof IOException ? (IOException)e : new IOException(e);
        }
    }
    catch (IOException e) {
        throw new StorageException("Failed to restore WAL write handle: " + curFile.getAbsolutePath(), e);
    }
}
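The inner catch block shows the close-on-failure idiom: if initialization fails after the segment file has been opened, the handle is closed and any secondary close failure is attached via addSuppressed, so the primary cause is never lost. Here is a minimal sketch of the same idiom against a plain FileChannel; openAndValidate and expectedMinSize are illustrative names, not Ignite API.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class SuppressedCloseDemo {
    /**
     * Initializes a channel in two steps; if the second step fails, the close
     * failure (if any) is attached as a suppressed exception so that neither
     * error is lost, mirroring the cleanup in restoreWriteHandle.
     */
    public static FileChannel openAndValidate(Path path, long expectedMinSize) throws IOException {
        FileChannel ch = FileChannel.open(path, StandardOpenOption.READ);

        try {
            if (ch.size() < expectedMinSize)
                throw new IOException("Segment too short: " + ch.size());

            return ch;
        }
        catch (IOException e) {
            try {
                ch.close();
            }
            catch (IOException suppressed) {
                e.addSuppressed(suppressed);
            }

            throw e;
        }
    }
}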
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FsyncFileWriteHandle, method fsync.
/**
* @param ptr Pointer to sync.
* @throws StorageException If failed.
* @throws IgniteInterruptedCheckedException If interrupted.
*/
protected void fsync(WALPointer ptr, boolean stop) throws StorageException, IgniteInterruptedCheckedException {
    lock.lock();

    try {
        if (ptr != null) {
            if (!needFsync(ptr))
                return;

            if (fsyncDelay > 0 && !stopped()) {
                // Delay fsync to collect as many updates as possible: trade latency for throughput.
                U.await(fsync, fsyncDelay, TimeUnit.NANOSECONDS);

                if (!needFsync(ptr))
                    return;
            }
        }

        flushOrWait(ptr, stop);

        if (stopped())
            return;

        if (lastFsyncPos != written) {
            // Fsync position must be behind.
            assert lastFsyncPos < written;

            boolean metricsEnabled = metrics.metricsEnabled();

            long start = metricsEnabled ? System.nanoTime() : 0;

            try {
                fileIO.force();
            }
            catch (IOException e) {
                throw new StorageException(e);
            }

            lastFsyncPos = written;

            if (fsyncDelay > 0)
                fsync.signalAll();

            long end = metricsEnabled ? System.nanoTime() : 0;

            if (metricsEnabled)
                metrics.onFsync(end - start);
        }
    }
    finally {
        lock.unlock();
    }
}
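The fsyncDelay branch implements fsync coalescing: the syncing thread parks briefly on a condition so that concurrent writers can pile more data behind the same force() call, trading a little latency for throughput. Below is a minimal sketch of that idea, assuming a plain FileChannel and hypothetical field names (written, lastFsyncPos); it is a simplification for illustration, not Ignite's FsyncFileWriteHandle.

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class CoalescingSyncer {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition fsyncCond = lock.newCondition();
    private final FileChannel ch;
    private final long fsyncDelayNanos;

    private long written;
    private long lastFsyncPos;

    public CoalescingSyncer(FileChannel ch, long fsyncDelayNanos) {
        this.ch = ch;
        this.fsyncDelayNanos = fsyncDelayNanos;
    }

    /** Called by writers after appending {@code len} bytes. */
    public void onWrite(long len) {
        lock.lock();
        try {
            written += len;
        }
        finally {
            lock.unlock();
        }
    }

    /** Syncs if there is unsynced data, waiting briefly to batch more writes. */
    public void sync() throws IOException, InterruptedException {
        lock.lock();
        try {
            if (lastFsyncPos == written)
                return; // Nothing new to sync.

            if (fsyncDelayNanos > 0)
                fsyncCond.awaitNanos(fsyncDelayNanos); // Trade latency for throughput.

            ch.force(false);

            lastFsyncPos = written;

            fsyncCond.signalAll(); // Wake threads that piggybacked on this fsync.
        }
        finally {
            lock.unlock();
        }
    }
}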
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FsyncFileWriteHandle, method flush.
/**
 * @param expHead Expected head of the chain. If the head was changed, the flush is not performed in this thread.
 * @return {@code True} if the chain was flushed by this thread.
 * @throws StorageException If failed.
 */
private boolean flush(WALRecord expHead, boolean stop) throws StorageException {
    if (expHead.previous() == null) {
        FakeRecord frHead = (FakeRecord)expHead;

        // Protects from CASing terminal FakeRecord(true) to FakeRecord(false).
        if (!stop || frHead.stop)
            return false;
    }

    // Fail-fast before CAS.
    checkNode();

    if (!head.compareAndSet(expHead, new FakeRecord(new WALPointer(getSegmentId(), (int)nextPosition(expHead), 0), stop)))
        return false;

    if (expHead.chainSize() == 0)
        return false;

    // Any failure in this code must invalidate the environment.
    try {
        // We can safely allow other threads to start building next chains while we are doing flush here.
        ByteBuffer buf;

        boolean tmpBuf = false;

        if (expHead.chainSize() > tlbSize) {
            buf = GridUnsafe.allocateBuffer(expHead.chainSize());

            // We need to manually release this temporary direct buffer.
            tmpBuf = true;
        }
        else
            buf = tlb.get();

        try {
            long pos = fillBuffer(buf, expHead);

            writeBuffer(pos, buf);
        }
        finally {
            if (tmpBuf)
                GridUnsafe.freeBuffer(buf);
        }

        return true;
    }
    catch (Throwable e) {
        StorageException se = e instanceof StorageException ? (StorageException)e :
            new StorageException("Unable to write", new IOException(e));

        cctx.kernalContext().failure().process(new FailureContext(CRITICAL_ERROR, se));

        // All workers waiting for a next segment must be woken up and stopped.
        signalNextAvailable();

        throw se;
    }
}
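The compareAndSet on head is what makes flush() safe under concurrency: writers link records into a chain behind an atomic head reference, and whichever thread successfully swaps the head for a fresh sentinel owns the detached chain and flushes it alone; losers simply return false. Here is a minimal sketch of that claim-by-CAS pattern; Node, append, and claim are illustrative stand-ins, not Ignite types.

import java.util.concurrent.atomic.AtomicReference;

public class ChainClaimDemo {
    static class Node {
        final String payload;
        final Node prev;

        Node(String payload, Node prev) {
            this.payload = payload;
            this.prev = prev;
        }
    }

    private final AtomicReference<Node> head = new AtomicReference<>(new Node("sentinel", null));

    /** Writers append by CASing in a new node whose prev link is the current head. */
    public void append(String payload) {
        while (true) {
            Node cur = head.get();

            if (head.compareAndSet(cur, new Node(payload, cur)))
                return;
        }
    }

    /**
     * A flusher detaches the chain it observed; if another thread already swapped
     * the head, this thread backs off and that thread flushes instead.
     */
    public Node claim(Node expHead) {
        return head.compareAndSet(expHead, new Node("sentinel", null)) ? expHead : null;
    }
}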
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class GridDhtAtomicCache, method processDhtAtomicUpdateRequest.
/**
* @param nodeId Sender node ID.
* @param req Dht atomic update request.
*/
private void processDhtAtomicUpdateRequest(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
    assert Thread.currentThread().getName().startsWith("sys-stripe-") : Thread.currentThread().getName();

    if (msgLog.isDebugEnabled()) {
        msgLog.debug("Received DHT atomic update request [futId=" + req.futureId() +
            ", writeVer=" + req.writeVersion() + ", node=" + nodeId + ']');
    }

    assert req.partition() >= 0 : req;

    GridCacheVersion ver = req.writeVersion();

    ctx.versions().onReceived(nodeId, ver);

    GridDhtAtomicNearResponse nearRes = null;

    if (req.nearNodeId() != null) {
        nearRes = new GridDhtAtomicNearResponse(ctx.cacheId(), req.partition(), req.nearFutureId(),
            nodeId, req.flags());
    }

    boolean replicate = ctx.isDrEnabled();

    boolean intercept = req.forceTransformBackups() && ctx.config().getInterceptor() != null;

    boolean needTaskName = ctx.events().isRecordable(EVT_CACHE_OBJECT_READ) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_PUT) ||
        ctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED);

    String taskName = needTaskName ? ctx.kernalContext().task().resolveTaskName(req.taskNameHash()) : null;

    ctx.shared().database().checkpointReadLock();

    try {
        for (int i = 0; i < req.size(); i++) {
            KeyCacheObject key = req.key(i);

            try {
                while (true) {
                    GridDhtCacheEntry entry = null;

                    try {
                        entry = entryExx(key);

                        CacheObject val = req.value(i);
                        CacheObject prevVal = req.previousValue(i);

                        EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
                        Long updateIdx = req.updateCounter(i);

                        GridCacheOperation op = entryProcessor != null ? TRANSFORM :
                            (val != null) ? UPDATE : DELETE;

                        long ttl = req.ttl(i);
                        long expireTime = req.conflictExpireTime(i);

                        GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
                            ver,
                            nodeId,
                            nodeId,
                            op,
                            op == TRANSFORM ? entryProcessor : val,
                            op == TRANSFORM ? req.invokeArguments() : null,
                            /*write-through*/(ctx.store().isLocal() && !ctx.shared().localStorePrimaryOnly())
                                && writeThrough() && !req.skipStore(),
                            /*read-through*/false,
                            /*retval*/false,
                            req.keepBinary(),
                            /*expiry policy*/null,
                            /*event*/true,
                            /*metrics*/true,
                            /*primary*/false,
                            /*check version*/!req.forceTransformBackups(),
                            req.topologyVersion(),
                            CU.empty0(),
                            replicate ? DR_BACKUP : DR_NONE,
                            ttl,
                            expireTime,
                            req.conflictVersion(i),
                            false,
                            intercept,
                            taskName,
                            prevVal,
                            updateIdx,
                            null,
                            req.transformOperation());

                        if (updRes.removeVersion() != null)
                            ctx.onDeferredDelete(entry, updRes.removeVersion());

                        entry.onUnlock();

                        break; // While.
                    }
                    catch (GridCacheEntryRemovedException ignored) {
                        if (log.isDebugEnabled())
                            log.debug("Got removed entry while updating backup value (will retry): " + key);

                        entry = null;
                    }
                    finally {
                        if (entry != null)
                            entry.touch();
                    }
                }
            }
            catch (NodeStoppingException e) {
                U.warn(log, "Failed to update key on backup (local node is stopping): " + key);

                return;
            }
            catch (GridDhtInvalidPartitionException ignored) {
                // Ignore.
            }
            catch (IgniteCheckedException | RuntimeException e) {
                if (e instanceof RuntimeException && !X.hasCause(e, IgniteOutOfMemoryException.class))
                    throw (RuntimeException)e;

                IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e);

                if (nearRes != null)
                    nearRes.addFailedKey(key, err);

                U.error(log, "Failed to update key on backup node: " + key, e);
            }
        }
    }
    finally {
        ctx.shared().database().checkpointReadUnlock();
    }

    GridDhtAtomicUpdateResponse dhtRes = null;

    if (req.nearSize() > 0 || req.obsoleteNearKeysSize() > 0) {
        List<KeyCacheObject> nearEvicted = null;

        if (isNearEnabled(ctx))
            nearEvicted = ((GridNearAtomicCache<K, V>)near()).processDhtAtomicUpdateRequest(nodeId, req, nearRes);
        else if (req.nearSize() > 0) {
            nearEvicted = new ArrayList<>(req.nearSize());

            for (int i = 0; i < req.nearSize(); i++)
                nearEvicted.add(req.nearKey(i));
        }

        if (nearEvicted != null) {
            dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(),
                ctx.deploymentEnabled());

            dhtRes.nearEvicted(nearEvicted);
        }
    }

    try {
        // TODO fire events only after successful fsync
        if (ctx.shared().wal() != null)
            ctx.shared().wal().flush(null, false);
    }
    catch (StorageException e) {
        if (dhtRes != null)
            dhtRes.onError(new IgniteCheckedException(e));

        if (nearRes != null)
            nearRes.onClassError(e);
    }
    catch (IgniteCheckedException e) {
        if (dhtRes != null)
            dhtRes.onError(e);

        if (nearRes != null)
            nearRes.onClassError(e);
    }

    if (nearRes != null)
        sendDhtNearResponse(req, nearRes);

    if (dhtRes == null && req.replyWithoutDelay()) {
        dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(),
            ctx.deploymentEnabled());
    }

    if (dhtRes != null)
        sendDhtPrimaryResponse(nodeId, req, dhtRes);
    else
        sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId());
}
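The inner while (true) loop is a standard retry-on-removed pattern: entryExx may return an entry that another thread concurrently purges, in which case innerUpdate throws GridCacheEntryRemovedException and the lookup is simply repeated. Below is a minimal sketch of that pattern with stand-in types; Entry, RemovedException, and EntryFactory are hypothetical simplifications, not Ignite API.

public class RetryOnRemovedDemo {
    static class RemovedException extends Exception {}

    interface Entry {
        void update(Object val) throws RemovedException;

        void touch();
    }

    interface EntryFactory {
        Entry lookup(Object key);
    }

    public static void updateWithRetry(EntryFactory cache, Object key, Object val) {
        while (true) {
            Entry entry = null;

            try {
                entry = cache.lookup(key);

                entry.update(val); // Throws if the entry was removed concurrently.

                return;
            }
            catch (RemovedException ignored) {
                // The entry was purged between lookup and update: retry with a fresh one.
                entry = null;
            }
            finally {
                if (entry != null)
                    entry.touch(); // Only touch entries we actually used.
            }
        }
    }
}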