Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FsyncFileWriteHandle, method close.
/**
 * @return {@code true} If this thread actually closed the segment.
 * @throws StorageException If failed.
 */
@Override
public boolean close(boolean rollOver) throws StorageException {
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            flushOrWait(null, true);

            assert stopped() : "Segment is not closed after close flush: " + head.get();

            try {
                try {
                    RecordSerializer backwardSerializer =
                        new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVersion);

                    SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                    int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                    if (rollOver && written + switchSegmentRecSize < maxSegmentSize) {
                        final ByteBuffer buf = ByteBuffer.allocate(switchSegmentRecSize);

                        segmentRecord.position(new WALPointer(getSegmentId(), (int) written, switchSegmentRecSize));
                        backwardSerializer.writeRecord(segmentRecord, buf);

                        buf.rewind();

                        written += fileIO.writeFully(buf, written);

                        switchSegmentRecordOffset = (int) written;
                    }
                }
                catch (IgniteCheckedException e) {
                    throw new IOException(e);
                }
                finally {
                    assert mode == WALMode.FSYNC;

                    // Do the final fsync.
                    fileIO.force();

                    lastFsyncPos = written;

                    fileIO.close();
                }
            }
            catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        }
        finally {
            lock.unlock();
        }
    }
    else
        return false;
}
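The notable pattern here is the boundary handling: serializer failures (IgniteCheckedException) are re-wrapped as IOException, the final fsync and close always run in a finally block, and any IOException surfaces to callers as a single StorageException that names the segment index. Below is a minimal, self-contained sketch of that close-time shape using only JDK types; the class and field names are hypothetical, and the nested StorageException is merely a stand-in for Ignite's checked exception, not the real class.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class WalSegmentCloseSketch {
    /** Stand-in for Ignite's checked StorageException; not the real class. */
    static class StorageException extends Exception {
        StorageException(String msg, IOException cause) { super(msg, cause); }
    }

    private final FileChannel ch;
    private final long segmentIdx;

    WalSegmentCloseSketch(Path file, long segmentIdx) throws IOException {
        this.ch = FileChannel.open(file, StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        this.segmentIdx = segmentIdx;
    }

    /** Appends a final marker record, then fsyncs and closes the segment file. */
    void close(byte[] switchSegmentMarker) throws StorageException {
        try {
            try {
                ByteBuffer buf = ByteBuffer.wrap(switchSegmentMarker);

                while (buf.hasRemaining())
                    ch.write(buf);
            }
            finally {
                // Do the final fsync before releasing the file, as the handle above does.
                ch.force(true);
                ch.close();
            }
        }
        catch (IOException e) {
            // A single storage-layer exception type at the boundary.
            throw new StorageException("Failed to close WAL write handle [idx=" + segmentIdx + "]", e);
        }
    }
}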
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FsyncFileWriteHandle, method writeBuffer.
/**
 * @param pos Position in file to start write from. May be checked against the actual position to wait for
 *      previous writes to complete.
 * @param buf Buffer to write to file.
 * @throws StorageException If failed.
 */
@SuppressWarnings("TooBroadScope")
private void writeBuffer(long pos, ByteBuffer buf) throws StorageException {
    boolean interrupted = false;

    lock.lock();

    try {
        assert fileIO != null : "Writing to a closed segment.";

        checkNode();

        long lastLogged = U.currentTimeMillis();
        long logBackoff = 2_000;

        // If we were too fast, we need to wait for previous writes to complete.
        while (written != pos) {
            // No one can write further than we are now.
            assert written < pos : "written = " + written + ", pos = " + pos;

            // A permutation occurred between block write operations:
            // the order of acquiring the lock is not the same as the order of writes.
            long now = U.currentTimeMillis();

            if (now - lastLogged >= logBackoff) {
                if (logBackoff < 60 * 60_000)
                    logBackoff *= 2;

                U.warn(log, "Still waiting for a concurrent write to complete [written=" + written +
                    ", pos=" + pos + ", lastFsyncPos=" + lastFsyncPos + ", stop=" + stop.get() +
                    ", actualPos=" + safePosition() + ']');

                lastLogged = now;
            }

            try {
                writeComplete.await(2, TimeUnit.SECONDS);
            }
            catch (InterruptedException ignore) {
                interrupted = true;
            }

            checkNode();
        }

        // Do the write.
        int size = buf.remaining();

        assert size > 0 : size;

        try {
            assert written == fileIO.position();

            fileIO.writeFully(buf);

            written += size;

            metrics.onWalBytesWritten(size);

            assert written == fileIO.position();
        }
        catch (IOException e) {
            StorageException se = new StorageException("Unable to write", e);

            cctx.kernalContext().failure().process(new FailureContext(CRITICAL_ERROR, se));

            throw se;
        }
    }
    finally {
        writeComplete.signalAll();

        lock.unlock();

        if (interrupted)
            Thread.currentThread().interrupt();
    }
}
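writeBuffer assumes each caller has already reserved a file position: threads may acquire the lock out of order, so a writer waits on the condition until written catches up to its reserved pos, appends, and then signals all waiters, restoring the interrupt flag only after the lock is released. On an I/O error the original additionally wraps the IOException into a StorageException and reports it to the failure handler as a critical error. The following is a minimal, self-contained sketch of the ordering discipline alone, built on plain JDK concurrency primitives with hypothetical names:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class OrderedAppendSketch {
    private final ReentrantLock lock = new ReentrantLock();
    private final Condition writeComplete = lock.newCondition();
    private final FileChannel ch;

    /** Bytes written so far; guarded by {@link #lock}. */
    private long written;

    OrderedAppendSketch(FileChannel ch) {
        this.ch = ch;
    }

    /** Writes {@code buf} at the pre-reserved position {@code pos}, waiting for earlier writes first. */
    void writeBuffer(long pos, ByteBuffer buf) throws IOException {
        boolean interrupted = false;

        lock.lock();

        try {
            // Lock acquisition order may differ from position order, so wait our turn.
            while (written != pos) {
                try {
                    writeComplete.await(2, TimeUnit.SECONDS);
                }
                catch (InterruptedException ignore) {
                    interrupted = true;   // Remember and restore later, as the handle above does.
                }
            }

            int size = buf.remaining();

            while (buf.hasRemaining())
                ch.write(buf);

            written += size;
        }
        finally {
            writeComplete.signalAll();
            lock.unlock();

            if (interrupted)
                Thread.currentThread().interrupt();
        }
    }
}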
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FilePageStore, method sync.
/**
 * {@inheritDoc}
 */
@Override
public void sync() throws StorageException {
    lock.writeLock().lock();

    try {
        init();

        FileIO fileIO = this.fileIO;

        if (fileIO != null)
            fileIO.force();
    }
    catch (IOException e) {
        throw new StorageException("Failed to fsync partition file [file=" + getFileAbsolutePath() + ']', e);
    }
    finally {
        lock.writeLock().unlock();
    }
}
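sync() reads the fileIO field once into a local and forces it only if the store is still backed by an open file, translating any IOException into a StorageException that carries the file path. A minimal, self-contained sketch of the same shape with JDK types (hypothetical names; the nested StorageException is only a stand-in) might look like this:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class PageStoreSyncSketch {
    /** Stand-in for Ignite's checked StorageException; not the real class. */
    static class StorageException extends Exception {
        StorageException(String msg, IOException cause) { super(msg, cause); }
    }

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final String path;

    /** Backing channel; may be cleared when the store is truncated or closed. */
    private volatile FileChannel ch;

    PageStoreSyncSketch(FileChannel ch, String path) {
        this.ch = ch;
        this.path = path;
    }

    void sync() throws StorageException {
        lock.writeLock().lock();

        try {
            FileChannel cur = ch;   // Read the field once.

            if (cur != null)
                cur.force(true);    // Flush file content to disk.
        }
        catch (IOException e) {
            throw new StorageException("Failed to fsync partition file [file=" + path + ']', e);
        }
        finally {
            lock.writeLock().unlock();
        }
    }
}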
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class FilePageStore, method truncate.
/**
 * {@inheritDoc}
 */
@Override
public void truncate(int tag) throws StorageException {
    init();

    Path filePath = pathProvider.apply();

    lock.writeLock().lock();

    try {
        this.tag = tag;

        fileIO.clear();
        fileIO.close();
        fileIO = null;

        Files.delete(filePath);

        fileExists = false;
    }
    catch (IOException e) {
        throw new StorageException("Failed to truncate partition file [file=" + filePath.toAbsolutePath() + "]", e);
    }
    finally {
        allocatedTracker.accept(-1L * allocated.getAndSet(0) / pageSize);

        inited = false;

        lock.writeLock().unlock();
    }
}
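truncate() closes and deletes the backing file under the write lock, but the finally block is the interesting part: the allocated-size counter is handed back to the region-wide tracker and inited is reset regardless of whether the deletion succeeded, so the store is lazily re-initialized on the next access. A minimal, self-contained sketch of that cleanup discipline (hypothetical names, JDK types only) could look like this:

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.LongConsumer;

public class PageStoreTruncateSketch {
    /** Stand-in for Ignite's checked StorageException; not the real class. */
    static class StorageException extends Exception {
        StorageException(String msg, IOException cause) { super(msg, cause); }
    }

    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private final AtomicLong allocated = new AtomicLong();
    private final LongConsumer allocatedTracker;   // e.g. a region-wide page counter
    private final int pageSize;
    private final Path file;

    private volatile FileChannel ch;
    private volatile boolean inited;

    PageStoreTruncateSketch(Path file, FileChannel ch, int pageSize, LongConsumer tracker) {
        this.file = file;
        this.ch = ch;
        this.pageSize = pageSize;
        this.allocatedTracker = tracker;
        this.inited = true;
    }

    void truncate() throws StorageException {
        lock.writeLock().lock();

        try {
            ch.truncate(0);
            ch.close();
            ch = null;

            Files.delete(file);
        }
        catch (IOException e) {
            throw new StorageException("Failed to truncate partition file [file=" + file.toAbsolutePath() + "]", e);
        }
        finally {
            // Return the freed pages to the tracker and force lazy re-initialization,
            // even if the deletion failed part-way.
            allocatedTracker.accept(-allocated.getAndSet(0) / pageSize);
            inited = false;

            lock.writeLock().unlock();
        }
    }
}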
Use of org.apache.ignite.internal.processors.cache.persistence.StorageException in project ignite by apache.
The class CheckpointMarkersStorage, method writeCheckpointEntry.
/**
 * @param entryBuf Buffer which will be written to disk.
 * @param cp Prepared checkpoint entry.
 * @param type Type of checkpoint marker.
 * @param skipSync {@code true} if the file sync should be skipped after the write.
 * @throws StorageException If failed.
 */
private void writeCheckpointEntry(
    ByteBuffer entryBuf,
    CheckpointEntry cp,
    CheckpointEntryType type,
    boolean skipSync
) throws StorageException {
    String fileName = checkpointFileName(cp, type);
    String tmpFileName = fileName + FilePageStoreManager.TMP_SUFFIX;

    try {
        try (FileIO io = ioFactory.create(
            Paths.get(cpDir.getAbsolutePath(), skipSync ? fileName : tmpFileName).toFile(),
            StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)
        ) {
            io.writeFully(entryBuf);

            entryBuf.clear();

            if (!skipSync)
                io.force(true);
        }

        if (!skipSync)
            Files.move(Paths.get(cpDir.getAbsolutePath(), tmpFileName), Paths.get(cpDir.getAbsolutePath(), fileName));
    }
    catch (IOException e) {
        throw new StorageException("Failed to write checkpoint entry [ptr=" + cp.checkpointMark() +
            ", cpTs=" + cp.timestamp() + ", cpId=" + cp.checkpointId() + ", type=" + type + "]", e);
    }
}
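Unless skipSync is set, the checkpoint marker is first written to a *.tmp file, fsynced, and only then renamed to its final name, so a crash cannot leave a torn marker under the real file name. The sketch below shows that write-temp/fsync/rename shape with plain java.nio; the names are hypothetical, the skipSync fast path is dropped, and the nested StorageException is a stand-in for Ignite's class:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class CheckpointMarkerWriteSketch {
    /** Stand-in for Ignite's checked StorageException; not the real class. */
    static class StorageException extends Exception {
        StorageException(String msg, IOException cause) { super(msg, cause); }
    }

    private final Path cpDir;

    CheckpointMarkerWriteSketch(Path cpDir) {
        this.cpDir = cpDir;
    }

    /** Writes a marker durably: temp file first, fsync, then rename to the final name. */
    void writeMarker(String fileName, ByteBuffer entryBuf) throws StorageException {
        Path tmp = cpDir.resolve(fileName + ".tmp");
        Path dst = cpDir.resolve(fileName);

        try {
            try (FileChannel ch = FileChannel.open(tmp, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
                while (entryBuf.hasRemaining())
                    ch.write(entryBuf);

                ch.force(true);   // Make the marker durable before it becomes visible.
            }

            // The rename publishes the fully written, fsynced marker under its real name.
            Files.move(tmp, dst);
        }
        catch (IOException e) {
            throw new StorageException("Failed to write checkpoint entry [file=" + dst + "]", e);
        }
    }
}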