Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
The class WalPageCompressionIntegrationTest, method doTestPageCompression.
/**
 * @throws Exception If failed.
*/
@Override
protected void doTestPageCompression() throws Exception {
    // Ignite instance with compressed WAL page records.
    IgniteEx ignite0 = startGrid(0);

    compression = DiskPageCompression.DISABLED;
    compressionLevel = null;

    // Reference ignite instance with uncompressed WAL page records.
    IgniteEx ignite1 = startGrid(1);

    ignite0.cluster().active(true);
    ignite1.cluster().active(true);

    String cacheName = "test";

    CacheConfiguration<Integer, TestVal> ccfg = new CacheConfiguration<Integer, TestVal>()
        .setName(cacheName)
        .setBackups(0)
        .setAtomicityMode(ATOMIC)
        .setIndexedTypes(Integer.class, TestVal.class);

    IgniteCache<Integer, TestVal> cache0 = ignite0.getOrCreateCache(ccfg);
    IgniteCache<Integer, TestVal> cache1 = ignite1.getOrCreateCache(ccfg);

    int cnt = 20_000;

    for (int i = 0; i < cnt; i++) {
        assertTrue(cache0.putIfAbsent(i, new TestVal(i)));
        assertTrue(cache1.putIfAbsent(i, new TestVal(i)));
    }

    for (int i = 0; i < cnt; i += 2) {
        assertEquals(new TestVal(i), cache0.getAndRemove(i));
        assertEquals(new TestVal(i), cache1.getAndRemove(i));
    }

    // Write any WAL record to get current WAL pointers.
    WALPointer ptr0 = ignite0.context().cache().context().wal().log(new CheckpointRecord(null));
    WALPointer ptr1 = ignite1.context().cache().context().wal().log(new CheckpointRecord(null));

    log.info("Compressed WAL pointer: " + ptr0);
    log.info("Uncompressed WAL pointer: " + ptr1);

    assertTrue("Compressed WAL must be smaller than uncompressed [ptr0=" + ptr0 + ", ptr1=" + ptr1 + ']',
        ptr0.compareTo(ptr1) < 0);
}
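The final assertion relies on WAL pointers being ordered by position in the log: after identical workloads, the node that produced less WAL data ends up with the smaller pointer. Below is a minimal sketch of that ordering, using the (segment index, file offset, length) constructor and compareTo that appear in the snippets on this page; since WALPointer is Ignite-internal, the exact comparison rule (segment index first, then file offset) is an assumption here.

import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;

// Illustrative only: the ordering by (segment index, then file offset) is an assumption
// inferred from the assertion in the test above, not a documented contract.
public class WalPointerOrderingSketch {
    public static void main(String[] args) {
        WALPointer a = new WALPointer(5, 1024, 64);  // segment 5, offset 1024, record length 64
        WALPointer b = new WALPointer(5, 4096, 64);  // same segment, later offset
        WALPointer c = new WALPointer(6, 0, 64);     // later segment

        System.out.println("a < b: " + (a.compareTo(b) < 0)); // expected: true (smaller offset)
        System.out.println("b < c: " + (b.compareTo(c) < 0)); // expected: true (earlier segment)
    }
}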
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
The class FileWriteHandleImpl, method close.
/**
 * @return {@code true} If this thread actually closed the segment.
 * @throws IgniteCheckedException If failed.
 * @throws StorageException If failed.
*/
@Override
public boolean close(boolean rollOver) throws IgniteCheckedException, StorageException {
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            flushOrWait(null);

            try {
                RecordSerializer backwardSerializer = new RecordSerializerFactoryImpl(cctx)
                    .createSerializer(serializerVer);

                SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                if (rollOver && written + switchSegmentRecSize < maxWalSegmentSize) {
                    segmentRecord.size(switchSegmentRecSize);

                    WALPointer segRecPtr = addRecord(segmentRecord);

                    if (segRecPtr != null) {
                        fsync(segRecPtr);

                        switchSegmentRecordOffset = segRecPtr.fileOffset() + switchSegmentRecSize;
                    }
                    else {
                        if (log.isDebugEnabled())
                            log.debug("Not enough space in wal segment to write segment switch");
                    }
                }
                else {
                    if (log.isDebugEnabled()) {
                        log.debug("Not enough space in wal segment to write segment switch, written=" + written +
                            ", switchSegmentRecSize=" + switchSegmentRecSize);
                    }
                }

                // Unconditional flush (tail of the buffer).
                flushOrWait(null);

                if (mmap) {
                    List<SegmentedRingByteBuffer.ReadSegment> segs = buf.poll(maxWalSegmentSize);

                    if (segs != null) {
                        assert segs.size() == 1;

                        segs.get(0).release();
                    }
                }

                // Do the final fsync.
                if (mode != WALMode.NONE) {
                    if (mmap)
                        ((MappedByteBuffer)buf.buf).force();
                    else
                        fileIO.force();

                    lastFsyncPos = written;
                }

                if (mmap) {
                    try {
                        fileIO.close();
                    }
                    catch (IOException ignore) {
                        // No-op.
                    }
                }
                else {
                    walWriter.close();

                    if (!rollOver)
                        buf.free();
                }
            }
            catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        }
        finally {
            if (mmap)
                buf.free();

            lock.unlock();
        }
    }
    else
        return false;
}
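The stop.compareAndSet(false, true) guard is what makes close safe to call from several threads: only the first caller flips the flag and performs the shutdown work, every later caller returns false. Below is a stripped-down sketch of the same pattern using only JDK types; the class and field names are hypothetical, not Ignite's.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;

// Hypothetical handle illustrating the CAS-guarded, close-once pattern used above.
class CloseOnceHandle {
    private final AtomicBoolean stop = new AtomicBoolean();
    private final ReentrantLock lock = new ReentrantLock();

    /** @return {@code true} if this thread actually closed the handle. */
    boolean close() {
        // Only one thread wins the CAS; later callers return false immediately.
        if (!stop.compareAndSet(false, true))
            return false;

        lock.lock();

        try {
            // Flush buffers, force to disk and release resources here.
            return true;
        }
        finally {
            lock.unlock();
        }
    }
}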
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
The class FsyncFileWriteHandle, method addRecord.
/** {@inheritDoc} */
@Nullable
@Override
public WALPointer addRecord(WALRecord rec) throws StorageException {
    assert rec.size() > 0 || rec.getClass() == FakeRecord.class;

    boolean flushed = false;

    for (;;) {
        WALRecord h = head.get();

        long nextPos = nextPosition(h);

        if (nextPos + rec.size() >= maxSegmentSize || stopped(h)) {
            // Can not write to this segment, need to switch to the next one.
            return null;
        }

        int newChainSize = h.chainSize() + rec.size();

        if (newChainSize > tlbSize && !flushed) {
            boolean res = h.previous() == null || flush(h, false);

            if (rec.size() > tlbSize)
                flushed = res;

            continue;
        }

        rec.chainSize(newChainSize);
        rec.previous(h);

        WALPointer ptr = new WALPointer(getSegmentId(), (int)nextPos, rec.size());

        rec.position(ptr);

        if (head.compareAndSet(h, rec))
            return ptr;
    }
}
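addRecord publishes records through a lock-free chain: each record links to the current head, and a successful compareAndSet on head both reserves the byte range and hands back the WALPointer (segment id, file offset, record length) for it. The sketch below reduces that retry loop to JDK types only; the Rec and ChainAppender classes are hypothetical stand-ins, and the buffer-flushing branch (tlbSize, flush) is omitted.

import java.util.concurrent.atomic.AtomicReference;

// Hypothetical stand-ins for WALRecord and the record chain; not Ignite classes.
final class Rec {
    final int size;
    volatile Rec previous;
    volatile long position;

    Rec(int size, long position) {
        this.size = size;
        this.position = position;
    }
}

final class ChainAppender {
    /** Seed "fake" record marking the start position of the segment payload. */
    private final AtomicReference<Rec> head = new AtomicReference<>(new Rec(0, 0));

    private final long maxSegmentSize;

    ChainAppender(long maxSegmentSize) {
        this.maxSegmentSize = maxSegmentSize;
    }

    /** @return Byte position of the appended record, or {@code -1} if the segment is full. */
    long append(int recSize) {
        for (;;) {
            Rec h = head.get();

            // Next byte position: right after the current head record.
            long nextPos = h.position + h.size;

            if (nextPos + recSize >= maxSegmentSize)
                return -1; // Caller must roll over to the next segment.

            Rec rec = new Rec(recSize, nextPos);
            rec.previous = h;

            // Publish the record; if another thread won the race, retry against the new head.
            if (head.compareAndSet(h, rec))
                return nextPos;
        }
    }
}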
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
The class FsyncFileWriteHandle, method writeHeader.
/**
 * Writes the serializer version to the current handle. NOTE: this method mutates the {@code fileIO} position and
 * the {@code written} and {@code lastFsyncPos} fields.
 *
 * @throws StorageException If the serializer version could not be written.
*/
@Override
public void writeHeader() throws StorageException {
    try {
        assert fileIO.position() == 0 : "Serializer version can be written only at the begin of file " +
            fileIO.position();

        long updatedPosition = writeSerializerVersion(fileIO, getSegmentId(), serializer.version(), mode);

        written = updatedPosition;
        lastFsyncPos = updatedPosition;
        head.set(new FakeRecord(new WALPointer(getSegmentId(), (int)updatedPosition, 0), false));
    }
    catch (IOException e) {
        throw new StorageException("Unable to write serializer version for segment " + getSegmentId(), e);
    }
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
The class FsyncFileWriteHandle, method close.
/**
 * @return {@code true} If this thread actually closed the segment.
 * @throws StorageException If failed.
*/
@Override
public boolean close(boolean rollOver) throws StorageException {
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            flushOrWait(null, true);

            assert stopped() : "Segment is not closed after close flush: " + head.get();

            try {
                try {
                    RecordSerializer backwardSerializer = new RecordSerializerFactoryImpl(cctx)
                        .createSerializer(serializerVersion);

                    SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                    int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                    if (rollOver && written + switchSegmentRecSize < maxSegmentSize) {
                        final ByteBuffer buf = ByteBuffer.allocate(switchSegmentRecSize);

                        segmentRecord.position(new WALPointer(getSegmentId(), (int)written, switchSegmentRecSize));

                        backwardSerializer.writeRecord(segmentRecord, buf);

                        buf.rewind();

                        written += fileIO.writeFully(buf, written);

                        switchSegmentRecordOffset = (int)written;
                    }
                }
                catch (IgniteCheckedException e) {
                    throw new IOException(e);
                }
                finally {
                    assert mode == WALMode.FSYNC;

                    // Do the final fsync.
                    fileIO.force();

                    lastFsyncPos = written;

                    fileIO.close();
                }
            }
            catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        }
        finally {
            lock.unlock();
        }
    }
    else
        return false;
}
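In FSYNC mode the handle finishes by forcing everything written to disk before closing the file, so lastFsyncPos ends up equal to written. Ignite's FileIO is an internal abstraction; the following is a rough analogue of the positioned write, force, close sequence using standard java.nio, offered as a sketch under that assumption (the file name is hypothetical).

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class ForceThenCloseSketch {
    public static void main(String[] args) throws IOException {
        Path segment = Path.of("wal-segment-0001.bin"); // hypothetical segment file

        try (FileChannel ch = FileChannel.open(segment,
            StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            ByteBuffer rec = ByteBuffer.wrap("record".getBytes(StandardCharsets.UTF_8));

            long written = 0;

            // Positioned write, analogous to fileIO.writeFully(buf, written) above.
            while (rec.hasRemaining())
                written += ch.write(rec, written);

            // Final fsync before close, analogous to fileIO.force(); metadata included.
            ch.force(true);
        }
    }
}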