Example usage of org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord in the Apache Ignite project — from class IgniteWalIteratorSwitchSegmentTest, method checkInvariantSwitchSegmentSize:
/**
 * Verifies the invariant that a {@link SwitchSegmentRecord} serializes to exactly one byte.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkInvariantSwitchSegmentSize(int serVer) throws Exception {
    // Standalone kernal context whose cache-object processor is backed by the binary processor.
    GridKernalContext kernalCtx = new StandaloneGridKernalContext(log, null, null) {
        @Override public IgniteCacheObjectProcessor cacheObjects() {
            return new CacheObjectBinaryProcessorImpl(this);
        }
    };

    // Database manager stub that only reports the default page size.
    IgniteCacheDatabaseSharedManager dbMgr = new IgniteCacheDatabaseSharedManager() {
        @Override public int pageSize() {
            return DataStorageConfiguration.DFLT_PAGE_SIZE;
        }
    };

    // Minimal shared context: everything except the kernal context and DB manager is unused here.
    GridCacheSharedContext<?, ?> sharedCtx = new GridCacheSharedContext<>(
        kernalCtx, null, null, null, null, null, null, dbMgr,
        null, null, null, null, null, null, null, null, null, null, null, null, null);

    RecordSerializer ser = new RecordSerializerFactoryImpl(sharedCtx).createSerializer(serVer);

    // The switch-segment marker must occupy exactly one byte on disk.
    Assert.assertEquals(1, ser.size(new SwitchSegmentRecord()));
}
Example usage of org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord in the Apache Ignite project — from class FileWriteHandleImpl, method close:
/**
 * Closes this WAL write handle: on rollover, appends a switch-segment marker (when it fits),
 * flushes the remaining buffer, performs the final fsync and releases file resources.
 *
 * @return {@code true} If this thread actually closed the segment.
 * @throws IgniteCheckedException If failed.
 * @throws StorageException If failed.
 */
@Override
public boolean close(boolean rollOver) throws IgniteCheckedException, StorageException {
    // Only the first thread to flip the stop flag performs the close; concurrent callers get false.
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            // Drain pending writes before deciding whether the switch-segment marker fits.
            flushOrWait(null);

            try {
                RecordSerializer backwardSerializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer);

                SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                // On rollover, append the marker only if it still fits into the segment.
                if (rollOver && written + switchSegmentRecSize < maxWalSegmentSize) {
                    segmentRecord.size(switchSegmentRecSize);

                    WALPointer segRecPtr = addRecord(segmentRecord);

                    if (segRecPtr != null) {
                        // Marker accepted into the buffer: force it to disk and record its end offset.
                        fsync(segRecPtr);

                        switchSegmentRecordOffset = segRecPtr.fileOffset() + switchSegmentRecSize;
                    }
                    else {
                        // addRecord() refused the record (presumably the segment ran out of
                        // room concurrently — confirm against addRecord's contract).
                        if (log.isDebugEnabled())
                            log.debug("Not enough space in wal segment to write segment switch");
                    }
                }
                else {
                    if (log.isDebugEnabled()) {
                        log.debug("Not enough space in wal segment to write segment switch, written=" + written + ", switchSegmentRecSize=" + switchSegmentRecSize);
                    }
                }

                // Unconditional flush (tail of the buffer)
                flushOrWait(null);

                if (mmap) {
                    // Drain whatever remains in the ring buffer so it can be freed in the finally block.
                    List<SegmentedRingByteBuffer.ReadSegment> segs = buf.poll(maxWalSegmentSize);

                    if (segs != null) {
                        assert segs.size() == 1;

                        segs.get(0).release();
                    }
                }

                // Do the final fsync.
                if (mode != WALMode.NONE) {
                    if (mmap)
                        ((MappedByteBuffer) buf.buf).force();
                    else
                        fileIO.force();

                    lastFsyncPos = written;
                }

                if (mmap) {
                    try {
                        fileIO.close();
                    }
                    catch (IOException ignore) {
                        // No-op.
                    }
                }
                else {
                    // Non-mmap mode: stop the writer and free the buffer — unless rolling over,
                    // in which case the buffer is kept (presumably reused by the next handle —
                    // TODO confirm against the rollover path).
                    walWriter.close();

                    if (!rollOver)
                        buf.free();
                }
            }
            catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        }
        finally {
            // In mmap mode the buffer is always freed here, regardless of rollover.
            if (mmap)
                buf.free();

            lock.unlock();
        }
    }
    else
        return false;
}
Example usage of org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord in the Apache Ignite project — from class FsyncFileWriteHandle, method close:
/**
 * Closes this FSYNC-mode WAL write handle: on rollover, serializes and writes a
 * switch-segment marker directly to the file when it fits, then performs the final
 * fsync and closes the file I/O.
 *
 * @return {@code true} If this thread actually closed the segment.
 * @throws StorageException If failed.
 */
@Override
public boolean close(boolean rollOver) throws StorageException {
    // Only the first thread to flip the stop flag performs the close; concurrent callers get false.
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            flushOrWait(null, true);

            assert stopped() : "Segment is not closed after close flush: " + head.get();

            try {
                try {
                    RecordSerializer backwardSerializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVersion);

                    SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                    int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                    // On rollover, write the marker directly at the current position if it fits.
                    if (rollOver && written + switchSegmentRecSize < maxSegmentSize) {
                        final ByteBuffer buf = ByteBuffer.allocate(switchSegmentRecSize);

                        // Position the record at the current write offset before serializing.
                        segmentRecord.position(new WALPointer(getSegmentId(), (int) written, switchSegmentRecSize));

                        backwardSerializer.writeRecord(segmentRecord, buf);

                        buf.rewind();

                        written += fileIO.writeFully(buf, written);

                        switchSegmentRecordOffset = (int) written;
                    }
                }
                catch (IgniteCheckedException e) {
                    // Wrap so the outer catch converts it to a StorageException uniformly.
                    throw new IOException(e);
                }
                finally {
                    // This handle type only exists in FSYNC mode.
                    assert mode == WALMode.FSYNC;

                    // Do the final fsync.
                    fileIO.force();

                    lastFsyncPos = written;

                    fileIO.close();
                }
            }
            catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        }
        finally {
            lock.unlock();
        }
    }
    else
        return false;
}
Example usage of org.apache.ignite.internal.pagemem.wal.record.SwitchSegmentRecord in the Apache Ignite project — from class IgniteWalIteratorSwitchSegmentTest, method checkInvariantSwitchSegment:
/**
 * Checks that a rotated WAL segment remains fully readable after a switch-segment
 * record: tunes the payload size so the segment leaves a small tail, writes enough
 * records to roll over, optionally fills the tail with garbage, and verifies every
 * logged record is reachable via replay.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkInvariantSwitchSegment(int serVer) throws Exception {
    String workDir = U.defaultWorkDirectory();

    T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);

    IgniteWriteAheadLogManager walMgr = initTup.get1();

    RecordSerializer recordSerializer = initTup.get2();

    int switchSegmentRecordSize = recordSerializer.size(new SwitchSegmentRecord());

    log.info("switchSegmentRecordSize:" + switchSegmentRecordSize);

    int tailSize = 0;

    /* Initial record payload size. */
    int payloadSize = 1024;

    int recSize = 0;

    MetastoreDataRecord rec = null;

    /* Record size. */
    int recordTypeSize = 1;

    /* Record pointer. */
    int recordPointerSize = 8 + 4 + 4;

    int lowBound = recordTypeSize + recordPointerSize;
    int highBound = lowBound + /*CRC*/4;

    int attempt = 1000;

    // Try find how many record need for specific tail size.
    while (true) {
        if (attempt < 0)
            throw new IgniteCheckedException("Can not find any payload size for test, " +
                "lowBound=" + lowBound + ", highBound=" + highBound);

        if (tailSize >= lowBound && tailSize < highBound)
            break;

        payloadSize++;

        byte[] payload = new byte[payloadSize];

        // Fake record for payload.
        rec = new MetastoreDataRecord("0", payload);

        recSize = recordSerializer.size(rec);

        tailSize = (SEGMENT_SIZE - HEADER_RECORD_SIZE) % recSize;

        attempt--;
    }

    Assert.assertNotNull(rec);

    int recordsToWrite = SEGMENT_SIZE / recSize;

    log.info("records to write " + recordsToWrite + " tail size " +
        (SEGMENT_SIZE - HEADER_RECORD_SIZE) % recSize);

    // Add more record for rollover to the next segment.
    recordsToWrite += 100;

    for (int i = 0; i < recordsToWrite; i++)
        walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));

    walMgr.flush(null, true);

    SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");

    // Await archiver move segment to WAL archive.
    // Fix: assert the wait outcome — previously a timeout was silently ignored and the
    // test proceeded against a non-archived segment, failing later with a misleading message.
    Assert.assertTrue("Segment 0 was not archived within timeout.",
        waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5_000));

    // Filling tail some garbage. Simulate tail garbage on rotate segment in WAL work directory.
    if (switchSegmentRecordSize > 1) {
        File seg = new File(workDir + ARCHIVE_SUB_DIR + "/0000000000000000.wal");

        FileIOFactory ioFactory = new RandomAccessFileIOFactory();

        // Fix: try-with-resources guarantees the segment file is closed even if a write fails.
        try (FileIO seg0 = ioFactory.create(seg)) {
            byte[] bytes = new byte[tailSize];

            Random rnd = new Random();

            rnd.nextBytes(bytes);

            // Some record type.
            bytes[0] = (byte)(METASTORE_DATA_RECORD.ordinal() + 1);

            seg0.position((int)(seg0.size() - tailSize));

            seg0.write(bytes, 0, tailSize);

            seg0.force(true);
        }
    }

    int expRecords = recordsToWrite;
    int actualRecords = 0;

    // Check that switch segment works as expected and all record is reachable.
    try (WALIterator it = walMgr.replay(null)) {
        while (it.hasNext()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

            WALRecord rec0 = tup.get2();

            if (rec0.type() == METASTORE_DATA_RECORD)
                actualRecords++;
        }
    }

    Assert.assertEquals("Not all records read during iteration.", expRecords, actualRecords);
}
Aggregations