Use of org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer in the Apache Ignite project.
Class FsyncFileWriteHandle, method close.
/**
 * Closes this WAL segment write handle. Only the first thread to flip the {@code stop}
 * flag performs the actual close; concurrent callers return {@code false}.
 *
 * @param rollOver If {@code true}, try to append a {@link SwitchSegmentRecord} marker at the
 *      current write position (only when it still fits under {@code maxSegmentSize}) so that
 *      readers can distinguish a deliberate segment roll-over from a torn tail.
 * @return {@code true} If this thread actually closed the segment.
 * @throws StorageException If failed.
 */
@Override
public boolean close(boolean rollOver) throws StorageException {
    // CAS guarantees exactly one closing thread; losers fall through to 'return false'.
    if (stop.compareAndSet(false, true)) {
        lock.lock();

        try {
            // Flush pending records before sealing the segment.
            // NOTE(review): second argument presumably means "stop after flush" — confirm against flushOrWait contract.
            flushOrWait(null, true);

            assert stopped() : "Segment is not closed after close flush: " + head.get();

            try {
                try {
                    // Serializer matching the segment's own version, used only to size/write the switch record.
                    RecordSerializer backwardSerializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVersion);

                    SwitchSegmentRecord segmentRecord = new SwitchSegmentRecord();

                    int switchSegmentRecSize = backwardSerializer.size(segmentRecord);

                    // Append the switch-segment marker only on roll-over and only if it fits in the segment.
                    if (rollOver && written + switchSegmentRecSize < maxSegmentSize) {
                        final ByteBuffer buf = ByteBuffer.allocate(switchSegmentRecSize);

                        // Pointer records where the marker lives: (segment, offset, length).
                        segmentRecord.position(new WALPointer(getSegmentId(), (int) written, switchSegmentRecSize));

                        backwardSerializer.writeRecord(segmentRecord, buf);

                        buf.rewind();

                        // writeFully returns the number of bytes written; advance the logical position.
                        written += fileIO.writeFully(buf, written);

                        switchSegmentRecordOffset = (int) written;
                    }
                } catch (IgniteCheckedException e) {
                    // Wrap as IOException so the outer catch converts it into a StorageException uniformly.
                    throw new IOException(e);
                } finally {
                    // This handle only exists in FSYNC mode.
                    assert mode == WALMode.FSYNC;

                    // Do the final fsync — runs even if writing the switch record failed,
                    // so the file is always forced and closed.
                    fileIO.force();

                    lastFsyncPos = written;

                    fileIO.close();
                }
            } catch (IOException e) {
                throw new StorageException("Failed to close WAL write handle [idx=" + getSegmentId() + "]", e);
            }

            if (log.isDebugEnabled())
                log.debug("Closed WAL write handle [idx=" + getSegmentId() + "]");

            return true;
        } finally {
            lock.unlock();
        }
    } else
        return false;
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer in the Apache Ignite project.
Class IgniteWalIteratorSwitchSegmentTest, method initiate.
/**
 * Builds and starts a standalone WAL manager suitable for iterator tests: a kernal context
 * with a FSYNC-mode data storage configuration, a {@link FileWriteAheadLogManager} forced to
 * the requested serializer version, and a matching record serializer.
 *
 * @param serVer WAL serializer version.
 * @param workDir Work directory path.
 * @return Tuple of WAL manager and WAL record serializer.
 * @throws IgniteCheckedException If initialization failed.
 */
private T2<IgniteWriteAheadLogManager, RecordSerializer> initiate(int serVer, String workDir) throws IgniteCheckedException {
    GridKernalContext kernalCtx = new StandaloneGridKernalContext(log, null, null) {
        /** {@inheritDoc} */
        @Override protected IgniteConfiguration prepareIgniteConfiguration() {
            IgniteConfiguration igniteCfg = super.prepareIgniteConfiguration();

            // Small segments plus a half-segment iterator buffer, FSYNC mode, plain file I/O.
            DataStorageConfiguration dsCfg = new DataStorageConfiguration()
                .setWalSegmentSize(SEGMENT_SIZE)
                .setWalRecordIteratorBufferSize(SEGMENT_SIZE / 2)
                .setWalMode(WALMode.FSYNC)
                .setWalPath(workDir + WORK_SUB_DIR)
                .setWalArchivePath(workDir + ARCHIVE_SUB_DIR)
                .setFileIOFactory(new RandomAccessFileIOFactory());

            igniteCfg.setDataStorageConfiguration(dsCfg);
            igniteCfg.setEventStorageSpi(new NoopEventStorageSpi());

            return igniteCfg;
        }

        /** {@inheritDoc} */
        @Override public GridInternalSubscriptionProcessor internalSubscriptionProcessor() {
            return new GridInternalSubscriptionProcessor(this);
        }

        /** {@inheritDoc} */
        @Override public GridEventStorageManager event() {
            return new GridEventStorageManager(this);
        }
    };

    IgniteWriteAheadLogManager walMgr = new FileWriteAheadLogManager(kernalCtx);

    // Force the serializer version under test via reflection.
    GridTestUtils.setFieldValue(walMgr, "serializerVer", serVer);

    GridCacheSharedContext<?, ?> sharedCtx = new GridCacheSharedContext<>(kernalCtx, null, null, null, null, walMgr, new WalStateManager(kernalCtx), new GridCacheDatabaseSharedManager(kernalCtx), null, null, null, null, null, new GridCacheIoManager(), null, null, null, null, null, null, null);

    walMgr.start(sharedCtx);
    walMgr.onActivate(kernalCtx);
    walMgr.resumeLogging(null);

    RecordSerializer serializer = new RecordSerializerFactoryImpl(sharedCtx).createSerializer(walMgr.serializerVersion());

    return new T2<>(walMgr, serializer);
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer in the Apache Ignite project.
Class IgniteWalIteratorSwitchSegmentTest, method checkSwitchReadingSegmentDuringIteration.
/**
 * Verifies that a WAL iterator started on a segment in the work directory transparently
 * switches to the archived copy of the same segment once the archiver moves it, and that
 * no records are lost across the switch.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkSwitchReadingSegmentDuringIteration(int serVer) throws Exception {
    String workDir = U.defaultWorkDirectory();

    T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);

    IgniteWriteAheadLogManager walMgr = initTup.get1();

    RecordSerializer recordSerializer = initTup.get2();

    MetastoreDataRecord rec = new MetastoreDataRecord("0", new byte[100]);

    int recSize = recordSerializer.size(rec);

    // Add more record for rollover to the next segment.
    int recordsToWrite = SEGMENT_SIZE / recSize + 100;

    SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");

    // Guard from archiving before iterator would be created.
    assertTrue(segmentAware.lock(0));

    for (int i = 0; i < recordsToWrite; i++)
        walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));

    walMgr.flush(null, true);

    AtomicInteger actualRecords = new AtomicInteger(0);

    // File paths of the segment the iterator reads at the start and at the end of iteration.
    AtomicReference<String> startedSegmentPath = new AtomicReference<>();
    AtomicReference<String> finishedSegmentPath = new AtomicReference<>();

    // Handshake: iterator opened -> main thread; segment archived -> iterator thread.
    CountDownLatch startedIterLatch = new CountDownLatch(1);
    CountDownLatch finishedArchivedLatch = new CountDownLatch(1);

    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
        // Check that switch segment works as expected and all record is reachable.
        try (WALIterator it = walMgr.replay(null)) {
            // Dig out the backing file path via reflection: iterator -> segment handle -> input -> file I/O -> channel.
            Object handle = getFieldValueHierarchy(it, "currWalSegment");
            FileInput in = getFieldValueHierarchy(handle, "in");
            Object delegate = getFieldValueHierarchy(in.io(), "delegate");
            Channel ch = getFieldValueHierarchy(delegate, "ch");
            String path = getFieldValueHierarchy(ch, "path");

            startedSegmentPath.set(path);

            startedIterLatch.countDown();

            while (it.hasNext()) {
                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

                WALRecord rec0 = tup.get2();

                if (rec0.type() == METASTORE_DATA_RECORD)
                    actualRecords.incrementAndGet();

                // Block after each record until the segment has been archived,
                // forcing the iterator to cross the work -> archive switch mid-iteration.
                finishedArchivedLatch.await();
            }

            // Re-read the backing file path after iteration finished.
            in = getFieldValueHierarchy(handle, "in");
            delegate = getFieldValueHierarchy(in.io(), "delegate");
            ch = getFieldValueHierarchy(delegate, "ch");
            path = getFieldValueHierarchy(ch, "path");

            finishedSegmentPath.set(path);
        }

        return null;
    });

    startedIterLatch.await();

    // Allow the archiver to pick up segment 0 now that the iterator is open on it.
    segmentAware.unlock(0);

    waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5000);

    finishedArchivedLatch.countDown();

    fut.get();

    // should started iteration from work directory but finish from archive directory.
    assertEquals(workDir + WORK_SUB_DIR + File.separator + "0000000000000000.wal", startedSegmentPath.get());
    assertEquals(workDir + ARCHIVE_SUB_DIR + File.separator + "0000000000000000.wal", finishedSegmentPath.get());

    Assert.assertEquals("Not all records read during iteration.", recordsToWrite, actualRecords.get());
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer in the Apache Ignite project.
Class IgniteWalIteratorSwitchSegmentTest, method checkInvariantSwitchSegment.
/**
 * Verifies that WAL iteration survives a segment whose tail (the bytes after the last full
 * record, where a switch-segment marker would normally sit) has been overwritten with garbage:
 * a payload size is searched so the segment tail is smaller than one record header, the tail
 * of the archived segment is filled with random bytes, and replay must still see every record.
 *
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkInvariantSwitchSegment(int serVer) throws Exception {
    String workDir = U.defaultWorkDirectory();

    T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);

    IgniteWriteAheadLogManager walMgr = initTup.get1();

    RecordSerializer recordSerializer = initTup.get2();

    int switchSegmentRecordSize = recordSerializer.size(new SwitchSegmentRecord());

    log.info("switchSegmentRecordSize:" + switchSegmentRecordSize);

    int tailSize = 0;

    /* Initial record payload size. */
    int payloadSize = 1024;

    int recSize = 0;

    MetastoreDataRecord rec = null;

    /* Record type size. */
    int recordTypeSize = 1;

    /* Record pointer size: segment idx (8) + offset (4) + length (4). */
    int recordPointerSize = 8 + 4 + 4;

    int lowBound = recordTypeSize + recordPointerSize;
    int highBound = lowBound + /*CRC*/4;

    int attempt = 1000;

    // Try find how many record need for specific tail size:
    // grow the payload until the leftover tail lands in [lowBound, highBound).
    while (true) {
        if (attempt < 0)
            throw new IgniteCheckedException("Can not find any payload size for test, " +
                "lowBound=" + lowBound + ", highBound=" + highBound);

        if (tailSize >= lowBound && tailSize < highBound)
            break;

        payloadSize++;

        byte[] payload = new byte[payloadSize];

        // Fake record for payload.
        rec = new MetastoreDataRecord("0", payload);

        recSize = recordSerializer.size(rec);

        tailSize = (SEGMENT_SIZE - HEADER_RECORD_SIZE) % recSize;

        attempt--;
    }

    Assert.assertNotNull(rec);

    int recordsToWrite = SEGMENT_SIZE / recSize;

    log.info("records to write " + recordsToWrite + " tail size " + (SEGMENT_SIZE - HEADER_RECORD_SIZE) % recSize);

    // Add more record for rollover to the next segment.
    recordsToWrite += 100;

    for (int i = 0; i < recordsToWrite; i++)
        walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));

    walMgr.flush(null, true);

    SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");

    // Await archiver move segment to WAL archive.
    waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5_000);

    // Filling tail some garbage. Simulate tail garbage on rotate segment in WAL work directory.
    if (switchSegmentRecordSize > 1) {
        File seg = new File(workDir + ARCHIVE_SUB_DIR + "/0000000000000000.wal");

        FileIOFactory ioFactory = new RandomAccessFileIOFactory();

        byte[] bytes = new byte[tailSize];

        Random rnd = new Random();

        rnd.nextBytes(bytes);

        // Some record type (non-zero so the tail looks like the start of a record).
        bytes[0] = (byte)(METASTORE_DATA_RECORD.ordinal() + 1);

        // try-with-resources: the original leaked the FileIO if position/write/force threw.
        try (FileIO seg0 = ioFactory.create(seg)) {
            seg0.position((int)(seg0.size() - tailSize));

            seg0.write(bytes, 0, tailSize);

            seg0.force(true);
        }
    }

    int expRecords = recordsToWrite;
    int actualRecords = 0;

    // Check that switch segment works as expected and all record is reachable.
    try (WALIterator it = walMgr.replay(null)) {
        while (it.hasNext()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

            WALRecord rec0 = tup.get2();

            if (rec0.type() == METASTORE_DATA_RECORD)
                actualRecords++;
        }
    }

    Assert.assertEquals("Not all records read during iteration.", expRecords, actualRecords);
}
Use of org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer in the Apache Ignite project.
Class IgniteWalSerializerVersionTest, method testCheckDifferentSerializerVersions.
/**
 * Checks that the WAL serializer implementation matches the configured
 * {@code IGNITE_WAL_SERIALIZER_VERSION}: version 1 yields {@link RecordV1Serializer},
 * version 2 yields {@link RecordV2Serializer}, and an unknown version fails node start.
 *
 * @throws Exception If failed.
 */
@Test
public void testCheckDifferentSerializerVersions() throws Exception {
    // Remember the pre-test value so the mutated system property can't leak into other tests.
    String prevVer = System.getProperty(IGNITE_WAL_SERIALIZER_VERSION);

    try {
        checkSerializerVersion("1", RecordV1Serializer.class);

        checkSerializerVersion("2", RecordV2Serializer.class);

        // Unknown version: node start must fail with a descriptive cause.
        System.setProperty(IGNITE_WAL_SERIALIZER_VERSION, "3");

        GridTestUtils.assertThrowsAnyCause(log, new GPC<Void>() {
            @Override public Void call() throws Exception {
                startGrid();

                return null;
            }
        }, IgniteCheckedException.class, "Failed to create a serializer with the given version");

        // A valid version still works after the failed start.
        checkSerializerVersion("1", RecordV1Serializer.class);
    } finally {
        // Restore (or clear) the property regardless of the test outcome.
        if (prevVer == null)
            System.clearProperty(IGNITE_WAL_SERIALIZER_VERSION);
        else
            System.setProperty(IGNITE_WAL_SERIALIZER_VERSION, prevVer);
    }
}

/**
 * Starts a grid with the given serializer version property and asserts the WAL serializer class.
 *
 * @param ver Serializer version system property value.
 * @param expCls Expected serializer implementation class.
 * @throws Exception If failed.
 */
private void checkSerializerVersion(String ver, Class<?> expCls) throws Exception {
    System.setProperty(IGNITE_WAL_SERIALIZER_VERSION, ver);

    IgniteEx ig = (IgniteEx)startGrid();

    try {
        IgniteWriteAheadLogManager wal = ig.context().cache().context().wal();

        RecordSerializer ser = U.field(wal, "serializer");

        // isInstance keeps the original 'instanceof' semantics for a runtime-supplied class.
        assertTrue(expCls.isInstance(ser));
    } finally {
        // Stop even if the assertion fails, so the grid doesn't leak into subsequent tests.
        stopGrid();
    }
}
Aggregations