use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class GridCacheDatabaseSharedManager method applyLogicalUpdates.
/**
* @param status Last registered checkpoint status.
* @param cacheGroupsPredicate Filter of cache groups whose updates should be applied.
* @param recordTypePredicate Filter of WAL record types to replay.
* @param restoreMeta Metastore restore phase if {@code true}.
* @return Logical restore state.
* @throws IgniteCheckedException If failed to apply updates.
* @throws StorageException If IO exception occurred while reading write-ahead log.
*/
private RestoreLogicalState applyLogicalUpdates(CheckpointStatus status, IgnitePredicate<Integer> cacheGroupsPredicate, IgniteBiPredicate<WALRecord.RecordType, WALPointer> recordTypePredicate, boolean restoreMeta) throws IgniteCheckedException {
if (log.isInfoEnabled())
log.info("Applying lost " + (restoreMeta ? "metastore" : "cache") + " updates since last checkpoint record [lastMarked=" + status.startPtr + ", lastCheckpointId=" + status.cpStartId + ']');
if (!restoreMeta)
cctx.kernalContext().query().skipFieldLookup(true);
long start = U.currentTimeMillis();
AtomicReference<Throwable> applyError = new AtomicReference<>();
AtomicLong applied = new AtomicLong();
long lastArchivedSegment = cctx.wal().lastArchivedSegment();
StripedExecutor exec = cctx.kernalContext().pools().getStripedExecutorService();
Semaphore semaphore = new Semaphore(semaphorePertmits(exec));
Map<GroupPartitionId, Integer> partitionRecoveryStates = new HashMap<>();
WALIterator it = cctx.wal().replay(status.startPtr, recordTypePredicate);
RestoreLogicalState restoreLogicalState = new RestoreLogicalState(status, it, lastArchivedSegment, cacheGroupsPredicate, partitionRecoveryStates);
final IgniteTxManager txManager = cctx.tm();
try {
while (restoreLogicalState.hasNext()) {
WALRecord rec = restoreLogicalState.next();
if (rec == null)
break;
switch(rec.type()) {
case TX_RECORD:
if (restoreMeta) {
// Also restore tx states.
TxRecord txRec = (TxRecord) rec;
txManager.collectTxStates(txRec);
}
break;
// Calculate initial partition states.
case CHECKPOINT_RECORD:
CheckpointRecord cpRec = (CheckpointRecord) rec;
for (Map.Entry<Integer, CacheState> entry : cpRec.cacheGroupStates().entrySet()) {
CacheState cacheState = entry.getValue();
for (int i = 0; i < cacheState.size(); i++) {
int partId = cacheState.partitionByIndex(i);
byte state = cacheState.stateByIndex(i);
// Ignore undefined state.
if (state != -1) {
partitionRecoveryStates.put(new GroupPartitionId(entry.getKey(), partId), (int) state);
}
}
}
break;
case ROLLBACK_TX_RECORD:
RollbackRecord rbRec = (RollbackRecord) rec;
CacheGroupContext ctx = cctx.cache().cacheGroup(rbRec.groupId());
if (ctx != null && !ctx.isLocal()) {
GridDhtLocalPartition part = ctx.topology().forceCreatePartition(rbRec.partitionId());
ctx.offheap().dataStore(part).updateInitialCounter(rbRec.start(), rbRec.range());
}
break;
case MVCC_DATA_RECORD:
case DATA_RECORD:
case DATA_RECORD_V2:
case ENCRYPTED_DATA_RECORD:
case ENCRYPTED_DATA_RECORD_V2:
case ENCRYPTED_DATA_RECORD_V3:
DataRecord dataRec = (DataRecord) rec;
int entryCnt = dataRec.entryCount();
for (int i = 0; i < entryCnt; i++) {
DataEntry dataEntry = dataRec.get(i);
if (!restoreMeta && txManager.uncommitedTx(dataEntry))
continue;
int cacheId = dataEntry.cacheId();
DynamicCacheDescriptor cacheDesc = cctx.cache().cacheDescriptor(cacheId);
// Can be empty if the baseline topology changed while the node was recovering.
if (cacheDesc == null)
continue;
stripedApply(() -> {
GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
if (skipRemovedIndexUpdates(cacheCtx.groupId(), PageIdAllocator.INDEX_PARTITION))
cctx.kernalContext().query().markAsRebuildNeeded(cacheCtx, true);
try {
applyUpdate(cacheCtx, dataEntry);
} catch (IgniteCheckedException e) {
U.error(log, "Failed to apply data entry, dataEntry=" + dataEntry + ", ptr=" + dataRec.position());
applyError.compareAndSet(null, e);
}
applied.incrementAndGet();
}, cacheDesc.groupId(), dataEntry.partitionId(), exec, semaphore);
}
break;
case MVCC_TX_RECORD:
MvccTxRecord txRecord = (MvccTxRecord) rec;
byte txState = convertToTxState(txRecord.state());
cctx.coordinators().updateState(txRecord.mvccVersion(), txState, true);
break;
case PART_META_UPDATE_STATE:
PartitionMetaStateRecord metaStateRecord = (PartitionMetaStateRecord) rec;
GroupPartitionId groupPartitionId = new GroupPartitionId(metaStateRecord.groupId(), metaStateRecord.partitionId());
restoreLogicalState.partitionRecoveryStates.put(groupPartitionId, (int) metaStateRecord.state());
break;
case METASTORE_DATA_RECORD:
MetastoreDataRecord metastoreDataRecord = (MetastoreDataRecord) rec;
metaStorage.applyUpdate(metastoreDataRecord.key(), metastoreDataRecord.value());
break;
case META_PAGE_UPDATE_NEXT_SNAPSHOT_ID:
case META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID:
case META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID:
case META_PAGE_UPDATE_LAST_ALLOCATED_INDEX:
PageDeltaRecord pageDelta = (PageDeltaRecord) rec;
stripedApplyPage((pageMem) -> {
try {
applyPageDelta(pageMem, pageDelta, false);
} catch (IgniteCheckedException e) {
U.error(log, "Failed to apply page delta, " + pageDelta);
applyError.compareAndSet(null, e);
}
}, pageDelta.groupId(), partId(pageDelta.pageId()), exec, semaphore);
break;
case MASTER_KEY_CHANGE_RECORD_V2:
cctx.kernalContext().encryption().applyKeys((MasterKeyChangeRecordV2) rec);
break;
case REENCRYPTION_START_RECORD:
cctx.kernalContext().encryption().applyReencryptionStartRecord((ReencryptionStartRecord) rec);
break;
case INDEX_ROOT_PAGE_RENAME_RECORD:
IndexRenameRootPageRecord record = (IndexRenameRootPageRecord) rec;
int cacheId = record.cacheId();
GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
if (cacheCtx != null) {
IgniteCacheOffheapManager offheap = cacheCtx.offheap();
for (int i = 0; i < record.segments(); i++)
    offheap.renameRootPageForIndex(cacheId, record.oldTreeName(), record.newTreeName(), i);
}
break;
case PARTITION_CLEARING_START_RECORD:
PartitionClearingStartRecord rec0 = (PartitionClearingStartRecord) rec;
CacheGroupContext grp = this.ctx.cache().cacheGroup(rec0.groupId());
if (grp != null) {
GridDhtLocalPartition part;
try {
part = grp.topology().forceCreatePartition(rec0.partitionId());
} catch (IgniteCheckedException e) {
throw new IgniteException("Cannot get or create a partition [groupId=" + rec0.groupId() + ", partitionId=" + rec0.partitionId() + "]", e);
}
stripedApply(() -> {
try {
part.updateClearVersion(rec0.clearVersion());
IgniteInternalFuture<?> clearFut = grp.shared().evict().evictPartitionAsync(grp, part, new GridFutureAdapter<>());
clearFut.get();
part.updateClearVersion();
} catch (IgniteCheckedException e) {
U.error(log, "Failed to apply partition clearing record, " + rec0);
applyError.compareAndSet(null, e);
}
}, rec0.groupId(), rec0.partitionId(), exec, semaphore);
}
break;
default:
}
}
} finally {
it.close();
if (!restoreMeta)
cctx.kernalContext().query().skipFieldLookup(false);
}
awaitApplyComplete(exec, applyError);
if (log.isInfoEnabled())
log.info("Finished applying WAL changes [updatesApplied=" + applied + ", time=" + (U.currentTimeMillis() - start) + " ms]");
for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext()))
    lsnr.afterLogicalUpdatesApplied(this, restoreLogicalState);
return restoreLogicalState;
}
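The method filters replay twice: cacheGroupsPredicate narrows which groups are restored, while recordTypePredicate is pushed down into cctx.wal().replay(status.startPtr, recordTypePredicate) so non-matching records are skipped before deserialization. Below is a minimal sketch of such a type filter; the import paths are assumptions for the Ignite version this snippet appears to target, and the accepted set simply mirrors the switch above.
import java.util.EnumSet;

import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;
import org.apache.ignite.lang.IgniteBiPredicate;

import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.*;

/** Sketch: accept only the logical record types the recovery switch above handles. */
class LogicalRecordTypeFilter implements IgniteBiPredicate<WALRecord.RecordType, WALPointer> {
    /** Subset of the types handled above; extend with the MVCC/encryption types as needed. */
    private static final EnumSet<WALRecord.RecordType> ACCEPTED = EnumSet.of(
        TX_RECORD, CHECKPOINT_RECORD, ROLLBACK_TX_RECORD,
        DATA_RECORD, DATA_RECORD_V2, METASTORE_DATA_RECORD, PART_META_UPDATE_STATE);

    /** @return {@code true} if the record at {@code ptr} should be deserialized and replayed. */
    @Override public boolean apply(WALRecord.RecordType type, WALPointer ptr) {
        return ACCEPTED.contains(type);
    }
}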
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class CdcCacheVersionTest method testConflictVersionWritten.
/**
* Test that conflict version is written to WAL.
*/
@Test
public void testConflictVersionWritten() throws Exception {
walProvider = (ctx) -> new FileWriteAheadLogManager(ctx) {
@Override
public WALPointer log(WALRecord rec) throws IgniteCheckedException {
if (rec.type() != DATA_RECORD_V2)
return super.log(rec);
DataRecord dataRec = (DataRecord) rec;
for (int i = 0; i < dataRec.entryCount(); i++) {
DataEntry dataEntry = dataRec.writeEntries().get(i);
assertEquals(CU.cacheId(DEFAULT_CACHE_NAME), dataEntry.cacheId());
assertEquals(DFLT_CLUSTER_ID, dataEntry.writeVersion().dataCenterId());
assertNotNull(dataEntry.writeVersion().conflictVersion());
assertEquals(OTHER_CLUSTER_ID, dataEntry.writeVersion().conflictVersion().dataCenterId());
walRecCheckedCntr.incrementAndGet();
}
return super.log(rec);
}
};
conflictResolutionMgrSupplier = () -> new CacheVersionConflictResolver() {
@Override
public <K1, V1> GridCacheVersionConflictContext<K1, V1> resolve(CacheObjectValueContext ctx, GridCacheVersionedEntryEx<K1, V1> oldEntry, GridCacheVersionedEntryEx<K1, V1> newEntry, boolean atomicVerComparator) {
GridCacheVersionConflictContext<K1, V1> res = new GridCacheVersionConflictContext<>(ctx, oldEntry, newEntry);
res.useNew();
assertEquals(OTHER_CLUSTER_ID, newEntry.version().dataCenterId());
if (!oldEntry.isStartVersion())
assertEquals(OTHER_CLUSTER_ID, oldEntry.version().dataCenterId());
conflictCheckedCntr.incrementAndGet();
return res;
}
@Override
public String toString() {
return "TestCacheConflictResolutionManager";
}
};
startGrids(gridCnt);
IgniteEx cli = startClientGrid(gridCnt);
for (int i = 0; i < gridCnt; i++) {
grid(i).context().cache().context().versions().dataCenterId(DFLT_CLUSTER_ID);
assertEquals(DFLT_CLUSTER_ID, grid(i).context().metric().registry(CACHE_METRICS).<IntMetric>findMetric(DATA_VER_CLUSTER_ID).value());
}
cli.cluster().state(ACTIVE);
IgniteCache<Integer, User> cache = cli.getOrCreateCache(new CacheConfiguration<Integer, User>(DEFAULT_CACHE_NAME).setCacheMode(cacheMode).setAtomicityMode(atomicityMode).setBackups(Integer.MAX_VALUE));
if (atomicityMode == ATOMIC)
putRemoveCheck(cli, cache, null, null);
else {
// Check operations for transaction cache without explicit transaction.
putRemoveCheck(cli, cache, null, null);
// Check operations for transaction cache with explicit transaction in all modes.
for (TransactionConcurrency concurrency : TransactionConcurrency.values())
    for (TransactionIsolation isolation : TransactionIsolation.values())
        putRemoveCheck(cli, cache, concurrency, isolation);
}
for (int i = 0; i < gridCnt; i++) {
boolean dfltCacheFound = false;
assertFalse(grid(i).context().clientNode());
SystemView<CacheView> caches = grid(i).context().systemView().view(CACHES_VIEW);
for (CacheView v : caches) {
if (v.cacheName().equals(DEFAULT_CACHE_NAME)) {
assertEquals("TestCacheConflictResolutionManager", v.conflictResolver());
dfltCacheFound = true;
} else
assertNull(v.conflictResolver());
}
assertTrue(dfltCacheFound);
}
}
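The log(...) override above validates entries as they are written. The same walk applies to any DataRecord; below is a hedged stand-alone helper, where the expected id is a parameter standing in for the test's DFLT_CLUSTER_ID.
import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
import org.apache.ignite.internal.pagemem.wal.record.DataRecord;

/** Sketch: assert every entry of a DataRecord carries the expected data center id. */
static void assertDataCenterId(DataRecord dataRec, byte expDcId) {
    for (int i = 0; i < dataRec.entryCount(); i++) {
        DataEntry entry = dataRec.get(i);

        // writeVersion() is the GridCacheVersion persisted with the entry;
        // its data center id is what CDC uses to tell clusters apart.
        assert entry.writeVersion().dataCenterId() == expDcId :
            "Unexpected data center id for entry " + i + ": " + entry.writeVersion().dataCenterId();
    }
}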
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class RecordDataV1Serializer method readPlainDataEntry.
/**
* @param in Input to read from.
* @param type Record type ({@code DATA_RECORD} or {@code DATA_RECORD_V2}).
* @return Read entry.
* @throws IOException If failed to read from the input.
* @throws IgniteCheckedException If failed to deserialize the entry.
*/
DataEntry readPlainDataEntry(ByteBufferBackedDataInput in, RecordType type) throws IOException, IgniteCheckedException {
int cacheId = in.readInt();
int keySize = in.readInt();
byte keyType = in.readByte();
byte[] keyBytes = new byte[keySize];
in.readFully(keyBytes);
int valSize = in.readInt();
byte valType = 0;
byte[] valBytes = null;
if (valSize >= 0) {
valType = in.readByte();
valBytes = new byte[valSize];
in.readFully(valBytes);
}
byte ord = in.readByte();
GridCacheOperation op = GridCacheOperation.fromOrdinal(ord & 0xFF);
GridCacheVersion nearXidVer = readVersion(in, true);
GridCacheVersion writeVer = readVersion(in, false);
int partId = in.readInt();
long partCntr = in.readLong();
long expireTime = in.readLong();
byte flags = type == DATA_RECORD_V2 ? in.readByte() : (byte) 0;
GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
if (cacheCtx != null) {
CacheObjectContext coCtx = cacheCtx.cacheObjectContext();
KeyCacheObject key = co.toKeyCacheObject(coCtx, keyType, keyBytes);
if (key.partition() == -1)
key.partition(partId);
CacheObject val = valBytes != null ? co.toCacheObject(coCtx, valType, valBytes) : null;
return new DataEntry(cacheId, key, val, op, nearXidVer, writeVer, expireTime, partId, partCntr, flags);
} else
return new LazyDataEntry(cctx, cacheId, keyType, keyBytes, valType, valBytes, op, nearXidVer, writeVer, expireTime, partId, partCntr, flags);
}
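For reference, the wire layout the method consumes, reconstructed field by field from the reads above (the two version fields use readVersion's own variable-length encoding and are not expanded here):
/*
 * Plain DataEntry layout, in read order:
 *
 *   int    cacheId
 *   int    keySize
 *   byte   keyType
 *   byte[] keyBytes       // keySize bytes
 *   int    valSize        // negative => no value (e.g. a remove)
 *   byte   valType        // present only if valSize >= 0
 *   byte[] valBytes       // valSize bytes, only if valSize >= 0
 *   byte   op             // GridCacheOperation ordinal
 *   ...    nearXidVer     // readVersion(in, true), variable length
 *   ...    writeVer       // readVersion(in, false), variable length
 *   int    partId
 *   long   partCntr
 *   long   expireTime
 *   byte   flags          // DATA_RECORD_V2 only
 */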
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class RecordDataV1Serializer method readPlainRecord.
/**
* Reads a {@code WALRecord} of the given {@code type} from the input.
* The input should be plain (not encrypted).
*
* @param type Record type.
* @param in Input to read from.
* @param encrypted Whether the record was encrypted.
* @param recordSize Record size.
* @return Deserialized record.
* @throws IOException If failed.
* @throws IgniteCheckedException If failed.
*/
WALRecord readPlainRecord(RecordType type, ByteBufferBackedDataInput in, boolean encrypted, int recordSize) throws IOException, IgniteCheckedException {
WALRecord res;
switch(type) {
case PAGE_RECORD:
byte[] arr = new byte[pageSize];
int cacheId = in.readInt();
long pageId = in.readLong();
in.readFully(arr);
res = new PageSnapshot(new FullPageId(pageId, cacheId), arr, encrypted ? realPageSize : pageSize);
break;
case CHECKPOINT_RECORD:
long msb = in.readLong();
long lsb = in.readLong();
boolean hasPtr = in.readByte() != 0;
long idx = hasPtr ? in.readLong() : 0;
int off = hasPtr ? in.readInt() : 0;
int len = hasPtr ? in.readInt() : 0;
Map<Integer, CacheState> states = readPartitionStates(in);
boolean end = in.readByte() != 0;
WALPointer walPtr = hasPtr ? new WALPointer(idx, off, len) : null;
CheckpointRecord cpRec = new CheckpointRecord(new UUID(msb, lsb), walPtr, end);
cpRec.cacheGroupStates(states);
res = cpRec;
break;
case META_PAGE_INIT:
cacheId = in.readInt();
pageId = in.readLong();
int ioType = in.readUnsignedShort();
int ioVer = in.readUnsignedShort();
long treeRoot = in.readLong();
long reuseListRoot = in.readLong();
res = new MetaPageInitRecord(cacheId, pageId, ioType, ioVer, treeRoot, reuseListRoot, log);
break;
case INDEX_META_PAGE_DELTA_RECORD:
res = new MetaPageUpdateIndexDataRecord(in);
break;
case PARTITION_META_PAGE_UPDATE_COUNTERS:
res = new MetaPageUpdatePartitionDataRecord(in);
break;
case PARTITION_META_PAGE_UPDATE_COUNTERS_V2:
res = new MetaPageUpdatePartitionDataRecordV2(in);
break;
case PARTITION_META_PAGE_DELTA_RECORD_V3:
res = new MetaPageUpdatePartitionDataRecordV3(in);
break;
case MEMORY_RECOVERY:
long ts = in.readLong();
res = new MemoryRecoveryRecord(ts);
break;
case PARTITION_DESTROY:
cacheId = in.readInt();
int partId = in.readInt();
res = new PartitionDestroyRecord(cacheId, partId);
break;
case DATA_RECORD:
case DATA_RECORD_V2:
int entryCnt = in.readInt();
if (entryCnt == 1)
res = new DataRecord(readPlainDataEntry(in, type), 0L);
else {
List<DataEntry> entries = new ArrayList<>(entryCnt);
for (int i = 0; i < entryCnt; i++)
    entries.add(readPlainDataEntry(in, type));
res = new DataRecord(entries, 0L);
}
break;
case ENCRYPTED_DATA_RECORD:
case ENCRYPTED_DATA_RECORD_V2:
case ENCRYPTED_DATA_RECORD_V3:
entryCnt = in.readInt();
if (entryCnt == 1)
res = new DataRecord(readEncryptedDataEntry(in, type), 0L);
else {
List<DataEntry> entries = new ArrayList<>(entryCnt);
for (int i = 0; i < entryCnt; i++)
    entries.add(readEncryptedDataEntry(in, type));
res = new DataRecord(entries, 0L);
}
break;
case METASTORE_DATA_RECORD:
int strLen = in.readInt();
byte[] strBytes = new byte[strLen];
in.readFully(strBytes);
String key = new String(strBytes);
int valLen = in.readInt();
assert valLen >= 0;
byte[] val;
if (valLen > 0) {
val = new byte[valLen];
in.readFully(val);
} else
val = null;
return new MetastoreDataRecord(key, val);
case HEADER_RECORD:
long magic = in.readLong();
if (magic != HeaderRecord.REGULAR_MAGIC && magic != HeaderRecord.COMPACTED_MAGIC)
throw new EOFException("Magic is corrupted [actual=" + U.hexLong(magic) + ']');
int ver = in.readInt();
res = new HeaderRecord(ver);
break;
case DATA_PAGE_INSERT_RECORD:
{
cacheId = in.readInt();
pageId = in.readLong();
int size = in.readUnsignedShort();
in.ensure(size);
byte[] payload = new byte[size];
in.readFully(payload);
res = new DataPageInsertRecord(cacheId, pageId, payload);
break;
}
case DATA_PAGE_UPDATE_RECORD:
{
cacheId = in.readInt();
pageId = in.readLong();
int itemId = in.readInt();
int size = in.readUnsignedShort();
in.ensure(size);
byte[] payload = new byte[size];
in.readFully(payload);
res = new DataPageUpdateRecord(cacheId, pageId, itemId, payload);
break;
}
case DATA_PAGE_INSERT_FRAGMENT_RECORD:
{
cacheId = in.readInt();
pageId = in.readLong();
final long lastLink = in.readLong();
final int payloadSize = in.readInt();
final byte[] payload = new byte[payloadSize];
in.readFully(payload);
res = new DataPageInsertFragmentRecord(cacheId, pageId, payload, lastLink);
break;
}
case DATA_PAGE_REMOVE_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
int itemId = in.readUnsignedByte();
res = new DataPageRemoveRecord(cacheId, pageId, itemId);
break;
case DATA_PAGE_SET_FREE_LIST_PAGE:
cacheId = in.readInt();
pageId = in.readLong();
long freeListPage = in.readLong();
res = new DataPageSetFreeListPageRecord(cacheId, pageId, freeListPage);
break;
case MVCC_DATA_PAGE_MARK_UPDATED_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
itemId = in.readInt();
long newMvccCrd = in.readLong();
long newMvccCntr = in.readLong();
int newMvccOpCntr = in.readInt();
res = new DataPageMvccMarkUpdatedRecord(cacheId, pageId, itemId, newMvccCrd, newMvccCntr, newMvccOpCntr);
break;
case MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
itemId = in.readInt();
byte txState = in.readByte();
res = new DataPageMvccUpdateTxStateHintRecord(cacheId, pageId, itemId, txState);
break;
case MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
itemId = in.readInt();
byte newTxState = in.readByte();
res = new DataPageMvccUpdateNewTxStateHintRecord(cacheId, pageId, itemId, newTxState);
break;
case INIT_NEW_PAGE_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
ioType = in.readUnsignedShort();
ioVer = in.readUnsignedShort();
long virtualPageId = in.readLong();
res = new InitNewPageRecord(cacheId, pageId, ioType, ioVer, virtualPageId, log);
break;
case BTREE_META_PAGE_INIT_ROOT:
cacheId = in.readInt();
pageId = in.readLong();
long rootId = in.readLong();
res = new MetaPageInitRootRecord(cacheId, pageId, rootId);
break;
case BTREE_META_PAGE_INIT_ROOT2:
cacheId = in.readInt();
pageId = in.readLong();
long rootId2 = in.readLong();
int inlineSize = in.readShort();
res = new MetaPageInitRootInlineRecord(cacheId, pageId, rootId2, inlineSize);
break;
case BTREE_META_PAGE_INIT_ROOT_V3:
cacheId = in.readInt();
pageId = in.readLong();
long rootId3 = in.readLong();
int inlineSize3 = in.readShort();
long flags = in.readLong();
byte[] revHash = new byte[IgniteProductVersion.REV_HASH_SIZE];
byte maj = in.readByte();
byte min = in.readByte();
byte maint = in.readByte();
long verTs = in.readLong();
in.readFully(revHash);
IgniteProductVersion createdVer = new IgniteProductVersion(maj, min, maint, verTs, revHash);
res = new MetaPageInitRootInlineFlagsCreatedVersionRecord(cacheId, pageId, rootId3, inlineSize3, flags, createdVer);
break;
case BTREE_META_PAGE_ADD_ROOT:
cacheId = in.readInt();
pageId = in.readLong();
rootId = in.readLong();
res = new MetaPageAddRootRecord(cacheId, pageId, rootId);
break;
case BTREE_META_PAGE_CUT_ROOT:
cacheId = in.readInt();
pageId = in.readLong();
res = new MetaPageCutRootRecord(cacheId, pageId);
break;
case BTREE_INIT_NEW_ROOT:
cacheId = in.readInt();
pageId = in.readLong();
rootId = in.readLong();
ioType = in.readUnsignedShort();
ioVer = in.readUnsignedShort();
long leftId = in.readLong();
long rightId = in.readLong();
BPlusIO<?> io = BPlusIO.getBPlusIO(ioType, ioVer);
byte[] rowBytes = new byte[io.getItemSize()];
in.readFully(rowBytes);
res = new NewRootInitRecord<>(cacheId, pageId, rootId, (BPlusInnerIO<?>) io, leftId, rowBytes, rightId);
break;
case BTREE_PAGE_RECYCLE:
cacheId = in.readInt();
pageId = in.readLong();
long newPageId = in.readLong();
res = new RecycleRecord(cacheId, pageId, newPageId);
break;
case BTREE_PAGE_INSERT:
cacheId = in.readInt();
pageId = in.readLong();
ioType = in.readUnsignedShort();
ioVer = in.readUnsignedShort();
int itemIdx = in.readUnsignedShort();
rightId = in.readLong();
io = BPlusIO.getBPlusIO(ioType, ioVer);
rowBytes = new byte[io.getItemSize()];
in.readFully(rowBytes);
res = new InsertRecord<>(cacheId, pageId, io, itemIdx, rowBytes, rightId);
break;
case BTREE_FIX_LEFTMOST_CHILD:
cacheId = in.readInt();
pageId = in.readLong();
rightId = in.readLong();
res = new FixLeftmostChildRecord(cacheId, pageId, rightId);
break;
case BTREE_FIX_COUNT:
cacheId = in.readInt();
pageId = in.readLong();
int cnt = in.readUnsignedShort();
res = new FixCountRecord(cacheId, pageId, cnt);
break;
case BTREE_PAGE_REPLACE:
cacheId = in.readInt();
pageId = in.readLong();
ioType = in.readUnsignedShort();
ioVer = in.readUnsignedShort();
itemIdx = in.readUnsignedShort();
io = BPlusIO.getBPlusIO(ioType, ioVer);
rowBytes = new byte[io.getItemSize()];
in.readFully(rowBytes);
res = new ReplaceRecord<>(cacheId, pageId, io, rowBytes, itemIdx);
break;
case BTREE_PAGE_REMOVE:
cacheId = in.readInt();
pageId = in.readLong();
itemIdx = in.readUnsignedShort();
cnt = in.readUnsignedShort();
res = new RemoveRecord(cacheId, pageId, itemIdx, cnt);
break;
case BTREE_PAGE_INNER_REPLACE:
cacheId = in.readInt();
pageId = in.readLong();
int dstIdx = in.readUnsignedShort();
long srcPageId = in.readLong();
int srcIdx = in.readUnsignedShort();
long rmvId = in.readLong();
res = new InnerReplaceRecord<>(cacheId, pageId, dstIdx, srcPageId, srcIdx, rmvId);
break;
case BTREE_FORWARD_PAGE_SPLIT:
cacheId = in.readInt();
pageId = in.readLong();
long fwdId = in.readLong();
ioType = in.readUnsignedShort();
ioVer = in.readUnsignedShort();
srcPageId = in.readLong();
int mid = in.readUnsignedShort();
cnt = in.readUnsignedShort();
res = new SplitForwardPageRecord(cacheId, pageId, fwdId, ioType, ioVer, srcPageId, mid, cnt);
break;
case BTREE_EXISTING_PAGE_SPLIT:
cacheId = in.readInt();
pageId = in.readLong();
mid = in.readUnsignedShort();
fwdId = in.readLong();
res = new SplitExistingPageRecord(cacheId, pageId, mid, fwdId);
break;
case BTREE_PAGE_MERGE:
cacheId = in.readInt();
pageId = in.readLong();
long prntId = in.readLong();
int prntIdx = in.readUnsignedShort();
rightId = in.readLong();
boolean emptyBranch = in.readBoolean();
res = new MergeRecord<>(cacheId, pageId, prntId, prntIdx, rightId, emptyBranch);
break;
case BTREE_FIX_REMOVE_ID:
cacheId = in.readInt();
pageId = in.readLong();
rmvId = in.readLong();
res = new FixRemoveId(cacheId, pageId, rmvId);
break;
case PAGES_LIST_SET_NEXT:
cacheId = in.readInt();
pageId = in.readLong();
long nextPageId = in.readLong();
res = new PagesListSetNextRecord(cacheId, pageId, nextPageId);
break;
case PAGES_LIST_SET_PREVIOUS:
cacheId = in.readInt();
pageId = in.readLong();
long prevPageId = in.readLong();
res = new PagesListSetPreviousRecord(cacheId, pageId, prevPageId);
break;
case PAGES_LIST_INIT_NEW_PAGE:
cacheId = in.readInt();
pageId = in.readLong();
ioType = in.readInt();
ioVer = in.readInt();
newPageId = in.readLong();
prevPageId = in.readLong();
long addDataPageId = in.readLong();
res = new PagesListInitNewPageRecord(cacheId, pageId, ioType, ioVer, newPageId, prevPageId, addDataPageId, log);
break;
case PAGES_LIST_ADD_PAGE:
cacheId = in.readInt();
pageId = in.readLong();
long dataPageId = in.readLong();
res = new PagesListAddPageRecord(cacheId, pageId, dataPageId);
break;
case PAGES_LIST_REMOVE_PAGE:
cacheId = in.readInt();
pageId = in.readLong();
long rmvdPageId = in.readLong();
res = new PagesListRemovePageRecord(cacheId, pageId, rmvdPageId);
break;
case TRACKING_PAGE_DELTA:
cacheId = in.readInt();
pageId = in.readLong();
long pageIdToMark = in.readLong();
long nextSnapshotId0 = in.readLong();
long lastSuccessfulSnapshotId0 = in.readLong();
res = new TrackingPageDeltaRecord(cacheId, pageId, pageIdToMark, nextSnapshotId0, lastSuccessfulSnapshotId0);
break;
case META_PAGE_UPDATE_NEXT_SNAPSHOT_ID:
cacheId = in.readInt();
pageId = in.readLong();
long nextSnapshotId = in.readLong();
res = new MetaPageUpdateNextSnapshotId(cacheId, pageId, nextSnapshotId);
break;
case META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID:
cacheId = in.readInt();
pageId = in.readLong();
long lastSuccessfulFullSnapshotId = in.readLong();
res = new MetaPageUpdateLastSuccessfulFullSnapshotId(cacheId, pageId, lastSuccessfulFullSnapshotId);
break;
case META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID:
cacheId = in.readInt();
pageId = in.readLong();
long lastSuccessfulSnapshotId = in.readLong();
long lastSuccessfulSnapshotTag = in.readLong();
res = new MetaPageUpdateLastSuccessfulSnapshotId(cacheId, pageId, lastSuccessfulSnapshotId, lastSuccessfulSnapshotTag);
break;
case META_PAGE_UPDATE_LAST_ALLOCATED_INDEX:
cacheId = in.readInt();
pageId = in.readLong();
int lastAllocatedIdx = in.readInt();
res = new MetaPageUpdateLastAllocatedIndex(cacheId, pageId, lastAllocatedIdx);
break;
case PART_META_UPDATE_STATE:
cacheId = in.readInt();
partId = in.readInt();
byte state = in.readByte();
long updateCntr = in.readLong();
GridDhtPartitionState partState = GridDhtPartitionState.fromOrdinal(state);
res = new PartitionMetaStateRecord(cacheId, partId, partState, updateCntr);
break;
case PAGE_LIST_META_RESET_COUNT_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
res = new PageListMetaResetCountRecord(cacheId, pageId);
break;
case ROTATED_ID_PART_RECORD:
cacheId = in.readInt();
pageId = in.readLong();
byte rotatedIdPart = in.readByte();
res = new RotatedIdPartRecord(cacheId, pageId, rotatedIdPart);
break;
case SWITCH_SEGMENT_RECORD:
throw new EOFException("END OF SEGMENT");
case TX_RECORD:
res = txRecordSerializer.readTx(in);
break;
case MASTER_KEY_CHANGE_RECORD:
case MASTER_KEY_CHANGE_RECORD_V2:
int keyNameLen = in.readInt();
byte[] keyNameBytes = new byte[keyNameLen];
in.readFully(keyNameBytes);
String masterKeyName = new String(keyNameBytes);
int keysCnt = in.readInt();
List<T2<Integer, GroupKeyEncrypted>> grpKeys = new ArrayList<>(keysCnt);
boolean readKeyId = type == MASTER_KEY_CHANGE_RECORD_V2;
for (int i = 0; i < keysCnt; i++) {
int grpId = in.readInt();
int keyId = readKeyId ? in.readByte() & 0xff : 0;
int grpKeySize = in.readInt();
byte[] grpKey = new byte[grpKeySize];
in.readFully(grpKey);
grpKeys.add(new T2<>(grpId, new GroupKeyEncrypted(keyId, grpKey)));
}
res = new MasterKeyChangeRecordV2(masterKeyName, grpKeys);
break;
case REENCRYPTION_START_RECORD:
int grpsCnt = in.readInt();
Map<Integer, Byte> map = U.newHashMap(grpsCnt);
for (int i = 0; i < grpsCnt; i++) {
int grpId = in.readInt();
byte keyId = in.readByte();
map.put(grpId, keyId);
}
res = new ReencryptionStartRecord(map);
break;
case INDEX_ROOT_PAGE_RENAME_RECORD:
res = new IndexRenameRootPageRecord(in);
break;
case PARTITION_CLEARING_START_RECORD:
int partId0 = in.readInt();
int grpId = in.readInt();
long clearVer = in.readLong();
res = new PartitionClearingStartRecord(partId0, grpId, clearVer);
break;
default:
throw new UnsupportedOperationException("Type: " + type);
}
return res;
}
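Nearly every page-delta branch in this switch begins with the same two reads. Below is a hypothetical helper naming that shared header, shown only to make the pattern explicit (it is not part of the Ignite serializer, and the ByteBufferBackedDataInput import path is assumed):
import java.io.IOException;

import org.apache.ignite.internal.processors.cache.persistence.wal.ByteBufferBackedDataInput;

/** Hypothetical helper: the (cacheId, pageId) header shared by the page-delta branches. */
final class PageRef {
    final int cacheId;
    final long pageId;

    PageRef(ByteBufferBackedDataInput in) throws IOException {
        cacheId = in.readInt();   // group/cache id, as in each branch above
        pageId = in.readLong();   // page id within that group
    }
}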
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class RecordDataV2Serializer method writePlainRecord.
/**
* {@inheritDoc}
*/
@Override
protected void writePlainRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedException {
if (rec instanceof HeaderRecord)
throw new UnsupportedOperationException("Writing header records is forbidden since version 2 of serializer");
switch(rec.type()) {
case CHECKPOINT_RECORD:
CheckpointRecord cpRec = (CheckpointRecord) rec;
WALPointer walPtr = cpRec.checkpointMark();
UUID cpId = cpRec.checkpointId();
buf.putLong(cpId.getMostSignificantBits());
buf.putLong(cpId.getLeastSignificantBits());
buf.put(walPtr == null ? (byte) 0 : 1);
if (walPtr != null) {
buf.putLong(walPtr.index());
buf.putInt(walPtr.fileOffset());
buf.putInt(walPtr.length());
}
putCacheStates(buf, cpRec.cacheGroupStates());
buf.put(cpRec.end() ? (byte) 1 : 0);
break;
case MVCC_DATA_RECORD:
case DATA_RECORD_V2:
DataRecord dataRec = (DataRecord) rec;
int entryCnt = dataRec.entryCount();
buf.putInt(entryCnt);
buf.putLong(dataRec.timestamp());
boolean encrypted = isDataRecordEncrypted(dataRec);
for (int i = 0; i < entryCnt; i++) {
DataEntry dataEntry = dataRec.get(i);
if (encrypted)
putEncryptedDataEntry(buf, dataEntry);
else
putPlainDataEntry(buf, dataEntry);
}
break;
case SNAPSHOT:
SnapshotRecord snpRec = (SnapshotRecord) rec;
buf.putLong(snpRec.getSnapshotId());
buf.put(snpRec.isFull() ? (byte) 1 : 0);
break;
case EXCHANGE:
ExchangeRecord r = (ExchangeRecord) rec;
buf.putInt(r.getType().ordinal());
buf.putShort(r.getConstId());
buf.putLong(r.timestamp());
break;
case TX_RECORD:
txRecordSerializer.write((TxRecord) rec, buf);
break;
case MVCC_TX_RECORD:
txRecordSerializer.write((MvccTxRecord) rec, buf);
break;
case ROLLBACK_TX_RECORD:
RollbackRecord rb = (RollbackRecord) rec;
buf.putInt(rb.groupId());
buf.putInt(rb.partitionId());
buf.putLong(rb.start());
buf.putLong(rb.range());
break;
case TRACKING_PAGE_REPAIR_DELTA:
TrackingPageRepairDeltaRecord tprDelta = (TrackingPageRepairDeltaRecord) rec;
buf.putInt(tprDelta.groupId());
buf.putLong(tprDelta.pageId());
break;
default:
super.writePlainRecord(rec, buf);
}
}
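For context, the producer side of the DATA_RECORD_V2 branch: a hedged sketch of logging a single-entry DataRecord, which is the call the CdcCacheVersionTest override earlier on this page intercepts. The wal and entry variables are assumed plumbing; the (entry, timestamp) constructor shape matches the reads in readPlainRecord above.
// Assumed: `wal` is the node's IgniteWriteAheadLogManager and `entry` is a
// fully populated DataEntry (see readPlainDataEntry above for its fields).
DataRecord rec = new DataRecord(entry, System.currentTimeMillis());

// log(...) returns the record's resulting WAL position.
WALPointer ptr = wal.log(rec);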