use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class IgniteWalReaderTest method iterateAndCount.
/**
 * Iterates over records and closes the iterator.
 *
 * @param walIter Iterator to count records from; will be closed.
 * @return Count of records.
 * @throws IgniteCheckedException If iteration fails.
 */
private int iterateAndCount(WALIterator walIter) throws IgniteCheckedException {
    int cnt = 0;

    try (WALIterator it = walIter) {
        while (it.hasNextX()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();

            WALRecord walRecord = tup.get2();

            if (walRecord.type() == DATA_RECORD_V2 || walRecord.type() == MVCC_DATA_RECORD) {
                DataRecord record = (DataRecord)walRecord;

                for (int i = 0; i < record.entryCount(); i++) {
                    DataEntry entry = record.get(i);

                    KeyCacheObject key = entry.key();
                    CacheObject val = entry.value();

                    if (walRecord.type() == DATA_RECORD_V2) {
                        assertEquals(primary, (entry.flags() & DataEntry.PRIMARY_FLAG) != 0);
                        assertEquals(rebalance, (entry.flags() & DataEntry.PRELOAD_FLAG) != 0);
                    }

                    if (DUMP_RECORDS)
                        log.info("Op: " + entry.op() + ", Key: " + key + ", Value: " + val);
                }
            }

            if (DUMP_RECORDS)
                log.info("Record: " + walRecord);

            cnt++;
        }
    }

    return cnt;
}
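For context, a minimal sketch of how such an iterator is typically obtained and fed to this helper, assuming a standalone WAL reader over an existing WAL directory; the factory wiring and the walDirPath variable are illustrative, not taken from the test above.

// Hypothetical wiring: build a standalone iterator over WAL segment files and count its records.
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);

IgniteWalIteratorFactory.IteratorParametersBuilder params =
    new IgniteWalIteratorFactory.IteratorParametersBuilder().filesOrDirs(walDirPath); // walDirPath is assumed

int recordsCnt = iterateAndCount(factory.iterator(params));

assertTrue("Expected at least one WAL record", recordsCnt > 0);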
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class IgniteWalReaderTest method iterateAndCountDataRecord.
/**
 * Iterates over data records, checks each DataRecord and its entries, and finds all transactions in the WAL.
 *
 * @param walIter Iterator to use; will be closed.
 * @param cacheObjHnd Handler called for each unwrapped (key, value) pair; may be {@code null}.
 * @param dataRecordHnd Handler called for each {@link DataRecord}; may be {@code null}.
 * @return Count of data records observed for each global TX ID. Contains a {@code null} key for non-transactional updates.
 * @throws IgniteCheckedException If iteration fails.
 */
private Map<GridCacheVersion, Integer> iterateAndCountDataRecord(
    WALIterator walIter,
    @Nullable IgniteBiInClosure<Object, Object> cacheObjHnd,
    @Nullable IgniteInClosure<DataRecord> dataRecordHnd
) throws IgniteCheckedException {
    Map<GridCacheVersion, Integer> entriesUnderTxFound = new HashMap<>();

    try (WALIterator stIt = walIter) {
        while (stIt.hasNextX()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = stIt.nextX();

            WALRecord walRecord = tup.get2();

            WALRecord.RecordType type = walRecord.type();

            //noinspection EnumSwitchStatementWhichMissesCases
            switch (type) {
                case DATA_RECORD_V2:
                    // Fallthrough.
                case MVCC_DATA_RECORD: {
                    assert walRecord instanceof DataRecord;

                    DataRecord dataRecord = (DataRecord)walRecord;

                    if (dataRecordHnd != null)
                        dataRecordHnd.apply(dataRecord);

                    for (int i = 0; i < dataRecord.entryCount(); i++) {
                        DataEntry entry = dataRecord.get(i);

                        if (walRecord.type() == DATA_RECORD_V2) {
                            assertEquals(primary, (entry.flags() & DataEntry.PRIMARY_FLAG) != 0);
                            assertEquals(rebalance, (entry.flags() & DataEntry.PRELOAD_FLAG) != 0);
                        }

                        GridCacheVersion globalTxId = entry.nearXidVersion();

                        Object unwrappedKeyObj;
                        Object unwrappedValObj;

                        if (entry instanceof UnwrappedDataEntry) {
                            UnwrappedDataEntry unwrapDataEntry = (UnwrappedDataEntry)entry;

                            unwrappedKeyObj = unwrapDataEntry.unwrappedKey();
                            unwrappedValObj = unwrapDataEntry.unwrappedValue();
                        }
                        else if (entry instanceof MarshalledDataEntry) {
                            // Can't check the value of a marshalled entry.
                            unwrappedKeyObj = null;
                            unwrappedValObj = null;
                        }
                        else {
                            final CacheObject val = entry.value();
                            unwrappedValObj = val instanceof BinaryObject ? val : val.value(null, false);

                            final CacheObject key = entry.key();
                            unwrappedKeyObj = key instanceof BinaryObject ? key : key.value(null, false);
                        }

                        if (DUMP_RECORDS)
                            log.info("//Entry operation " + entry.op() + "; cache Id" + entry.cacheId() + "; " +
                                "under transaction: " + globalTxId +
                                // ; entry " + entry +
                                "; Key: " + unwrappedKeyObj +
                                "; Value: " + unwrappedValObj);

                        if (cacheObjHnd != null && (unwrappedKeyObj != null || unwrappedValObj != null))
                            cacheObjHnd.apply(unwrappedKeyObj, unwrappedValObj);

                        Integer entriesUnderTx = entriesUnderTxFound.get(globalTxId);
                        entriesUnderTxFound.put(globalTxId, entriesUnderTx == null ? 1 : entriesUnderTx + 1);
                    }
                }

                break;

                case TX_RECORD:
                    // Fallthrough.
                case MVCC_TX_RECORD: {
                    assert walRecord instanceof TxRecord;

                    TxRecord txRecord = (TxRecord)walRecord;

                    GridCacheVersion globalTxId = txRecord.nearXidVersion();

                    if (DUMP_RECORDS)
                        log.info("//Tx Record, state: " + txRecord.state() + "; nearTxVersion" + globalTxId);
                }
            }
        }
    }

    return entriesUnderTxFound;
}
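A brief, hedged illustration of how the returned map can be interpreted: the null key aggregates non-transactional updates, while each non-null GridCacheVersion key counts the data entries written under that transaction. The variable names below are illustrative.

Map<GridCacheVersion, Integer> txToEntries = iterateAndCountDataRecord(walIter, null, null);

// Updates performed outside any transaction are accumulated under the null key.
Integer nonTxUpdates = txToEntries.get(null);

for (Map.Entry<GridCacheVersion, Integer> e : txToEntries.entrySet()) {
    if (e.getKey() != null)
        log.info("Tx " + e.getKey() + " wrote " + e.getValue() + " data entries");
}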
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class TestStorageUtils method corruptDataEntry.
/**
 * Corrupts a data entry.
 *
 * @param ctx Cache context.
 * @param key Key.
 * @param breakCntr Whether to corrupt the partition update counter.
 * @param breakData Whether to corrupt the stored value.
 * @throws IgniteCheckedException If the offheap update fails.
 */
public static void corruptDataEntry(GridCacheContext<?, ?> ctx, Object key, boolean breakCntr, boolean breakData) throws IgniteCheckedException {
    assert !ctx.isLocal();

    int partId = ctx.affinity().partition(key);
    GridDhtLocalPartition locPart = ctx.topology().localPartition(partId);
    CacheEntry<Object, Object> e = ctx.cache().keepBinary().getEntry(key);

    KeyCacheObject keyCacheObj = e.getKey() instanceof BinaryObject
        ? (KeyCacheObject)e.getKey()
        : new KeyCacheObjectImpl(e.getKey(), null, partId);

    DataEntry dataEntry = new DataEntry(
        ctx.cacheId(),
        keyCacheObj,
        new CacheObjectImpl(breakData ? e.getValue().toString() + "brokenValPostfix" : e.getValue(), null),
        GridCacheOperation.UPDATE,
        new GridCacheVersion(),
        new GridCacheVersion(),
        0L,
        partId,
        breakCntr ? locPart.updateCounter() + 1 : locPart.updateCounter(),
        DataEntry.EMPTY_FLAGS);

    IgniteCacheDatabaseSharedManager db = ctx.shared().database();

    db.checkpointReadLock();

    try {
        assert dataEntry.op() == GridCacheOperation.UPDATE;

        ctx.offheap().update(ctx, dataEntry.key(), dataEntry.value(), dataEntry.writeVersion(), dataEntry.expireTime(), locPart, null);
        ctx.offheap().dataStore(locPart).updateInitialCounter(dataEntry.partitionCounter() - 1, 1);
    }
    finally {
        db.checkpointReadUnlock();
    }
}
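A hedged usage sketch: tests typically call this helper on one node's cache context to plant an inconsistent entry, for example before running a consistency check such as idle_verify. The grid accessor, cache name, and key below are assumptions.

// Hypothetical call site inside a test extending GridCommonAbstractTest.
IgniteEx ignite = grid(0);

GridCacheContext<Object, Object> cctx = ignite.cachex(DEFAULT_CACHE_NAME).context();

// Corrupt only the stored value, keeping the partition update counter intact.
corruptDataEntry(cctx, someKey, false, true);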
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class IgniteWalConverter method toString.
/**
 * Converts a {@link WALRecord} to a string, applying the given strategy for processing sensitive data.
 *
 * @param walRecord Instance of {@link WALRecord}.
 * @param sensitiveData Strategy for processing sensitive data.
 * @return String representation of {@link WALRecord}.
 */
private static String toString(WALRecord walRecord, ProcessSensitiveData sensitiveData) {
    if (walRecord instanceof DataRecord) {
        final DataRecord dataRecord = (DataRecord)walRecord;

        int entryCnt = dataRecord.entryCount();

        final List<DataEntry> entryWrappers = new ArrayList<>(entryCnt);

        for (int i = 0; i < entryCnt; i++)
            entryWrappers.add(new DataEntryWrapper(dataRecord.get(i), sensitiveData));

        dataRecord.setWriteEntries(entryWrappers);
    }
    else if (walRecord instanceof MetastoreDataRecord)
        walRecord = new MetastoreDataRecordWrapper((MetastoreDataRecord)walRecord, sensitiveData);

    return walRecord.toString();
}
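For illustration, a hedged sketch of how this helper might be invoked while scanning records with a WAL iterator; the ProcessSensitiveData.HIDE strategy value and the iterator variable are assumptions for the example.

// Hypothetical call site (iterator 'it' and the HIDE strategy are assumed):
while (it.hasNextX()) {
    WALRecord rec = it.nextX().get2();

    System.out.println(toString(rec, ProcessSensitiveData.HIDE));
}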
use of org.apache.ignite.internal.pagemem.wal.record.DataEntry in project ignite by apache.
the class RecordDataV2Serializer method writeRecord.
/**
* {@inheritDoc}
*/
@Override public void writeRecord(WALRecord rec, ByteBuffer buf) throws IgniteCheckedException {
    if (rec instanceof HeaderRecord)
        throw new UnsupportedOperationException("Writing header records is forbidden since version 2 of serializer");

    switch (rec.type()) {
        case CHECKPOINT_RECORD:
            CheckpointRecord cpRec = (CheckpointRecord)rec;

            assert cpRec.checkpointMark() == null || cpRec.checkpointMark() instanceof FileWALPointer :
                "Invalid WAL record: " + cpRec;

            FileWALPointer walPtr = (FileWALPointer)cpRec.checkpointMark();
            UUID cpId = cpRec.checkpointId();

            buf.putLong(cpId.getMostSignificantBits());
            buf.putLong(cpId.getLeastSignificantBits());

            buf.put(walPtr == null ? (byte)0 : 1);

            if (walPtr != null) {
                buf.putLong(walPtr.index());
                buf.putInt(walPtr.fileOffset());
                buf.putInt(walPtr.length());
            }

            putCacheStates(buf, cpRec.cacheGroupStates());

            buf.put(cpRec.end() ? (byte)1 : 0);

            break;

        case DATA_RECORD:
            DataRecord dataRec = (DataRecord)rec;

            buf.putInt(dataRec.writeEntries().size());
            buf.putLong(dataRec.timestamp());

            for (DataEntry dataEntry : dataRec.writeEntries())
                RecordDataV1Serializer.putDataEntry(buf, dataEntry);

            break;

        case SNAPSHOT:
            SnapshotRecord snpRec = (SnapshotRecord)rec;

            buf.putLong(snpRec.getSnapshotId());
            buf.put(snpRec.isFull() ? (byte)1 : 0);

            break;

        case EXCHANGE:
            ExchangeRecord r = (ExchangeRecord)rec;

            buf.putInt(r.getType().ordinal());
            buf.putShort(r.getConstId());
            buf.putLong(r.timestamp());

            break;

        case TX_RECORD:
            txRecordSerializer.write((TxRecord)rec, buf);

            break;

        case BASELINE_TOP_RECORD:
            bltRecSerializer.write((BaselineTopologyRecord)rec, buf);

            break;

        default:
            delegateSerializer.writeRecord(rec, buf);
    }
}
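As a reading aid, a hedged sketch of the symmetric deserialization for the DATA_RECORD branch above: the entry count and timestamp are read back in the same order, followed by the entries. The readDataEntry helper is a hypothetical counterpart of RecordDataV1Serializer.putDataEntry, named here for illustration only.

int entryCnt = buf.getInt();
long timestamp = buf.getLong();

List<DataEntry> entries = new ArrayList<>(entryCnt);

for (int i = 0; i < entryCnt; i++)
    entries.add(readDataEntry(buf)); // hypothetical inverse of putDataEntry

// DataRecord(List<DataEntry>, long) constructor assumed to match this serializer version.
return new DataRecord(entries, timestamp);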