use of org.apache.hadoop.hdds.utils.TransactionInfo in project ozone by apache.
the class SCMStateMachine method takeSnapshot.
@Override
public long takeSnapshot() throws IOException {
  TermIndex lastTermIndex = getLastAppliedTermIndex();
  long lastAppliedIndex = lastTermIndex.getIndex();
  if (!isInitialized) {
    return lastAppliedIndex;
  }
  long startTime = Time.monotonicNow();
  TransactionInfo latestTrxInfo = transactionBuffer.getLatestTrxInfo();
  TransactionInfo lastAppliedTrxInfo =
      TransactionInfo.fromTermIndex(lastTermIndex);
  if (latestTrxInfo.compareTo(lastAppliedTrxInfo) < 0) {
    // The buffer lags the state machine: fast-forward it to the last
    // applied term index before flushing the snapshot.
    transactionBuffer.updateLatestTrxInfo(lastAppliedTrxInfo);
    transactionBuffer.setLatestSnapshot(lastAppliedTrxInfo.toSnapshotInfo());
  } else {
    // The buffer is already at or ahead of the applied index; report its
    // index as the snapshot index.
    lastAppliedIndex = latestTrxInfo.getTransactionIndex();
  }
  transactionBuffer.flush();
  LOG.info("Current Snapshot Index {}, takeSnapshot took {} ms",
      lastAppliedIndex, Time.monotonicNow() - startTime);
  return lastAppliedIndex;
}
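The branch above hinges on how TransactionInfo instances are ordered. A minimal sketch of that ordering, not taken from the Ozone sources; it assumes compareTo orders by term first and then by transaction index, and it reuses only the Builder API shown later on this page:

TransactionInfo buffered = new TransactionInfo.Builder()
    .setCurrentTerm(1).setTransactionIndex(50).build();
TransactionInfo applied = new TransactionInfo.Builder()
    .setCurrentTerm(1).setTransactionIndex(75).build();
if (buffered.compareTo(applied) < 0) {
  // The buffer lags the state machine; takeSnapshot fast-forwards it
  // via updateLatestTrxInfo before flushing.
}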
use of org.apache.hadoop.hdds.utils.TransactionInfo in project ozone by apache.
the class TestOzoneManagerDoubleBufferWithOMResponse method testDoubleBufferWithMixOfTransactions.
/**
 * This test first creates a volume, then performs a mix of transactions
 * (bucket creates and deletes) and adds them to the double buffer. It
 * then verifies that the OM DB entries match the responses that were
 * added to the double buffer.
 */
@Test
public void testDoubleBufferWithMixOfTransactions() throws Exception {
  // This test checks that the row counts and the data in the tables are
  // correct after the double buffer flushes.
  Queue<OMBucketCreateResponse> bucketQueue =
      new ConcurrentLinkedQueue<>();
  Queue<OMBucketDeleteResponse> deleteBucketQueue =
      new ConcurrentLinkedQueue<>();
  String volumeName = UUID.randomUUID().toString();
  OMVolumeCreateResponse omVolumeCreateResponse =
      (OMVolumeCreateResponse) createVolume(volumeName,
          trxId.incrementAndGet());
  int bucketCount = 10;
  doMixTransactions(volumeName, bucketCount, deleteBucketQueue, bucketQueue);
  // For every two bucket-create transactions, one bucket-delete is added,
  // so 10 creates produce 5 deletes.
  final int deleteCount = 5;
  // The +1 accounts for the volume-create transaction.
  GenericTestUtils.waitFor(() -> doubleBuffer.getFlushedTransactionCount()
      == (bucketCount + deleteCount + 1), 100, 120000);
  Assert.assertEquals(1, omMetadataManager.countRowsInTable(
      omMetadataManager.getVolumeTable()));
  Assert.assertEquals(5, omMetadataManager.countRowsInTable(
      omMetadataManager.getBucketTable()));
  // At this point the DB should contain 5 buckets and 1 volume.
  checkVolume(volumeName, omVolumeCreateResponse);
  checkCreateBuckets(bucketQueue);
  checkDeletedBuckets(deleteBucketQueue);
  // Check that lastAppliedIndex has been updated correctly.
  GenericTestUtils.waitFor(
      () -> bucketCount + deleteCount + 1 == lastAppliedIndex, 100, 30000);
  TransactionInfo transactionInfo =
      omMetadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
  assertNotNull(transactionInfo);
  Assert.assertEquals(lastAppliedIndex,
      transactionInfo.getTransactionIndex());
  Assert.assertEquals(term, transactionInfo.getTerm());
}
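The final assertions amount to comparing the persisted marker against an expected TransactionInfo. A small sketch of that equivalence, using only the Builder API shown elsewhere on this page (term and lastAppliedIndex are the test's own fields):

TransactionInfo expected = new TransactionInfo.Builder()
    .setCurrentTerm(term)
    .setTransactionIndex(lastAppliedIndex)
    .build();
// Equivalent to the two assertEquals calls at the end of the test.
Assert.assertEquals(expected.getTerm(), transactionInfo.getTerm());
Assert.assertEquals(expected.getTransactionIndex(),
    transactionInfo.getTransactionIndex());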
use of org.apache.hadoop.hdds.utils.TransactionInfo in project ozone by apache.
the class OzoneManager method instantiatePrepareStateOnStartup.
/**
 * Determines whether the prepare gate should be enabled on this OM after
 * the OM is restarted.
 * This must be done after metadataManager is instantiated and before the
 * RPC server is started.
 */
private void instantiatePrepareStateOnStartup() throws IOException {
  TransactionInfo txnInfo =
      metadataManager.getTransactionInfoTable().get(TRANSACTION_INFO_KEY);
  if (txnInfo == null) {
    // No prepare request could have been received if there are no
    // transactions.
    prepareState = new OzoneManagerPrepareState(configuration);
  } else {
    prepareState = new OzoneManagerPrepareState(configuration,
        txnInfo.getTransactionIndex());
    TransactionInfo dbPrepareValue =
        metadataManager.getTransactionInfoTable().get(PREPARE_MARKER_KEY);
    boolean hasMarkerFile = (prepareState.getState().getStatus()
        == PrepareStatus.PREPARE_COMPLETED);
    boolean hasDBMarker = (dbPrepareValue != null);
    if (hasDBMarker) {
      long dbPrepareIndex = dbPrepareValue.getTransactionIndex();
      if (hasMarkerFile) {
        long prepareFileIndex = prepareState.getState().getIndex();
        // If the marker file and DB prepare indices do not match, trust
        // the DB value, since this is synced through Ratis, to avoid
        // divergence.
        if (prepareFileIndex != dbPrepareIndex) {
          LOG.warn("Prepare marker file index {} does not match DB prepare " +
              "index {}. Writing DB index to prepare file and maintaining " +
              "prepared state.", prepareFileIndex, dbPrepareIndex);
          prepareState.finishPrepare(dbPrepareIndex);
        }
        // Else, marker and DB are present and match, so OM is prepared.
      } else {
        // Prepare was cancelled with the startup flag that removes the
        // marker file. Persist this to the DB.
        // If the startup flag is used it should be used on all OMs to avoid
        // divergence.
        metadataManager.getTransactionInfoTable().delete(PREPARE_MARKER_KEY);
      }
    } else if (hasMarkerFile) {
      // A marker file without a DB entry should never happen: if a prepare
      // request fails partway through, OM should replay it so both the DB
      // and marker file exist.
      throw new OMException("Prepare marker file found on startup without " +
          "a corresponding database entry. Corrupt prepare state.",
          ResultCodes.PREPARE_FAILED);
    }
    // Else, no DB entry or marker file: OM is not prepared.
  }
}
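The reconciliation above collapses to four cases. A condensed restatement of the same logic (plain sketch, not the OM implementation; hasDBMarker and hasMarkerFile as computed in the method):

if (hasDBMarker && hasMarkerFile) {
  // Both present: the DB index wins because it is replicated via Ratis;
  // the marker file is rewritten if the indices differ.
} else if (hasDBMarker) {
  // Marker file removed by the cancel-prepare startup flag: delete the
  // stale DB entry as well.
} else if (hasMarkerFile) {
  // Marker file without a DB entry: corrupt prepare state, fail startup.
} else {
  // Neither present: the OM is simply not prepared.
}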
use of org.apache.hadoop.hdds.utils.TransactionInfo in project ozone by apache.
the class OzoneManager method installCheckpoint.
/**
 * Install checkpoint. If the checkpoint's snapshot index is greater than
 * the OM's last applied transaction index, re-initialize the OM state via
 * this checkpoint. Before re-initializing OM state, the OM Ratis server
 * should be stopped so that no new transactions can be applied.
 */
TermIndex installCheckpoint(String leaderId, DBCheckpoint omDBCheckpoint)
    throws Exception {
  Path checkpointLocation = omDBCheckpoint.getCheckpointLocation();
  TransactionInfo checkpointTrxnInfo = OzoneManagerRatisUtils
      .getTrxnInfoFromCheckpoint(configuration, checkpointLocation);
  LOG.info("Installing checkpoint with OMTransactionInfo {}",
      checkpointTrxnInfo);
  return installCheckpoint(leaderId, checkpointLocation, checkpointTrxnInfo);
}
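The guard described in the Javadoc (install only when the checkpoint is ahead) can be sketched as follows; everything except the TransactionInfo accessor is a hypothetical placeholder, not the actual OM code:

long checkpointIndex = checkpointTrxnInfo.getTransactionIndex();
long localIndex = getLastAppliedIndex(); // placeholder accessor
if (checkpointIndex > localIndex) {
  // Stop the OM Ratis server, swap in the checkpoint DB, and
  // re-initialize OM state from it.
} else {
  // The checkpoint is not ahead of this OM; skip installation.
}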
use of org.apache.hadoop.hdds.utils.TransactionInfo in project ozone by apache.
the class TestTransactionInfoCodec method toAndFromPersistedFormat.
@Test
public void toAndFromPersistedFormat() throws Exception {
  TransactionInfo transactionInfo = new TransactionInfo.Builder()
      .setTransactionIndex(100)
      .setCurrentTerm(11)
      .build();
  TransactionInfo convertedTransactionInfo =
      codec.fromPersistedFormat(codec.toPersistedFormat(transactionInfo));
  Assert.assertEquals(transactionInfo, convertedTransactionInfo);
}
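The round-trip property generalizes to arbitrary term/index pairs. A sketch of a reusable check, assuming codec is a Codec&lt;TransactionInfo&gt; with the toPersistedFormat/fromPersistedFormat pair used in the test:

private static void assertRoundTrip(Codec&lt;TransactionInfo&gt; codec,
    long term, long index) throws Exception {
  TransactionInfo original = new TransactionInfo.Builder()
      .setCurrentTerm(term)
      .setTransactionIndex(index)
      .build();
  TransactionInfo decoded =
      codec.fromPersistedFormat(codec.toPersistedFormat(original));
  // Relies on TransactionInfo.equals, as the test above does.
  Assert.assertEquals(original, decoded);
}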