Usage of org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl in the Apache Ozone project.
Example: the flush() method of the SCMHADBTransactionBufferImpl class.
@Override
public void flush() throws IOException {
  // Persist the latest transaction info into the transaction-info table as part
  // of the pending batch, then commit everything atomically.
  Table<String, TransactionInfo> trxInfoTable = metadataStore.getTransactionInfoTable();
  trxInfoTable.putWithBatch(currentBatchOperation, TRANSACTION_INFO_KEY, latestTrxInfo);
  metadataStore.getStore().commitBatchOperation(currentBatchOperation);
  currentBatchOperation.close();

  // Remember the snapshot corresponding to the transaction info just committed.
  this.latestSnapshot = latestTrxInfo.toSnapshotInfo();

  // Start a fresh batch for subsequent buffered writes.
  currentBatchOperation = metadataStore.getStore().initBatchOperation();

  // Notify the deleted-block log that the buffer has been flushed.
  // NOTE(review): assumes the configured DeletedBlockLog is always the
  // DeletedBlockLogImpl variant — checkArgument enforces that here.
  DeletedBlockLog blockLog = scm.getScmBlockManager().getDeletedBlockLog();
  Preconditions.checkArgument(blockLog instanceof DeletedBlockLogImpl);
  ((DeletedBlockLogImpl) blockLog).onFlush();
}
Usage of org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl in the Apache Ozone project.
Example: the notifyLeaderChanged() method of the SCMStateMachine class.
@Override
public void notifyLeaderChanged(RaftGroupMemberId groupMemberId, RaftPeerId newLeaderId) {
  // Ignore leader-change notifications that arrive before initialization.
  if (!isInitialized) {
    return;
  }
  // Record the current Ratis term whether or not this SCM won the election.
  currentLeaderTerm.set(
      scm.getScmHAManager().getRatisServer().getDivision().getInfo().getCurrentTerm());
  if (!groupMemberId.getPeerId().equals(newLeaderId)) {
    // Another peer became leader; this SCM stays a follower.
    LOG.info("leader changed, yet current SCM is still follower.");
    return;
  }
  // Fix: log the term value explicitly rather than the AtomicLong wrapper,
  // consistent with the .get() call used on the next line.
  LOG.info("current SCM becomes leader of term {}.", currentLeaderTerm.get());
  // Promote this SCM to leader state for the recorded term.
  scm.getScmContext().updateLeaderAndTerm(true, currentLeaderTerm.get());
  // Discard any sequence-id batch cached while we were a follower.
  scm.getSequenceIdGen().invalidateBatch();
  // Notify leader-sensitive components that this SCM now leads the group.
  DeletedBlockLog deletedBlockLog = scm.getScmBlockManager().getDeletedBlockLog();
  Preconditions.checkArgument(deletedBlockLog instanceof DeletedBlockLogImpl);
  ((DeletedBlockLogImpl) deletedBlockLog).onBecomeLeader();
  scm.getScmDecommissionManager().onBecomeLeader();
}
Aggregations