Example 1 with BatchOperation

Use of org.apache.hadoop.hdds.utils.db.BatchOperation in project ozone by apache.

The class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV1.

private void markBlocksForDeletionSchemaV1(KeyValueContainerData containerData, DeletedBlocksTransaction delTX) throws IOException {
    long containerId = delTX.getContainerID();
    if (!isTxnIdValid(containerId, containerData, delTX)) {
        return;
    }
    int newDeletionBlocks = 0;
    try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
        Table<String, BlockData> blockDataTable = containerDB.getStore().getBlockDataTable();
        Table<String, ChunkInfoList> deletedBlocksTable = containerDB.getStore().getDeletedBlocksTable();
        try (BatchOperation batch = containerDB.getStore().getBatchHandler().initBatchOperation()) {
            for (Long blkLong : delTX.getLocalIDList()) {
                String blk = blkLong.toString();
                BlockData blkInfo = blockDataTable.get(blk);
                if (blkInfo != null) {
                    String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + blk;
                    if (blockDataTable.get(deletingKey) != null || deletedBlocksTable.get(blk) != null) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(String.format("Ignoring delete for block %s in container %d." + " Entry already added.", blk, containerId));
                        }
                        continue;
                    }
                    // Found the block in container db,
                    // use an atomic update to change its state to deleting.
                    blockDataTable.putWithBatch(batch, deletingKey, blkInfo);
                    blockDataTable.deleteWithBatch(batch, blk);
                    newDeletionBlocks++;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Transited Block {} to DELETING state in container {}", blk, containerId);
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Block {} not found or already under deletion in" + " container {}, skip deleting it.", blk, containerId);
                    }
                }
            }
            updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
            containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
        } catch (IOException e) {
            // If the batch fails, fail this whole transaction; it will be retried
            // with a certain number of retries.
            throw new IOException("Failed to delete blocks for TXID = " + delTX.getTxID(), e);
        }
    }
}
Also used : BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) IOException(java.io.IOException) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList)
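
The schema V1 handler above stages two changes per block, a put under the DELETING prefix and a delete of the original key, and commits them in a single batch so the state transition is atomic. A minimal sketch of that rename-in-one-batch pattern follows; the BatchOperation, Table and DBStore calls are the ones used in the examples on this page (assuming DBStore and Table live alongside BatchOperation in org.apache.hadoop.hdds.utils.db), while the class, method and key names are only illustrative.

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class BatchRenameSketch {
    /**
     * Moves a value from oldKey to newKey in one atomic batch, the same shape
     * as the DELETING-prefix transition above. Helper name and parameters are
     * placeholders, not Ozone APIs.
     */
    public static <V> void renameWithBatch(DBStore store, Table<String, V> table,
            String oldKey, String newKey) throws IOException {
        V value = table.get(oldKey);
        if (value == null) {
            // Nothing to move; mirrors the "block not found" branch above.
            return;
        }
        try (BatchOperation batch = store.initBatchOperation()) {
            table.putWithBatch(batch, newKey, value);
            table.deleteWithBatch(batch, oldKey);
            // Both staged changes are applied together, or not at all.
            store.commitBatchOperation(batch);
        }
    }
}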

Example 2 with BatchOperation

Use of org.apache.hadoop.hdds.utils.db.BatchOperation in project ozone by apache.

The class BlockManagerImpl, method persistPutBlock.

public static long persistPutBlock(KeyValueContainer container, BlockData data, ConfigurationSource config, boolean endOfBlock) throws IOException {
    Preconditions.checkNotNull(data, "BlockData cannot be null for put " + "operation.");
    Preconditions.checkState(data.getContainerID() >= 0, "Container Id " + "cannot be negative");
    // No explicit lock is taken here since all updates for a container go
    // against a single DB. We rely on DB level locking to avoid conflicts.
    try (ReferenceCountedDB db = BlockUtils.getDB(container.getContainerData(), config)) {
        // This is a post condition that acts as a hint to the user.
        // Should never fail.
        Preconditions.checkNotNull(db, DB_NULL_ERR_MSG);
        long bcsId = data.getBlockCommitSequenceId();
        long containerBCSId = container.getContainerData().getBlockCommitSequenceId();
        // A blockCommitSequenceId of 0 means the putBlock request did not come
        // through Ratis (e.g. in test scenarios).
        // In such cases, we should overwrite the block as well
        if ((bcsId != 0) && (bcsId <= containerBCSId)) {
            // Since the blockCommitSequenceId stored in the db is greater than or
            // equal to the blockCommitSequenceId to be updated, the putBlock
            // transaction is being reapplied in the ContainerStateMachine on restart.
            // It also implies that the given block must already exist in the db,
            // so just log and return.
            LOG.debug("blockCommitSequenceId {} in the Container Db is greater" + " than the supplied value {}. Ignoring it", containerBCSId, bcsId);
            return data.getSize();
        }
        // Check if the block is present in the pendingPutBlockCache for the
        // container to determine whether the blockCount is already incremented
        // for this block in the DB or not.
        long localID = data.getLocalID();
        boolean isBlockInCache = container.isBlockInPendingPutBlockCache(localID);
        boolean incrBlockCount = false;
        // update the blockData as well as BlockCommitSequenceId here
        try (BatchOperation batch = db.getStore().getBatchHandler().initBatchOperation()) {
            // If block exists in cache, blockCount should not be incremented.
            if (!isBlockInCache) {
                if (db.getStore().getBlockDataTable().get(Long.toString(localID)) == null) {
                    // Block does not exist in DB => blockCount needs to be
                    // incremented when the block is added into DB.
                    incrBlockCount = true;
                }
            }
            db.getStore().getBlockDataTable().putWithBatch(batch, Long.toString(localID), data);
            if (bcsId != 0) {
                db.getStore().getMetadataTable().putWithBatch(batch, OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID, bcsId);
            }
            // Set bytes used. This value is updated for every write but only
            // committed on every put block. That way, when the datanode is up,
            // only committed block lengths are used to compute per-container
            // disk space, and on restart only the blocks committed to the DB
            // are used to compute bytes used. This keeps the current behavior
            // and avoids a DB write during the write chunk operation.
            db.getStore().getMetadataTable().putWithBatch(batch, OzoneConsts.CONTAINER_BYTES_USED, container.getContainerData().getBytesUsed());
            // Set Block Count for a container.
            if (incrBlockCount) {
                db.getStore().getMetadataTable().putWithBatch(batch, OzoneConsts.BLOCK_COUNT, container.getContainerData().getBlockCount() + 1);
            }
            db.getStore().getBatchHandler().commitBatchOperation(batch);
        }
        if (bcsId != 0) {
            container.updateBlockCommitSequenceId(bcsId);
        }
        // Increment the block count in-memory only after the DB update succeeds.
        if (incrBlockCount) {
            container.getContainerData().incrBlockCount();
        }
        // Cache the block so that subsequent putBlock calls for it do not
        // have to read the DB to check for block existence
        if (!isBlockInCache && !endOfBlock) {
            container.addToPendingPutBlockCache(localID);
        } else if (isBlockInCache && endOfBlock) {
            // Remove the block from the PendingPutBlockCache as there would not
            // be any more writes to this block
            container.removeFromPendingPutBlockCache(localID);
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Block " + data.getBlockID() + " successfully committed with bcsId " + bcsId + " chunk size " + data.getChunks().size());
        }
        return data.getSize();
    }
}
Also used : BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
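
A detail worth noting in persistPutBlock is the ordering: every table update is staged into the batch and committed first, and only then are the in-memory values (blockCommitSequenceId, block count, the pending-put cache) touched, so memory never runs ahead of the DB. A small sketch of that ordering under the same package assumptions as above, with all other names as placeholders.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class CommitThenUpdateMemorySketch {
    /**
     * Stages writes to a data table and a metadata table in one batch and bumps
     * the in-memory counter only after commitBatchOperation returns, mirroring
     * how persistPutBlock defers incrBlockCount().
     */
    public static void putAndCount(DBStore store, Table<String, String> dataTable,
            Table<String, Long> metaTable, String key, String value,
            AtomicLong inMemoryCount) throws IOException {
        // Decide before staging, like the pendingPutBlockCache / DB existence check.
        boolean isNewKey = dataTable.get(key) == null;
        try (BatchOperation batch = store.initBatchOperation()) {
            dataTable.putWithBatch(batch, key, value);
            if (isNewKey) {
                metaTable.putWithBatch(batch, "keyCount", inMemoryCount.get() + 1);
            }
            // One atomic commit across both tables.
            store.commitBatchOperation(batch);
        }
        if (isNewKey) {
            // In-memory state is only updated once the DB write has succeeded.
            inMemoryCount.incrementAndGet();
        }
    }
}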

Example 3 with BatchOperation

Use of org.apache.hadoop.hdds.utils.db.BatchOperation in project ozone by apache.

The class ReconContainerMetadataManagerImpl, method batchStoreContainerReplicaHistory.

/**
 * Batch version of storeContainerReplicaHistory.
 *
 * @param replicaHistoryMap Replica history map
 * @throws IOException
 */
@Override
public void batchStoreContainerReplicaHistory(Map<Long, Map<UUID, ContainerReplicaHistory>> replicaHistoryMap) throws IOException {
    BatchOperation batchOperation = containerDbStore.initBatchOperation();
    for (Map.Entry<Long, Map<UUID, ContainerReplicaHistory>> entry : replicaHistoryMap.entrySet()) {
        final long containerId = entry.getKey();
        final Map<UUID, ContainerReplicaHistory> tsMap = entry.getValue();
        List<ContainerReplicaHistory> tsList = new ArrayList<>();
        for (Map.Entry<UUID, ContainerReplicaHistory> e : tsMap.entrySet()) {
            tsList.add(e.getValue());
        }
        containerReplicaHistoryTable.putWithBatch(batchOperation, containerId, new ContainerReplicaHistoryList(tsList));
    }
    containerDbStore.commitBatchOperation(batchOperation);
}
Also used : ContainerReplicaHistory(org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistory) ArrayList(java.util.ArrayList) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) UUID(java.util.UUID) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) ContainerReplicaHistoryList(org.apache.hadoop.ozone.recon.scm.ContainerReplicaHistoryList)
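
Unlike Examples 1, 2 and 5, this method does not place the BatchOperation in a try-with-resources block, so the batch is not released if putWithBatch or commitBatchOperation throws. A restructured sketch of the same method using the try-with-resources form the other examples rely on (same fields, types and imports as Example 3, and assuming, as those examples do, that closing the batch after commit is safe):

@Override
public void batchStoreContainerReplicaHistory(Map<Long, Map<UUID, ContainerReplicaHistory>> replicaHistoryMap) throws IOException {
    // Same logic as Example 3, but the batch is always released, even on failure.
    try (BatchOperation batchOperation = containerDbStore.initBatchOperation()) {
        for (Map.Entry<Long, Map<UUID, ContainerReplicaHistory>> entry : replicaHistoryMap.entrySet()) {
            List<ContainerReplicaHistory> tsList = new ArrayList<>(entry.getValue().values());
            containerReplicaHistoryTable.putWithBatch(batchOperation, entry.getKey(), new ContainerReplicaHistoryList(tsList));
        }
        containerDbStore.commitBatchOperation(batchOperation);
    }
}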

Example 4 with BatchOperation

Use of org.apache.hadoop.hdds.utils.db.BatchOperation in project ozone by apache.

The class SCMCertStore, method storeValidScmCertificate.

/**
 * Writes a new SCM certificate that was issued to the persistent store.
 * @param serialID - Certificate Serial Number.
 * @param certificate - Certificate to persist.
 * @throws IOException - on Failure.
 */
public void storeValidScmCertificate(BigInteger serialID, X509Certificate certificate) throws IOException {
    lock.lock();
    try {
        BatchOperation batchOperation = scmMetadataStore.getBatchHandler().initBatchOperation();
        scmMetadataStore.getValidSCMCertsTable().putWithBatch(batchOperation, serialID, certificate);
        scmMetadataStore.getValidCertsTable().putWithBatch(batchOperation, serialID, certificate);
        scmMetadataStore.getStore().commitBatchOperation(batchOperation);
    } finally {
        lock.unlock();
    }
}
Also used : BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation)
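
Here the lock serializes writers while the batch makes the two table writes atomic, so a reader never sees the certificate in one valid-certs table but not the other. A sketch of that lock-plus-batch shape, under the same package assumptions as above; the field, class and parameter names are placeholders, not the SCMCertStore API.

import java.io.IOException;
import java.math.BigInteger;
import java.security.cert.X509Certificate;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class DualTableCertWriteSketch {
    private final Lock lock = new ReentrantLock();

    /**
     * Writes the same certificate to two tables in one batch while holding a
     * lock, the shape of storeValidScmCertificate above.
     */
    public void storeInBothTables(DBStore store,
            Table<BigInteger, X509Certificate> scmCertsTable,
            Table<BigInteger, X509Certificate> allCertsTable,
            BigInteger serialID, X509Certificate certificate) throws IOException {
        lock.lock();
        try (BatchOperation batch = store.initBatchOperation()) {
            scmCertsTable.putWithBatch(batch, serialID, certificate);
            allCertsTable.putWithBatch(batch, serialID, certificate);
            // Readers observe both entries or neither.
            store.commitBatchOperation(batch);
        } finally {
            lock.unlock();
        }
    }
}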

Example 5 with BatchOperation

Use of org.apache.hadoop.hdds.utils.db.BatchOperation in project ozone by apache.

The class OzoneManager, method addS3GVolumeToDB.

/**
 * Create volume which is required for S3Gateway operations.
 * @throws IOException
 */
private void addS3GVolumeToDB() throws IOException {
    String s3VolumeName = HddsClientUtils.getS3VolumeName(configuration);
    String dbVolumeKey = metadataManager.getVolumeKey(s3VolumeName);
    if (!s3VolumeName.equals(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT)) {
        LOG.warn("Make sure that all S3Gateway use same volume name." + " Otherwise user need to manually create/configure Volume " + "configured by S3Gateway");
    }
    if (!metadataManager.getVolumeTable().isExist(dbVolumeKey)) {
        // A transaction ID above the normal range (MAX_TRXN_ID + 1) is used here;
        // the highest transaction ID is reserved for this operation.
        long transactionID = MAX_TRXN_ID + 1;
        long objectID = OmUtils.addEpochToTxId(metadataManager.getOmEpoch(), transactionID);
        String userName = UserGroupInformation.getCurrentUser().getShortUserName();
        // Add volume and user info to DB and cache.
        OmVolumeArgs omVolumeArgs = createS3VolumeInfo(s3VolumeName, objectID);
        String dbUserKey = metadataManager.getUserKey(userName);
        PersistedUserVolumeInfo userVolumeInfo = PersistedUserVolumeInfo.newBuilder().setObjectID(objectID).setUpdateID(transactionID).addVolumeNames(s3VolumeName).build();
        // Commit to DB.
        try (BatchOperation batchOperation = metadataManager.getStore().initBatchOperation()) {
            metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, omVolumeArgs);
            metadataManager.getUserTable().putWithBatch(batchOperation, dbUserKey, userVolumeInfo);
            metadataManager.getStore().commitBatchOperation(batchOperation);
        }
        // Add to cache.
        metadataManager.getVolumeTable().addCacheEntry(new CacheKey<>(dbVolumeKey), new CacheValue<>(Optional.of(omVolumeArgs), transactionID));
        metadataManager.getUserTable().addCacheEntry(new CacheKey<>(dbUserKey), new CacheValue<>(Optional.of(userVolumeInfo), transactionID));
        LOG.info("Created Volume {} With Owner {} required for S3Gateway " + "operations.", s3VolumeName, userName);
    }
}
Also used : OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) CertificateSignRequest.getEncodedString(org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString) BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) PersistedUserVolumeInfo(org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo)
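
The write path here is idempotent: the volume is only created when its key is absent, the volume row and the user row go into a single batch, and the table caches are populated only after the batch has been committed. A reduced sketch of the check-then-batch part (cache population left out), using the same package assumptions as the earlier sketches and placeholder names throughout.

import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;

public final class CreateOnceSketch {
    /**
     * Creates a volume entry and its owning-user entry together, skipping the
     * write entirely if the volume already exists. Names are placeholders,
     * not OzoneManager APIs.
     */
    public static <V, U> void createVolumeOnce(DBStore store,
            Table<String, V> volumeTable, Table<String, U> userTable,
            String volumeKey, V volume, String userKey, U user) throws IOException {
        if (volumeTable.isExist(volumeKey)) {
            // Idempotent: the volume was created by an earlier run.
            return;
        }
        try (BatchOperation batch = store.initBatchOperation()) {
            volumeTable.putWithBatch(batch, volumeKey, volume);
            userTable.putWithBatch(batch, userKey, user);
            // Volume and user info land together, as in addS3GVolumeToDB.
            store.commitBatchOperation(batch);
        }
    }
}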

Aggregations

BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation): 10 usages
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 4 usages
IOException (java.io.IOException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
HashMap (java.util.HashMap): 2 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 2 usages
DatanodeStore (org.apache.hadoop.ozone.container.metadata.DatanodeStore): 2 usages
DatanodeStoreSchemaTwoImpl (org.apache.hadoop.ozone.container.metadata.DatanodeStoreSchemaTwoImpl): 2 usages
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse): 2 usages
BigInteger (java.math.BigInteger): 1 usage
CRLException (java.security.cert.CRLException): 1 usage
X509CRL (java.security.cert.X509CRL): 1 usage
X509Certificate (java.security.cert.X509Certificate): 1 usage
Date (java.util.Date): 1 usage
LinkedHashMap (java.util.LinkedHashMap): 1 usage
List (java.util.List): 1 usage
Map (java.util.Map): 1 usage
UUID (java.util.UUID): 1 usage
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1 usage
SupplierWithIOException (org.apache.hadoop.hdds.function.SupplierWithIOException): 1 usage