Search in sources :

Example 1 with TableIterator

use of org.apache.hadoop.hdds.utils.db.TableIterator in project ozone by apache.

In the class TestReconWithOzoneManagerHA, the method testReconGetsSnapshotFromLeader:

@Test
public void testReconGetsSnapshotFromLeader() throws Exception {
    AtomicReference<OzoneManager> ozoneManager = new AtomicReference<>();
    // Wait for OM leader election to finish
    GenericTestUtils.waitFor(() -> {
        OzoneManager om = cluster.getOMLeader();
        ozoneManager.set(om);
        return om != null;
    }, 100, 120000);
    // Assert on the stored leader, not the AtomicReference itself: the
    // reference object is always non-null, so the original check could
    // never fail even when election timed out.
    Assert.assertNotNull("Timed out waiting OM leader election to finish: " + "no leader or more than one leader.", ozoneManager.get());
    Assert.assertTrue("Should have gotten the leader!", ozoneManager.get().isLeaderReady());
    OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
    // Recon must request the OM DB snapshot from the elected leader's HTTP endpoint.
    String hostname = ozoneManager.get().getHttpServer().getHttpAddress().getHostName();
    String expectedUrl = "http://" + (hostname.equals("0.0.0.0") ? "localhost" : hostname) + ":" + ozoneManager.get().getHttpServer().getHttpAddress().getPort() + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT;
    String snapshotUrl = impl.getOzoneManagerSnapshotUrl();
    Assert.assertEquals("OM Snapshot should be requested from the leader.", expectedUrl, snapshotUrl);
    // Write some data
    String keyPrefix = "ratis";
    OzoneOutputStream key = objectStore.getVolume(VOL_NAME).getBucket(VOL_NAME).createKey(keyPrefix, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    key.write(keyPrefix.getBytes(UTF_8));
    key.flush();
    key.close();
    // Sync data to Recon
    impl.syncDataFromOM();
    ReconContainerMetadataManager reconContainerMetadataManager = cluster.getReconServer().getReconContainerMetadataManager();
    String reconKeyPrefix = null;
    // Close the RocksDB-backed iterator when done (the original leaked it).
    try (TableIterator iterator = reconContainerMetadataManager.getContainerTableIterator()) {
        while (iterator.hasNext()) {
            Table.KeyValue<ContainerKeyPrefix, Integer> keyValue = (Table.KeyValue<ContainerKeyPrefix, Integer>) iterator.next();
            reconKeyPrefix = keyValue.getKey().getKeyPrefix();
        }
    }
    Assert.assertEquals("Container data should be synced to recon.", String.format("/%s/%s/%s", VOL_NAME, VOL_NAME, keyPrefix), reconKeyPrefix);
}
Also used : Table(org.apache.hadoop.hdds.utils.db.Table) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneManagerServiceProviderImpl(org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) ReconContainerMetadataManager(org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager) OzoneManager(org.apache.hadoop.ozone.om.OzoneManager) ContainerKeyPrefix(org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix) Test(org.junit.Test)

Example 2 with TableIterator

use of org.apache.hadoop.hdds.utils.db.TableIterator in project ozone by apache.

In the class KeyManagerImpl, the method listStatus:

/**
 * List the status for a file or a directory and its contents.
 *
 * @param args       Key args
 * @param recursive  For a directory if true all the descendants of a
 *                   particular directory are listed
 * @param startKey   Key from which listing needs to start. If startKey exists
 *                   its status is included in the final list.
 * @param numEntries Number of entries to list from the start key
 * @param clientAddress a hint to key manager, order the datanode in returned
 *                      pipeline by distance between client and datanode.
 * @return list of file status
 * @throws IOException on DB access failure or if the path does not exist
 */
@Override
@SuppressWarnings("methodlength")
public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive, String startKey, long numEntries, String clientAddress) throws IOException {
    Preconditions.checkNotNull(args, "Key args can not be null");
    String volName = args.getVolumeName();
    String buckName = args.getBucketName();
    List<OzoneFileStatus> fileStatusList = new ArrayList<>();
    if (numEntries <= 0) {
        return fileStatusList;
    }
    // FSO-layout buckets use a dedicated prefix-based listing path.
    if (isBucketFSOptimized(volName, buckName)) {
        return listStatusFSO(args, recursive, startKey, numEntries, clientAddress);
    }
    String volumeName = args.getVolumeName();
    String bucketName = args.getBucketName();
    String keyName = args.getKeyName();
    // A map sorted by OmKey to combine results from TableCache and DB.
    TreeMap<String, OzoneFileStatus> cacheKeyMap = new TreeMap<>();
    if (Strings.isNullOrEmpty(startKey)) {
        OzoneFileStatus fileStatus = getFileStatus(args, clientAddress);
        if (fileStatus.isFile()) {
            return Collections.singletonList(fileStatus);
        }
        // keyName is a directory
        startKey = OzoneFSUtils.addTrailingSlashIfNeeded(keyName);
    }
    // Note: eliminating the case where startCacheKey could end with '//'
    String keyArgs = OzoneFSUtils.addTrailingSlashIfNeeded(metadataManager.getOzoneKey(volumeName, bucketName, keyName));
    metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName);
    Table keyTable = metadataManager.getKeyTable(getBucketLayout(metadataManager, volName, buckName));
    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator;
    try {
        Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> cacheIter = keyTable.cacheIterator();
        String startCacheKey = OZONE_URI_DELIMITER + volumeName + OZONE_URI_DELIMITER + bucketName + OZONE_URI_DELIMITER + ((startKey.equals(OZONE_URI_DELIMITER)) ? "" : startKey);
        // First, find key in TableCache
        listStatusFindKeyInTableCache(cacheIter, keyArgs, startCacheKey, recursive, cacheKeyMap);
        iterator = keyTable.iterator();
    } finally {
        metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName);
    }
    // Then, find key in DB. Close the iterator when the scan is done:
    // it wraps a native RocksDB handle and the original code leaked it.
    try {
        String seekKeyInDb = metadataManager.getOzoneKey(volumeName, bucketName, startKey);
        Table.KeyValue<String, OmKeyInfo> entry = iterator.seek(seekKeyInDb);
        int countEntries = 0;
        if (iterator.hasNext()) {
            if (entry.getKey().equals(keyArgs)) {
                // Skip the key itself, since we are listing inside the directory
                iterator.next();
            }
            // Iterate through seek results
            while (iterator.hasNext() && numEntries - countEntries > 0) {
                entry = iterator.next();
                String entryInDb = entry.getKey();
                OmKeyInfo omKeyInfo = entry.getValue();
                if (entryInDb.startsWith(keyArgs)) {
                    String entryKeyName = omKeyInfo.getKeyName();
                    if (recursive) {
                        if (!isKeyDeleted(entryInDb, keyTable)) {
                            cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, scmBlockSize, !OzoneFSUtils.isFile(entryKeyName)));
                            countEntries++;
                        }
                    } else {
                        // get the child of the directory to list from the entry. For
                        // example if directory to list is /a and entry is /a/b/c where
                        // c is a file. The immediate child is b which is a directory. c
                        // should not be listed as child of a.
                        String immediateChild = OzoneFSUtils.getImmediateChild(entryKeyName, keyName);
                        boolean isFile = OzoneFSUtils.isFile(immediateChild);
                        if (isFile) {
                            if (!isKeyDeleted(entryInDb, keyTable)) {
                                cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, scmBlockSize, !isFile));
                                countEntries++;
                            }
                        } else {
                            // if entry is a directory
                            if (!isKeyDeleted(entryInDb, keyTable)) {
                                if (!entryKeyName.equals(immediateChild)) {
                                    OmKeyInfo fakeDirEntry = createDirectoryKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), immediateChild, omKeyInfo.getAcls());
                                    cacheKeyMap.put(entryInDb, new OzoneFileStatus(fakeDirEntry, scmBlockSize, true));
                                } else {
                                    // If entryKeyName matches dir name, we have the info
                                    cacheKeyMap.put(entryInDb, new OzoneFileStatus(omKeyInfo, 0, true));
                                }
                                countEntries++;
                            }
                            // skip the other descendants of this child directory.
                            iterator.seek(getNextGreaterString(volumeName, bucketName, immediateChild));
                        }
                    }
                } else {
                    // Left the prefix range of the listed directory; stop scanning.
                    break;
                }
            }
        }
    } finally {
        iterator.close();
    }
    int countEntries = 0;
    // Convert results in cacheKeyMap to List
    for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
        // No need to check if a key is deleted or not here, this is handled
        // when adding entries to cacheKeyMap from DB.
        fileStatusList.add(fileStatus);
        countEntries++;
        if (countEntries >= numEntries) {
            break;
        }
    }
    // Clean up temp map and set
    cacheKeyMap.clear();
    List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
    fileStatusList.stream().map(s -> s.getKeyInfo()).forEach(keyInfoList::add);
    if (args.getLatestVersionLocation()) {
        slimLocationVersion(keyInfoList.toArray(new OmKeyInfo[0]));
    }
    refreshPipeline(keyInfoList);
    if (args.getSortDatanodes()) {
        sortDatanodes(clientAddress, keyInfoList.toArray(new OmKeyInfo[0]));
    }
    return fileStatusList;
}
Also used : Arrays(java.util.Arrays) INTERNAL_ERROR(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR) OzoneFSUtils(org.apache.hadoop.ozone.om.helpers.OzoneFSUtils) StringUtils(org.apache.commons.lang3.StringUtils) GeneralSecurityException(java.security.GeneralSecurityException) OM_KEY_PREFIX(org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX) HADOOP_SECURITY_KEY_PROVIDER_PATH(org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH) Map(java.util.Map) Path(java.nio.file.Path) EnumSet(java.util.EnumSet) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) DFS_CONTAINER_RATIS_ENABLED_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT) Set(java.util.Set) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) SecurityUtil(org.apache.hadoop.security.SecurityUtil) OzoneBlockTokenSecretManager(org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager) VOLUME_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND) CodecRegistry(org.apache.hadoop.hdds.utils.db.CodecRegistry) HDDS_BLOCK_TOKEN_ENABLED(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OZONE_URI_DELIMITER(org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER) OmMultipartUploadListParts(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT) INVALID_KMS_PROVIDER(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KMS_PROVIDER) TreeSet(java.util.TreeSet) ArrayList(java.util.ArrayList) Time.monotonicNow(org.apache.hadoop.util.Time.monotonicNow) EncryptedKeyVersion(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion) Strings(com.google.common.base.Strings) 
OMFileRequest(org.apache.hadoop.ozone.om.request.file.OMFileRequest) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) KeyProviderCryptoExtension(org.apache.hadoop.crypto.key.KeyProviderCryptoExtension) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) BlockGroup(org.apache.hadoop.ozone.common.BlockGroup) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IOException(java.io.IOException) BucketEncryptionKeyInfo(org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo) OZONE_BLOCK_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) TreeMap(java.util.TreeMap) OZONE_SCM_BLOCK_SIZE_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT) DIRECTORY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND) Paths(java.nio.file.Paths) Table(org.apache.hadoop.hdds.utils.db.Table) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey) OmPartInfo(org.apache.hadoop.ozone.om.helpers.OmPartInfo) READ(org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ) Preconditions(com.google.common.base.Preconditions) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) RequestContext(org.apache.hadoop.ozone.security.acl.RequestContext) LoggerFactory(org.slf4j.LoggerFactory) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ScmBlockLocationProtocol(org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol) 
OZONE_SCM_BLOCK_SIZE(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE) KEY_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND) OZONE_BLOCK_DELETING_SERVICE_TIMEOUT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) OzoneAcl(org.apache.hadoop.ozone.OzoneAcl) OmMultipartUpload(org.apache.hadoop.ozone.om.helpers.OmMultipartUpload) StorageUnit(org.apache.hadoop.conf.StorageUnit) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) Collection(java.util.Collection) ReplicationFactor(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor) FILE_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) Objects(java.util.Objects) List(java.util.List) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) BUCKET_NOT_FOUND(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND) OZONE_KEY_PREALLOCATION_BLOCKS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX) OMClientRequest(org.apache.hadoop.ozone.om.request.OMClientRequest) OzoneObj(org.apache.hadoop.ozone.security.acl.OzoneObj) RDBStore(org.apache.hadoop.hdds.utils.db.RDBStore) OZONE_DIR_DELETING_SERVICE_INTERVAL(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL) OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT(org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT) HashMap(java.util.HashMap) BackgroundService(org.apache.hadoop.hdds.utils.BackgroundService) OZONE_CLIENT_LIST_TRASH_KEYS_MAX(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX) OmUtils(org.apache.hadoop.ozone.OmUtils) Stack(java.util.Stack) 
ResultCodes(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes) OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT) HashSet(java.util.HashSet) PartKeyInfo(org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo) OmMultipartUploadList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneAclUtil(org.apache.hadoop.ozone.om.helpers.OzoneAclUtil) Server(org.apache.hadoop.ipc.Server) HDDS_BLOCK_TOKEN_ENABLED_DEFAULT(org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT) DFS_CONTAINER_RATIS_ENABLED_KEY(org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY) BUCKET_LOCK(org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) SCM_GET_PIPELINE_EXCEPTION(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) TimeUnit(java.util.concurrent.TimeUnit) KEY(org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) IAccessAuthorizer(org.apache.hadoop.ozone.security.acl.IAccessAuthorizer) StorageContainerLocationProtocol(org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol) OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT) Time(org.apache.hadoop.util.Time) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) Table(org.apache.hadoop.hdds.utils.db.Table) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) 
OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) OzoneFileStatus(org.apache.hadoop.ozone.om.helpers.OzoneFileStatus)

Example 3 with TableIterator

use of org.apache.hadoop.hdds.utils.db.TableIterator in project ozone by apache.

In the class OmMetadataManagerImpl, the method getPendingDeletionKeys:

@Override
public List<BlockGroup> getPendingDeletionKeys(final int keyCount) throws IOException {
    // Collect up to keyCount deleted-table entries as BlockGroups.
    List<BlockGroup> keyBlocksList = Lists.newArrayList();
    try (TableIterator<String, ? extends KeyValue<String, RepeatedOmKeyInfo>> deletedIter = getDeletedTable().iterator()) {
        int collected = 0;
        while (deletedIter.hasNext() && collected < keyCount) {
            KeyValue<String, RepeatedOmKeyInfo> entry = deletedIter.next();
            if (entry == null) {
                continue;
            }
            // A deleted key may carry several OmKeyInfo versions; emit one
            // BlockGroup per version, each listing that version's block IDs.
            for (OmKeyInfo keyInfo : entry.getValue().getOmKeyInfoList()) {
                OmKeyLocationInfoGroup newestVersion = keyInfo.getLatestVersionLocations();
                List<BlockID> blockIds = newestVersion.getLocationList().stream()
                    .map(loc -> new BlockID(loc.getContainerID(), loc.getLocalID()))
                    .collect(Collectors.toList());
                keyBlocksList.add(BlockGroup.newBuilder()
                    .setKeyName(entry.getKey())
                    .addAllBlockIDs(blockIds)
                    .build());
                collected++;
            }
        }
    }
    return keyBlocksList;
}
Also used : TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) OmVolumeArgsCodec(org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec) LoggerFactory(org.slf4j.LoggerFactory) OzoneFSUtils(org.apache.hadoop.ozone.om.helpers.OzoneFSUtils) OmDirectoryInfoCodec(org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec) DBStore(org.apache.hadoop.hdds.utils.db.DBStore) OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT) StringUtils(org.apache.commons.lang3.StringUtils) RepeatedOmKeyInfoCodec(org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec) OM_KEY_PREFIX(org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) Duration(java.time.Duration) Map(java.util.Map) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) OzoneManagerLock(org.apache.hadoop.ozone.om.lock.OzoneManagerLock) OzoneTokenIdentifier(org.apache.hadoop.ozone.security.OzoneTokenIdentifier) OmMultipartUpload(org.apache.hadoop.ozone.om.helpers.OmMultipartUpload) DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) TokenIdentifierCodec(org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec) Set(java.util.Set) Instant(java.time.Instant) Collectors(java.util.stream.Collectors) List(java.util.List) S3SecretValueCodec(org.apache.hadoop.ozone.om.codec.S3SecretValueCodec) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) RocksDBConfiguration(org.apache.hadoop.hdds.utils.db.RocksDBConfiguration) OM_DB_NAME(org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME) KeyValue(org.apache.hadoop.hdds.utils.db.Table.KeyValue) TransactionInfo(org.apache.hadoop.hdds.utils.TransactionInfo) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OmPrefixInfo(org.apache.hadoop.ozone.om.helpers.OmPrefixInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) HashMap(java.util.HashMap) 
OmUtils(org.apache.hadoop.ozone.OmUtils) TreeSet(java.util.TreeSet) ResultCodes(org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes) StringUtil(org.eclipse.jetty.util.StringUtil) ArrayList(java.util.ArrayList) Strings(com.google.common.base.Strings) UserVolumeInfoCodec(org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec) Lists(com.google.common.collect.Lists) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) S3SecretValue(org.apache.hadoop.ozone.om.helpers.S3SecretValue) DB_TRANSIENT_MARKER(org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) CacheType(org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType) BlockGroup(org.apache.hadoop.ozone.common.BlockGroup) IOException(java.io.IOException) OzoneConsts(org.apache.hadoop.ozone.OzoneConsts) OmKeyInfoCodec(org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec) File(java.io.File) OmPrefixInfoCodec(org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) TransactionInfoCodec(org.apache.hadoop.hdds.utils.TransactionInfoCodec) ChronoUnit(java.time.temporal.ChronoUnit) TreeMap(java.util.TreeMap) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) Paths(java.nio.file.Paths) OmBucketInfoCodec(org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec) Table(org.apache.hadoop.hdds.utils.db.Table) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) ExitUtils(org.apache.ratis.util.ExitUtils) OmMultipartKeyInfoCodec(org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec) OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS(org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS) PersistedUserVolumeInfo(org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo) 
VisibleForTesting(com.google.common.annotations.VisibleForTesting) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) OmMultipartKeyInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockGroup(org.apache.hadoop.ozone.common.BlockGroup) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)

Example 4 with TableIterator

use of org.apache.hadoop.hdds.utils.db.TableIterator in project ozone by apache.

In the class TableCountTask, the method reprocess:

/**
 * Iterate the rows of each table in OM snapshot DB and calculate the
 * counts for each table.
 *
 * @param omMetadataManager OM Metadata instance.
 * @return Pair of task name and success flag; false if any table failed.
 */
@Override
public Pair<String, Boolean> reprocess(OMMetadataManager omMetadataManager) {
    for (String tableName : getTaskTables()) {
        // Abort on the first table that fails, mirroring the task contract.
        if (!countAndStoreTable(omMetadataManager, tableName)) {
            return new ImmutablePair<>(getTaskName(), false);
        }
    }
    LOG.info("Completed a 'reprocess' run of TableCountTask.");
    return new ImmutablePair<>(getTaskName(), true);
}

/**
 * Counts the rows of a single table and upserts the result into the
 * global stats table.
 *
 * @return true on success, false if the table could not be read.
 */
private boolean countAndStoreTable(OMMetadataManager omMetadataManager, String tableName) {
    Table table = omMetadataManager.getTable(tableName);
    try (TableIterator keyIter = table.iterator()) {
        long rowCount = getCount(keyIter);
        ReconUtils.upsertGlobalStatsTable(sqlConfiguration, globalStatsDao, getRowKeyFromTable(tableName), rowCount);
        return true;
    } catch (IOException ioEx) {
        LOG.error("Unable to populate Table Count in Recon DB.", ioEx);
        return false;
    }
}
Also used : TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) Table(org.apache.hadoop.hdds.utils.db.Table) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) IOException(java.io.IOException)

Aggregations

Table (org.apache.hadoop.hdds.utils.db.Table)4 TableIterator (org.apache.hadoop.hdds.utils.db.TableIterator)4 IOException (java.io.IOException)3 VisibleForTesting (com.google.common.annotations.VisibleForTesting)2 Strings (com.google.common.base.Strings)2 Paths (java.nio.file.Paths)2 Instant (java.time.Instant)2 ArrayList (java.util.ArrayList)2 HashMap (java.util.HashMap)2 Iterator (java.util.Iterator)2 List (java.util.List)2 Map (java.util.Map)2 Set (java.util.Set)2 TreeMap (java.util.TreeMap)2 TreeSet (java.util.TreeSet)2 Collectors (java.util.stream.Collectors)2 StringUtils (org.apache.commons.lang3.StringUtils)2 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)2 Preconditions (com.google.common.base.Preconditions)1 Lists (com.google.common.collect.Lists)1