Example 1 with TypedTable

Use of org.apache.hadoop.hdds.utils.db.TypedTable in project ozone by Apache.

From class OmMetadataManagerImpl, method isVolumeEmpty.

/**
 * Given a volume, check if it is empty, i.e., there are no buckets inside it.
 * We iterate over the bucket table and check whether any key starts with the
 * volume prefix. We actually look for /volume/, since without the trailing
 * slash we might match some other volume.
 * <p>
 * For example, vol1 and vol122 would both match the prefix /vol1; to avoid
 * that we look for /vol1/.
 *
 * @param volume - Volume name
 * @return true if the volume is empty
 */
@Override
public boolean isVolumeEmpty(String volume) throws IOException {
    String volumePrefix = getVolumeKey(volume + OM_KEY_PREFIX);
    // First check in bucket table cache.
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>>> iterator = ((TypedTable<String, OmBucketInfo>) bucketTable).cacheIterator();
    while (iterator.hasNext()) {
        Map.Entry<CacheKey<String>, CacheValue<OmBucketInfo>> entry = iterator.next();
        String key = entry.getKey().getCacheKey();
        OmBucketInfo omBucketInfo = entry.getValue().getCacheValue();
        // Make sure the entry is not for a delete-bucket request
        // (a null cache value marks a pending delete).
        if (key.startsWith(volumePrefix) && omBucketInfo != null) {
            return false;
        }
    }
    try (TableIterator<String, ? extends KeyValue<String, OmBucketInfo>> bucketIter = bucketTable.iterator()) {
        KeyValue<String, OmBucketInfo> kv = bucketIter.seek(volumePrefix);
        if (kv != null) {
            // Check that the entry in the DB is not marked for delete. This
            // can happen when the entry is marked for delete in the cache but
            // not yet flushed to the DB.
            CacheValue<OmBucketInfo> cacheValue = bucketTable.getCacheValue(new CacheKey<>(kv.getKey()));
            if (cacheValue != null) {
                if (kv.getKey().startsWith(volumePrefix) && cacheValue.getCacheValue() != null) {
                    // We found at least one bucket with this volume prefix.
                    return false;
                }
            } else {
                if (kv.getKey().startsWith(volumePrefix)) {
                    // We found at least one bucket with this volume prefix.
                    return false;
                }
            }
        }
    }
    return true;
}
Also used: OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)
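
The trailing-slash detail described in the javadoc above is easy to verify in isolation. A minimal standalone sketch (volume and bucket names hypothetical):

public class PrefixCheckDemo {
    public static void main(String[] args) {
        // Without the trailing slash, the prefix for vol1 also matches vol122.
        System.out.println("/vol122/bucketA".startsWith("/vol1"));  // true (false positive)
        // With the trailing slash, only real children of vol1 match.
        System.out.println("/vol122/bucketA".startsWith("/vol1/")); // false
        System.out.println("/vol1/bucketA".startsWith("/vol1/"));   // true
    }
}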

Example 2 with TypedTable

Use of org.apache.hadoop.hdds.utils.db.TypedTable in project ozone by Apache.

From class TestFileSizeCountTask, method testReprocessAtScale.

@Test
public void testReprocessAtScale() throws IOException {
    // Generate mocks for 2 volumes, 500 buckets per volume,
    // and 42 keys in each bucket.
    List<OmKeyInfo> omKeyInfoList = new ArrayList<>();
    List<Boolean> hasNextAnswer = new ArrayList<>();
    for (int volIndex = 1; volIndex <= 2; volIndex++) {
        for (int bktIndex = 1; bktIndex <= 500; bktIndex++) {
            for (int keyIndex = 1; keyIndex <= 42; keyIndex++) {
                OmKeyInfo omKeyInfo = mock(OmKeyInfo.class);
                given(omKeyInfo.getKeyName()).willReturn("key" + keyIndex);
                given(omKeyInfo.getVolumeName()).willReturn("vol" + volIndex);
                given(omKeyInfo.getBucketName()).willReturn("bucket" + bktIndex);
                // Place one key in each of the 42 file-size bins: key k gets
                // size 2^(k+9) - 1 bytes, i.e. just under successive powers
                // of two starting at 1 KB.
                long fileSize = (long) Math.pow(2, keyIndex + 9) - 1L;
                given(omKeyInfo.getDataSize()).willReturn(fileSize);
                omKeyInfoList.add(omKeyInfo);
                hasNextAnswer.add(true);
            }
        }
    }
    hasNextAnswer.add(false);
    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
    TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
    TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable.TypedTableIterator.class);
    TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
    when(keyTable.iterator()).thenReturn(mockKeyIter);
    when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
    when(mockKeyIter.hasNext()).thenAnswer(AdditionalAnswers.returnsElementsOf(hasNextAnswer));
    when(mockKeyIter.next()).thenReturn(mockKeyValue);
    when(mockKeyValue.getValue()).thenAnswer(AdditionalAnswers.returnsElementsOf(omKeyInfoList));
    Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
    assertTrue(result.getRight());
    // 2 volumes * 500 buckets * 42 bins = 42000 rows
    assertEquals(42000, fileCountBySizeDao.count());
    Record3<String, String, Long> recordToFind = dslContext.newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET, FILE_COUNT_BY_SIZE.FILE_SIZE).value1("vol1").value2("bucket1").value3(1024L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // The keyIndex 8 key has size 131071L (2^17 - 1), which falls in
    // the bin with upper bound 131072L.
    recordToFind.value1("vol1");
    recordToFind.value3(131072L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    recordToFind.value2("bucket500");
    recordToFind.value3(Long.MAX_VALUE);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
Also used: ArrayList(java.util.ArrayList) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) Test(org.junit.Test)
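
The test drives a single mocked iterator with a long scripted sequence of answers. A minimal standalone sketch of the Mockito AdditionalAnswers.returnsElementsOf pattern it relies on (the iterator and values here are hypothetical); note that once the collection is exhausted, Mockito keeps returning its last element:

import static org.mockito.AdditionalAnswers.returnsElementsOf;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Arrays;
import java.util.Iterator;

public class ReturnsElementsDemo {
    public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        Iterator<String> iter = (Iterator<String>) mock(Iterator.class);
        // Each call to the stubbed method consumes the next element.
        when(iter.hasNext()).thenAnswer(returnsElementsOf(Arrays.asList(true, true, false)));
        when(iter.next()).thenAnswer(returnsElementsOf(Arrays.asList("a", "b")));
        while (iter.hasNext()) {
            System.out.println(iter.next()); // prints "a" then "b"
        }
    }
}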

Example 3 with TypedTable

Use of org.apache.hadoop.hdds.utils.db.TypedTable in project ozone by Apache.

From class TestFileSizeCountTask, method testReprocess.

@Test
public void testReprocess() throws IOException {
    OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
    given(omKeyInfo1.getKeyName()).willReturn("key1");
    given(omKeyInfo1.getVolumeName()).willReturn("vol1");
    given(omKeyInfo1.getBucketName()).willReturn("bucket1");
    given(omKeyInfo1.getDataSize()).willReturn(1000L);
    OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
    given(omKeyInfo2.getKeyName()).willReturn("key2");
    given(omKeyInfo2.getVolumeName()).willReturn("vol1");
    given(omKeyInfo2.getBucketName()).willReturn("bucket1");
    given(omKeyInfo2.getDataSize()).willReturn(100000L);
    OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
    given(omKeyInfo3.getKeyName()).willReturn("key3");
    given(omKeyInfo3.getVolumeName()).willReturn("vol1");
    given(omKeyInfo3.getBucketName()).willReturn("bucket1");
    // 4PB
    given(omKeyInfo3.getDataSize()).willReturn(1125899906842624L * 4);
    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
    TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
    TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable.TypedTableIterator.class);
    TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
    when(keyTable.iterator()).thenReturn(mockKeyIter);
    when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
    when(mockKeyIter.hasNext()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
    when(mockKeyIter.next()).thenReturn(mockKeyValue);
    when(mockKeyValue.getValue()).thenReturn(omKeyInfo1).thenReturn(omKeyInfo2).thenReturn(omKeyInfo3);
    // Reprocess can be called on a table that already has entries. Insert
    // one to simulate that; the assertions below verify the old row is
    // replaced rather than accumulated.
    fileCountBySizeDao.insert(new FileCountBySize("vol1", "bucket1", 1024L, 10L));
    Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
    assertTrue(result.getRight());
    assertEquals(3, fileCountBySizeDao.count());
    Record3<String, String, Long> recordToFind = dslContext.newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET, FILE_COUNT_BY_SIZE.FILE_SIZE).value1("vol1").value2("bucket1").value3(1024L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // file size upper bound for 100000L is 131072L (next highest power of 2)
    recordToFind.value3(131072L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // file size upper bound for 4PB is Long.MAX_VALUE
    recordToFind.value3(Long.MAX_VALUE);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
Also used: OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) Test(org.junit.Test)
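
Both reprocess tests above assert against power-of-two file-size bins. A minimal sketch of that binning rule, assuming bins double from 1 KB up to a 1 PB cutoff beyond which everything lands in the Long.MAX_VALUE bin (the cutoff is an assumption consistent with the 4 PB assertion, not a value quoted from Recon; binUpperBound is a hypothetical name, not Recon's API):

public class FileSizeBins {
    // Hypothetical helper: map a file size to its power-of-two bin upper bound.
    static long binUpperBound(long dataSize) {
        final long maxTrackedBin = 1125899906842624L; // 1 PB cutoff (assumed)
        long bound = 1024L;                           // smallest bin: 1 KB
        while (bound < dataSize) {
            if (bound >= maxTrackedBin) {
                return Long.MAX_VALUE;                // catch-all bin
            }
            bound <<= 1;                              // double until we reach/pass the size
        }
        return bound;
    }

    public static void main(String[] args) {
        System.out.println(binUpperBound(1000L));                 // 1024
        System.out.println(binUpperBound(100000L));               // 131072
        System.out.println(binUpperBound(4 * 1125899906842624L)); // Long.MAX_VALUE
    }
}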

Example 4 with TypedTable

Use of org.apache.hadoop.hdds.utils.db.TypedTable in project ozone by Apache.

From class TestTableCountTask, method testReprocess.

@Test
public void testReprocess() {
    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
    // Mock 5 rows in each table and test the count
    for (String tableName : tableCountTask.getTaskTables()) {
        TypedTable<String, Object> table = mock(TypedTable.class);
        TypedTable.TypedTableIterator mockIter = mock(TypedTable.TypedTableIterator.class);
        when(table.iterator()).thenReturn(mockIter);
        when(omMetadataManager.getTable(tableName)).thenReturn(table);
        when(mockIter.hasNext()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
    }
    Pair<String, Boolean> result = tableCountTask.reprocess(omMetadataManager);
    assertTrue(result.getRight());
    assertEquals(5L, getCountForTable(KEY_TABLE));
    assertEquals(5L, getCountForTable(VOLUME_TABLE));
    assertEquals(5L, getCountForTable(BUCKET_TABLE));
    assertEquals(5L, getCountForTable(OPEN_KEY_TABLE));
    assertEquals(5L, getCountForTable(DELETED_TABLE));
}
Also used: ReconOMMetadataManager(org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) Test(org.junit.Test)
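
The five chained thenReturn(true) calls above can be generated instead of spelled out. A hedged helper sketch (IteratorStubs and stubRowCount are hypothetical, not part of the test class):

import static org.mockito.Mockito.when;

import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.mockito.stubbing.OngoingStubbing;

class IteratorStubs {
    // Stub a mocked table iterator to report exactly `rows` elements,
    // replacing a hand-written chain of thenReturn(true) calls.
    static void stubRowCount(TypedTable.TypedTableIterator iter, int rows) {
        OngoingStubbing<Boolean> stub = when(iter.hasNext());
        for (int i = 0; i < rows; i++) {
            stub = stub.thenReturn(true);
        }
        stub.thenReturn(false);
    }
}

With this in place, the stubbing in the loop body reduces to stubRowCount(mockIter, 5).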

Example 5 with TypedTable

Use of org.apache.hadoop.hdds.utils.db.TypedTable in project ozone by Apache.

From class OmMetadataManagerImpl, method isBucketEmpty.

/**
 * Given a volume/bucket, check if it is empty, i.e., there are no keys inside
 * it. The prefix is /volume/bucket/, and we look it up in the keyTable.
 *
 * @param volume - Volume name
 * @param bucket - Bucket name
 * @return true if the bucket is empty
 */
@Override
public boolean isBucketEmpty(String volume, String bucket) throws IOException {
    String keyPrefix = getBucketKey(volume, bucket);
    // First check in key table cache.
    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator = ((TypedTable<String, OmKeyInfo>) keyTable).cacheIterator();
    while (iterator.hasNext()) {
        Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry = iterator.next();
        String key = entry.getKey().getCacheKey();
        OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
        // Make sure the entry is not for a delete-key request
        // (a null cache value marks a pending delete).
        if (key.startsWith(keyPrefix) && omKeyInfo != null) {
            return false;
        }
    }
    try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter = keyTable.iterator()) {
        KeyValue<String, OmKeyInfo> kv = keyIter.seek(keyPrefix);
        if (kv != null) {
            // Check that the entry in the DB is not marked for delete. This
            // can happen when the entry is marked for delete in the cache but
            // not yet flushed to the DB.
            CacheValue<OmKeyInfo> cacheValue = keyTable.getCacheValue(new CacheKey<>(kv.getKey()));
            if (cacheValue != null) {
                if (kv.getKey().startsWith(keyPrefix) && cacheValue.getCacheValue() != null) {
                    // We found at least one key with this vol/bucket prefix.
                    return false;
                }
            } else {
                if (kv.getKey().startsWith(keyPrefix)) {
                    // We found at least one key with this vol/bucket prefix.
                    return false;
                }
            }
        }
    }
    return true;
}
Also used: OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) CacheValue(org.apache.hadoop.hdds.utils.db.cache.CacheValue) Map(java.util.Map) HashMap(java.util.HashMap) TreeMap(java.util.TreeMap) CacheKey(org.apache.hadoop.hdds.utils.db.cache.CacheKey)
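
Emptiness checks like this are typically consulted before destructive operations. A hypothetical caller sketch (the guard method is illustrative, and BUCKET_NOT_EMPTY is assumed to be the matching OMException result code, not quoted from the OM delete path):

import java.io.IOException;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;

class BucketDeleteGuard {
    // Hypothetical guard invoked before deleting a bucket.
    static void checkBucketDeletable(OMMetadataManager metadataManager,
            String volume, String bucket) throws IOException {
        if (!metadataManager.isBucketEmpty(volume, bucket)) {
            // BUCKET_NOT_EMPTY is an assumed result code for illustration.
            throw new OMException("Bucket is not empty: " + volume + "/" + bucket,
                OMException.ResultCodes.BUCKET_NOT_EMPTY);
        }
    }
}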

Aggregations

TypedTable (org.apache.hadoop.hdds.utils.db.TypedTable)6 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)4 OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager)3 AbstractReconSqlDBTest (org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest)3 Test (org.junit.Test)3 HashMap (java.util.HashMap)2 Map (java.util.Map)2 TreeMap (java.util.TreeMap)2 File (java.io.File)1 IOException (java.io.IOException)1 InputStream (java.io.InputStream)1 HttpURLConnection (java.net.HttpURLConnection)1 URI (java.net.URI)1 ArrayList (java.util.ArrayList)1 LinkedList (java.util.LinkedList)1 List (java.util.List)1 UUID (java.util.UUID)1 Callable (java.util.concurrent.Callable)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 ServletOutputStream (javax.servlet.ServletOutputStream)1