Example 1 with FileCountBySize

Use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.

From class TestFileSizeCountTask, method testReprocess.

@Test
public void testReprocess() throws IOException {
    OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
    given(omKeyInfo1.getKeyName()).willReturn("key1");
    given(omKeyInfo1.getVolumeName()).willReturn("vol1");
    given(omKeyInfo1.getBucketName()).willReturn("bucket1");
    given(omKeyInfo1.getDataSize()).willReturn(1000L);
    OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
    given(omKeyInfo2.getKeyName()).willReturn("key2");
    given(omKeyInfo2.getVolumeName()).willReturn("vol1");
    given(omKeyInfo2.getBucketName()).willReturn("bucket1");
    given(omKeyInfo2.getDataSize()).willReturn(100000L);
    OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
    given(omKeyInfo3.getKeyName()).willReturn("key3");
    given(omKeyInfo3.getVolumeName()).willReturn("vol1");
    given(omKeyInfo3.getBucketName()).willReturn("bucket1");
    // 4PB
    given(omKeyInfo3.getDataSize()).willReturn(1125899906842624L * 4);
    // Mock the OM metadata manager and a key table whose iterator yields the
    // three mocked keys above.
    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
    TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
    TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable.TypedTableIterator.class);
    TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
    when(keyTable.iterator()).thenReturn(mockKeyIter);
    when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
    when(mockKeyIter.hasNext()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
    when(mockKeyIter.next()).thenReturn(mockKeyValue);
    when(mockKeyValue.getValue()).thenReturn(omKeyInfo1).thenReturn(omKeyInfo2).thenReturn(omKeyInfo3);
    // Reprocess may run against a table that already has entries; insert one
    // to verify that reprocess truncates the table and rebuilds the counts.
    fileCountBySizeDao.insert(new FileCountBySize("vol1", "bucket1", 1024L, 10L));
    Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
    assertTrue(result.getRight());
    assertEquals(3, fileCountBySizeDao.count());
    Record3<String, String, Long> recordToFind = dslContext
        .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
            FILE_COUNT_BY_SIZE.FILE_SIZE)
        .value1("vol1").value2("bucket1").value3(1024L);
    // The pre-existing count of 10 was wiped when reprocess truncated the
    // table; only key1 falls in the 1024 bucket now.
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // file size upper bound for 100000L is 131072L (next highest power of 2)
    recordToFind.value3(131072L);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
    // file size upper bound for 4PB is Long.MAX_VALUE
    recordToFind.value3(Long.MAX_VALUE);
    assertEquals(1L, fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) Test(org.junit.Test)
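
The assertions above hinge on how FileSizeCountTask bins each key's data size into a power-of-two upper bound. A minimal sketch of that binning, assuming the method name and the 1 PB cutoff (the real task collapses everything at or above the cutoff into a single Long.MAX_VALUE bucket, which is where the 4 PB key3 lands):

// Hedged sketch; not the actual FileSizeCountTask implementation.
static long fileSizeUpperBound(long dataSize) {
    // Assumed cutoff: 1 PB (2^50 bytes); larger sizes share one bucket.
    final long onePB = 1125899906842624L;
    if (dataSize >= onePB) {
        return Long.MAX_VALUE;
    }
    // Next power of two at or above dataSize (positive sizes assumed):
    // 1000 -> 1024, 100000 -> 131072.
    long bound = Long.highestOneBit(dataSize);
    return bound == dataSize ? bound : bound << 1;
}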

Example 2 with FileCountBySize

Use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.

From class TestUtilizationSchemaDefinition, method testFileCountBySizeCRUDOperations.

@Test
public void testFileCountBySizeCRUDOperations() throws SQLException {
    Connection connection = getConnection();
    DatabaseMetaData metaData = connection.getMetaData();
    ResultSet resultSet = metaData.getTables(null, null, FILE_COUNT_BY_SIZE_TABLE_NAME, null);
    while (resultSet.next()) {
        Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME, resultSet.getString("TABLE_NAME"));
    }
    FileCountBySizeDao fileCountBySizeDao = getDao(FileCountBySizeDao.class);
    UtilizationSchemaDefinition utilizationSchemaDefinition = getSchemaDefinition(UtilizationSchemaDefinition.class);
    FileCountBySize newRecord = new FileCountBySize();
    newRecord.setVolume("vol1");
    newRecord.setBucket("bucket1");
    newRecord.setFileSize(1024L);
    newRecord.setCount(1L);
    fileCountBySizeDao.insert(newRecord);
    Record3<String, String, Long> recordToFind = utilizationSchemaDefinition
        .getDSLContext()
        .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
            FILE_COUNT_BY_SIZE.FILE_SIZE)
        .value1("vol1").value2("bucket1").value3(1024L);
    FileCountBySize dbRecord = fileCountBySizeDao.findById(recordToFind);
    assertEquals(Long.valueOf(1), dbRecord.getCount());
    dbRecord.setCount(2L);
    fileCountBySizeDao.update(dbRecord);
    dbRecord = fileCountBySizeDao.findById(recordToFind);
    assertEquals(Long.valueOf(2), dbRecord.getCount());
    // Sanity-check the table metadata: the composite primary key over
    // (volume, bucket, file_size) should be among the unique keys.
    Table<FileCountBySizeRecord> fileCountBySizeRecordTable = fileCountBySizeDao.getTable();
    List<UniqueKey<FileCountBySizeRecord>> tableKeys = fileCountBySizeRecordTable.getKeys();
    for (UniqueKey key : tableKeys) {
        // Each unique key should expose a name (assumes a static import of
        // org.junit.Assert.assertNotNull).
        String name = key.getName();
        assertNotNull(name);
    }
}
Also used : UtilizationSchemaDefinition(org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition) Connection(java.sql.Connection) FileCountBySizeDao(org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao) DatabaseMetaData(java.sql.DatabaseMetaData) UniqueKey(org.jooq.UniqueKey) ResultSet(java.sql.ResultSet) FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) FileCountBySizeRecord(org.hadoop.ozone.recon.schema.tables.records.FileCountBySizeRecord) Test(org.junit.Test)
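
The test exercises create, read, and update; a delete step would round out the CRUD coverage. A hedged continuation using the same composite key (jOOQ's generated DAOs expose deleteById for the table's primary-key record type, though the exact generated signature depends on the schema):

// Hypothetical continuation, not part of the original test.
fileCountBySizeDao.deleteById(recordToFind);
// findById should now return null (assumes a static import of
// org.junit.Assert.assertNull).
assertNull(fileCountBySizeDao.findById(recordToFind));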

Example 3 with FileCountBySize

Use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.

From class UtilizationEndpoint, method getFileCounts.

/**
 * Return the file counts from the Recon DB.
 * @return {@link Response}
 */
@GET
@Path("/fileCount")
public Response getFileCounts(
        @QueryParam(RECON_QUERY_VOLUME) String volume,
        @QueryParam(RECON_QUERY_BUCKET) String bucket,
        @QueryParam(RECON_QUERY_FILE_SIZE) long fileSize) {
    DSLContext dslContext = utilizationSchemaDefinition.getDSLContext();
    List<FileCountBySize> resultSet;
    if (volume != null && bucket != null && fileSize > 0) {
        // Exact lookup by the (volume, bucket, fileSize) composite key.
        Record3<String, String, Long> recordToFind = dslContext
            .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
                FILE_COUNT_BY_SIZE.FILE_SIZE)
            .value1(volume).value2(bucket).value3(fileSize);
        FileCountBySize record = fileCountBySizeDao.findById(recordToFind);
        resultSet = record != null ? Collections.singletonList(record) : Collections.emptyList();
    } else if (volume != null && bucket != null) {
        resultSet = dslContext.select().from(FILE_COUNT_BY_SIZE)
            .where(FILE_COUNT_BY_SIZE.VOLUME.eq(volume))
            .and(FILE_COUNT_BY_SIZE.BUCKET.eq(bucket))
            .fetchInto(FileCountBySize.class);
    } else if (volume != null) {
        resultSet = fileCountBySizeDao.fetchByVolume(volume);
    } else {
        // fetch all records
        resultSet = fileCountBySizeDao.findAll();
    }
    return Response.ok(resultSet).build();
}
Also used : DSLContext(org.jooq.DSLContext) FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) Path(javax.ws.rs.Path) GET(javax.ws.rs.GET)
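
Over HTTP the resource answers GET requests on /fileCount with optional volume, bucket, and fileSize query parameters. A hedged example against a default Recon deployment (the /api/v1/utilization prefix and the 9888 port are assumptions about how Recon mounts the resource, and the JSON simply mirrors the FileCountBySize POJO's fields):

GET http://recon-host:9888/api/v1/utilization/fileCount?volume=vol1&bucket=bucket1&fileSize=131072

[{"volume":"vol1","bucket":"bucket1","fileSize":131072,"count":1}]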

Example 4 with FileCountBySize

Use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.

From class TestEndpoints, method testGetFileCounts.

@Test
public void testGetFileCounts() throws Exception {
    OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
    given(omKeyInfo1.getKeyName()).willReturn("key1");
    given(omKeyInfo1.getVolumeName()).willReturn("vol1");
    given(omKeyInfo1.getBucketName()).willReturn("bucket1");
    given(omKeyInfo1.getDataSize()).willReturn(1000L);
    OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
    given(omKeyInfo2.getKeyName()).willReturn("key2");
    given(omKeyInfo2.getVolumeName()).willReturn("vol1");
    given(omKeyInfo2.getBucketName()).willReturn("bucket1");
    given(omKeyInfo2.getDataSize()).willReturn(100000L);
    OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
    given(omKeyInfo3.getKeyName()).willReturn("key1");
    given(omKeyInfo3.getVolumeName()).willReturn("vol2");
    given(omKeyInfo3.getBucketName()).willReturn("bucket1");
    given(omKeyInfo3.getDataSize()).willReturn(1000L);
    // Mock the OM metadata manager and a key table iterator over the three
    // mocked keys above.
    OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
    TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
    TypedTable.TypedTableIterator mockKeyIter = mock(TypedTable.TypedTableIterator.class);
    TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
    when(keyTable.iterator()).thenReturn(mockKeyIter);
    when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
    when(mockKeyIter.hasNext()).thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
    when(mockKeyIter.next()).thenReturn(mockKeyValue);
    when(mockKeyValue.getValue()).thenReturn(omKeyInfo1).thenReturn(omKeyInfo2).thenReturn(omKeyInfo3);
    Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
    assertTrue(result.getRight());
    assertEquals(3, fileCountBySizeDao.count());
    // Test with no query params: fetch all records
    Response response = utilizationEndpoint.getFileCounts(null, null, 0);
    List<FileCountBySize> resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(3, resultSet.size());
    assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol1")
        && o.getBucket().equals("bucket1") && o.getFileSize() == 1024L
        && o.getCount() == 1L));
    assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol1")
        && o.getBucket().equals("bucket1") && o.getFileSize() == 131072L
        && o.getCount() == 1L));
    assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol2")
        && o.getBucket().equals("bucket1") && o.getFileSize() == 1024L
        && o.getCount() == 1L));
    // Test for "volume" query param
    response = utilizationEndpoint.getFileCounts("vol1", null, 0);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(2, resultSet.size());
    assertTrue(resultSet.stream().allMatch(o -> o.getVolume().equals("vol1")));
    // Test for non-existent volume
    response = utilizationEndpoint.getFileCounts("vol", null, 0);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(0, resultSet.size());
    // Test for "volume" + "bucket" query param
    response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 0);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(2, resultSet.size());
    assertTrue(resultSet.stream().allMatch(o -> o.getVolume().equals("vol1") && o.getBucket().equals("bucket1")));
    // Test for non-existent bucket
    response = utilizationEndpoint.getFileCounts("vol1", "bucket", 0);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(0, resultSet.size());
    // Test for "volume" + "bucket" + "fileSize" query params
    response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 131072);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(1, resultSet.size());
    FileCountBySize o = resultSet.get(0);
    assertTrue(o.getVolume().equals("vol1") && o.getBucket().equals("bucket1") && o.getFileSize() == 131072);
    // Test for non-existent fileSize
    response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 1310725);
    resultSet = (List<FileCountBySize>) response.getEntity();
    assertEquals(0, resultSet.size());
}
Also used : HttpURLConnection(java.net.HttpURLConnection) MetricsServiceProviderFactory(org.apache.hadoop.ozone.recon.MetricsServiceProviderFactory) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) ReconUtils(org.apache.hadoop.ozone.recon.ReconUtils) OMMetadataManagerTestUtils.writeDataToOm(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm) Pair(org.apache.commons.lang3.tuple.Pair) BDDMockito.given(org.mockito.BDDMockito.given) StorageContainerServiceProvider(org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider) DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata) GLOBAL_STATS(org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) GlobalStatsDao(org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao) OzoneManagerServiceProviderImpl(org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) ContainerHealthSchemaManager(org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) FileCountBySizeDao(org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao) TableCountTask(org.apache.hadoop.ozone.recon.tasks.TableCountTask) UriInfo(javax.ws.rs.core.UriInfo) PipelineReport(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport) Mockito.mock(org.mockito.Mockito.mock) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) SCMHeartbeatRequestProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) ClusterStateResponse(org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse) Callable(java.util.concurrent.Callable) ArgumentMatchers.anyBoolean(org.mockito.ArgumentMatchers.anyBoolean) OzoneStorageContainerManager(org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager) UtilizationSchemaDefinition(org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition) LifeCycleState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState) PipelineReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto) Before(org.junit.Before) OMMetadataManagerTestUtils.getRandomPipeline(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) OMMetadataManagerTestUtils.initializeNewOmMetadataManager(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) Assert.assertTrue(org.junit.Assert.assertTrue) FileUtils(org.apache.commons.io.FileUtils) Test(org.junit.Test) IOException(java.io.IOException) File(java.io.File) NodeState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState) OMMetadataManagerTestUtils.getTestReconOmMetadataManager(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager) ReconStorageContainerManagerFacade(org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade) Assert(org.junit.Assert) Assert.assertEquals(org.junit.Assert.assertEquals) 
OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) TypedTable(org.apache.hadoop.hdds.utils.db.TypedTable) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) PipelineMetadata(org.apache.hadoop.ozone.recon.api.types.PipelineMetadata) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest) FileSizeCountTask(org.apache.hadoop.ozone.recon.tasks.FileSizeCountTask) PROMETHEUS_INSTANT_QUERY_API(org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) BucketLayout(org.apache.hadoop.ozone.om.helpers.BucketLayout) DatanodeDetailsProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto) URLConnectionFactory(org.apache.hadoop.hdfs.web.URLConnectionFactory) DSLContext(org.jooq.DSLContext) StorageTypeProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto) StorageContainerServiceProviderImpl(org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl) URI(java.net.URI) ExtendedDatanodeDetailsProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) ReplicationFactor(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor) UUID(java.util.UUID) ReconOMMetadataManager(org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager) List(java.util.List) Response(javax.ws.rs.core.Response) UpgradeUtils.defaultLayoutVersionProto(org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) ContainerReplicaProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto) PipelineID(org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID) NodeOperationalState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState) ServletOutputStream(javax.servlet.ServletOutputStream) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) LinkedList(java.util.LinkedList) ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) LambdaTestUtils(org.apache.ozone.test.LambdaTestUtils) HttpServletResponse(javax.servlet.http.HttpServletResponse) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Mockito.when(org.mockito.Mockito.when) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) PipelinesResponse(org.apache.hadoop.ozone.recon.api.types.PipelinesResponse) Mockito.verify(org.mockito.Mockito.verify) Configuration(org.jooq.Configuration) OmMetadataManagerImpl(org.apache.hadoop.ozone.om.OmMetadataManagerImpl) StorageContainerLocationProtocol(org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol) ReconTestInjector(org.apache.hadoop.ozone.recon.ReconTestInjector) InputStream(java.io.InputStream) 

Example 5 with FileCountBySize

Use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.

From class FileSizeCountTask, method writeCountsToDB.

/**
 * Populate the Recon DB with the calculated file size counts via the DAO.
 */
private void writeCountsToDB(boolean isDbTruncated, Map<FileSizeCountKey, Long> fileSizeCountMap) {
    fileSizeCountMap.keySet().forEach((FileSizeCountKey key) -> {
        FileCountBySize newRecord = new FileCountBySize();
        newRecord.setVolume(key.volume);
        newRecord.setBucket(key.bucket);
        newRecord.setFileSize(key.fileSizeUpperBound);
        newRecord.setCount(fileSizeCountMap.get(key));
        if (!isDbTruncated) {
            // The table already has data: read the current count for this key
            // and add this batch's delta before updating.
            Record3<String, String, Long> recordToFind = dslContext
                .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
                    FILE_COUNT_BY_SIZE.FILE_SIZE)
                .value1(key.volume).value2(key.bucket).value3(key.fileSizeUpperBound);
            FileCountBySize fileCountRecord = fileCountBySizeDao.findById(recordToFind);
            if (fileCountRecord == null && newRecord.getCount() > 0L) {
                // insert new row only for non-zero counts.
                fileCountBySizeDao.insert(newRecord);
            } else if (fileCountRecord != null) {
                newRecord.setCount(fileCountRecord.getCount() + fileSizeCountMap.get(key));
                fileCountBySizeDao.update(newRecord);
            }
        } else if (newRecord.getCount() > 0) {
            // insert new row only for non-zero counts.
            fileCountBySizeDao.insert(newRecord);
        }
    });
}
Also used : FileCountBySize(org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize)
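
writeCountsToDB is keyed by FileSizeCountKey, which is not shown in this example; for the map lookups above to work it must implement value equality over (volume, bucket, fileSizeUpperBound). A hedged sketch with field names inferred from the usage above (presumably an inner class of FileSizeCountTask):

// Sketch of the key type implied by key.volume, key.bucket, and
// key.fileSizeUpperBound above; assumed, not copied from the source.
private static class FileSizeCountKey {
    final String volume;
    final String bucket;
    final Long fileSizeUpperBound;

    FileSizeCountKey(String volume, String bucket, Long fileSizeUpperBound) {
        this.volume = volume;
        this.bucket = bucket;
        this.fileSizeUpperBound = fileSizeUpperBound;
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof FileSizeCountKey)) {
            return false;
        }
        FileSizeCountKey other = (FileSizeCountKey) obj;
        return volume.equals(other.volume)
            && bucket.equals(other.bucket)
            && fileSizeUpperBound.equals(other.fileSizeUpperBound);
    }

    @Override
    public int hashCode() {
        return java.util.Objects.hash(volume, bucket, fileSizeUpperBound);
    }
}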

Aggregations

FileCountBySize (org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize) 5
Test (org.junit.Test) 2
File (java.io.File) 1
IOException (java.io.IOException) 1
InputStream (java.io.InputStream) 1
HttpURLConnection (java.net.HttpURLConnection) 1
URI (java.net.URI) 1
Connection (java.sql.Connection) 1
DatabaseMetaData (java.sql.DatabaseMetaData) 1
ResultSet (java.sql.ResultSet) 1
LinkedList (java.util.LinkedList) 1
List (java.util.List) 1
UUID (java.util.UUID) 1
Callable (java.util.concurrent.Callable) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
ServletOutputStream (javax.servlet.ServletOutputStream) 1
HttpServletResponse (javax.servlet.http.HttpServletResponse) 1
GET (javax.ws.rs.GET) 1
Path (javax.ws.rs.Path) 1
Response (javax.ws.rs.core.Response) 1