use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.
the class TestFileSizeCountTask method testReprocess.
@Test
public void testReprocess() throws IOException {
  OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
  given(omKeyInfo1.getKeyName()).willReturn("key1");
  given(omKeyInfo1.getVolumeName()).willReturn("vol1");
  given(omKeyInfo1.getBucketName()).willReturn("bucket1");
  given(omKeyInfo1.getDataSize()).willReturn(1000L);

  OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
  given(omKeyInfo2.getKeyName()).willReturn("key2");
  given(omKeyInfo2.getVolumeName()).willReturn("vol1");
  given(omKeyInfo2.getBucketName()).willReturn("bucket1");
  given(omKeyInfo2.getDataSize()).willReturn(100000L);

  OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
  given(omKeyInfo3.getKeyName()).willReturn("key3");
  given(omKeyInfo3.getVolumeName()).willReturn("vol1");
  given(omKeyInfo3.getBucketName()).willReturn("bucket1");
  // 4 PB
  given(omKeyInfo3.getDataSize()).willReturn(1125899906842624L * 4);

  OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
  TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
  TypedTable.TypedTableIterator mockKeyIter =
      mock(TypedTable.TypedTableIterator.class);
  TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
  when(keyTable.iterator()).thenReturn(mockKeyIter);
  when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
  when(mockKeyIter.hasNext())
      .thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
  when(mockKeyIter.next()).thenReturn(mockKeyValue);
  when(mockKeyValue.getValue())
      .thenReturn(omKeyInfo1).thenReturn(omKeyInfo2).thenReturn(omKeyInfo3);

  // Reprocess may be called on a table that already has entries; add one
  // beforehand to simulate that.
  fileCountBySizeDao.insert(new FileCountBySize("vol1", "bucket1", 1024L, 10L));

  Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
  assertTrue(result.getRight());
  assertEquals(3, fileCountBySizeDao.count());

  Record3<String, String, Long> recordToFind = dslContext
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());

  // The file size upper bound for 100000L is 131072L (next highest power of 2).
  recordToFind.value3(131072L);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());

  // The file size upper bound for 4 PB is Long.MAX_VALUE.
  recordToFind.value3(Long.MAX_VALUE);
  assertEquals(1L,
      fileCountBySizeDao.findById(recordToFind).getCount().longValue());
}
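The assertions above depend on keys being binned by file size: each key is counted under the smallest power-of-two bound that is at least its data size (1000 bytes lands in the 1024 bin, 100000 bytes in the 131072 bin), and very large keys fall into a catch-all Long.MAX_VALUE bin. Below is a minimal sketch of such a binning helper; the method name, the 1 KB first bin and the 1 PB cut-off for the catch-all bin are illustrative assumptions consistent with the test's comments, not the actual FileSizeCountTask implementation.

// Illustrative only: first bin and cut-off are assumptions, not Ozone constants.
private static final long FIRST_BIN = 1024L;                     // assumed 1 KB first bin
private static final long MAX_TRACKED_SIZE = 1125899906842624L;  // assumed 1 PB cut-off

private static long toFileSizeUpperBound(long dataSize) {
  if (dataSize >= MAX_TRACKED_SIZE) {
    // Everything at or above the cut-off shares a single catch-all bin.
    return Long.MAX_VALUE;
  }
  long upperBound = FIRST_BIN;
  while (upperBound < dataSize) {
    upperBound <<= 1; // next power of two
  }
  return upperBound;
}

// toFileSizeUpperBound(1000L)                     -> 1024L
// toFileSizeUpperBound(100000L)                   -> 131072L
// toFileSizeUpperBound(4L * 1125899906842624L)    -> Long.MAX_VALUE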
use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.
the class TestUtilizationSchemaDefinition method testFileCountBySizeCRUDOperations.
@Test
public void testFileCountBySizeCRUDOperations() throws SQLException {
  Connection connection = getConnection();
  DatabaseMetaData metaData = connection.getMetaData();
  ResultSet resultSet = metaData.getTables(null, null,
      FILE_COUNT_BY_SIZE_TABLE_NAME, null);
  while (resultSet.next()) {
    Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
        resultSet.getString("TABLE_NAME"));
  }

  FileCountBySizeDao fileCountBySizeDao = getDao(FileCountBySizeDao.class);
  UtilizationSchemaDefinition utilizationSchemaDefinition =
      getSchemaDefinition(UtilizationSchemaDefinition.class);

  FileCountBySize newRecord = new FileCountBySize();
  newRecord.setVolume("vol1");
  newRecord.setBucket("bucket1");
  newRecord.setFileSize(1024L);
  newRecord.setCount(1L);
  fileCountBySizeDao.insert(newRecord);

  Record3<String, String, Long> recordToFind = utilizationSchemaDefinition
      .getDSLContext()
      .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
          FILE_COUNT_BY_SIZE.BUCKET,
          FILE_COUNT_BY_SIZE.FILE_SIZE)
      .value1("vol1")
      .value2("bucket1")
      .value3(1024L);
  FileCountBySize dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(1), dbRecord.getCount());

  dbRecord.setCount(2L);
  fileCountBySizeDao.update(dbRecord);
  dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(2), dbRecord.getCount());

  Table<FileCountBySizeRecord> fileCountBySizeRecordTable =
      fileCountBySizeDao.getTable();
  List<UniqueKey<FileCountBySizeRecord>> tableKeys =
      fileCountBySizeRecordTable.getKeys();
  // The table's unique keys (its composite primary key) are exposed via jOOQ
  // metadata; only the key names are read here.
  for (UniqueKey key : tableKeys) {
    String name = key.getName();
  }
}
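The primary key of FILE_COUNT_BY_SIZE is the composite (volume, bucket, file_size), which is why lookups go through a jOOQ Record3 rather than a single ID. To round out the CRUD cycle, a delete could be exercised against the same composite key; the snippet below is a hedged sketch that continues the test above and assumes the jOOQ-generated DAO's standard deleteById overload for composite-key records.

// Hedged sketch: delete by the composite key and confirm the row is gone.
fileCountBySizeDao.deleteById(recordToFind);
Assert.assertNull(fileCountBySizeDao.findById(recordToFind));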
use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.
the class UtilizationEndpoint method getFileCounts.
/**
 * Return the file counts from Recon DB.
 * @return {@link Response}
 */
@GET
@Path("/fileCount")
public Response getFileCounts(
    @QueryParam(RECON_QUERY_VOLUME) String volume,
    @QueryParam(RECON_QUERY_BUCKET) String bucket,
    @QueryParam(RECON_QUERY_FILE_SIZE) long fileSize) {
  DSLContext dslContext = utilizationSchemaDefinition.getDSLContext();
  List<FileCountBySize> resultSet;
  if (volume != null && bucket != null && fileSize > 0) {
    Record3<String, String, Long> recordToFind = dslContext
        .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
            FILE_COUNT_BY_SIZE.BUCKET,
            FILE_COUNT_BY_SIZE.FILE_SIZE)
        .value1(volume)
        .value2(bucket)
        .value3(fileSize);
    FileCountBySize record = fileCountBySizeDao.findById(recordToFind);
    resultSet = record != null ?
        Collections.singletonList(record) : Collections.emptyList();
  } else if (volume != null && bucket != null) {
    resultSet = dslContext.select().from(FILE_COUNT_BY_SIZE)
        .where(FILE_COUNT_BY_SIZE.VOLUME.eq(volume))
        .and(FILE_COUNT_BY_SIZE.BUCKET.eq(bucket))
        .fetchInto(FileCountBySize.class);
  } else if (volume != null) {
    resultSet = fileCountBySizeDao.fetchByVolume(volume);
  } else {
    // fetch all records
    resultSet = fileCountBySizeDao.findAll();
  }
  return Response.ok(resultSet).build();
}
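The branches fall back from the most specific lookup to the broadest: an exact (volume, bucket, fileSize) hit via findById, a (volume, bucket) filter via an explicit jOOQ select, a volume-only filter via the generated fetchByVolume helper, and finally findAll. For readers unfamiliar with jOOQ-generated DAOs, fetchByVolume is roughly equivalent to the explicit select used in the volume + bucket branch; a sketch of that equivalence (illustrative, not the generated code):

// Roughly what fetchByVolume(volume) amounts to:
List<FileCountBySize> byVolume = dslContext.select()
    .from(FILE_COUNT_BY_SIZE)
    .where(FILE_COUNT_BY_SIZE.VOLUME.eq(volume))
    .fetchInto(FileCountBySize.class);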
use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.
the class TestEndpoints method testGetFileCounts.
@Test
public void testGetFileCounts() throws Exception {
  OmKeyInfo omKeyInfo1 = mock(OmKeyInfo.class);
  given(omKeyInfo1.getKeyName()).willReturn("key1");
  given(omKeyInfo1.getVolumeName()).willReturn("vol1");
  given(omKeyInfo1.getBucketName()).willReturn("bucket1");
  given(omKeyInfo1.getDataSize()).willReturn(1000L);

  OmKeyInfo omKeyInfo2 = mock(OmKeyInfo.class);
  given(omKeyInfo2.getKeyName()).willReturn("key2");
  given(omKeyInfo2.getVolumeName()).willReturn("vol1");
  given(omKeyInfo2.getBucketName()).willReturn("bucket1");
  given(omKeyInfo2.getDataSize()).willReturn(100000L);

  OmKeyInfo omKeyInfo3 = mock(OmKeyInfo.class);
  given(omKeyInfo3.getKeyName()).willReturn("key1");
  given(omKeyInfo3.getVolumeName()).willReturn("vol2");
  given(omKeyInfo3.getBucketName()).willReturn("bucket1");
  given(omKeyInfo3.getDataSize()).willReturn(1000L);

  OMMetadataManager omMetadataManager = mock(OmMetadataManagerImpl.class);
  TypedTable<String, OmKeyInfo> keyTable = mock(TypedTable.class);
  TypedTable.TypedTableIterator mockKeyIter =
      mock(TypedTable.TypedTableIterator.class);
  TypedTable.TypedKeyValue mockKeyValue = mock(TypedTable.TypedKeyValue.class);
  when(keyTable.iterator()).thenReturn(mockKeyIter);
  when(omMetadataManager.getKeyTable(getBucketLayout())).thenReturn(keyTable);
  when(mockKeyIter.hasNext())
      .thenReturn(true).thenReturn(true).thenReturn(true).thenReturn(false);
  when(mockKeyIter.next()).thenReturn(mockKeyValue);
  when(mockKeyValue.getValue())
      .thenReturn(omKeyInfo1).thenReturn(omKeyInfo2).thenReturn(omKeyInfo3);

  Pair<String, Boolean> result = fileSizeCountTask.reprocess(omMetadataManager);
  assertTrue(result.getRight());
  assertEquals(3, fileCountBySizeDao.count());

  Response response = utilizationEndpoint.getFileCounts(null, null, 0);
  List<FileCountBySize> resultSet =
      (List<FileCountBySize>) response.getEntity();
  assertEquals(3, resultSet.size());
  assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol1") &&
      o.getBucket().equals("bucket1") && o.getFileSize() == 1024L &&
      o.getCount() == 1L));
  assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol1") &&
      o.getBucket().equals("bucket1") && o.getFileSize() == 131072 &&
      o.getCount() == 1L));
  assertTrue(resultSet.stream().anyMatch(o -> o.getVolume().equals("vol2") &&
      o.getBucket().equals("bucket1") && o.getFileSize() == 1024L &&
      o.getCount() == 1L));

  // Test for "volume" query param
  response = utilizationEndpoint.getFileCounts("vol1", null, 0);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(2, resultSet.size());
  assertTrue(resultSet.stream().allMatch(o -> o.getVolume().equals("vol1")));

  // Test for non-existent volume
  response = utilizationEndpoint.getFileCounts("vol", null, 0);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(0, resultSet.size());

  // Test for "volume" + "bucket" query param
  response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 0);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(2, resultSet.size());
  assertTrue(resultSet.stream().allMatch(o -> o.getVolume().equals("vol1") &&
      o.getBucket().equals("bucket1")));

  // Test for non-existent bucket
  response = utilizationEndpoint.getFileCounts("vol1", "bucket", 0);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(0, resultSet.size());

  // Test for "volume" + "bucket" + "fileSize" query params
  response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 131072);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(1, resultSet.size());
  FileCountBySize o = resultSet.get(0);
  assertTrue(o.getVolume().equals("vol1") && o.getBucket().equals("bucket1") &&
      o.getFileSize() == 131072);

  // Test for non-existent fileSize
  response = utilizationEndpoint.getFileCounts("vol1", "bucket1", 1310725);
  resultSet = (List<FileCountBySize>) response.getEntity();
  assertEquals(0, resultSet.size());
}
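The unit test calls getFileCounts directly, but against a running Recon instance the same checks map onto HTTP query parameters. The sketch below uses the standard JAX-RS client API; the host, port and path are deployment assumptions rather than values taken from the code above, and the query parameter names (volume, bucket, fileSize) are assumed to match the RECON_QUERY_* constants.

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

// Hedged example: adjust the URL to the actual Recon deployment.
Client client = ClientBuilder.newClient();
String json = client
    .target("http://localhost:9888/api/v1/utilization/fileCount")
    .queryParam("volume", "vol1")
    .queryParam("bucket", "bucket1")
    .queryParam("fileSize", 131072L)
    .request()
    .get(String.class);
client.close();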
use of org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize in project ozone by apache.
the class FileSizeCountTask method writeCountsToDB.
/**
 * Populate DB with the counts of file sizes calculated
 * using the dao.
 */
private void writeCountsToDB(boolean isDbTruncated,
                             Map<FileSizeCountKey, Long> fileSizeCountMap) {
  fileSizeCountMap.keySet().forEach((FileSizeCountKey key) -> {
    FileCountBySize newRecord = new FileCountBySize();
    newRecord.setVolume(key.volume);
    newRecord.setBucket(key.bucket);
    newRecord.setFileSize(key.fileSizeUpperBound);
    newRecord.setCount(fileSizeCountMap.get(key));
    if (!isDbTruncated) {
      // Get the current count from database and update
      Record3<String, String, Long> recordToFind = dslContext
          .newRecord(FILE_COUNT_BY_SIZE.VOLUME,
              FILE_COUNT_BY_SIZE.BUCKET,
              FILE_COUNT_BY_SIZE.FILE_SIZE)
          .value1(key.volume)
          .value2(key.bucket)
          .value3(key.fileSizeUpperBound);
      FileCountBySize fileCountRecord =
          fileCountBySizeDao.findById(recordToFind);
      if (fileCountRecord == null && newRecord.getCount() > 0L) {
        // insert new row only for non-zero counts.
        fileCountBySizeDao.insert(newRecord);
      } else if (fileCountRecord != null) {
        newRecord.setCount(fileCountRecord.getCount() + fileSizeCountMap.get(key));
        fileCountBySizeDao.update(newRecord);
      }
    } else if (newRecord.getCount() > 0) {
      // insert new row only for non-zero counts.
      fileCountBySizeDao.insert(newRecord);
    }
  });
}
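writeCountsToDB expects the caller to have already aggregated counts in memory, keyed by (volume, bucket, fileSizeUpperBound). A minimal sketch of that aggregation step follows; the helper name, the FileSizeCountKey constructor and the reuse of a binning helper are illustrative assumptions (FileSizeCountKey would also need equals/hashCode over its three fields for map lookups), but the key fields match the ones referenced above (key.volume, key.bucket, key.fileSizeUpperBound).

// Illustrative aggregation step (not the task's exact code): bump the
// in-memory count for the key's size bin before flushing via writeCountsToDB.
private void countKey(OmKeyInfo keyInfo,
                      Map<FileSizeCountKey, Long> fileSizeCountMap) {
  FileSizeCountKey key = new FileSizeCountKey(   // assumed constructor
      keyInfo.getVolumeName(),
      keyInfo.getBucketName(),
      toFileSizeUpperBound(keyInfo.getDataSize())); // binning helper as sketched earlier
  fileSizeCountMap.merge(key, 1L, Long::sum);
}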