Use of org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao in project ozone by apache:
the class TestFileSizeCountTask, method setUp.
/**
 * Initializes the DAO, the task under test, and the jOOQ DSL context,
 * then clears the FILE_COUNT_BY_SIZE table so each test starts empty.
 */
@Before
public void setUp() {
  fileCountBySizeDao = getDao(FileCountBySizeDao.class);
  UtilizationSchemaDefinition utilizationSchemaDefinition =
      getSchemaDefinition(UtilizationSchemaDefinition.class);
  fileSizeCountTask =
      new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition);
  dslContext = utilizationSchemaDefinition.getDSLContext();
  // Truncate table before running each test.
  // jOOQ's truncate(...) only builds the query object; without execute()
  // nothing is sent to the database and stale rows would leak between tests.
  dslContext.truncate(FILE_COUNT_BY_SIZE).execute();
}
Use of org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao in project ozone by apache:
the class TestEndpoints, method initializeInjector.
/**
 * Wires up the Recon test fixture: a Recon OM metadata manager backed by
 * temp folders, two random datanodes on a shared pipeline, mocked SCM
 * protocol/service providers, a mocked HTTP connection serving a canned
 * Prometheus response, and a ReconTestInjector from which the endpoint
 * and task instances under test are obtained.
 *
 * @throws Exception if temp-folder creation or injector construction fails
 */
private void initializeInjector() throws Exception {
reconOMMetadataManager = getTestReconOmMetadataManager(initializeNewOmMetadataManager(temporaryFolder.newFolder()), temporaryFolder.newFolder());
// Two distinct datanodes with fixed host/IP pairs so endpoint responses
// can be asserted against known values.
datanodeDetails = randomDatanodeDetails();
datanodeDetails2 = randomDatanodeDetails();
datanodeDetails.setHostName(HOST1);
datanodeDetails.setIpAddress(IP1);
datanodeDetails2.setHostName(HOST2);
datanodeDetails2.setIpAddress(IP2);
pipeline = getRandomPipeline(datanodeDetails);
pipelineId = pipeline.getId().getId().toString();
// One OPEN Ratis/ONE-factor container bound to the pipeline above.
ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(containerId).setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.ONE)).setState(LifeCycleState.OPEN).setOwner("test").setPipelineID(pipeline.getId()).build();
ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(containerInfo, pipeline);
// Mock the SCM client and service provider so no real SCM is contacted.
StorageContainerLocationProtocol mockScmClient = mock(StorageContainerLocationProtocol.class);
StorageContainerServiceProvider mockScmServiceProvider = mock(StorageContainerServiceProviderImpl.class);
when(mockScmServiceProvider.getPipeline(pipeline.getId().getProtobuf())).thenReturn(pipeline);
when(mockScmServiceProvider.getContainerWithPipeline(containerId)).thenReturn(containerWithPipeline);
List<Long> containerIDs = new LinkedList<>();
containerIDs.add(containerId);
List<ContainerWithPipeline> cpw = new LinkedList<>();
cpw.add(containerWithPipeline);
when(mockScmServiceProvider.getExistContainerWithPipelinesInBatch(containerIDs)).thenReturn(cpw);
// Stub HTTP plumbing: any makeHttpCall returns 200 OK with the canned
// Prometheus test-response resource as its body.
InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream(PROMETHEUS_TEST_RESPONSE_FILE);
reconUtilsMock = mock(ReconUtils.class);
HttpURLConnection urlConnectionMock = mock(HttpURLConnection.class);
when(urlConnectionMock.getResponseCode()).thenReturn(HttpServletResponse.SC_OK);
when(urlConnectionMock.getInputStream()).thenReturn(inputStream);
when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class), anyString(), anyBoolean())).thenReturn(urlConnectionMock);
when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class), anyString())).thenReturn(GenericTestUtils.getRandomizedTestDir());
// Build the injector with the SQL DB, Recon OM, mocked providers, and
// all endpoint bindings exercised by this test class.
ReconTestInjector reconTestInjector = new ReconTestInjector.Builder(temporaryFolder).withReconSqlDb().withReconOm(reconOMMetadataManager).withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class)).addBinding(StorageContainerServiceProvider.class, mockScmServiceProvider).addBinding(OzoneStorageContainerManager.class, ReconStorageContainerManagerFacade.class).withContainerDB().addBinding(ClusterStateEndpoint.class).addBinding(NodeEndpoint.class).addBinding(MetricsServiceProviderFactory.class).addBinding(ContainerHealthSchemaManager.class).addBinding(UtilizationEndpoint.class).addBinding(ReconUtils.class, reconUtilsMock).addBinding(StorageContainerLocationProtocol.class, mockScmClient).build();
nodeEndpoint = reconTestInjector.getInstance(NodeEndpoint.class);
pipelineEndpoint = reconTestInjector.getInstance(PipelineEndpoint.class);
fileCountBySizeDao = getDao(FileCountBySizeDao.class);
GlobalStatsDao globalStatsDao = getDao(GlobalStatsDao.class);
UtilizationSchemaDefinition utilizationSchemaDefinition = getSchemaDefinition(UtilizationSchemaDefinition.class);
Configuration sqlConfiguration = reconTestInjector.getInstance(Configuration.class);
// Endpoints/tasks constructed directly (not via the injector) so the
// test controls exactly which DAOs and schema definitions they see.
utilizationEndpoint = new UtilizationEndpoint(fileCountBySizeDao, utilizationSchemaDefinition);
fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition);
tableCountTask = new TableCountTask(globalStatsDao, sqlConfiguration, reconOMMetadataManager);
reconScm = (ReconStorageContainerManagerFacade) reconTestInjector.getInstance(OzoneStorageContainerManager.class);
clusterStateEndpoint = new ClusterStateEndpoint(reconScm, globalStatsDao);
MetricsServiceProviderFactory metricsServiceProviderFactory = reconTestInjector.getInstance(MetricsServiceProviderFactory.class);
metricsProxyEndpoint = new MetricsProxyEndpoint(metricsServiceProviderFactory);
dslContext = getDslContext();
}
Use of org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao in project ozone by apache:
the class TestUtilizationSchemaDefinition, method testFileCountBySizeCRUDOperations.
/**
 * Exercises create, read, and update operations against the
 * FILE_COUNT_BY_SIZE table via its generated jOOQ DAO, and verifies the
 * table and its unique keys are visible through JDBC metadata.
 *
 * @throws SQLException if the metadata lookup fails
 */
@Test
public void testFileCountBySizeCRUDOperations() throws SQLException {
  // Connection is assumed to be managed by the test harness
  // (getConnection()), so it is intentionally not closed here;
  // the ResultSet we open ourselves is closed via try-with-resources.
  Connection connection = getConnection();
  DatabaseMetaData metaData = connection.getMetaData();
  try (ResultSet resultSet =
      metaData.getTables(null, null, FILE_COUNT_BY_SIZE_TABLE_NAME, null)) {
    while (resultSet.next()) {
      Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
          resultSet.getString("TABLE_NAME"));
    }
  }

  FileCountBySizeDao fileCountBySizeDao = getDao(FileCountBySizeDao.class);
  UtilizationSchemaDefinition utilizationSchemaDefinition =
      getSchemaDefinition(UtilizationSchemaDefinition.class);

  // Create: insert one row keyed by (volume, bucket, fileSize).
  FileCountBySize newRecord = new FileCountBySize();
  newRecord.setVolume("vol1");
  newRecord.setBucket("bucket1");
  newRecord.setFileSize(1024L);
  newRecord.setCount(1L);
  fileCountBySizeDao.insert(newRecord);

  // Read: look up by the composite primary key.
  Record3<String, String, Long> recordToFind =
      utilizationSchemaDefinition.getDSLContext()
          .newRecord(FILE_COUNT_BY_SIZE.VOLUME, FILE_COUNT_BY_SIZE.BUCKET,
              FILE_COUNT_BY_SIZE.FILE_SIZE)
          .value1("vol1").value2("bucket1").value3(1024L);
  FileCountBySize dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(1), dbRecord.getCount());

  // Update: bump the count and re-read to confirm persistence.
  dbRecord.setCount(2L);
  fileCountBySizeDao.update(dbRecord);
  dbRecord = fileCountBySizeDao.findById(recordToFind);
  assertEquals(Long.valueOf(2), dbRecord.getCount());

  // Metadata: every unique key on the table should expose a name.
  // (The original loop computed the name into an unused local; assert it
  // instead so the loop actually checks something.)
  Table<FileCountBySizeRecord> fileCountBySizeRecordTable =
      fileCountBySizeDao.getTable();
  List<UniqueKey<FileCountBySizeRecord>> tableKeys =
      fileCountBySizeRecordTable.getKeys();
  for (UniqueKey<FileCountBySizeRecord> key : tableKeys) {
    Assert.assertNotNull(key.getName());
  }
}
Aggregations