Use of org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao in project ozone by apache.
The class TestTableCountTask, method initializeInjector:
private void initializeInjector() throws IOException {
  ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager(
      initializeNewOmMetadataManager(temporaryFolder.newFolder()),
      temporaryFolder.newFolder());
  globalStatsDao = getDao(GlobalStatsDao.class);
  tableCountTask = new TableCountTask(globalStatsDao, getConfiguration(), omMetadataManager);
  dslContext = getDslContext();
}
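For context, here is a minimal sketch of how a task wired up like this could persist a per-table count through GlobalStatsDao, using only the DAO calls exercised in the CRUD test further down (findById, insert, update). The helper name and key argument are hypothetical and not taken from TableCountTask itself:

// Hypothetical helper: insert the stat if the key is new, otherwise update it.
private void upsertGlobalStats(GlobalStatsDao globalStatsDao, String key, long count) {
  Timestamp now = new Timestamp(System.currentTimeMillis());
  GlobalStats record = globalStatsDao.findById(key);
  if (record == null) {
    // No row for this key yet: create one.
    record = new GlobalStats();
    record.setKey(key);
    record.setValue(count);
    record.setLastUpdatedTimestamp(now);
    globalStatsDao.insert(record);
  } else {
    // Row exists: refresh the value and the timestamp.
    record.setValue(count);
    record.setLastUpdatedTimestamp(now);
    globalStatsDao.update(record);
  }
}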
Use of org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao in project ozone by apache.
The class TestEndpoints, method initializeInjector:
private void initializeInjector() throws Exception {
  reconOMMetadataManager = getTestReconOmMetadataManager(
      initializeNewOmMetadataManager(temporaryFolder.newFolder()),
      temporaryFolder.newFolder());

  // Two datanodes with fixed host names and IPs, plus a pipeline on the first one.
  datanodeDetails = randomDatanodeDetails();
  datanodeDetails2 = randomDatanodeDetails();
  datanodeDetails.setHostName(HOST1);
  datanodeDetails.setIpAddress(IP1);
  datanodeDetails2.setHostName(HOST2);
  datanodeDetails2.setIpAddress(IP2);
  pipeline = getRandomPipeline(datanodeDetails);
  pipelineId = pipeline.getId().getId().toString();

  // An open RATIS/ONE container on the pipeline above.
  ContainerInfo containerInfo = new ContainerInfo.Builder()
      .setContainerID(containerId)
      .setReplicationConfig(RatisReplicationConfig.getInstance(ReplicationFactor.ONE))
      .setState(LifeCycleState.OPEN)
      .setOwner("test")
      .setPipelineID(pipeline.getId())
      .build();
  ContainerWithPipeline containerWithPipeline =
      new ContainerWithPipeline(containerInfo, pipeline);

  // Mock the SCM client and service provider with canned pipeline/container responses.
  StorageContainerLocationProtocol mockScmClient =
      mock(StorageContainerLocationProtocol.class);
  StorageContainerServiceProvider mockScmServiceProvider =
      mock(StorageContainerServiceProviderImpl.class);
  when(mockScmServiceProvider.getPipeline(pipeline.getId().getProtobuf())).thenReturn(pipeline);
  when(mockScmServiceProvider.getContainerWithPipeline(containerId))
      .thenReturn(containerWithPipeline);
  List<Long> containerIDs = new LinkedList<>();
  containerIDs.add(containerId);
  List<ContainerWithPipeline> cpw = new LinkedList<>();
  cpw.add(containerWithPipeline);
  when(mockScmServiceProvider.getExistContainerWithPipelinesInBatch(containerIDs)).thenReturn(cpw);

  // Stub the HTTP call to Prometheus with a canned response file and a test Recon DB dir.
  InputStream inputStream = Thread.currentThread().getContextClassLoader()
      .getResourceAsStream(PROMETHEUS_TEST_RESPONSE_FILE);
  reconUtilsMock = mock(ReconUtils.class);
  HttpURLConnection urlConnectionMock = mock(HttpURLConnection.class);
  when(urlConnectionMock.getResponseCode()).thenReturn(HttpServletResponse.SC_OK);
  when(urlConnectionMock.getInputStream()).thenReturn(inputStream);
  when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class), anyString(), anyBoolean()))
      .thenReturn(urlConnectionMock);
  when(reconUtilsMock.getReconDbDir(any(OzoneConfiguration.class), anyString()))
      .thenReturn(GenericTestUtils.getRandomizedTestDir());

  // Build the Recon test injector with the SQL DB, OM metadata, container DB,
  // endpoints and mocks, then pull out the instances under test.
  ReconTestInjector reconTestInjector = new ReconTestInjector.Builder(temporaryFolder)
      .withReconSqlDb()
      .withReconOm(reconOMMetadataManager)
      .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class))
      .addBinding(StorageContainerServiceProvider.class, mockScmServiceProvider)
      .addBinding(OzoneStorageContainerManager.class, ReconStorageContainerManagerFacade.class)
      .withContainerDB()
      .addBinding(ClusterStateEndpoint.class)
      .addBinding(NodeEndpoint.class)
      .addBinding(MetricsServiceProviderFactory.class)
      .addBinding(ContainerHealthSchemaManager.class)
      .addBinding(UtilizationEndpoint.class)
      .addBinding(ReconUtils.class, reconUtilsMock)
      .addBinding(StorageContainerLocationProtocol.class, mockScmClient)
      .build();
  nodeEndpoint = reconTestInjector.getInstance(NodeEndpoint.class);
  pipelineEndpoint = reconTestInjector.getInstance(PipelineEndpoint.class);
  fileCountBySizeDao = getDao(FileCountBySizeDao.class);
  GlobalStatsDao globalStatsDao = getDao(GlobalStatsDao.class);
  UtilizationSchemaDefinition utilizationSchemaDefinition =
      getSchemaDefinition(UtilizationSchemaDefinition.class);
  Configuration sqlConfiguration = reconTestInjector.getInstance(Configuration.class);
  utilizationEndpoint = new UtilizationEndpoint(fileCountBySizeDao, utilizationSchemaDefinition);
  fileSizeCountTask = new FileSizeCountTask(fileCountBySizeDao, utilizationSchemaDefinition);
  tableCountTask = new TableCountTask(globalStatsDao, sqlConfiguration, reconOMMetadataManager);
  reconScm = (ReconStorageContainerManagerFacade)
      reconTestInjector.getInstance(OzoneStorageContainerManager.class);
  clusterStateEndpoint = new ClusterStateEndpoint(reconScm, globalStatsDao);
  MetricsServiceProviderFactory metricsServiceProviderFactory =
      reconTestInjector.getInstance(MetricsServiceProviderFactory.class);
  metricsProxyEndpoint = new MetricsProxyEndpoint(metricsServiceProviderFactory);
  dslContext = getDslContext();
}
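As a quick usage note, a test built on this setup could sanity-check the stubbed SCM provider before exercising the endpoints. A minimal sketch, assuming the mock (or the check itself) lives in the same scope as the stubbing above; it is not taken from the original test:

// Sketch: verify the canned ContainerWithPipeline wired into the mock above.
ContainerWithPipeline cwp = mockScmServiceProvider.getContainerWithPipeline(containerId);
Assert.assertEquals(pipeline.getId(), cwp.getPipeline().getId());
Assert.assertEquals(LifeCycleState.OPEN, cwp.getContainerInfo().getState());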
Use of org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao in project ozone by apache.
The class TestStatsSchemaDefinition, method testGlobalStatsCRUDOperations:
@Test
public void testGlobalStatsCRUDOperations() throws Exception {
  Connection connection = getConnection();
  DatabaseMetaData metaData = connection.getMetaData();
  ResultSet resultSet = metaData.getTables(null, null, GLOBAL_STATS_TABLE_NAME, null);
  while (resultSet.next()) {
    Assert.assertEquals(GLOBAL_STATS_TABLE_NAME, resultSet.getString("TABLE_NAME"));
  }

  GlobalStatsDao dao = getDao(GlobalStatsDao.class);
  long now = System.currentTimeMillis();
  GlobalStats newRecord = new GlobalStats();
  newRecord.setLastUpdatedTimestamp(new Timestamp(now));
  newRecord.setKey("key1");
  newRecord.setValue(500L);

  // Create
  dao.insert(newRecord);
  GlobalStats newRecord2 = new GlobalStats();
  newRecord2.setLastUpdatedTimestamp(new Timestamp(now + 1000L));
  newRecord2.setKey("key2");
  newRecord2.setValue(10L);
  dao.insert(newRecord2);

  // Read
  GlobalStats dbRecord = dao.findById("key1");
  Assert.assertEquals("key1", dbRecord.getKey());
  Assert.assertEquals(Long.valueOf(500), dbRecord.getValue());
  Assert.assertEquals(new Timestamp(now), dbRecord.getLastUpdatedTimestamp());

  dbRecord = dao.findById("key2");
  Assert.assertEquals("key2", dbRecord.getKey());
  Assert.assertEquals(Long.valueOf(10), dbRecord.getValue());
  Assert.assertEquals(new Timestamp(now + 1000L), dbRecord.getLastUpdatedTimestamp());

  // Update
  dbRecord.setValue(100L);
  dbRecord.setLastUpdatedTimestamp(new Timestamp(now + 2000L));
  dao.update(dbRecord);

  // Read updated
  dbRecord = dao.findById("key2");
  Assert.assertEquals(new Timestamp(now + 2000L), dbRecord.getLastUpdatedTimestamp());
  Assert.assertEquals(Long.valueOf(100L), dbRecord.getValue());

  // Delete
  dao.deleteById("key1");

  // Verify
  dbRecord = dao.findById("key1");
  Assert.assertNull(dbRecord);
}
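Since rows written by one case remain visible to later cases that share the same Recon SQL instance, a teardown that clears GLOBAL_STATS keeps tests independent. A minimal sketch, assuming JUnit 4's @After and the standard jOOQ-generated DAO methods; it is not part of the original test class:

@After
public void clearGlobalStats() {
  // findAll() and delete(Collection) are standard jOOQ DAO methods on the generated GlobalStatsDao.
  GlobalStatsDao dao = getDao(GlobalStatsDao.class);
  dao.delete(dao.findAll());
}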