Use of java.sql.ResultSet in project head by mifos.
The class ApplicationDatabaseOperation, method doesEntityExist.
private boolean doesEntityExist(String entityCountQuery) throws SQLException {
    ResultSet resultSet = null;
    try {
        int noOfOffices = 0;
        resultSet = getStatement().executeQuery(entityCountQuery);
        if (resultSet.next()) {
            noOfOffices = resultSet.getInt(1);
        }
        return (noOfOffices > 0);
    } finally {
        // close once, in the finally block only, and guard against a failed executeQuery
        if (resultSet != null) {
            resultSet.close();
        }
        closeConnection();
    }
}
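On Java 7 and later the same existence check can be written with try-with-resources, which closes the ResultSet even when the query throws. This is a minimal sketch, not the mifos code; it assumes getStatement() and closeConnection() behave as in the class above.
private boolean doesEntityExist(String entityCountQuery) throws SQLException {
    // the try-with-resources block closes the ResultSet automatically
    try (ResultSet resultSet = getStatement().executeQuery(entityCountQuery)) {
        int noOfOffices = resultSet.next() ? resultSet.getInt(1) : 0;
        return noOfOffices > 0;
    } finally {
        closeConnection();
    }
}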
Use of java.sql.ResultSet in project hibernate-orm by hibernate.
The class JdbcTimeCustomTimeZoneTest, method testTimeZone.
@Test
public void testTimeZone() {
    connectionProvider.clear();
    doInHibernate(this::sessionFactory, s -> {
        Person person = new Person();
        person.id = 1L;
        s.persist(person);
    });
    assertEquals(1, connectionProvider.getPreparedStatements().size());
    PreparedStatement ps = connectionProvider.getPreparedStatements().get(0);
    try {
        ArgumentCaptor<Calendar> calendarArgumentCaptor = ArgumentCaptor.forClass(Calendar.class);
        verify(ps, times(1)).setTime(anyInt(), any(Time.class), calendarArgumentCaptor.capture());
        assertEquals(TIME_ZONE, calendarArgumentCaptor.getValue().getTimeZone());
    } catch (SQLException e) {
        fail(e.getMessage());
    }
    connectionProvider.clear();
    doInHibernate(this::sessionFactory, s -> {
        s.doWork(connection -> {
            try (Statement st = connection.createStatement()) {
                try (ResultSet rs = st.executeQuery("select createdOn from Person")) {
                    while (rs.next()) {
                        Time time = rs.getTime(1);
                        Time offsetTime = Time.valueOf(OffsetTime.ofInstant(Instant.ofEpochMilli(0), TIME_ZONE.toZoneId()).toLocalTime());
                        assertEquals(offsetTime, time);
                    }
                }
            }
        });
        Person person = s.find(Person.class, 1L);
        assertEquals(0, person.createdOn.getTime() % TimeUnit.DAYS.toSeconds(1));
    });
}
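The read-back in the test uses the one-argument rs.getTime(1), which builds the Time value in the JVM default time zone. JDBC also provides ResultSet.getTime(int, Calendar) for interpreting the column in an explicit zone; a minimal sketch, assuming the same TIME_ZONE constant and a ResultSet rs already positioned on a row:
// interpret the stored time using the test's custom time zone rather than the JVM default
Calendar calendar = Calendar.getInstance(TIME_ZONE);
Time timeInCustomZone = rs.getTime(1, calendar);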
Use of java.sql.ResultSet in project pinot by linkedin.
The class HybridClusterIntegrationTest, method setUp.
@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);
    tableName = TABLE_NAME;
    // Start Zk, Kafka and Pinot
    startHybridCluster(10);
    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))), _tmpDir);
    _tmpDir.mkdirs();
    final List<File> avroFiles = getAllAvroFiles();
    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();
    // Create Pinot table
    addHybridTable(tableName, "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC, schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn, invertedIndexColumns, null, false);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = " + invertedIndexColumns);
    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);
    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, tableName, false, null);
    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator to count the uploaded segments and unlock the latch once all offline segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance", InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {

        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains(tableName)) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            // System.out.println("Got " + offlineSegmentCount + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });
    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        // System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all offline segments to be online
    latch.await();
    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);
    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from " + tableName);
    ResultSet rs = statement.getResultSet();
    // next() rather than first(): a TYPE_FORWARD_ONLY cursor only supports forward movement,
    // and count(*) always returns exactly one row
    rs.next();
    h2RecordCount = rs.getInt(1);
    rs.close();
    statement.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
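Because the statement above is created with ResultSet.TYPE_FORWARD_ONLY, the cursor can only be advanced with next(); positioning calls such as first() or absolute() require a scrollable result set. A minimal sketch of the scrollable alternative, assuming the same H2 _connection and tableName:
// TYPE_SCROLL_INSENSITIVE permits first()/absolute(); CONCUR_READ_ONLY keeps the result set read-only
Statement scrollable = _connection.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
ResultSet rs = scrollable.executeQuery("select count(*) from " + tableName);
rs.first();
int rowCount = rs.getInt(1);
rs.close();
scrollable.close();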
Use of java.sql.ResultSet in project head by mifos.
The class DefaultDatabaseLoader, method loadDbFromSqlFiles.
private void loadDbFromSqlFiles(String dbToLoadName, String[] fileNames) throws SQLException, IOException {
    if (!"".equals(dbToLoadName)) {
        connection.setCatalog(dbToLoadName);
        // try-with-resources ensures the metadata ResultSet is closed on every path
        try (ResultSet res = connection.getMetaData().getTables(null, null, null, new String[] { "TABLE" })) {
            if (res.next()) {
                logger.info("Database " + dbToLoadName + " already populated");
            } else {
                InputStream[] inputStreams = MifosResourceUtil.getSQLFilesAsStreams(fileNames);
                SqlExecutor.executeMultipleFiles(inputStreams, connection);
            }
        }
    }
}
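If the loader only needs to know whether one specific table already exists, DatabaseMetaData.getTables can be narrowed with a table-name pattern instead of listing every table in the catalog. A minimal sketch, with "CUSTOMER" standing in as a hypothetical name for a table the SQL files are expected to create:
// a non-empty result means the table is already present in the target catalog
try (ResultSet res = connection.getMetaData().getTables(dbToLoadName, null, "CUSTOMER", new String[] { "TABLE" })) {
    if (res.next()) {
        logger.info("Database " + dbToLoadName + " already populated");
    }
}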
Use of java.sql.ResultSet in project head by mifos.
The class Upgrade, method countRows.
@SuppressWarnings("PMD.CloseResource")
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = { "OBL_UNSATISFIED_OBLIGATION", "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE" }, justification = "The statement is closed and the query cannot be static.")
private int countRows(Connection connection, String tableName) throws SQLException {
    int numFields = 0;
    Statement statement = connection.createStatement();
    try {
        ResultSet results = statement.executeQuery("select count(*) from " + tableName);
        if (!results.next()) {
            throw new SystemException(SystemException.DEFAULT_KEY, "Query failed on table: " + tableName);
        }
        numFields = results.getInt(1);
    } finally {
        statement.close();
    }
    return numFields;
}
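The same count can be expressed with try-with-resources, which closes both the Statement and its ResultSet even if the query throws; closing the Statement also releases the ResultSet, which is why the original only closes the Statement. A minimal sketch under the same assumptions (the table name is still interpolated, so the FindBugs suppression above would still apply):
private int countRows(Connection connection, String tableName) throws SQLException {
    try (Statement statement = connection.createStatement();
         ResultSet results = statement.executeQuery("select count(*) from " + tableName)) {
        if (!results.next()) {
            throw new SystemException(SystemException.DEFAULT_KEY, "Query failed on table: " + tableName);
        }
        return results.getInt(1);
    }
}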