Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfRepsSequenceHandlerPostgreSQL, method handleSerialTypeSequence:
protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) { // serial columns are single-column primary keys
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                // already done
                continue;
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            // because sequence names of other schemas have already been qualified
            //sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
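The heart of the serial-type detection is the parsing of the column's default value: PostgreSQL reports a serial column's default as a nextval() call, and the sequence name is the text between the first pair of single quotes. The following standalone sketch isolates just that parsing step; the class and method names are ours for illustration, not part of dbflute:

public class SerialSequenceNameParser {

    // Extracts e.g. "member_seq" from "nextval('member_seq'::regclass)";
    // returns null when the default value is not a serial-style nextval() call.
    public static String extractSequenceName(String defaultValue) {
        final String prefix = "nextval('";
        if (defaultValue == null || !defaultValue.startsWith(prefix)) {
            return null;
        }
        final String rear = defaultValue.substring(prefix.length());
        final int endIndex = rear.indexOf("'");
        return endIndex < 0 ? null : rear.substring(0, endIndex);
    }

    public static void main(String[] args) {
        System.out.println(extractSequenceName("nextval('member_seq'::regclass)")); // member_seq
        System.out.println(extractSequenceName("42")); // null (plain numeric default)
    }
}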
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfOutsideSqlAnalyzer, method execSQL:
// ===================================================================================
//                                                                           Execution
//                                                                           =========
@Override
protected void execSQL(String sql) {
    checkRequiredSqlComment(sql);
    ResultSet rs = null;
    try {
        DfCustomizeEntityInfo customizeEntityInfo = null;
        boolean alreadyIncrementGoodSqlCount = false;
        if (isTargetEntityMakingSql(sql)) {
            {
                final String executedSql = buildExecutedSql(sql);
                checkStatement(executedSql);
                rs = _currentStatement.executeQuery(executedSql);
            }
            _goodSqlCount++;
            alreadyIncrementGoodSqlCount = true;
            // for Customize Entity
            final Map<String, DfColumnMeta> columnMetaMap = extractColumnMetaMap(sql, rs);
            customizeEntityInfo = processCustomizeEntity(sql, columnMetaMap);
        }
        if (isTargetParameterBeanMakingSql(sql)) {
            if (customizeEntityInfo == null) {
                _log.info("*Only parameter-bean is created: the SQL was not executed.");
            }
            if (!alreadyIncrementGoodSqlCount) {
                _goodSqlCount++;
            }
            // for Parameter Bean
            processParameterBean(sql, customizeEntityInfo);
        }
    } catch (SQLException e) {
        if (_runInfo.isErrorContinue()) {
            _log.warn("Failed to execute: " + sql, e);
            _sql2entityMeta.addExceptionInfo(_sqlFile.getName(), e.getMessage() + ln() + sql);
        } else {
            throwSQLFailureException(sql, e);
        }
    } finally {
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException ignored) {
                _log.warn("Ignored exception: " + ignored.getMessage());
            }
        }
    }
}
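The catch block illustrates an error-continue policy: when _runInfo.isErrorContinue() is true, a failing SQL is logged and recorded so the run can proceed, otherwise the failure aborts the whole run. Below is a minimal, generic sketch of the same pattern, independent of dbflute; all names here are illustrative:

import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.List;

public class ErrorContinueRunner {

    private final boolean errorContinue; // plays the role of _runInfo.isErrorContinue()
    private final List<String> exceptionInfoList = new ArrayList<>();
    private int goodSqlCount;

    public ErrorContinueRunner(boolean errorContinue) {
        this.errorContinue = errorContinue;
    }

    // Executes each SQL in order; on failure either records the error and
    // continues with the next statement, or rethrows to abort the whole run.
    public void runAll(Statement st, List<String> sqlList) throws SQLException {
        for (String sql : sqlList) {
            try {
                st.execute(sql);
                goodSqlCount++;
            } catch (SQLException e) {
                if (errorContinue) {
                    exceptionInfoList.add(e.getMessage() + "\n" + sql);
                } else {
                    throw e;
                }
            }
        }
    }
}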
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method helpColumnDatetimePrecision:
protected void helpColumnDatetimePrecision(DfTableMeta tableMeta, List<DfColumnMeta> columnList) {
    if (_datetimePrecisionAllMap != null) {
        final String tableName = tableMeta.getTableName();
        final Map<String, Map<String, Integer>> tableMap = _datetimePrecisionAllMap.get(tableMeta.getUnifiedSchema());
        if (tableMap != null) { // just in case
            final Map<String, Integer> datetimePrecisionMap = tableMap.get(tableName);
            if (datetimePrecisionMap != null) { // just in case
                for (DfColumnMeta column : columnList) {
                    column.acceptDatetimePrecision(datetimePrecisionMap);
                }
            }
        }
    }
}
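The precision data is a three-level lookup: schema, then table, then column name mapped to a datetime precision. The following self-contained sketch reproduces the same guarded two-step lookup using plain String keys in place of dbflute's UnifiedSchema; all keys and values are made up for illustration:

import java.util.HashMap;
import java.util.Map;

public class DatetimePrecisionLookupExample {

    public static void main(String[] args) {
        // schema -> table -> column -> datetime precision
        final Map<String, Map<String, Map<String, Integer>>> allMap = new HashMap<>();
        final Map<String, Map<String, Integer>> tableMap = new HashMap<>();
        final Map<String, Integer> columnMap = new HashMap<>();
        columnMap.put("BIRTHDATE", 6); // e.g. a timestamp(6) column
        tableMap.put("MEMBER", columnMap);
        allMap.put("EXAMPLEDB.PUBLIC", tableMap);

        // the same null-guarded descent as helpColumnDatetimePrecision()
        final Map<String, Map<String, Integer>> byTable = allMap.get("EXAMPLEDB.PUBLIC");
        if (byTable != null) { // just in case
            final Map<String, Integer> byColumn = byTable.get("MEMBER");
            if (byColumn != null) { // just in case
                System.out.println("precision=" + byColumn.get("BIRTHDATE")); // precision=6
            }
        }
    }
}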
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfSynonymExtractorOracle, method getSynonymColumns:
// -----------------------------------------------------
//                             Supplementary Column Info
//                             -------------------------
protected List<DfColumnMeta> getSynonymColumns(Connection conn, UnifiedSchema synonymOwner, String synonymName) throws SQLException {
    final List<DfColumnMeta> columnList = new ArrayList<DfColumnMeta>();
    Statement st = null;
    ResultSet rs = null;
    try {
        st = conn.createStatement();
        final String synonymSqlName = synonymOwner.buildSchemaQualifiedName(synonymName);
        final String sql = "select * from " + synonymSqlName + " where 0=1"; // zero rows, metadata only
        rs = st.executeQuery(sql);
        final ResultSetMetaData metaData = rs.getMetaData();
        int count = metaData.getColumnCount();
        for (int i = 0; i < count; i++) {
            int index = i + 1;
            String columnName = metaData.getColumnName(index);
            int columnType = metaData.getColumnType(index);
            String columnTypeName = metaData.getColumnTypeName(index);
            int precision = metaData.getPrecision(index);
            int scale = metaData.getScale(index);
            int nullableType = metaData.isNullable(index);
            DfColumnMeta column = new DfColumnMeta();
            column.setColumnName(columnName);
            column.setJdbcDefValue(columnType);
            column.setDbTypeName(columnTypeName);
            column.setColumnSize(precision);
            column.setDecimalDigits(scale);
            column.setRequired(nullableType == ResultSetMetaData.columnNoNulls);
            columnList.add(column);
        }
        return columnList;
    } finally {
        // close the result set before its statement
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException ignored) {
            }
        }
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
            }
        }
    }
}
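The "where 0=1" query is a standard JDBC trick: it returns no rows, yet the ResultSetMetaData still describes every column of the synonym's underlying table, so the extractor gets names, JDBC types, precision, scale, and nullability without fetching data. A plain-JDBC sketch of the same trick, using try-with-resources; the connection URL, credentials, and table name are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;

public class ZeroRowMetaDataExample {

    public static void main(String[] args) throws SQLException {
        try (Connection conn = DriverManager.getConnection("jdbc:oracle:thin:@//localhost:1521/XEPDB1", "scott", "tiger");
                Statement st = conn.createStatement();
                ResultSet rs = st.executeQuery("select * from MEMBER where 0=1")) {
            final ResultSetMetaData meta = rs.getMetaData(); // populated even with zero rows
            for (int i = 1; i <= meta.getColumnCount(); i++) {
                System.out.println(meta.getColumnName(i)
                        + " type=" + meta.getColumnTypeName(i)
                        + " precision=" + meta.getPrecision(i)
                        + " scale=" + meta.getScale(i)
                        + " nullable=" + (meta.isNullable(i) != ResultSetMetaData.columnNoNulls));
            }
        }
    }
}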
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfAbstractDataWriterTest, method test_processBoolean:
// -----------------------------------------------------
//                                               Boolean
//                                               -------
public void test_processBoolean() throws Exception {
    // via XlsData
    // ## Arrange ##
    final DfXlsDataHandlingWriter impl = new DfXlsDataHandlingWriter(null, null) {
        @Override
        protected Class<?> getBindType(String tableName, DfColumnMeta columnMetaInfo) {
            return BigDecimal.class;
        }
    };
    Map<String, DfColumnMeta> columnMetaInfoMap = StringKeyMap.createAsCaseInsensitive();
    DfColumnMeta info = new DfColumnMeta();
    info.setColumnName("foo");
    info.setColumnSize(3);
    info.setJdbcDefValue(Types.NUMERIC);
    columnMetaInfoMap.put("foo", info);

    // ## Act ##
    boolean actual = impl.processBoolean("tbl", "foo", "0", null, null, 0, columnMetaInfoMap, 3);

    // ## Assert ##
    log("actual=" + actual);
    assertFalse(actual);
}
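The false result is the point of the test: since the overridden getBindType reports BigDecimal rather than a boolean type for the "foo" column, processBoolean presumably declines to handle the value as a boolean and returns false to signal that the caller should process it as another type.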