Use of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in project dbflute-core by dbflute.

The class DfRepsSequenceHandlerPostgreSQL, method handleSerialTypeSequence:
protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) { // only single-column primary keys are handled
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            // a serial column's default looks like "nextval('xxx_seq'...)" on PostgreSQL
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                continue; // already done
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // it is not necessary to increment because the table has no data
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // it is not necessary to increment because the table has no data
                continue;
            }
            // use the plain name because sequence names of other schemas have already been qualified
            //sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
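The sequence name is recovered purely by string-parsing the column's default expression. The same logic, pulled out as a standalone helper for clarity (a hypothetical method, not part of dbflute), assuming a PostgreSQL serial default such as nextval('member_member_id_seq'::regclass):

// hypothetical helper (not in dbflute): extracts the sequence name from a
// PostgreSQL serial column default like "nextval('member_member_id_seq'::regclass)"
protected String extractSerialSequenceName(String defaultValue) {
    final String prefix = "nextval('";
    if (defaultValue == null || !defaultValue.startsWith(prefix)) {
        return null; // not a serial-style default
    }
    final String rear = defaultValue.substring(prefix.length());
    final int endIndex = rear.indexOf("'");
    return endIndex >= 0 ? rear.substring(0, endIndex) : null;
}

For the example default above it returns "member_member_id_seq", which the method then uses as the sequence SQL name as-is.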
The class DfSynonymExtractorOracle, method setupBasicConstraintInfo:
protected void setupBasicConstraintInfo(DfSynonymMeta info, UnifiedSchema tableOwner, String tableName, Connection conn) throws SQLException {
    final DatabaseMetaData md = conn.getMetaData();
    final DfPrimaryKeyMeta pkInfo = getPKList(md, tableOwner, tableName);
    info.setPrimaryKey(pkInfo);
    final List<String> pkList = pkInfo.getPrimaryKeyList();
    if (info.isSelectable()) { // auto-increment determination needs a select statement
        for (String primaryKeyName : pkList) {
            final boolean autoIncrement = isAutoIncrement(conn, tableOwner, tableName, primaryKeyName);
            if (autoIncrement) {
                info.setAutoIncrement(autoIncrement);
                break;
            }
        }
    }
    {
        final Map<String, Map<Integer, String>> uqMap = getUQMap(md, tableOwner, tableName, pkList);
        info.setUniqueKeyMap(uqMap);
    }
    {
        final Map<String, DfForeignKeyMeta> fkMap = getFKMap(conn, md, tableOwner, tableName);
        info.setForeignKeyMap(fkMap); // tentative information at this point
    }
    {
        final Map<String, Map<Integer, String>> uqMap = info.getUniqueKeyMap();
        final Map<String, Map<Integer, String>> indexMap = getIndexMap(md, tableOwner, tableName, uqMap);
        info.setIndexMap(indexMap);
    }
}
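The unique-key and index maps populated here share one shape: a constraint (or index) name mapped to its columns keyed by ordinal position. A minimal illustration with invented names and data:

// invented example data: unique-key name -> (ordinal position -> column name)
final Map<String, Map<Integer, String>> uqMap = new LinkedHashMap<String, Map<Integer, String>>();
final Map<Integer, String> uqColumns = new TreeMap<Integer, String>();
uqColumns.put(1, "MEMBER_ACCOUNT");
uqMap.put("UQ_MEMBER_ACCOUNT", uqColumns);
info.setUniqueKeyMap(uqMap); // same call as in the method above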
The class DfSequenceHandlerJdbc, method incrementSequenceToDataMax:
// ===================================================================================
//                                                                  Increment Sequence
//                                                                  ==================
public void incrementSequenceToDataMax(Map<String, String> tableSequenceMap) {
    final Map<String, List<String>> skippedMap = DfCollectionUtil.newLinkedHashMap();
    _log.info("...Incrementing sequences to max value of table data");
    String tableName = null;
    String sequenceName = null;
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String tableSqlName = null;
    Integer actualValue = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        initializeTableInfo(conn);
        st = conn.createStatement();
        final Set<Entry<String, String>> entrySet = tableSequenceMap.entrySet();
        for (Entry<String, String> entry : entrySet) {
            // clear elements that are also used in the exception message
            tableName = null;
            sequenceName = null;
            tableInfo = null;
            pkInfo = null;
            tableSqlName = null;
            actualValue = null;
            tableName = entry.getKey();
            sequenceName = entry.getValue();
            assertValidSequence(sequenceName, tableName);
            tableInfo = findTableInfo(conn, tableName);
            pkInfo = findPrimaryKeyInfo(conn, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) { // only single-column primary keys are supported
                skippedMap.put(tableName, pkList);
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // it is not necessary to increment because the table has no data
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // it is not necessary to increment because the table has no data
                continue;
            }
            callSequenceLoop(st, sequenceName, actualValue);
        }
    } catch (SQLException e) {
        throwIncrementSequenceToDataMaxFailureException(tableName, sequenceName, tableInfo, pkInfo, tableSqlName, actualValue, DfJDBCException.voice(e));
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
    if (!skippedMap.isEmpty()) {
        _log.info("*Unsupported incrementing sequences(multiple-PK):");
        final Set<Entry<String, List<String>>> skippedEntrySet = skippedMap.entrySet();
        for (Entry<String, List<String>> skippedEntry : skippedEntrySet) {
            final String skippedTableName = skippedEntry.getKey();
            final List<String> pkList = skippedEntry.getValue();
            _log.info(" " + skippedTableName + ": pk=" + pkList);
        }
    }
}
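The helpers selectCount(), selectDataMax(), and callSequenceLoop() are not shown on this page. Minimal sketches of what they plausibly do, assuming plain JDBC and a PostgreSQL-style nextval function; the real dbflute implementations may differ in name quoting and error handling:

// sketch: count the table rows (null if the query unexpectedly returns no row)
protected Integer selectCount(Statement st, String tableSqlName) throws SQLException {
    final ResultSet rs = st.executeQuery("select count(*) from " + tableSqlName);
    try {
        return rs.next() ? Integer.valueOf(rs.getInt(1)) : null;
    } finally {
        rs.close();
    }
}

// sketch: the current max value of the (numeric) primary key column
protected Integer selectDataMax(Statement st, DfTableMeta tableInfo, String pkColumnName) throws SQLException {
    final ResultSet rs = st.executeQuery("select max(" + pkColumnName + ") from " + tableInfo.getTableSqlName());
    try {
        if (!rs.next()) {
            return null;
        }
        final int max = rs.getInt(1);
        return rs.wasNull() ? null : Integer.valueOf(max); // null when the table is empty
    } finally {
        rs.close();
    }
}

// sketch: keep calling nextval until the sequence catches up with the data max
protected void callSequenceLoop(Statement st, String sequenceSqlName, Integer actualValue) throws SQLException {
    int sequenceValue;
    do {
        final ResultSet rs = st.executeQuery("select nextval('" + sequenceSqlName + "')");
        try {
            rs.next();
            sequenceValue = rs.getInt(1);
        } finally {
            rs.close();
        }
    } while (sequenceValue < actualValue);
}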
The class DfSequenceHandlerPostgreSQL, method handleSerialTypeSequence:
(The method body is identical, line for line, to DfRepsSequenceHandlerPostgreSQL#handleSerialTypeSequence shown above.)
The class DfUniqueKeyExtractor, method doGetPrimaryKey:
protected DfPrimaryKeyMeta doGetPrimaryKey(DatabaseMetaData metaData, UnifiedSchema unifiedSchema, String tableName, boolean retry) throws SQLException {
    final DfPrimaryKeyMeta info = new DfPrimaryKeyMeta();
    if (isPrimaryKeyExtractingUnsupported()) {
        if (isDatabaseMsAccess()) {
            return processMSAccess(metaData, unifiedSchema, tableName, info);
        }
        return info;
    }
    ResultSet rs = null;
    try {
        rs = extractPrimaryKeyMetaData(metaData, unifiedSchema, tableName, retry);
        if (rs == null) {
            return info;
        }
        // MySQL may return (and actually has returned) an unordered list, so sort via the tree map;
        // ordinal handling was introduced recently (1.0.5G), so keep a just-in-case fallback
        final TreeMap<Integer, String> positionColumnNameMap = new TreeMap<Integer, String>();
        final Map<Integer, String> positionPkNameMap = new HashMap<Integer, String>();
        int justInCaseIndex = 100001;
        while (rs.next()) {
            final String metaTableName = rs.getString(3); // TABLE_NAME
            if (checkMetaTableDiffIfNeeds(tableName, metaTableName)) {
                continue;
            }
            final String columnName = rs.getString(4); // COLUMN_NAME
            final String posStr = rs.getString(5); // KEY_SEQ
            Integer ordinalPosition = null;
            try {
                ordinalPosition = Integer.valueOf(posStr);
            } catch (NumberFormatException continued) {
                warnPrimaryKeyPositionNotNumberException(tableName, columnName, posStr);
                ordinalPosition = justInCaseIndex; // just in case
                ++justInCaseIndex;
            }
            final String pkName = rs.getString(6); // PK_NAME
            positionColumnNameMap.put(ordinalPosition, columnName);
            positionPkNameMap.put(ordinalPosition, pkName);
        }
        for (Entry<Integer, String> entry : positionColumnNameMap.entrySet()) {
            final Integer pkPosition = entry.getKey();
            final String columnName = entry.getValue();
            final String pkName = positionPkNameMap.get(pkPosition);
            info.addPrimaryKey(columnName, pkName, pkPosition);
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
    return info;
}
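extractPrimaryKeyMetaData() is not shown in this snippet. A plausible sketch, assuming it delegates to java.sql.DatabaseMetaData#getPrimaryKeys() and that UnifiedSchema exposes pure catalog/schema accessors (both assumptions; the retry flag's behavior is not visible here either):

// sketch only: getPureCatalog()/getPureSchema() are assumed accessor names,
// and the retry flag is ignored because its meaning is not visible in this snippet
protected ResultSet extractPrimaryKeyMetaData(DatabaseMetaData metaData, UnifiedSchema unifiedSchema, String tableName, boolean retry) throws SQLException {
    final String catalog = unifiedSchema.getPureCatalog();
    final String schema = unifiedSchema.getPureSchema();
    // result set columns per the JDBC spec: 3=TABLE_NAME, 4=COLUMN_NAME, 5=KEY_SEQ, 6=PK_NAME,
    // which is exactly how doGetPrimaryKey() above reads them
    return metaData.getPrimaryKeys(catalog, schema, tableName);
}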