Usage of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in the project dbflute-core by dbflute.
From the class DfUniqueKeyExtractor, method getPrimaryKey.
/**
 * Retrieve the meta information of the columns composing the primary key of the specified table.
 * When the case-insensitive retry option is enabled and the first lookup finds no primary key,
 * the lookup is retried with the lower-case and then the upper-case form of the table name.
 * @param metaData JDBC meta data. (NotNull)
 * @param unifiedSchema The unified schema that can contain catalog name and no-name mark. (NullAllowed)
 * @param tableName The name of table. (NotNull, CaseInsensitiveByOption)
 * @return The meta information of primary keys. (NotNull)
 * @throws SQLException When it fails to handle the SQL.
 */
public DfPrimaryKeyMeta getPrimaryKey(DatabaseMetaData metaData, UnifiedSchema unifiedSchema, String tableName) throws SQLException {
    final String translatedName = translateTableCaseName(tableName);
    DfPrimaryKeyMeta pkMeta = doGetPrimaryKey(metaData, unifiedSchema, translatedName, false);
    if (!isRetryCaseInsensitivePrimaryKey()) {
        return pkMeta;
    }
    final String lowerName = translatedName.toLowerCase();
    if (!pkMeta.hasPrimaryKey() && !translatedName.equals(lowerName)) {
        pkMeta = doGetPrimaryKey(metaData, unifiedSchema, lowerName, true); // retry by lower case
    }
    final String upperName = translatedName.toUpperCase();
    if (!pkMeta.hasPrimaryKey() && !translatedName.equals(upperName)) {
        pkMeta = doGetPrimaryKey(metaData, unifiedSchema, upperName, true); // retry by upper case
    }
    return pkMeta;
}
Usage of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in the project dbflute-core by dbflute.
From the class DfRepsSequenceHandlerJdbc, method incrementSequenceToDataMax.
// ===================================================================================
//                                                                  Increment Sequence
//                                                                  ==================
/**
 * Increment each sequence until it reaches the max value of the data in its related table.
 * Tables whose primary key is not exactly one column are skipped and reported at the end.
 * @param tableSequenceMap The map of table name to sequence name. (NotNull)
 */
public void incrementSequenceToDataMax(Map<String, String> tableSequenceMap) {
    // tables skipped because of a non-single primary key: table name -> PK column list
    final Map<String, List<String>> skippedMap = DfCollectionUtil.newLinkedHashMap();
    _log.info("...Incrementing sequences to max value of table data");
    // declared outside the try so the failure handler below can report
    // how far processing got when the SQLException occurred
    String tableName = null;
    String sequenceName = null;
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String tableSqlName = null;
    Integer actualValue = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        initializeTableInfo(conn);
        st = conn.createStatement();
        final Set<Entry<String, String>> entrySet = tableSequenceMap.entrySet();
        for (Entry<String, String> entry : entrySet) {
            // clear the elements that are also used in the exception message,
            // so a failure is attributed to the current entry only
            tableName = null;
            sequenceName = null;
            tableInfo = null;
            pkInfo = null;
            tableSqlName = null;
            actualValue = null;
            tableName = entry.getKey();
            sequenceName = entry.getValue();
            assertValidSequence(sequenceName, tableName);
            tableInfo = findTableInfo(conn, tableName);
            pkInfo = findPrimaryKeyInfo(conn, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) {
                // compound (or missing) primary key: cannot determine a single max column
                skippedMap.put(tableName, pkList);
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            // call nextval repeatedly until the sequence catches up with the data max
            callSequenceLoop(st, sequenceName, actualValue);
        }
    } catch (SQLException e) {
        // report the entry being processed when the failure happened
        throwIncrementSequenceToDataMaxFailureException(tableName, sequenceName, tableInfo, pkInfo, tableSqlName, actualValue, DfJDBCException.voice(e));
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                // best-effort close: log and continue so the connection close still runs
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                // best-effort close: nothing more can be done here
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
    // report the skipped tables after all processable sequences are done
    if (!skippedMap.isEmpty()) {
        _log.info("*Unsupported incrementing sequences(multiple-PK):");
        final Set<Entry<String, List<String>>> skippedEntrySet = skippedMap.entrySet();
        for (Entry<String, List<String>> skippedEntry : skippedEntrySet) {
            final String skippedTableName = skippedEntry.getKey();
            final List<String> pkList = skippedEntry.getValue();
            _log.info(" " + skippedTableName + ": pk=" + pkList);
        }
    }
}
Usage of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in the project dbflute-core by dbflute.
From the class DfSynonymExtractorOracle, method getDBLinkSynonymPKInfo.
// -----------------------------------------------------
//                                   For DB Link Synonym
//                                   -------------------
/**
 * Extract the primary key info of the real table behind a DB link synonym,
 * by querying the data dictionary views on the DB link destination.
 * @param conn The connection to the main schema. (NotNull)
 * @param tableName The name of the real table on the DB link destination. (NotNull)
 * @param dbLinkName The name of the DB link. (NotNull)
 * @return The meta information of the primary key. (NotNull: empty when the table has no PK)
 * @throws SQLException When it fails to handle the SQL.
 */
protected DfPrimaryKeyMeta getDBLinkSynonymPKInfo(Connection conn, String tableName, String dbLinkName) throws SQLException {
    final DfPrimaryKeyMeta pkInfo = new DfPrimaryKeyMeta();
    final StringBuilder sb = new StringBuilder();
    sb.append("select cols.OWNER, cols.CONSTRAINT_NAME, cols.TABLE_NAME, cols.COLUMN_NAME, cols.POSITION");
    sb.append(" from USER_CONS_COLUMNS@" + dbLinkName + " cols");
    sb.append(" left outer join USER_CONSTRAINTS@" + dbLinkName + " cons");
    sb.append(" on cols.CONSTRAINT_NAME = cons.CONSTRAINT_NAME");
    // NOTE(review): tableName and dbLinkName are concatenated into the SQL text;
    // presumably they come from schema meta data rather than end-user input — confirm,
    // since identifiers (the DB link name) cannot be bound as parameters anyway
    sb.append(" where cols.TABLE_NAME = '").append(tableName).append("'");
    sb.append(" and cons.CONSTRAINT_TYPE = 'P'"); // 'P' = primary key constraint
    sb.append(" order by cols.POSITION"); // keep the PK column order stable
    Statement st = null;
    ResultSet rs = null;
    try {
        st = conn.createStatement();
        rs = st.executeQuery(sb.toString());
        while (rs.next()) {
            final String columnName = rs.getString("COLUMN_NAME");
            final String pkName = rs.getString("CONSTRAINT_NAME");
            final Integer pkPosition = rs.getInt("POSITION");
            pkInfo.addPrimaryKey(columnName, pkName, pkPosition);
        }
        return pkInfo;
    } finally {
        // close the ResultSet BEFORE its Statement (the original closed in the reverse
        // order, which left the ResultSet unclosed when Statement.close() threw)
        if (rs != null) {
            try {
                rs.close();
            } catch (SQLException ignored) {
                // best-effort close on a read-only dictionary query
            }
        }
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                // best-effort close on a read-only dictionary query
            }
        }
    }
}
Usage of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in the project dbflute-core by dbflute.
From the class DfSynonymExtractorOracle, method setupDBLinkSynonym.
/**
 * Set up the meta information of a DB link synonym, supplementing the primary key
 * and unique keys extracted from the DB link destination.
 * Foreign keys and indexes over a DB link are unsupported, so they are set as empty.
 * @param conn The connection to the main schema. (NotNull)
 * @param info The meta information of the synonym to set up. (NotNull)
 * @param dbLinkSynonymNativeMap The map of native synonym info per DB link. (NotNull)
 * @return The same synonym meta after setup. (NotNull)
 * @throws SQLException When it fails to handle the SQL.
 */
protected DfSynonymMeta setupDBLinkSynonym(Connection conn, DfSynonymMeta info, Map<String, Map<String, SynonymNativeInfo>> dbLinkSynonymNativeMap) throws SQLException {
    if (!info.isSelectable()) {
        return info; // e.g. procedure synonym: nothing selectable to extract
    }
    final String tableName = info.getTableName();
    final String dbLinkName = info.getDBLinkName();
    final String realTableName = translateTableName(tableName, dbLinkName, dbLinkSynonymNativeMap);
    info.setPrimaryKey(getDBLinkSynonymPKInfo(conn, realTableName, dbLinkName));
    info.setUniqueKeyMap(getDBLinkSynonymUQMap(conn, realTableName, dbLinkName));
    // foreign keys and indexes of DB link are unsupported: register empty maps
    info.setForeignKeyMap(new LinkedHashMap<String, DfForeignKeyMeta>());
    info.setIndexMap(new LinkedHashMap<String, Map<Integer, String>>());
    return info;
}
Usage of org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta in the project dbflute-core by dbflute.
From the class DfSchemaXmlSerializer, method getPrimaryColumnMetaInfo.
// -----------------------------------------------------
//                                           Primary Key
//                                           -----------
/**
 * Get the meta information of primary key.
 * When the table itself has no primary key and it can be handled as a synonym,
 * the primary key of the synonym meta is used instead.
 * @param metaData The meta data of a database. (NotNull)
 * @param tableMeta The meta information of table. (NotNull)
 * @return The meta information of primary key. (NotNull)
 * @throws SQLException When it fails to handle the SQL.
 */
protected DfPrimaryKeyMeta getPrimaryColumnMetaInfo(DatabaseMetaData metaData, DfTableMeta tableMeta) throws SQLException {
    final DfPrimaryKeyMeta pkInfo = _uniqueKeyExtractor.getPrimaryKey(metaData, tableMeta);
    // keep the direct result unless it is an empty PK on a handleable synonym
    if (!canHandleSynonym(tableMeta) || !pkInfo.getPrimaryKeyList().isEmpty()) {
        return pkInfo;
    }
    final DfSynonymMeta synonym = getSynonymMetaInfo(tableMeta);
    return synonym != null ? synonym.getPrimaryKey() : pkInfo;
}
Aggregations