Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.
Class DfDelimiterDataWriterImpl, method setupColumnNameList.
// ===================================================================================
//                                                                    Column Name List
//                                                                    ================
protected void setupColumnNameList(String dataDirectory, File dataFile, String tableDbName, Map<String, DfColumnMeta> columnMetaMap, FirstLineInfo firstLineInfo, List<String> columnNameList) {
    columnNameList.addAll(firstLineInfo.getColumnNameList());
    if (columnNameList.isEmpty()) {
        throwDelimiterDataColumnDefNotFoundException(_fileName, tableDbName);
    }
    if (isCheckColumnDef(dataDirectory)) {
        checkColumnDef(dataFile, tableDbName, columnNameList, columnMetaMap);
    }
    final StringSet columnSet = StringSet.createAsFlexible();
    columnSet.addAll(columnNameList);
    final List<String> additionalColumnList = new ArrayList<String>();
    for (String defaultColumn : _defaultValueMap.keySet()) {
        if (columnSet.contains(defaultColumn)) {
            continue;
        }
        if (columnMetaMap.containsKey(defaultColumn)) {
            // add only columns that actually exist in the DB
            additionalColumnList.add(defaultColumn);
        }
    }
    // defined columns + default columns (existing in DB)
    columnNameList.addAll(additionalColumnList);
}
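The flexible StringSet is what lets a delimiter-file header match a DB column even when the spelling differs in case. A minimal standalone sketch of the matching behavior this code relies on (the column names are hypothetical, and the underscore handling is an assumption about DBFlute's flexible option):

import org.dbflute.helper.StringSet;

public class StringSetFlexibleExample {
    public static void main(String[] args) {
        // a flexible set matches names case-insensitively
        final StringSet columnSet = StringSet.createAsFlexible();
        columnSet.add("MEMBER_NAME"); // hypothetical column name

        System.out.println(columnSet.contains("member_name")); // true (case-insensitive)
        System.out.println(columnSet.contains("memberName"));  // true if underscores are also ignored
        System.out.println(columnSet.contains("MEMBER_ID"));   // false
    }
}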
Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.
Class DfRepsSequenceHandlerPostgreSQL, method handleSerialTypeSequence.
protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear the elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) {
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                continue; // already done
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // no need to increment because the table has no data
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // no need to increment because the table has no data
                continue;
            }
            // no schema qualification here because sequence names
            // of other schemas have already been qualified
            // sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
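The prefix/quote scan above extracts the sequence name from a PostgreSQL serial column default such as nextval('member_seq'::regclass). A minimal standalone sketch of that extraction, using hypothetical default-value strings:

public class SerialDefaultParseExample {

    // extracts the sequence name from a PostgreSQL serial default value,
    // mirroring the prefix/quote scan above; returns null when it is not a nextval default
    static String extractSequenceName(String defaultValue) {
        final String prefix = "nextval('";
        if (defaultValue == null || !defaultValue.startsWith(prefix)) {
            return null;
        }
        final String rear = defaultValue.substring(prefix.length());
        final int endIndex = rear.indexOf("'");
        return endIndex < 0 ? null : rear.substring(0, endIndex);
    }

    public static void main(String[] args) {
        System.out.println(extractSequenceName("nextval('member_seq'::regclass)")); // member_seq
        System.out.println(extractSequenceName("42")); // null (plain default, not a sequence)
    }
}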
Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.
Class DfSequenceHandlerPostgreSQL, method handleSerialTypeSequence.
protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear the elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) {
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                continue; // already done
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // no need to increment because the table has no data
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // no need to increment because the table has no data
                continue;
            }
            // no schema qualification here because sequence names
            // of other schemas have already been qualified
            // sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
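Both handler classes above release JDBC resources with nested try/finally blocks. On Java 7+ the same cleanup can be expressed with try-with-resources; a minimal sketch, assuming only the Statement and Connection need closing (the DataSource wiring and the SQL are hypothetical):

import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import javax.sql.DataSource;

public class TryWithResourcesExample {

    // equivalent cleanup to the finally blocks above: resources are closed
    // in reverse order of creation, even when an exception is thrown
    static void work(DataSource dataSource) throws SQLException {
        try (Connection conn = dataSource.getConnection();
                Statement st = conn.createStatement()) {
            st.execute("SELECT 1"); // hypothetical placeholder work
        }
        // note: close() failures are thrown (or suppressed) here rather than
        // merely logged as in the handlers above, which may be why the
        // explicit finally blocks are kept in the original code
    }
}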
Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.
Class Table, method doFindExistingForeignKey.
protected ForeignKey doFindExistingForeignKey(String foreignTableName, List<String> localColumnNameList,
        List<String> foreignColumnNameList, String fixedSuffix, boolean compareSuffix, boolean compareLocalColumn) {
    final StringSet localColumnNameSet = StringSet.createAsFlexibleOrdered();
    localColumnNameSet.addAll(localColumnNameList);
    final StringSet foreignColumnNameSet = StringSet.createAsFlexibleOrdered();
    foreignColumnNameSet.addAll(foreignColumnNameList);
    for (ForeignKey fk : getForeignKeys()) {
        if (!Srl.equalsFlexible(foreignTableName, fk.getForeignTablePureName())) {
            continue;
        }
        if (compareSuffix && !Srl.equalsFlexible(fixedSuffix, fk.getFixedSuffix())) {
            continue;
        }
        final StringSet currentLocalColumnNameSet = StringSet.createAsFlexibleOrdered();
        currentLocalColumnNameSet.addAll(fk.getLocalColumnNameList());
        if (compareLocalColumn && !localColumnNameSet.equalsUnderCharOption(currentLocalColumnNameSet)) {
            continue;
        }
        final StringSet currentForeignColumnNameSet = StringSet.createAsFlexibleOrdered();
        currentForeignColumnNameSet.addAll(fk.getForeignColumnNameList());
        if (!foreignColumnNameSet.equalsUnderCharOption(currentForeignColumnNameSet)) {
            continue;
        }
        return fk; // first-found one
    }
    return null;
}
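The match hinges on equalsUnderCharOption(), which compares the two sets under their configured char options rather than by exact string equality. A minimal sketch of the comparison this finder relies on (the column names are hypothetical, and the exact flexible semantics are an assumption based on the usage above):

import org.dbflute.helper.StringSet;

public class FlexibleEqualsExample {
    public static void main(String[] args) {
        // the same comparison the FK finder uses: flexible-ordered sets
        // are expected to match names ignoring case differences
        final StringSet requested = StringSet.createAsFlexibleOrdered();
        requested.add("MEMBER_ID"); // hypothetical column name
        final StringSet existing = StringSet.createAsFlexibleOrdered();
        existing.add("member_id");
        // expected to print true despite the case difference
        System.out.println(requested.equalsUnderCharOption(existing));
    }
}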
Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.
Class DfColumnExtractor, method doGetColumnList.
protected List<DfColumnMeta> doGetColumnList(DatabaseMetaData metaData, UnifiedSchema unifiedSchema, String tableName, boolean retry) throws SQLException {
    final List<DfColumnMeta> columnList = DfCollectionUtil.newArrayList();
    // column names for the duplicate check
    final StringSet columnNameSet = StringSet.createAsFlexible();
    // duplicate objects for the warning log
    final StringSet duplicateTableNameSet = StringSet.createAsFlexible();
    final StringSet duplicateColumnNameSet = StringSet.createAsFlexible();
    ResultSet rs = null;
    try {
        rs = extractColumnMetaData(metaData, unifiedSchema, tableName, retry);
        if (rs == null) {
            return DfCollectionUtil.newArrayList();
        }
        while (rs.next()) {
            // /- - - - - - - - - - - - - - - - - - - - - - - - - - -
            // same policy as the table process (see DfTableHandler.java)
            // - - - - - - - - - -/
            final String columnName = rs.getString(4);
            if (isColumnExcept(unifiedSchema, tableName, columnName)) {
                continue;
            }
            final String metaTableName = rs.getString(3);
            if (checkMetaTableDiffIfNeeds(tableName, metaTableName)) {
                continue;
            }
            // filter duplicate objects
            if (columnNameSet.contains(columnName)) {
                duplicateTableNameSet.add(metaTableName);
                duplicateColumnNameSet.add(columnName);
                continue; // ignored with warning
            }
            columnNameSet.add(columnName);
            final Integer jdbcTypeCode = Integer.valueOf(rs.getString(5));
            final String dbTypeName = rs.getString(6);
            final Integer columnSize = Integer.valueOf(rs.getInt(7));
            final Integer decimalDigits = rs.getInt(9);
            final Integer nullType = Integer.valueOf(rs.getInt(11));
            final String columnComment = rs.getString(12);
            final String defaultValue = rs.getString(13);
            final DfColumnMeta columnMeta = new DfColumnMeta();
            columnMeta.setTableName(metaTableName);
            columnMeta.setColumnName(columnName);
            columnMeta.setJdbcDefValue(jdbcTypeCode);
            columnMeta.setDbTypeName(dbTypeName);
            columnMeta.setColumnSize(columnSize);
            columnMeta.setDecimalDigits(decimalDigits);
            columnMeta.setRequired(nullType == 0); // 0 = columnNoNulls
            columnMeta.setColumnComment(columnComment);
            columnMeta.setDefaultValue(filterDefaultValue(defaultValue));
            columnList.add(columnMeta);
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
    // show duplicate objects if they exist
    if (!duplicateColumnNameSet.isEmpty()) {
        String msg = "*Duplicate meta data was found:";
        msg = msg + "\n[" + tableName + "]";
        msg = msg + "\n duplicate tables = " + duplicateTableNameSet;
        msg = msg + "\n duplicate columns = " + duplicateColumnNameSet;
        _log.info(msg);
    }
    return columnList;
}
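The numeric ResultSet indexes above (4, 5, 6, 7, 9, 11, 12, 13) follow the standard JDBC DatabaseMetaData.getColumns() layout: COLUMN_NAME, DATA_TYPE, TYPE_NAME, COLUMN_SIZE, DECIMAL_DIGITS, NULLABLE, REMARKS, COLUMN_DEF. A minimal sketch that reads the same metadata by column label instead, assuming extractColumnMetaData() ultimately delegates to getColumns():

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.sql.SQLException;

public class GetColumnsExample {

    // reads the same metadata as above, but via the JDBC standard column labels
    // rather than positional indexes
    static void dumpColumns(Connection conn, String schema, String table) throws SQLException {
        final DatabaseMetaData metaData = conn.getMetaData();
        try (ResultSet rs = metaData.getColumns(null, schema, table, "%")) {
            while (rs.next()) {
                System.out.println(rs.getString("COLUMN_NAME")
                        + " type=" + rs.getString("TYPE_NAME")
                        + " size=" + rs.getInt("COLUMN_SIZE")
                        + " nullable=" + (rs.getInt("NULLABLE") != DatabaseMetaData.columnNoNulls)
                        + " default=" + rs.getString("COLUMN_DEF"));
            }
        }
    }
}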