Example 1 with StringSet

Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.

From class DfDelimiterDataWriterImpl, method setupColumnNameList:

// ===================================================================================
// Column Name List
// ================
protected void setupColumnNameList(String dataDirectory, File dataFile, String tableDbName, Map<String, DfColumnMeta> columnMetaMap, FirstLineInfo firstLineInfo, List<String> columnNameList) {
    columnNameList.addAll(firstLineInfo.getColumnNameList());
    if (columnNameList.isEmpty()) {
        throwDelimiterDataColumnDefNotFoundException(_fileName, tableDbName);
    }
    if (isCheckColumnDef(dataDirectory)) {
        checkColumnDef(dataFile, tableDbName, columnNameList, columnMetaMap);
    }
    final StringSet columnSet = StringSet.createAsFlexible();
    columnSet.addAll(columnNameList);
    final List<String> additionalColumnList = new ArrayList<String>();
    for (String defaultColumn : _defaultValueMap.keySet()) {
        if (columnSet.contains(defaultColumn)) {
            continue;
        }
        if (columnMetaMap.containsKey(defaultColumn)) {
            // add only columns that exist in the DB
            additionalColumnList.add(defaultColumn);
        }
    }
    // defined columns + default columns (existing in DB)
    columnNameList.addAll(additionalColumnList);
}
Also used: StringSet (org.dbflute.helper.StringSet), ArrayList (java.util.ArrayList)
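
The flexible StringSet is what keeps a default-value column from being added twice here: names from _defaultValueMap are looked up against the file's header columns without regard to naming differences (DBFlute's flexible matching is, broadly, case-insensitive). A minimal standalone sketch of that lookup, using hypothetical column names:

import java.util.Arrays;
import java.util.List;

import org.dbflute.helper.StringSet;

public class FlexibleColumnMatchSketch {

    public static void main(String[] args) {
        // hypothetical header columns read from a delimiter data file
        final List<String> headerColumnList = Arrays.asList("MEMBER_ID", "MEMBER_NAME");

        // flexible set: lookups are expected to ignore case differences
        final StringSet columnSet = StringSet.createAsFlexible();
        columnSet.addAll(headerColumnList);

        // a default-value column written in a different case still matches,
        // so setupColumnNameList() would skip it instead of adding a duplicate
        System.out.println(columnSet.contains("member_id")); // expected: true
        System.out.println(columnSet.contains("REGISTER_DATETIME")); // expected: false (would be added)
    }
}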

Example 2 with StringSet

Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.

From class DfRepsSequenceHandlerPostgreSQL, method handleSerialTypeSequence:

protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) {
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                // already done
                continue;
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            // because sequence names of other schemas have already been qualified
            // sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
Also used: DfPrimaryKeyMeta (org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta), DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta), SQLException (java.sql.SQLException), Statement (java.sql.Statement), DfAutoIncrementExtractor (org.dbflute.logic.jdbc.metadata.basic.DfAutoIncrementExtractor), Connection (java.sql.Connection), DatabaseMetaData (java.sql.DatabaseMetaData), Entry (java.util.Map.Entry), DfColumnExtractor (org.dbflute.logic.jdbc.metadata.basic.DfColumnExtractor), StringSet (org.dbflute.helper.StringSet), DfTableMeta (org.dbflute.logic.jdbc.metadata.info.DfTableMeta)
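
The serial-type detection above hinges on parsing the primary key column's default value: for a PostgreSQL serial column, JDBC meta data reports a default of the form nextval('sequence_name'::regclass), and the loop peels off the nextval(' prefix and reads up to the closing quote. A standalone sketch of just that extraction step, with hypothetical default values:

public class SequenceNameParseSketch {

    // same parsing steps as the loop above: strip "nextval('" and cut at the next quote
    static String extractSequenceName(String defaultValue) {
        final String prefix = "nextval('";
        if (defaultValue == null || !defaultValue.startsWith(prefix)) {
            return null; // not a serial-type default
        }
        final String rear = defaultValue.substring(prefix.length());
        final int endIndex = rear.indexOf("'");
        return endIndex < 0 ? null : rear.substring(0, endIndex);
    }

    public static void main(String[] args) {
        // hypothetical default values as reported by DatabaseMetaData
        System.out.println(extractSequenceName("nextval('member_seq'::regclass)")); // member_seq
        System.out.println(extractSequenceName("42")); // null, so the column is skipped
    }
}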

Example 3 with StringSet

Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.

From class DfSequenceHandlerPostgreSQL, method handleSerialTypeSequence (the same serial-type handling as in Example 2):

protected void handleSerialTypeSequence(Map<String, String> tableSequenceMap) {
    final StringSet doneSequenceSet = StringSet.createAsFlexibleOrdered();
    doneSequenceSet.addAll(tableSequenceMap.values());
    DfTableMeta tableInfo = null;
    DfPrimaryKeyMeta pkInfo = null;
    String sequenceName = null;
    String tableSqlName = null;
    Integer actualValue = null;
    String sequenceSqlName = null;
    Connection conn = null;
    Statement st = null;
    try {
        conn = _dataSource.getConnection();
        st = conn.createStatement();
        final DatabaseMetaData metaData = conn.getMetaData();
        final DfColumnExtractor columnHandler = new DfColumnExtractor();
        final DfAutoIncrementExtractor autoIncrementHandler = new DfAutoIncrementExtractor();
        _log.info("...Incrementing serial type sequence");
        final Set<Entry<String, DfTableMeta>> entrySet = _tableMap.entrySet();
        for (Entry<String, DfTableMeta> entry : entrySet) {
            // clear elements that are also used in the exception message
            tableInfo = null;
            pkInfo = null;
            sequenceName = null;
            tableSqlName = null;
            actualValue = null;
            sequenceSqlName = null;
            tableInfo = entry.getValue();
            pkInfo = _uniqueKeyHandler.getPrimaryKey(metaData, tableInfo);
            final List<String> pkList = pkInfo.getPrimaryKeyList();
            if (pkList.size() != 1) {
                continue;
            }
            final String primaryKeyColumnName = pkList.get(0);
            if (!autoIncrementHandler.isAutoIncrementColumn(conn, tableInfo, primaryKeyColumnName)) {
                continue;
            }
            final Map<String, DfColumnMeta> columnMap = columnHandler.getColumnMap(metaData, tableInfo);
            final DfColumnMeta columnInfo = columnMap.get(primaryKeyColumnName);
            if (columnInfo == null) {
                continue;
            }
            final String defaultValue = columnInfo.getDefaultValue();
            if (defaultValue == null) {
                continue;
            }
            final String prefix = "nextval('";
            if (!defaultValue.startsWith(prefix)) {
                continue;
            }
            final String excludedPrefixString = defaultValue.substring(prefix.length());
            final int endIndex = excludedPrefixString.indexOf("'");
            if (endIndex < 0) {
                continue;
            }
            sequenceName = excludedPrefixString.substring(0, endIndex);
            if (doneSequenceSet.contains(sequenceName)) {
                // already done
                continue;
            }
            tableSqlName = tableInfo.getTableSqlName();
            final Integer count = selectCount(st, tableSqlName);
            if (count == null || count == 0) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            actualValue = selectDataMax(st, tableInfo, primaryKeyColumnName);
            if (actualValue == null) {
                // It is not necessary to increment because the table has no data.
                continue;
            }
            // because sequence names of other schemas have already been qualified
            // sequenceSqlName = tableInfo.getUnifiedSchema().buildSqlName(sequenceName);
            sequenceSqlName = sequenceName;
            callSequenceLoop(st, sequenceSqlName, actualValue);
        }
    } catch (SQLException e) {
        throwSerialTypeSequenceHandlingFailureException(tableInfo, pkInfo, sequenceName, tableSqlName, actualValue, sequenceSqlName, e);
    } finally {
        if (st != null) {
            try {
                st.close();
            } catch (SQLException ignored) {
                _log.info("Statement.close() threw the exception!", ignored);
            }
        }
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException ignored) {
                _log.info("Connection.close() threw the exception!", ignored);
            }
        }
    }
}
Also used: DfPrimaryKeyMeta (org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta), DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta), SQLException (java.sql.SQLException), Statement (java.sql.Statement), DfAutoIncrementExtractor (org.dbflute.logic.jdbc.metadata.basic.DfAutoIncrementExtractor), Connection (java.sql.Connection), DatabaseMetaData (java.sql.DatabaseMetaData), Entry (java.util.Map.Entry), DfColumnExtractor (org.dbflute.logic.jdbc.metadata.basic.DfColumnExtractor), StringSet (org.dbflute.helper.StringSet), DfTableMeta (org.dbflute.logic.jdbc.metadata.info.DfTableMeta)

Example 4 with StringSet

Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.

From class Table, method doFindExistingForeignKey:

protected ForeignKey doFindExistingForeignKey(String foreignTableName, List<String> localColumnNameList, List<String> foreignColumnNameList, String fixedSuffix, boolean compareSuffix, boolean compareLocalColumn) {
    final StringSet localColumnNameSet = StringSet.createAsFlexibleOrdered();
    localColumnNameSet.addAll(localColumnNameList);
    final StringSet foreignColumnNameSet = StringSet.createAsFlexibleOrdered();
    foreignColumnNameSet.addAll(foreignColumnNameList);
    for (ForeignKey fk : getForeignKeys()) {
        if (!Srl.equalsFlexible(foreignTableName, fk.getForeignTablePureName())) {
            continue;
        }
        if (compareSuffix && !Srl.equalsFlexible(fixedSuffix, fk.getFixedSuffix())) {
            continue;
        }
        final StringSet currentLocalColumnNameSet = StringSet.createAsFlexibleOrdered();
        currentLocalColumnNameSet.addAll(fk.getLocalColumnNameList());
        if (compareLocalColumn && !localColumnNameSet.equalsUnderCharOption(currentLocalColumnNameSet)) {
            continue;
        }
        final StringSet currentForeignColumnNameSet = StringSet.createAsFlexibleOrdered();
        currentForeignColumnNameSet.addAll(fk.getForeignColumnNameList());
        if (!foreignColumnNameSet.equalsUnderCharOption(currentForeignColumnNameSet)) {
            continue;
        }
        // first-found one
        return fk;
    }
    return null;
}
Also used: StringSet (org.dbflute.helper.StringSet)
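
The comparison above works on column-name sets rather than raw lists, so an already-registered foreign key is found even when the requested column names differ in case from the stored ones. A minimal sketch of that set-level check, assuming equalsUnderCharOption() applies the same flexible matching rule that contains() uses; the column names are hypothetical:

import java.util.Arrays;

import org.dbflute.helper.StringSet;

public class FkColumnSetCompareSketch {

    public static void main(String[] args) {
        // hypothetical local column names from a requested FK definition
        final StringSet requestedSet = StringSet.createAsFlexibleOrdered();
        requestedSet.addAll(Arrays.asList("MEMBER_ID", "MEMBER_STATUS_CODE"));

        // the same columns as stored on an existing ForeignKey, in another case
        final StringSet existingSet = StringSet.createAsFlexibleOrdered();
        existingSet.addAll(Arrays.asList("member_id", "member_status_code"));

        // expected: true, i.e. doFindExistingForeignKey() would treat them as the same FK
        System.out.println(requestedSet.equalsUnderCharOption(existingSet));
    }
}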

Example 5 with StringSet

Use of org.dbflute.helper.StringSet in project dbflute-core by dbflute.

From class DfColumnExtractor, method doGetColumnList:

protected List<DfColumnMeta> doGetColumnList(DatabaseMetaData metaData, UnifiedSchema unifiedSchema, String tableName, boolean retry) throws SQLException {
    final List<DfColumnMeta> columnList = DfCollectionUtil.newArrayList();
    // Column names for duplicate check
    final StringSet columnNameSet = StringSet.createAsFlexible();
    // Duplicate objects for warning log
    final StringSet duplicateTableNameSet = StringSet.createAsFlexible();
    final StringSet duplicateColumnNameSet = StringSet.createAsFlexible();
    ResultSet rs = null;
    try {
        rs = extractColumnMetaData(metaData, unifiedSchema, tableName, retry);
        if (rs == null) {
            return DfCollectionUtil.newArrayList();
        }
        while (rs.next()) {
            // /- - - - - - - - - - - - - - - - - - - - - - - - - - -
            // same policy as the table process (see DfTableHandler.java)
            // - - - - - - - - - -/
            final String columnName = rs.getString(4);
            if (isColumnExcept(unifiedSchema, tableName, columnName)) {
                continue;
            }
            final String metaTableName = rs.getString(3);
            if (checkMetaTableDiffIfNeeds(tableName, metaTableName)) {
                continue;
            }
            // filter duplicate objects
            if (columnNameSet.contains(columnName)) {
                duplicateTableNameSet.add(metaTableName);
                duplicateColumnNameSet.add(columnName);
                // ignored with warning
                continue;
            }
            columnNameSet.add(columnName);
            final Integer jdbcTypeCode = Integer.valueOf(rs.getString(5));
            final String dbTypeName = rs.getString(6);
            final Integer columnSize = Integer.valueOf(rs.getInt(7));
            final Integer decimalDigits = rs.getInt(9);
            final Integer nullType = Integer.valueOf(rs.getInt(11));
            final String columnComment = rs.getString(12);
            final String defaultValue = rs.getString(13);
            final DfColumnMeta columnMeta = new DfColumnMeta();
            columnMeta.setTableName(metaTableName);
            columnMeta.setColumnName(columnName);
            columnMeta.setJdbcDefValue(jdbcTypeCode);
            columnMeta.setDbTypeName(dbTypeName);
            columnMeta.setColumnSize(columnSize);
            columnMeta.setDecimalDigits(decimalDigits);
            columnMeta.setRequired(nullType == 0);
            columnMeta.setColumnComment(columnComment);
            columnMeta.setDefaultValue(filterDefaultValue(defaultValue));
            columnList.add(columnMeta);
        }
    } finally {
        if (rs != null) {
            rs.close();
        }
    }
    // Show duplicate objects if any exist
    if (!duplicateColumnNameSet.isEmpty()) {
        String msg = "*Duplicate meta data was found:";
        msg = msg + "\n[" + tableName + "]";
        msg = msg + "\n  duplicate tables = " + duplicateTableNameSet;
        msg = msg + "\n  duplicate columns = " + duplicateColumnNameSet;
        _log.info(msg);
    }
    return columnList;
}
Also used: DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta), StringSet (org.dbflute.helper.StringSet), ResultSet (java.sql.ResultSet)
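
The flexible sets here play two roles: columnNameSet filters duplicate rows returned by the meta data (the first occurrence wins), while duplicateTableNameSet and duplicateColumnNameSet only collect names for the warning log. A condensed sketch of that filtering pattern, with a hypothetical name list standing in for the ResultSet:

import java.util.Arrays;
import java.util.List;

import org.dbflute.helper.StringSet;

public class DuplicateColumnFilterSketch {

    public static void main(String[] args) {
        // hypothetical column names from DatabaseMetaData#getColumns(), where a
        // synonym or case-variant table yields the same column twice
        final List<String> metaColumnList = Arrays.asList("MEMBER_ID", "MEMBER_NAME", "member_id");

        final StringSet columnNameSet = StringSet.createAsFlexible();
        final StringSet duplicateColumnNameSet = StringSet.createAsFlexible();
        for (String columnName : metaColumnList) {
            if (columnNameSet.contains(columnName)) {
                // already adopted (flexible match), so only record it for the warning
                duplicateColumnNameSet.add(columnName);
                continue;
            }
            columnNameSet.add(columnName); // first-found one is adopted
        }
        System.out.println(duplicateColumnNameSet.isEmpty()); // expected: false
        System.out.println(duplicateColumnNameSet.contains("MEMBER_ID")); // expected: true
    }
}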

Aggregations

StringSet (org.dbflute.helper.StringSet): 10 usages
DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta): 4 usages
ArrayList (java.util.ArrayList): 3 usages
Connection (java.sql.Connection): 2 usages
DatabaseMetaData (java.sql.DatabaseMetaData): 2 usages
SQLException (java.sql.SQLException): 2 usages
Statement (java.sql.Statement): 2 usages
Entry (java.util.Map.Entry): 2 usages
DfAutoIncrementExtractor (org.dbflute.logic.jdbc.metadata.basic.DfAutoIncrementExtractor): 2 usages
DfColumnExtractor (org.dbflute.logic.jdbc.metadata.basic.DfColumnExtractor): 2 usages
DfPrimaryKeyMeta (org.dbflute.logic.jdbc.metadata.info.DfPrimaryKeyMeta): 2 usages
DfTableMeta (org.dbflute.logic.jdbc.metadata.info.DfTableMeta): 2 usages
ResultSet (java.sql.ResultSet): 1 usage
List (java.util.List): 1 usage
Map (java.util.Map): 1 usage
ForeignKey (org.apache.torque.engine.database.model.ForeignKey): 1 usage
Table (org.apache.torque.engine.database.model.Table): 1 usage
DfIllegalPropertySettingException (org.dbflute.exception.DfIllegalPropertySettingException): 1 usage
StringKeyMap (org.dbflute.helper.StringKeyMap): 1 usage
DfProcedureArgumentInfo (org.dbflute.logic.jdbc.metadata.info.DfProcedureArgumentInfo): 1 usage