Example 1 with DfDelimiterDataRegistrationFailureException

Use of org.dbflute.exception.DfDelimiterDataRegistrationFailureException in project dbflute-core by dbflute.

From the class DfDelimiterDataHandlerImpl, the method writeSeveralData:

// ===================================================================================
// Main
// ====
public DfDelimiterDataResultInfo writeSeveralData(DfDelimiterDataResource resource, DfLoadedDataInfo loadedDataInfo) {
    final DfDelimiterDataResultInfo resultInfo = new DfDelimiterDataResultInfo();
    final String basePath = resource.getBasePath();
    final File baseDir = new File(basePath);
    final String[] dataDirectoryElements = baseDir.list(new FilenameFilter() {

        public boolean accept(File dir, String name) {
            return !name.startsWith(".");
        }
    });
    if (dataDirectoryElements == null) {
        return resultInfo;
    }
    final FilenameFilter filter = createFilenameFilter(resource.getFileType());
    try {
        for (String encoding : dataDirectoryElements) {
            if (isUnsupportedEncodingDirectory(encoding)) {
                _log.warn("The encoding(directory name) is unsupported: encoding=" + encoding);
                continue;
            }
            final String dataDirectory = basePath + "/" + encoding;
            final File encodingNameDirectory = new File(dataDirectory);
            final String[] fileNameList = encodingNameDirectory.list(filter);
            final Comparator<String> fileNameAscComparator = new Comparator<String>() {

                public int compare(String o1, String o2) {
                    return o1.compareTo(o2);
                }
            };
            final SortedSet<String> sortedFileNameSet = new TreeSet<String>(fileNameAscComparator);
            for (String fileName : fileNameList) {
                sortedFileNameSet.add(fileName);
            }
            final Map<String, Map<String, String>> convertValueMap = getConvertValueMap(resource, encoding);
            final Map<String, String> defaultValueMap = getDefaultValueMap(resource, encoding);
            for (String fileName : sortedFileNameSet) {
                final String fileNamePath = dataDirectory + "/" + fileName;
                final DfDelimiterDataWriterImpl writer = new DfDelimiterDataWriterImpl(_dataSource, _unifiedSchema);
                writer.setLoggingInsertSql(isLoggingInsertSql());
                writer.setFileName(fileNamePath);
                writer.setEncoding(encoding);
                writer.setDelimiter(resource.getDelimiter());
                writer.setConvertValueMap(convertValueMap);
                writer.setDefaultValueMap(defaultValueMap);
                writer.setSuppressBatchUpdate(isSuppressBatchUpdate());
                writer.setSuppressCheckColumnDef(isSuppressCheckColumnDef());
                writer.setSuppressCheckImplicitSet(isSuppressCheckImplicitSet());
                writer.setDataWritingInterceptor(_dataWritingInterceptor);
                writer.setDefaultValueProp(_defaultValueProp);
                writer.setLoadingControlProp(_loadingControlProp);
                writer.writeData(resultInfo);
                prepareImplicitClassificationLazyCheck(loadedDataInfo, writer);
                final String loadType = resource.getLoadType();
                final String fileType = resource.getFileType();
                final boolean warned = resultInfo.containsColumnCountDiff(fileNamePath);
                loadedDataInfo.addLoadedFile(loadType, fileType, encoding, fileName, warned);
            }
            outputResultMark(resource, resultInfo, dataDirectory);
        }
    } catch (IOException e) {
        String msg = "Failed to register delimiter data: " + resource;
        throw new DfDelimiterDataRegistrationFailureException(msg, e);
    }
    return resultInfo;
}
Also used: DfDelimiterDataResultInfo (org.dbflute.logic.replaceschema.loaddata.DfDelimiterDataResultInfo), IOException (java.io.IOException), Comparator (java.util.Comparator), FilenameFilter (java.io.FilenameFilter), TreeSet (java.util.TreeSet), File (java.io.File), Map (java.util.Map), DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException)
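The method above walks encoding-named subdirectories under the base path, loads the files of each directory in ascending file-name order, and wraps any IOException from the per-file writers into DfDelimiterDataRegistrationFailureException. Below is a minimal, self-contained sketch of just that directory/sort structure using only the JDK; the directory layout ("./playsql/data" with an encoding subdirectory) and the ".tsv" file type are assumptions for illustration, not part of dbflute's contract.

import java.io.File;
import java.util.SortedSet;
import java.util.TreeSet;

public class DelimiterDirListSketch {
    public static void main(String[] args) {
        // assumption for illustration: base path contains encoding-named sub-directories, e.g. ./playsql/data/UTF-8
        File baseDir = new File("./playsql/data");
        String[] encodingDirs = baseDir.list((dir, name) -> !name.startsWith(".")); // skip hidden entries
        if (encodingDirs == null) {
            return; // base directory missing: nothing to load, mirroring the early return above
        }
        for (String encoding : encodingDirs) {
            File encodingDir = new File(baseDir, encoding);
            String[] fileNames = encodingDir.list((dir, name) -> name.endsWith(".tsv")); // hypothetical file type
            if (fileNames == null) {
                continue;
            }
            // a TreeSet with natural String ordering gives the same ascending order as the explicit comparator above
            SortedSet<String> sorted = new TreeSet<>();
            for (String fileName : fileNames) {
                sorted.add(fileName);
            }
            sorted.forEach(fileName -> System.out.println(encoding + "/" + fileName));
        }
    }
}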

Example 2 with DfDelimiterDataRegistrationFailureException

Use of org.dbflute.exception.DfDelimiterDataRegistrationFailureException in project dbflute-core by dbflute.

From the class DfDelimiterDataWriteSqlBuilder, the method createBasicColumnValueMap:

// ===================================================================================
// SQL Parts
// =========
protected Map<String, String> createBasicColumnValueMap() {
    if (_basicColumnValueMap != null) {
        return _basicColumnValueMap;
    }
    _basicColumnValueMap = new LinkedHashMap<String, String>();
    int columnCount = -1;
    for (String columnName : _columnNameList) {
        columnCount++;
        if (!_columnMetaMap.isEmpty() && !_columnMetaMap.containsKey(columnName)) {
            // changed logic at setupColumnNameList() in writer like this:
            // "added columns for default value are existing in DB"
            // by jflute (2017/03/26)
            // if (hasDefaultValue(columnName)) {
            // continue;
            // }
            handleNotFoundColumn(columnName);
            continue;
        }
        final String value;
        try {
            value = columnCount < _valueList.size() ? _valueList.get(columnCount) : null;
        } catch (RuntimeException e) {
            String msg = buildDelimiterDataRegistrationFailureMessage(columnCount);
            throw new DfDelimiterDataRegistrationFailureException(msg, e);
        }
        if (!_columnMetaMap.isEmpty() && _columnMetaMap.containsKey(columnName)) {
            String realDbName = _columnMetaMap.get(columnName).getColumnName();
            _basicColumnValueMap.put(realDbName, value);
        } else {
            _basicColumnValueMap.put(columnName, value);
        }
    }
    return _basicColumnValueMap;
}
Also used: DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException)
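createBasicColumnValueMap() pairs the header's column names with the current row's values by position, falling back to null when the row is shorter than the header and skipping columns that do not exist in the table meta data. A minimal standalone sketch of that positional pairing, using only the JDK; the column names and row values are invented sample data:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class ColumnValuePairingSketch {
    public static void main(String[] args) {
        List<String> columnNameList = Arrays.asList("MEMBER_ID", "MEMBER_NAME", "BIRTHDATE");
        List<String> valueList = Arrays.asList("1", "jflute"); // shorter row: trailing columns become null
        Map<String, String> columnValueMap = new LinkedHashMap<>();
        int columnIndex = -1;
        for (String columnName : columnNameList) {
            columnIndex++;
            // guard against rows with fewer values than header columns, as the example does
            String value = columnIndex < valueList.size() ? valueList.get(columnIndex) : null;
            columnValueMap.put(columnName, value);
        }
        System.out.println(columnValueMap); // {MEMBER_ID=1, MEMBER_NAME=jflute, BIRTHDATE=null}
    }
}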

Example 3 with DfDelimiterDataRegistrationFailureException

Use of org.dbflute.exception.DfDelimiterDataRegistrationFailureException in project dbflute-core by dbflute.

From the class DfDelimiterDataWriterImpl, the method doWriteData:

// -----------------------------------------------------
// Write Data
// ----------
protected void doWriteData(DfDelimiterDataResultInfo resultInfo, boolean forcedlySuppressBatch, int offsetRowCount) throws IOException {
    final String dataDirectory = Srl.substringLastFront(_filePath, "/");
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final String tableDbName = extractTableDbName();
    final Map<String, DfColumnMeta> columnMetaMap = getColumnMetaMap(tableDbName);
    if (columnMetaMap.isEmpty()) {
        throwTableNotFoundException(_filePath, tableDbName);
    }
    // process before handling table
    beforeHandlingTable(tableDbName, columnMetaMap);
    // fixedly
    final String lineSeparatorInValue = "\n";
    final File dataFile = new File(_filePath);
    final boolean canBatchUpdate = canBatchUpdate(forcedlySuppressBatch, dataDirectory);
    final StringBuilder lineStringSb = new StringBuilder();
    final StringBuilder preContinuedSb = new StringBuilder();
    final List<String> columnNameList = new ArrayList<String>();
    final List<String> columnValueList = new ArrayList<String>();
    List<String> valueListSnapshot = null;
    // not the line number in the file but the count of registered records
    int rowNumber = 0;
    String executedSql = null;
    // may be committed per batch-limit size; used to skip already-committed rows on retry
    int committedRowCount = 0;
    FileInputStream fis = null;
    InputStreamReader ir = null;
    BufferedReader br = null;
    Connection conn = null;
    PreparedStatement ps = null;
    try {
        fis = new FileInputStream(dataFile);
        ir = new InputStreamReader(fis, _encoding);
        br = new BufferedReader(ir);
        DfDelimiterDataFirstLineInfo firstLineInfo = null;
        int loopIndex = -1;
        // current registered size to prepared statement
        int addedBatchSize = 0;
        while (true) {
            ++loopIndex;
            {
                final String readLine = br.readLine();
                if (readLine == null) {
                    break;
                }
                clearAppend(lineStringSb, readLine);
            }
            // - - - - - - - - - -/
            if (loopIndex == 0) {
                firstLineInfo = analyzeFirstLine(lineStringSb.toString(), _delimiter);
                setupColumnNameList(columnNameList, dataDirectory, dataFile, tableDbName, firstLineInfo, columnMetaMap);
                continue;
            }
            // /- - - - - - - - - - - - - - -
            // analyze values in line strings
            // - - - - - - - - - -/
            // might be clear-appended
            filterLineStringIfNeeds(lineStringSb);
            {
                if (preContinuedSb.length() > 0) {
                    // done performance tuning, suppress incremental strings from many line separators by jflute (2018/03/02)
                    // it needs to change lineString, preContinueString to StringBuilder type...
                    // lineString = preContinueString + "\n" + lineString; (2021/01/21)
                    // and insert has array-copy so may not be fast
                    // lineStringSb.insert(0, "\n").insert(0, preContinuedSb); (2021/01/21)
                    // used only here so changing is no problem
                    preContinuedSb.append(lineSeparatorInValue).append(lineStringSb);
                    clearAppend(lineStringSb, preContinuedSb);
                }
                final DfDelimiterDataValueLineInfo valueLineInfo = analyzeValueLine(lineStringSb.toString(), _delimiter);
                // empty string resolved later
                final List<String> extractedList = valueLineInfo.getValueList();
                if (valueLineInfo.isContinueNextLine()) {
                    clearAppend(preContinuedSb, extractedList.remove(extractedList.size() - 1));
                    columnValueList.addAll(extractedList);
                    // keeping valueList that has previous values
                    continue;
                }
                columnValueList.addAll(extractedList);
            }
            // - - - - - - - - - -/
            if (isDifferentColumnValueCount(firstLineInfo, columnValueList)) {
                handleDifferentColumnValueCount(resultInfo, dataDirectory, tableDbName, firstLineInfo, columnValueList);
                // clear temporary variables
                clear(preContinuedSb);
                columnValueList.clear();
                valueListSnapshot = null;
                continue;
            }
            // *valid record is prepared here
            ++rowNumber;
            valueListSnapshot = columnValueList;
            if (rowNumber <= offsetRowCount) {
                // basically only when retry
                // clear temporary variables
                clear(preContinuedSb);
                columnValueList.clear();
                valueListSnapshot = null;
                // e.g. 1 ~ 100000 rows if 100000 already committed
                continue;
            }
            // /- - - - - - - - - - - - - - - -
            // process registration to database
            // - - - - - - - - - -/
            final DfDelimiterDataWriteSqlBuilder sqlBuilder = createSqlBuilder(resultInfo, tableDbName, columnMetaMap, columnNameList, columnValueList);
            if (conn == null) {
                conn = _dataSource.getConnection();
            }
            if (ps == null) {
                // for performance (suppress implicit transaction per SQL)
                beginTransaction(conn);
                executedSql = sqlBuilder.buildSql();
                ps = prepareStatement(conn, executedSql);
            }
            final Map<String, Object> columnValueMap = sqlBuilder.setupParameter();
            final Set<String> sysdateColumnSet = sqlBuilder.getSysdateColumnSet();
            resolveRelativeDate(dataDirectory, tableDbName, columnValueMap, columnMetaMap, sysdateColumnSet, rowNumber);
            handleLoggingInsert(tableDbName, columnValueMap, loggingInsertType, rowNumber);
            int bindCount = 1;
            for (Entry<String, Object> entry : columnValueMap.entrySet()) {
                final String columnName = entry.getKey();
                final Object obj = entry.getValue();
                // - - - - - - - - - -/
                if (processNull(dataDirectory, tableDbName, columnName, obj, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }
                // It registers the value to statement by the type.
                if (processNotNullNotString(dataDirectory, tableDbName, columnName, obj, conn, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }
                // /- - - - - - - - - - - - - - - - - -
                // process NotNull and StringExpression
                // - - - - - - - - - -/
                final String value = (String) obj;
                processNotNullString(dataDirectory, dataFile, tableDbName, columnName, value, conn, ps, bindCount, columnMetaMap, rowNumber);
                bindCount++;
            }
            if (canBatchUpdate) {
                // mainly here
                ps.addBatch();
            } else {
                ps.execute();
            }
            ++addedBatchSize;
            if (isBatchLimit(dataDirectory, addedBatchSize)) {
                // transaction scope
                if (canBatchUpdate) {
                    // mainly here
                    // this is supported in only delimiter data writer because delimiter data can treat large data
                    // (actually needed, GC overhead limit exceeded when 1000000 records to MySQL, 2021/01/20)
                    // to avoid OutOfMemory
                    ps.executeBatch();
                }
                commitTransaction(conn);
                committedRowCount = committedRowCount + addedBatchSize;
                addedBatchSize = 0;
                close(ps);
                ps = null;
            }
            // *one record is finished here
            // clear temporary variables
            // if an exception occurs from execute() or addBatch(),
            // this valueList is to be information for debug
            clear(preContinuedSb);
            columnValueList.clear();
        // keep here for retry
        // valueListSnapshot = null;
        }
        if (ps != null && addedBatchSize > 0) {
            if (canBatchUpdate) {
                // mainly here
                ps.executeBatch();
            }
            commitTransaction(conn);
            committedRowCount = committedRowCount + addedBatchSize;
        }
        noticeLoadedRowSize(tableDbName, rowNumber);
        resultInfo.registerLoadedMeta(dataDirectory, _filePath, rowNumber);
        checkImplicitClassification(dataFile, tableDbName, columnNameList);
    } catch (SQLException e) {
        // request retry if it needs (e.g. execution exception of batch insert)
        // the snapshot is used only when retry failure basically
        final DfJDBCException wrapped = DfJDBCException.voice(e);
        final String msg = buildFailureMessage(_filePath, tableDbName, executedSql, columnValueList, wrapped);
        throw new DfDelimiterDataRegistrationFailureException(msg, wrapped.getNextException()).retryIfNeeds(createRetryResource(canBatchUpdate, committedRowCount)).snapshotRow(createRowSnapshot(columnNameList, valueListSnapshot, rowNumber));
    } catch (RuntimeException e) {
        // unneeded snapshot at this side but just in case (or changing determination future)
        final String msg = buildFailureMessage(_filePath, tableDbName, executedSql, columnValueList, null);
        throw new DfDelimiterDataRegistrationFailureException(msg, e).snapshotRow(createRowSnapshot(columnNameList, valueListSnapshot, rowNumber));
    } finally {
        closeStream(fis, ir, br);
        try {
            rollbackTransaction(conn);
        } catch (SQLException continued) {
            _log.info("Failed to rollback the delimiter data transaction.", continued);
        }
        close(ps);
        close(conn);
        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnMetaMap);
    }
}
Also used: LoggingInsertType (org.dbflute.logic.replaceschema.loaddata.base.dataprop.DfLoadingControlProp.LoggingInsertType), SQLException (java.sql.SQLException), ArrayList (java.util.ArrayList), DfJDBCException (org.dbflute.exception.DfJDBCException), List (java.util.List), DfDelimiterDataFirstLineInfo (org.dbflute.logic.replaceschema.loaddata.delimiter.line.DfDelimiterDataFirstLineInfo), DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException), DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta), InputStreamReader (java.io.InputStreamReader), Connection (java.sql.Connection), PreparedStatement (java.sql.PreparedStatement), FileInputStream (java.io.FileInputStream), DfDelimiterDataValueLineInfo (org.dbflute.logic.replaceschema.loaddata.delimiter.line.DfDelimiterDataValueLineInfo), BufferedReader (java.io.BufferedReader), File (java.io.File)
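doWriteData() keeps memory bounded by accumulating rows with addBatch(), then executing and committing once the batch limit for the data directory is reached. Below is a minimal sketch of that chunked batch/commit pattern over plain JDBC, assuming a hypothetical MEMBER table, insert SQL, and chunk size; error handling is reduced to try-with-resources plus a rollback, so it is an illustration of the pattern rather than dbflute's implementation.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class ChunkedBatchInsertSketch {
    // hypothetical schema and SQL for illustration only
    private static final String INSERT_SQL = "insert into MEMBER (MEMBER_NAME) values (?)";
    private static final int BATCH_LIMIT = 10000; // assumed chunk size; dbflute reads its limit from loading control properties

    public static void insertAll(String jdbcUrl, List<String> names) throws SQLException {
        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
            conn.setAutoCommit(false); // suppress the implicit transaction per SQL, as in the example
            int addedBatchSize = 0;
            try (PreparedStatement ps = conn.prepareStatement(INSERT_SQL)) {
                for (String name : names) {
                    ps.setString(1, name);
                    ps.addBatch();
                    addedBatchSize++;
                    if (addedBatchSize >= BATCH_LIMIT) {
                        ps.executeBatch(); // flush the chunk to the database
                        conn.commit();     // commit per chunk to keep memory and undo logs bounded
                        ps.clearBatch();
                        addedBatchSize = 0;
                    }
                }
                if (addedBatchSize > 0) {
                    ps.executeBatch(); // flush the trailing partial chunk
                    conn.commit();
                }
            } catch (SQLException e) {
                conn.rollback(); // roll back the uncommitted chunk on failure
                throw e;
            }
        }
    }
}

One difference in shape: the dbflute method closes and re-prepares the PreparedStatement per chunk, while this sketch reuses it with clearBatch(); both serve the same purpose of capping how much batched data is held at once.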

Example 4 with DfDelimiterDataRegistrationFailureException

Use of org.dbflute.exception.DfDelimiterDataRegistrationFailureException in project dbflute-core by dbflute.

From the class DfDelimiterDataWriterImpl, the method writeData:

// ===================================================================================
// Write
// =====
public void writeData(DfDelimiterDataResultInfo resultInfo) throws IOException {
    _log.info("/= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = ");
    _log.info("writeData(" + _fileName + ")");
    _log.info("= = = = = = =/");
    FileInputStream fis = null;
    InputStreamReader ir = null;
    BufferedReader br = null;
    final String dataDirectory = Srl.substringLastFront(_fileName, "/");
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final String tableDbName = extractTableDbName();
    final Map<String, DfColumnMeta> columnMetaMap = getColumnMetaMap(tableDbName);
    if (columnMetaMap.isEmpty()) {
        throwTableNotFoundException(_fileName, tableDbName);
    }
    // process before handling table
    beforeHandlingTable(tableDbName, columnMetaMap);
    String lineString = null;
    String preContinueString = null;
    String executedSql = null;
    final List<String> columnNameList = new ArrayList<String>();
    final List<String> valueList = new ArrayList<String>();
    final boolean canBatchUpdate = !isMergedSuppressBatchUpdate(dataDirectory);
    final File dataFile = new File(_fileName);
    Connection conn = null;
    PreparedStatement ps = null;
    try {
        fis = new FileInputStream(dataFile);
        ir = new InputStreamReader(fis, _encoding);
        br = new BufferedReader(ir);
        FirstLineInfo firstLineInfo = null;
        int loopIndex = -1;
        int rowNumber = 0;
        int addedBatchSize = 0;
        while (true) {
            ++loopIndex;
            lineString = br.readLine();
            if (lineString == null) {
                break;
            }
            // - - - - - - - - - -/
            if (loopIndex == 0) {
                firstLineInfo = analyzeFirstLineInfo(_delimiter, lineString);
                setupColumnNameList(dataDirectory, dataFile, tableDbName, columnMetaMap, firstLineInfo, columnNameList);
                continue;
            }
            // /- - - - - - - - - - - - - - -
            // analyze values in line strings
            // - - - - - - - - - -/
            lineString = filterLineString(lineString);
            {
                if (preContinueString != null && !preContinueString.equals("")) {
                    lineString = preContinueString + "\n" + lineString;
                }
                final ValueLineInfo valueLineInfo = arrangeValueList(lineString, _delimiter);
                // empty string resolved later
                final List<String> ls = valueLineInfo.getValueList();
                if (valueLineInfo.isContinueNextLine()) {
                    preContinueString = ls.remove(ls.size() - 1);
                    valueList.addAll(ls);
                    continue;
                }
                valueList.addAll(ls);
            }
            // - - - - - - - - - -/
            if (isDifferentColumnValueCount(firstLineInfo, valueList)) {
                handleDifferentColumnValueCount(resultInfo, dataDirectory, tableDbName, firstLineInfo, valueList);
                // clear temporary variables
                valueList.clear();
                preContinueString = null;
                continue;
            }
            // *valid record is prepared here
            ++rowNumber;
            // /- - - - - - - - - - - - - - - -
            // process registration to database
            // - - - - - - - - - -/
            final DfDelimiterDataWriteSqlBuilder sqlBuilder = createSqlBuilder(resultInfo, tableDbName, columnMetaMap, columnNameList, valueList);
            if (conn == null) {
                conn = _dataSource.getConnection();
            }
            if (ps == null) {
                // for performance (suppress implicit transaction per SQL)
                beginTransaction(conn);
                executedSql = sqlBuilder.buildSql();
                ps = prepareStatement(conn, executedSql);
            }
            final Map<String, Object> columnValueMap = sqlBuilder.setupParameter();
            final Set<String> sysdateColumnSet = sqlBuilder.getSysdateColumnSet();
            resolveRelativeDate(dataDirectory, tableDbName, columnValueMap, columnMetaMap, sysdateColumnSet, rowNumber);
            handleLoggingInsert(tableDbName, columnValueMap, loggingInsertType, rowNumber);
            int bindCount = 1;
            for (Entry<String, Object> entry : columnValueMap.entrySet()) {
                final String columnName = entry.getKey();
                final Object obj = entry.getValue();
                // - - - - - - - - - -/
                if (processNull(dataDirectory, tableDbName, columnName, obj, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }
                // It registers the value to statement by the type.
                if (processNotNullNotString(dataDirectory, tableDbName, columnName, obj, conn, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }
                // /- - - - - - - - - - - - - - - - - -
                // process NotNull and StringExpression
                // - - - - - - - - - -/
                final String value = (String) obj;
                processNotNullString(dataDirectory, dataFile, tableDbName, columnName, value, conn, ps, bindCount, columnMetaMap, rowNumber);
                bindCount++;
            }
            if (canBatchUpdate) {
                // mainly here
                ps.addBatch();
            } else {
                ps.execute();
            }
            ++addedBatchSize;
            if (isBatchSizeLimit(addedBatchSize)) {
                // transaction scope
                if (canBatchUpdate) {
                    // mainly here
                    // this is supported in only delimiter data writer
                    // because delimiter data can treat large data
                    // to avoid OutOfMemory
                    ps.executeBatch();
                }
                commitTransaction(conn);
                addedBatchSize = 0;
                close(ps);
                ps = null;
            }
            // *one record is finished here
            // clear temporary variables
            // if an exception occurs from execute() or addBatch(),
            // this valueList is to be information for debug
            valueList.clear();
            preContinueString = null;
        }
        if (ps != null && addedBatchSize > 0) {
            if (canBatchUpdate) {
                // mainly here
                ps.executeBatch();
            }
            commitTransaction(conn);
        }
        noticeLoadedRowSize(tableDbName, rowNumber);
        resultInfo.registerLoadedMeta(dataDirectory, _fileName, rowNumber);
        checkImplicitClassification(dataFile, tableDbName, columnNameList);
    } catch (FileNotFoundException e) {
        throw e;
    } catch (IOException e) {
        throw e;
    } catch (SQLException e) {
        DfJDBCException wrapped = DfJDBCException.voice(e);
        String msg = buildRegExpMessage(_fileName, tableDbName, executedSql, valueList, wrapped);
        throw new DfDelimiterDataRegistrationFailureException(msg, wrapped.getNextException());
    } catch (RuntimeException e) {
        String msg = buildRegExpMessage(_fileName, tableDbName, executedSql, valueList, null);
        throw new DfDelimiterDataRegistrationFailureException(msg, e);
    } finally {
        closeStream(fis, ir, br);
        commitJustInCase(conn);
        close(ps);
        close(conn);
        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnMetaMap);
    }
}
Also used: LoggingInsertType (org.dbflute.logic.replaceschema.loaddata.impl.dataprop.DfLoadingControlProp.LoggingInsertType), SQLException (java.sql.SQLException), ArrayList (java.util.ArrayList), FileNotFoundException (java.io.FileNotFoundException), DfJDBCException (org.dbflute.exception.DfJDBCException), List (java.util.List), DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException), DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta), InputStreamReader (java.io.InputStreamReader), Connection (java.sql.Connection), PreparedStatement (java.sql.PreparedStatement), IOException (java.io.IOException), FileInputStream (java.io.FileInputStream), BufferedReader (java.io.BufferedReader), File (java.io.File)
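Both writeData() variants handle values that contain line separators: when the line analyzer reports isContinueNextLine(), the last, unterminated value is carried over and rejoined with the next physical line before the record is registered. The sketch below shows that continuation idea for a quote-aware TSV-style format; the specific quoting rule (a value continues while its double quote is unclosed) is an assumption for illustration, not dbflute's exact parser.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

public class ContinuedLineSketch {
    public static void main(String[] args) throws IOException {
        String data = "1\t\"first line\nsecond line\"\n2\t\"single line\"";
        BufferedReader br = new BufferedReader(new StringReader(data));
        List<String> logicalRows = new ArrayList<>();
        StringBuilder pending = new StringBuilder();
        String line;
        while ((line = br.readLine()) != null) {
            if (pending.length() > 0) {
                pending.append("\n").append(line); // rejoin the value that was split across physical lines
            } else {
                pending.append(line);
            }
            if (countQuotes(pending) % 2 == 0) { // all quotes closed: the logical row is complete
                logicalRows.add(pending.toString());
                pending.setLength(0);
            }
        }
        logicalRows.forEach(row -> System.out.println("[row] " + row));
    }

    private static int countQuotes(CharSequence s) {
        int count = 0;
        for (int i = 0; i < s.length(); i++) {
            if (s.charAt(i) == '"') {
                count++;
            }
        }
        return count;
    }
}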

Example 5 with DfDelimiterDataRegistrationFailureException

Use of org.dbflute.exception.DfDelimiterDataRegistrationFailureException in project dbflute-core by dbflute.

From the class DfDelimiterDataHandlerImpl, the method writeSeveralData (a lambda-based variant of Example 1):

// ===================================================================================
// Main
// ====
public DfDelimiterDataResultInfo writeSeveralData(DfDelimiterDataResource resource, DfLoadedDataInfo loadedDataInfo) {
    final DfDelimiterDataResultInfo resultInfo = new DfDelimiterDataResultInfo();
    final String basePath = resource.getBasePath();
    final File baseDir = new File(basePath);
    final String[] dataDirElements = baseDir.list((dir, name) -> !name.startsWith("."));
    if (dataDirElements == null) {
        return resultInfo;
    }
    try {
        for (String encoding : dataDirElements) {
            if (isUnsupportedEncodingDirectory(encoding)) {
                _log.warn("The encoding(directory name) is unsupported: encoding=" + encoding);
                continue;
            }
            final String dataDirPath = basePath + "/" + encoding;
            final File encodingNameDir = new File(dataDirPath);
            final String[] fileNameList = encodingNameDir.list((dir, name) -> name.endsWith("." + resource.getFileType()));
            final SortedSet<String> sortedFileNameSet = new TreeSet<String>((o1, o2) -> o1.compareTo(o2));
            for (String fileName : fileNameList) {
                sortedFileNameSet.add(fileName);
            }
            final Map<String, Map<String, String>> convertValueMap = getConvertValueMap(resource, encoding);
            final Map<String, String> defaultValueMap = getDefaultValueMap(resource, encoding);
            for (String fileName : sortedFileNameSet) {
                final String filePath = dataDirPath + "/" + fileName;
                final DfDelimiterDataWriterImpl writer = new DfDelimiterDataWriterImpl(_dataSource, _unifiedSchema);
                writer.setLoggingInsertSql(isLoggingInsertSql());
                writer.setFilePath(filePath);
                writer.setEncoding(encoding);
                writer.setDelimiter(resource.getDelimiter());
                writer.setConvertValueMap(convertValueMap);
                writer.setDefaultValueMap(defaultValueMap);
                writer.setSuppressBatchUpdate(isSuppressBatchUpdate());
                writer.setSuppressCheckColumnDef(isSuppressCheckColumnDef());
                writer.setSuppressCheckImplicitSet(isSuppressCheckImplicitSet());
                writer.setDataWritingInterceptor(_dataWritingInterceptor);
                writer.setConvertValueProp(_convertValueProp);
                writer.setDefaultValueProp(_defaultValueProp);
                writer.setLoadingControlProp(_loadingControlProp);
                writer.writeData(resultInfo);
                prepareImplicitClassificationLazyCheck(loadedDataInfo, writer);
                final String loadType = resource.getLoadType();
                final String fileType = resource.getFileType();
                final boolean warned = resultInfo.containsColumnCountDiff(filePath);
                loadedDataInfo.addLoadedFile(loadType, fileType, encoding, fileName, warned);
            }
            outputResultMark(resource, resultInfo, dataDirPath);
        }
    } catch (IOException e) {
        String msg = "Failed to register delimiter data: " + resource;
        throw new DfDelimiterDataRegistrationFailureException(msg, e);
    }
    return resultInfo;
}
Also used: IOException (java.io.IOException), TreeSet (java.util.TreeSet), File (java.io.File), Map (java.util.Map), DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException)
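Compared with Example 1, this variant replaces the anonymous FilenameFilter and Comparator with lambdas, uses setFilePath and adds setConvertValueProp on the writer, and keeps the same IOException-to-DfDelimiterDataRegistrationFailureException wrapping. One small observation on the comparator: (o1, o2) -> o1.compareTo(o2) is just natural String ordering, so the set could equally be built with the no-argument TreeSet constructor, as the JDK-only sketch below shows (an equivalence note, not a change present in dbflute).

import java.util.Comparator;
import java.util.SortedSet;
import java.util.TreeSet;

public class NaturalOrderSketch {
    public static void main(String[] args) {
        // the explicit comparator used in the example...
        SortedSet<String> explicit = new TreeSet<>((o1, o2) -> o1.compareTo(o2));
        // ...orders identically to the natural-order alternatives
        SortedSet<String> natural = new TreeSet<>();
        SortedSet<String> viaComparator = new TreeSet<>(Comparator.naturalOrder());
        for (String name : new String[] { "member.tsv", "10-product.tsv", "01-member.tsv" }) {
            explicit.add(name);
            natural.add(name);
            viaComparator.add(name);
        }
        System.out.println(explicit);      // [01-member.tsv, 10-product.tsv, member.tsv]
        System.out.println(natural);       // same order
        System.out.println(viaComparator); // same order
    }
}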

Aggregations

DfDelimiterDataRegistrationFailureException (org.dbflute.exception.DfDelimiterDataRegistrationFailureException): 6 usages
File (java.io.File): 4 usages
IOException (java.io.IOException): 3 usages
BufferedReader (java.io.BufferedReader): 2 usages
FileInputStream (java.io.FileInputStream): 2 usages
InputStreamReader (java.io.InputStreamReader): 2 usages
Connection (java.sql.Connection): 2 usages
PreparedStatement (java.sql.PreparedStatement): 2 usages
SQLException (java.sql.SQLException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
List (java.util.List): 2 usages
Map (java.util.Map): 2 usages
TreeSet (java.util.TreeSet): 2 usages
DfJDBCException (org.dbflute.exception.DfJDBCException): 2 usages
DfColumnMeta (org.dbflute.logic.jdbc.metadata.info.DfColumnMeta): 2 usages
FileNotFoundException (java.io.FileNotFoundException): 1 usage
FilenameFilter (java.io.FilenameFilter): 1 usage
Comparator (java.util.Comparator): 1 usage
DfDelimiterDataResultInfo (org.dbflute.logic.replaceschema.loaddata.DfDelimiterDataResultInfo): 1 usage
LoggingInsertType (org.dbflute.logic.replaceschema.loaddata.base.dataprop.DfLoadingControlProp.LoggingInsertType): 1 usage