Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfDelimiterDataWriterImpl, method doWriteData:
// -----------------------------------------------------
//                                            Write Data
//                                            ----------
protected void doWriteData(DfDelimiterDataResultInfo resultInfo, boolean forcedlySuppressBatch, int offsetRowCount) throws IOException {
    final String dataDirectory = Srl.substringLastFront(_filePath, "/");
    final LoggingInsertType loggingInsertType = getLoggingInsertType(dataDirectory);
    final String tableDbName = extractTableDbName();
    final Map<String, DfColumnMeta> columnMetaMap = getColumnMetaMap(tableDbName);
    if (columnMetaMap.isEmpty()) {
        throwTableNotFoundException(_filePath, tableDbName);
    }

    // process before handling table
    beforeHandlingTable(tableDbName, columnMetaMap);

    final String lineSeparatorInValue = "\n"; // fixedly
    final File dataFile = new File(_filePath);
    final boolean canBatchUpdate = canBatchUpdate(forcedlySuppressBatch, dataDirectory);

    final StringBuilder lineStringSb = new StringBuilder();
    final StringBuilder preContinuedSb = new StringBuilder();
    final List<String> columnNameList = new ArrayList<String>();
    final List<String> columnValueList = new ArrayList<String>();
    List<String> valueListSnapshot = null;

    int rowNumber = 0; // not line on file, as registered record
    String executedSql = null;
    int committedRowCount = 0; // may be committed per limit size, for skip in retry

    FileInputStream fis = null;
    InputStreamReader ir = null;
    BufferedReader br = null;
    Connection conn = null;
    PreparedStatement ps = null;
    try {
        fis = new FileInputStream(dataFile);
        ir = new InputStreamReader(fis, _encoding);
        br = new BufferedReader(ir);

        DfDelimiterDataFirstLineInfo firstLineInfo = null;
        int loopIndex = -1;
        int addedBatchSize = 0; // current registered size to prepared statement
        while (true) {
            ++loopIndex;
            {
                final String readLine = br.readLine();
                if (readLine == null) {
                    break;
                }
                clearAppend(lineStringSb, readLine);
            }

            // /- - - - - - - - - - - - - - - - - - - - - - -
            // initialize column definition from first line
            // - - - - - - - - - -/
            if (loopIndex == 0) {
                firstLineInfo = analyzeFirstLine(lineStringSb.toString(), _delimiter);
                setupColumnNameList(columnNameList, dataDirectory, dataFile, tableDbName, firstLineInfo, columnMetaMap);
                continue;
            }
            // /- - - - - - - - - - - - - - -
            // analyze values in line strings
            // - - - - - - - - - -/
            filterLineStringIfNeeds(lineStringSb); // might be clear-appended
            {
                if (preContinuedSb.length() > 0) {
                    // done performance tuning, suppress incremental strings from many line separators by jflute (2018/03/02)
                    // it needs to change lineString, preContinueString to StringBuilder type...
                    //lineString = preContinueString + "\n" + lineString; (2021/01/21)
                    // and insert has array-copy so may not be fast
                    //lineStringSb.insert(0, "\n").insert(0, preContinuedSb); (2021/01/21)
                    // used only here so changing is no problem
                    preContinuedSb.append(lineSeparatorInValue).append(lineStringSb);
                    clearAppend(lineStringSb, preContinuedSb);
                }
                final DfDelimiterDataValueLineInfo valueLineInfo = analyzeValueLine(lineStringSb.toString(), _delimiter);
                final List<String> extractedList = valueLineInfo.getValueList(); // empty string resolved later
                if (valueLineInfo.isContinueNextLine()) {
                    clearAppend(preContinuedSb, extractedList.remove(extractedList.size() - 1));
                    columnValueList.addAll(extractedList);
                    continue; // keeping valueList that has previous values
                }
                columnValueList.addAll(extractedList);
            }
            // /- - - - - - - - - - - - - - -
            // check column value count
            // - - - - - - - - - -/
            if (isDifferentColumnValueCount(firstLineInfo, columnValueList)) {
                handleDifferentColumnValueCount(resultInfo, dataDirectory, tableDbName, firstLineInfo, columnValueList);

                // clear temporary variables
                clear(preContinuedSb);
                columnValueList.clear();
                valueListSnapshot = null;
                continue;
            }
            // *valid record is prepared here
            ++rowNumber;
            valueListSnapshot = columnValueList;

            if (rowNumber <= offsetRowCount) { // basically only when retry
                // clear temporary variables
                clear(preContinuedSb);
                columnValueList.clear();
                valueListSnapshot = null;
                continue; // e.g. 1 ~ 100000 rows if 100000 already committed
            }

            // /- - - - - - - - - - - - - - - -
            // process registration to database
            // - - - - - - - - - -/
            final DfDelimiterDataWriteSqlBuilder sqlBuilder =
                    createSqlBuilder(resultInfo, tableDbName, columnMetaMap, columnNameList, columnValueList);
            if (conn == null) {
                conn = _dataSource.getConnection();
            }
            if (ps == null) {
                beginTransaction(conn); // for performance (suppress implicit transaction per SQL)
                executedSql = sqlBuilder.buildSql();
                ps = prepareStatement(conn, executedSql);
            }
            final Map<String, Object> columnValueMap = sqlBuilder.setupParameter();
            final Set<String> sysdateColumnSet = sqlBuilder.getSysdateColumnSet();
            resolveRelativeDate(dataDirectory, tableDbName, columnValueMap, columnMetaMap, sysdateColumnSet, rowNumber);
            handleLoggingInsert(tableDbName, columnValueMap, loggingInsertType, rowNumber);

            int bindCount = 1;
            for (Entry<String, Object> entry : columnValueMap.entrySet()) {
                final String columnName = entry.getKey();
                final Object obj = entry.getValue();
                // /- - - - - - - - - - - - - - - - - -
                // process null value
                // - - - - - - - - - -/
                if (processNull(dataDirectory, tableDbName, columnName, obj, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }

                // /- - - - - - - - - - - - - - - - - -
                // process NotNull and NotString
                // - - - - - - - - - -/
                // It registers the value to statement by the type.
                if (processNotNullNotString(dataDirectory, tableDbName, columnName, obj, conn, ps, bindCount, columnMetaMap, rowNumber)) {
                    bindCount++;
                    continue;
                }

                // /- - - - - - - - - - - - - - - - - -
                // process NotNull and StringExpression
                // - - - - - - - - - -/
                final String value = (String) obj;
                processNotNullString(dataDirectory, dataFile, tableDbName, columnName, value, conn, ps, bindCount, columnMetaMap, rowNumber);
                bindCount++;
            }
            if (canBatchUpdate) { // mainly here
                ps.addBatch();
            } else {
                ps.execute();
            }
            ++addedBatchSize;
            if (isBatchLimit(dataDirectory, addedBatchSize)) { // transaction scope
                if (canBatchUpdate) { // mainly here
                    // this is supported in only delimiter data writer because delimiter data can treat large data
                    // (actually needed, GC overhead limit exceeded when 1000000 records to MySQL, 2021/01/20)
                    // to avoid OutOfMemory
                    ps.executeBatch();
                }
                commitTransaction(conn);
                committedRowCount = committedRowCount + addedBatchSize;
                addedBatchSize = 0;
                close(ps);
                ps = null;
            }
            // *one record is finished here

            // clear temporary variables
            // (if an exception occurs from execute() or addBatch(),
            // this valueList is to be information for debug)
            clear(preContinuedSb);
            columnValueList.clear();
            //valueListSnapshot = null; // keep here for retry
        }
        if (ps != null && addedBatchSize > 0) {
            if (canBatchUpdate) { // mainly here
                ps.executeBatch();
            }
            commitTransaction(conn);
            committedRowCount = committedRowCount + addedBatchSize;
        }
        noticeLoadedRowSize(tableDbName, rowNumber);
        resultInfo.registerLoadedMeta(dataDirectory, _filePath, rowNumber);
        checkImplicitClassification(dataFile, tableDbName, columnNameList);
    } catch (SQLException e) {
        // request retry if it needs (e.g. execution exception of batch insert)
        // the snapshot is used only when retry failure basically
        final DfJDBCException wrapped = DfJDBCException.voice(e);
        final String msg = buildFailureMessage(_filePath, tableDbName, executedSql, columnValueList, wrapped);
        throw new DfDelimiterDataRegistrationFailureException(msg, wrapped.getNextException())
                .retryIfNeeds(createRetryResource(canBatchUpdate, committedRowCount))
                .snapshotRow(createRowSnapshot(columnNameList, valueListSnapshot, rowNumber));
    } catch (RuntimeException e) {
        // unneeded snapshot at this side but just in case (or determination may change in future)
        final String msg = buildFailureMessage(_filePath, tableDbName, executedSql, columnValueList, null);
        throw new DfDelimiterDataRegistrationFailureException(msg, e)
                .snapshotRow(createRowSnapshot(columnNameList, valueListSnapshot, rowNumber));
    } finally {
        closeStream(fis, ir, br);
        try {
            rollbackTransaction(conn);
        } catch (SQLException continued) {
            _log.info("Failed to rollback the delimiter data transaction.", continued);
        }
        close(ps);
        close(conn);

        // process after (finally) handling table
        finallyHandlingTable(tableDbName, columnMetaMap);
    }
}
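The loop above is the core pattern: addBatch() per row, then executeBatch(), commit, and statement re-creation once the chunk limit is reached, which is what keeps memory bounded for large delimiter files (the source comments mention GC overhead errors at 1000000 rows against MySQL). A minimal standalone sketch of that chunked-batch idea in plain JDBC, assuming a hypothetical MEMBER table and an illustrative chunk size rather than dbflute's own configuration:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

public class ChunkedBatchInsertSketch {

    private static final int CHUNK_SIZE = 100000; // illustrative chunk size, not dbflute's configured limit

    // batch per row, then flush and commit per chunk so memory stays bounded for huge files
    public static void insertInChunks(Connection conn, List<String[]> rows) throws SQLException {
        conn.setAutoCommit(false); // suppress the implicit transaction per SQL
        PreparedStatement ps = null;
        int addedBatchSize = 0;
        try {
            for (String[] row : rows) {
                if (ps == null) {
                    // hypothetical table and columns, purely for illustration
                    ps = conn.prepareStatement("insert into MEMBER (MEMBER_NAME, MEMBER_STATUS_CODE) values (?, ?)");
                }
                ps.setString(1, row[0]);
                ps.setString(2, row[1]);
                ps.addBatch();
                ++addedBatchSize;
                if (addedBatchSize >= CHUNK_SIZE) {
                    ps.executeBatch(); // flush accumulated rows to avoid OutOfMemory
                    conn.commit();
                    addedBatchSize = 0;
                    ps.close(); // recreated per chunk, as the writer above does
                    ps = null;
                }
            }
            if (ps != null && addedBatchSize > 0) { // remainder smaller than one chunk
                ps.executeBatch();
                conn.commit();
            }
        } finally {
            if (ps != null) {
                ps.close();
            }
        }
    }
}

Committing per chunk also explains the committedRowCount bookkeeping above: on retry, rows already committed can be skipped through offsetRowCount.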
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfDelimiterDataWriterImpl, method createSqlBuilder:
protected DfDelimiterDataWriteSqlBuilder createSqlBuilder(DfDelimiterDataResultInfo resultInfo, String tableDbName,
        final Map<String, DfColumnMeta> columnMetaMap, List<String> columnNameList, List<String> valueList) {
    final DfDelimiterDataWriteSqlBuilder sqlBuilder = new DfDelimiterDataWriteSqlBuilder();
    sqlBuilder.setTableDbName(tableDbName);
    sqlBuilder.setColumnMetaMap(columnMetaMap);
    sqlBuilder.setColumnNameList(columnNameList);
    sqlBuilder.setValueList(valueList);
    sqlBuilder.setNotFoundColumnMap(resultInfo.getNotFoundColumnMap());
    sqlBuilder.setConvertValueMap(_convertValueMap);
    sqlBuilder.setDefaultValueMap(_defaultValueMap);
    sqlBuilder.setBindTypeProvider(new DfColumnBindTypeProvider() {
        public Class<?> provide(String tableName, DfColumnMeta columnMeta) {
            return getBindType(tableName, columnMeta);
        }
    });
    sqlBuilder.setDefaultValueProp(_defaultValueProp);
    return sqlBuilder;
}
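Note that the builder does not decide bind types itself; it calls back into the writer through DfColumnBindTypeProvider, so the component that knows the DBMS type mapping stays in charge. A small sketch of that callback inversion with hypothetical names (BindTypeProvider and SqlBuilder here are illustrative stand-ins, not dbflute classes):

public class CallbackProviderSketch {

    // the provider interface: the builder asks, the caller answers
    interface BindTypeProvider {
        Class<?> provide(String tableName, String columnName);
    }

    static class SqlBuilder {
        private BindTypeProvider bindTypeProvider;

        void setBindTypeProvider(BindTypeProvider provider) {
            this.bindTypeProvider = provider;
        }

        // during parameter setup, the builder calls back into the provider
        Class<?> resolveBindType(String tableName, String columnName) {
            return bindTypeProvider.provide(tableName, columnName);
        }
    }

    public static void main(String[] args) {
        SqlBuilder builder = new SqlBuilder();
        // the caller owns the type-mapping decision, e.g. derived from JDBC metadata
        builder.setBindTypeProvider((tableName, columnName) -> String.class);
        System.out.println(builder.resolveBindType("MEMBER", "MEMBER_NAME")); // class java.lang.String
    }
}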
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfXlsDataHandlingWriter, method convertColumnValueIfNeeds:
// ===================================================================================
//                                                                 Column Value Filter
//                                                                 ===================
protected void convertColumnValueIfNeeds(String dataDirectory, String tableName, Map<String, Object> columnValueMap,
        Map<String, DfColumnMeta> columnMetaMap) {
    // handling both convertValueMap and defaultValueMap
    final Map<String, Map<String, String>> convertValueMap = getConvertValueMap(dataDirectory);
    final Map<String, String> defaultValueMap = getDefaultValueMap(dataDirectory);
    if ((convertValueMap == null || convertValueMap.isEmpty()) // no convert
            && (defaultValueMap == null || defaultValueMap.isEmpty())) { // and no default
        return; // so it does not need to convert here
    }
    final DfColumnBindTypeProvider bindTypeProvider = new DfColumnBindTypeProvider() {
        public Class<?> provide(String tableName, DfColumnMeta columnMeta) {
            return getBindType(tableName, columnMeta);
        }
    };
    final DfColumnValueConverter converter = new DfColumnValueConverter(convertValueMap, defaultValueMap, bindTypeProvider);
    converter.convert(tableName, columnValueMap, columnMetaMap);
}
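Judging from the signatures above, the two maps have different shapes: convertValueMap goes from column name to a from-value/to-value map, while defaultValueMap is a flat column-to-default map. A hedged sketch of constructing them, with purely illustrative column names and values:

import java.util.LinkedHashMap;
import java.util.Map;

public class ValueMapSketch {
    public static void main(String[] args) {
        // column name -> (from-value -> to-value); names and values are illustrative
        Map<String, Map<String, String>> convertValueMap = new LinkedHashMap<String, Map<String, String>>();
        Map<String, String> statusConversion = new LinkedHashMap<String, String>();
        statusConversion.put("ACTIVE", "FML"); // hypothetical mapping
        convertValueMap.put("MEMBER_STATUS_CODE", statusConversion);

        // column name -> default value applied when no value is given
        Map<String, String> defaultValueMap = new LinkedHashMap<String, String>();
        defaultValueMap.put("REGISTER_USER", "loader"); // hypothetical default

        System.out.println(convertValueMap);
        System.out.println(defaultValueMap);
    }
}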
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfXlsDataHandlingWriter, method filterValidColumn:
protected void filterValidColumn(final DfDataSet dataSet) {
    for (int i = 0; i < dataSet.getTableSize(); i++) {
        final DfDataTable table = dataSet.getTable(i);
        final String tableName = table.getTableDbName();
        final Map<String, DfColumnMeta> metaMetaMap = getColumnMetaMap(tableName);
        for (int j = 0; j < table.getColumnSize(); j++) {
            final DfDataColumn dataColumn = table.getColumn(j);
            if (!metaMetaMap.containsKey(dataColumn.getColumnDbName())) {
                dataColumn.setWritable(false);
            }
        }
    }
}
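The effect of filterValidColumn is tolerance: a sheet column with no matching entry in the column meta map is marked non-writable and skipped, rather than failing the whole load. The same idea reduced to plain collections (table and column names are illustrative):

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class WritableColumnFilterSketch {
    public static void main(String[] args) {
        // columns that actually exist on the table, per metadata
        Set<String> dbColumns = new LinkedHashSet<String>(Arrays.asList("MEMBER_ID", "MEMBER_NAME"));
        // columns found on the data sheet; MEMO has no DB counterpart
        List<String> sheetColumns = Arrays.asList("MEMBER_ID", "MEMBER_NAME", "MEMO");
        // keep only writable columns instead of failing on the unknown one
        List<String> writable = sheetColumns.stream().filter(dbColumns::contains).collect(Collectors.toList());
        System.out.println(writable); // [MEMBER_ID, MEMBER_NAME]
    }
}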
Use of org.dbflute.logic.jdbc.metadata.info.DfColumnMeta in project dbflute-core by dbflute.
Class DfCustomizeEntityMetaExtractor, method extractColumnMetaInfoMap:
// ===================================================================================
//                                                                                Main
//                                                                                ====
public Map<String, DfColumnMeta> extractColumnMetaInfoMap(ResultSet rs, String sql,
        DfForcedJavaNativeProvider forcedJavaNativeProvider) throws SQLException {
    final Map<String, DfColumnMeta> columnMetaInfoMap = StringKeyMap.createAsFlexibleOrdered();
    final ResultSetMetaData md = rs.getMetaData();
    for (int i = 1; i <= md.getColumnCount(); i++) {
        final DfColumnMeta columnMeta = new DfColumnMeta();

        String sql2EntityRelatedTableName = null;
        try {
            sql2EntityRelatedTableName = md.getTableName(i);
        } catch (SQLException continued) {
            // because this table name is not required, basically only for classification
            String msg = "ResultSetMetaData.getTableName(" + i + ") threw the exception: " + continued.getMessage();
            _log.info(msg);
        }
        columnMeta.setSql2EntityRelatedTableName(sql2EntityRelatedTableName);

        String columnName = md.getColumnLabel(i);
        final String relatedColumnName = md.getColumnName(i);
        columnMeta.setSql2EntityRelatedColumnName(relatedColumnName);
        if (columnName == null || columnName.trim().length() == 0) {
            columnName = relatedColumnName;
        }
        if (columnName == null || columnName.trim().length() == 0) {
            final String ln = ln();
            String msg = "The columnName is invalid: columnName=" + columnName + ln;
            msg = msg + "ResultSetMetaData returned an invalid value." + ln;
            msg = msg + "sql=" + sql;
            throw new IllegalStateException(msg);
        }
        columnMeta.setColumnName(columnName);

        final int columnType = md.getColumnType(i);
        columnMeta.setJdbcDefValue(columnType);
        final String columnTypeName = md.getColumnTypeName(i);
        columnMeta.setDbTypeName(columnTypeName);

        int columnSize = md.getPrecision(i);
        if (!DfColumnExtractor.isColumnSizeValid(columnSize)) {
            columnSize = md.getColumnDisplaySize(i); // e.g. sum(COLUMN)
        }
        columnMeta.setColumnSize(columnSize);

        final int scale = md.getScale(i);
        columnMeta.setDecimalDigits(scale);

        if (forcedJavaNativeProvider != null) {
            final String sql2entityForcedJavaNative = forcedJavaNativeProvider.provide(columnName);
            columnMeta.setSql2EntityForcedJavaNative(sql2entityForcedJavaNative);
        }

        // not using not-null metadata here because it might not be accurate
        // and it is unneeded in outside-SQL at first
        // but only used as optional determination for Scala
        // so you can specify a not-null mark at the select column comment e.g. -- // *Member Name
        // (see DfCustomizeEntityInfo#acceptSelectColumnComment())
        //try {
        //    // basically it is unneeded in outside-SQL and might not be accurate
        //    // but get it here just in case (use-or-not depends on Sql2Entity handling)
        //    final int nullable = md.isNullable(i);
        //    if (ResultSetMetaData.columnNoNulls == nullable) {
        //        columnMeta.setRequired(true);
        //    }
        //} catch (SQLException continued) {
        //    // because this is added after production, so for compatibility just in case
        //    String msg = "ResultSetMetaData.isNullable(" + i + ") threw the exception: " + continued.getMessage();
        //    _log.info(msg);
        //}

        // column comment is not set here (no comment on meta data)
        // if a select column comment is specified, the comment will be set later
        columnMetaInfoMap.put(columnName, columnMeta);
    }
    return columnMetaInfoMap;
}
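The label-then-name fallback above exists because getColumnLabel(i) returns the AS alias where one is present, while getColumnName(i) returns the underlying column, and some drivers return null or an empty label. A self-contained walk over ResultSetMetaData showing the same calls, assuming an in-memory H2 database on the classpath purely for illustration:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Statement;

public class MetaDataWalkSketch {
    public static void main(String[] args) throws SQLException {
        // illustrative in-memory H2 connection; any JDBC source behaves the same way
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:test");
             Statement st = conn.createStatement()) {
            st.execute("create table MEMBER (MEMBER_ID int, MEMBER_NAME varchar(64))");
            try (ResultSet rs = st.executeQuery("select MEMBER_ID, MEMBER_NAME as NICKNAME from MEMBER")) {
                ResultSetMetaData md = rs.getMetaData();
                for (int i = 1; i <= md.getColumnCount(); i++) {
                    String label = md.getColumnLabel(i); // alias if present, e.g. NICKNAME
                    String name = md.getColumnName(i); // underlying column, e.g. MEMBER_NAME
                    String columnName = (label == null || label.trim().isEmpty()) ? name : label;
                    System.out.println(columnName + ": " + md.getColumnTypeName(i)
                            + "(" + md.getPrecision(i) + ", " + md.getScale(i) + ")");
                }
            }
        }
    }
}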