Example usage of org.dbflute.logic.replaceschema.loaddata.DfDelimiterDataResultInfo in the dbflute-core project (by dbflute):
the writeSeveralData method of the DfDelimiterDataHandlerImpl class.
// ===================================================================================
// Main
// ====
/**
 * Writes every delimiter data file found under the resource's base path into the database.
 * <p>
 * The base directory is expected to contain sub-directories named after file encodings
 * (e.g. "UTF-8"); each data file inside a sub-directory is loaded using that encoding.
 * Within a directory, files are processed in ascending file-name order.
 *
 * @param resource The resource of delimiter data loading: base path, file type, delimiter, load type. (NotNull)
 * @param loadedDataInfo The holder to which every loaded file is registered. (NotNull)
 * @return The result info, e.g. containing column-count-diff warnings per file path. (NotNull)
 * @throws DfDelimiterDataRegistrationFailureException When writing fails by I/O trouble.
 */
public DfDelimiterDataResultInfo writeSeveralData(DfDelimiterDataResource resource, DfLoadedDataInfo loadedDataInfo) {
    final DfDelimiterDataResultInfo resultInfo = new DfDelimiterDataResultInfo();
    final String basePath = resource.getBasePath();
    final File baseDir = new File(basePath);
    // list the encoding directories, skipping hidden entries (e.g. ".svn")
    final String[] dataDirectoryElements = baseDir.list(new FilenameFilter() {
        public boolean accept(File dir, String name) {
            return !name.startsWith(".");
        }
    });
    if (dataDirectoryElements == null) { // the base directory does not exist (or cannot be read)
        return resultInfo;
    }
    final FilenameFilter filter = createFilenameFilter(resource.getFileType());
    try {
        for (String encoding : dataDirectoryElements) {
            if (isUnsupportedEncodingDirectory(encoding)) {
                _log.warn("The encoding(directory name) is unsupported: encoding=" + encoding);
                continue;
            }
            final String dataDirectory = basePath + "/" + encoding;
            final File encodingNameDirectory = new File(dataDirectory);
            final String[] fileNameList = encodingNameDirectory.list(filter);
            if (fileNameList == null) { // not a directory or unreadable: fix, previously caused NPE
                continue;
            }
            // process the data files in ascending file-name order (natural String order)
            final SortedSet<String> sortedFileNameSet = new TreeSet<String>();
            for (String fileName : fileNameList) {
                sortedFileNameSet.add(fileName);
            }
            final Map<String, Map<String, String>> convertValueMap = getConvertValueMap(resource, encoding);
            final Map<String, String> defaultValueMap = getDefaultValueMap(resource, encoding);
            final String loadType = resource.getLoadType(); // loop-invariant: hoisted out of file loop
            final String fileType = resource.getFileType();
            for (String fileName : sortedFileNameSet) {
                final String fileNamePath = dataDirectory + "/" + fileName;
                // a fresh writer per file, fully configured from this handler's settings
                final DfDelimiterDataWriterImpl writer = new DfDelimiterDataWriterImpl(_dataSource, _unifiedSchema);
                writer.setLoggingInsertSql(isLoggingInsertSql());
                writer.setFileName(fileNamePath);
                writer.setEncoding(encoding);
                writer.setDelimiter(resource.getDelimiter());
                writer.setConvertValueMap(convertValueMap);
                writer.setDefaultValueMap(defaultValueMap);
                writer.setSuppressBatchUpdate(isSuppressBatchUpdate());
                writer.setSuppressCheckColumnDef(isSuppressCheckColumnDef());
                writer.setSuppressCheckImplicitSet(isSuppressCheckImplicitSet());
                writer.setDataWritingInterceptor(_dataWritingInterceptor);
                writer.setDefaultValueProp(_defaultValueProp);
                writer.setLoadingControlProp(_loadingControlProp);
                writer.writeData(resultInfo);
                prepareImplicitClassificationLazyCheck(loadedDataInfo, writer);
                // warned: a column-count difference was detected for this file while writing
                final boolean warned = resultInfo.containsColumnCountDiff(fileNamePath);
                loadedDataInfo.addLoadedFile(loadType, fileType, encoding, fileName, warned);
            }
            outputResultMark(resource, resultInfo, dataDirectory);
        }
    } catch (IOException e) {
        String msg = "Failed to register delimiter data: " + resource;
        throw new DfDelimiterDataRegistrationFailureException(msg, e); // keep cause for diagnosis
    }
    return resultInfo;
}
Aggregations