Use of org.dbflute.helper.dataset.DfDataSet in project dbflute-core by dbflute.
The class DfLReverseExistingFileProvider, method extractExistingXlsInfo.
// ===================================================================================
//                                                                        Existing Xls
//                                                                        ============
public DfLReverseExistingXlsInfo extractExistingXlsInfo(File baseDir) {
    final List<File> existingXlsList = findExistingXlsList(baseDir);
    final Map<File, List<String>> existingXlsTableListMap = DfCollectionUtil.newLinkedHashMap();
    final Map<String, File> tableExistingXlsMap = StringKeyMap.createAsFlexible();
    final String dataDirPath = resolvePath(baseDir);
    final Map<String, String> tableNameMap = _tableNameProp.findTableNameMap(dataDirPath);
    for (File existingXls : existingXlsList) {
        final DfTableXlsReader reader = createTableXlsReader(baseDir, existingXls, tableNameMap);
        final DfDataSet dataSet = reader.read();
        final List<String> tableList = new ArrayList<String>();
        for (int i = 0; i < dataSet.getTableSize(); i++) {
            final DfDataTable dataTable = dataSet.getTable(i);
            final String tableDbName = dataTable.getTableDbName();
            tableList.add(tableDbName);
            if (tableExistingXlsMap.containsKey(tableDbName)) {
                throwLoadDataReverseDuplicateTableException(tableExistingXlsMap, tableDbName);
            }
            tableExistingXlsMap.put(tableDbName, existingXls);
        }
        existingXlsTableListMap.put(existingXls, tableList);
    }
    return new DfLReverseExistingXlsInfo(existingXlsTableListMap, tableExistingXlsMap);
}
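For readers who want the guard logic in isolation, here is a minimal, self-contained sketch of what the inner loop above does: it builds the reverse table-name-to-file index and fails fast when a table appears in two xls files. Everything in the sketch is illustrative rather than DBFlute API: the class and method names are hypothetical, a case-insensitive TreeMap only approximates StringKeyMap.createAsFlexible() (which is additionally flexible about underscores), and the IllegalStateException stands in for throwLoadDataReverseDuplicateTableException().

import java.io.File;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ExistingXlsIndexSketch { // hypothetical class, not part of dbflute-core

    // Rebuilds the reverse view produced above: table name -> owning xls file.
    public Map<String, File> indexTables(Map<File, List<String>> existingXlsTableListMap) {
        // a case-insensitive TreeMap only approximates StringKeyMap.createAsFlexible()
        final Map<String, File> tableExistingXlsMap = new TreeMap<String, File>(String.CASE_INSENSITIVE_ORDER);
        for (Map.Entry<File, List<String>> entry : existingXlsTableListMap.entrySet()) {
            final File existingXls = entry.getKey();
            for (String tableDbName : entry.getValue()) {
                // the duplicate check must come before put(), or the first owner is silently overwritten
                if (tableExistingXlsMap.containsKey(tableDbName)) {
                    throw new IllegalStateException("Duplicate table across xls files: " + tableDbName
                            + " in " + tableExistingXlsMap.get(tableDbName) + " and " + existingXls);
                }
                tableExistingXlsMap.put(tableDbName, existingXls);
            }
        }
        return tableExistingXlsMap;
    }
}

Checking containsKey() before put() is what lets the duplicate exception report both the file that already owns the table and the file that collides with it.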
Use of org.dbflute.helper.dataset.DfDataSet in project dbflute-core by dbflute.
The class DfTableXlsReaderTest, method test_read_rtrim.
public void test_read_rtrim() throws IOException {
    // ## Arrange ##
    final File xlsFile = prepareTestBasicXlsFile();
    final DfTableXlsReader reader = createTableXlsReader(xlsFile, null, true);
    // ## Act ##
    final DfDataSet dataSet = reader.read();
    // ## Assert ##
    log("[DataSet]:" + ln() + dataSet);
    final int tableSize = dataSet.getTableSize();
    assertTrue(tableSize > 0);
    for (int tableIndex = 0; tableIndex < tableSize; tableIndex++) {
        final DfDataTable table = dataSet.getTable(tableIndex);
        final int columnSize = table.getColumnSize();
        assertTrue(columnSize > 0);
        final int rowSize = table.getRowSize();
        assertTrue(rowSize > 0);
        for (int rowIndex = 0; rowIndex < rowSize; rowIndex++) {
            final DfDataRow row = table.getRow(rowIndex);
            for (int columnIndex = 0; columnIndex < columnSize; columnIndex++) {
                final DfDataColumn column = table.getColumn(columnIndex);
                final String columnDbName = column.getColumnDbName();
                final Object value = row.getValue(columnDbName);
                if (columnDbName.equals("AAA")) {
                    assertNotNull(value);
                } else if (columnDbName.equals("BBB")) {
                    markHere("nullBBB");
                } else if (columnDbName.equals("CCC")) {
                    assertNotNull(value);
                } else if (columnDbName.equals("DDD")) {
                    assertNotNull(value);
                    String str = (String) value;
                    if (str.length() > str.trim().length()) {
                        fail();
                    }
                } else if (columnDbName.equals("EEE")) {
                    assertNotNull(value);
                    String str = (String) value;
                    if (str.length() > str.trim().length()) {
                        // because of not trimmed column
                        markHere("trimmed_EEE");
                    }
                }
            }
        }
    }
    assertMarked("nullBBB");
    assertMarked("trimmed_EEE");
}
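The rtrim test walks every cell through the DfDataSet accessors used above (getTableSize(), getTable(), getRowSize(), getRow(), getColumnSize(), getColumn(), getColumnDbName(), getValue()). The same traversal can be reused outside a test. The sketch below is hedged: it assumes the row, column, and table classes live in the same org.dbflute.helper.dataset package as DfDataSet, and the class and method names are illustrative; it simply reports cells whose values still carry surrounding whitespace.

import org.dbflute.helper.dataset.DfDataColumn;
import org.dbflute.helper.dataset.DfDataRow;
import org.dbflute.helper.dataset.DfDataSet;
import org.dbflute.helper.dataset.DfDataTable;

public class DataSetWhitespaceScanSketch { // hypothetical class, not part of dbflute-core

    // Visits every cell of the data set and reports values that are not trimmed,
    // using only the accessors exercised by test_read_rtrim() above.
    public void reportUntrimmedCells(DfDataSet dataSet) {
        for (int tableIndex = 0; tableIndex < dataSet.getTableSize(); tableIndex++) {
            final DfDataTable table = dataSet.getTable(tableIndex);
            for (int rowIndex = 0; rowIndex < table.getRowSize(); rowIndex++) {
                final DfDataRow row = table.getRow(rowIndex);
                for (int columnIndex = 0; columnIndex < table.getColumnSize(); columnIndex++) {
                    final DfDataColumn column = table.getColumn(columnIndex);
                    final Object value = row.getValue(column.getColumnDbName());
                    if (value instanceof String) {
                        final String str = (String) value;
                        if (str.length() > str.trim().length()) { // leading or trailing whitespace survived
                            System.out.println(table.getTableDbName() + "." + column.getColumnDbName()
                                    + " keeps whitespace: [" + str + "]");
                        }
                    }
                }
            }
        }
    }
}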
Use of org.dbflute.helper.dataset.DfDataSet in project dbflute-core by dbflute.
The class DfTableXlsWriterTest, method test_write_largeData_handling.
// ===================================================================================
//                                                                          Large Data
//                                                                          ==========
public void test_write_largeData_handling() throws Exception {
    // ## Arrange ##
    DfTableXlsReader existingReader = createTableXlsReader(prepareTestLargeDataXlsFile());
    DfDataSet baseSet = existingReader.read();
    log(ln() + baseSet);
    String fileName = "output-table-xls-large-data-handling.xls";
    String path = getTestCaseBuildDir().getCanonicalPath() + "/../" + fileName;
    File outputFile = new File(path);
    DfTableXlsWriter writer = new DfTableXlsWriter(outputFile);
    writer.largeDataHandling().cellLengthLimit(5);
    // ## Act ##
    writer.write(baseSet);
    // ## Assert ##
    refresh();
    DfTableXlsReader outputReader = createTableXlsReader(outputFile);
    DfDataSet actualSet = outputReader.read();
    log(ln() + actualSet);
    String actualExp = actualSet.toString();
    assertEquals(baseSet.toString(), actualExp);
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_SHEET_NAME));
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_KEY_DELIMITER));
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_REF_PREFIX));
}
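Stripped of the test scaffolding, the lossless configuration this test exercises looks like the sketch below: largeDataHandling() plus a deliberately tiny cellLengthLimit(), so over-limit values take the auxiliary large-data route (the LDATA_* constants asserted above) instead of being cut off, which is why the re-read data set still equals the original. The import package of DfTableXlsWriter and the surrounding class and method names are assumptions; only the writer calls themselves come from the test.

import java.io.File;

import org.dbflute.helper.dataset.DfDataSet;
import org.dbflute.helper.io.xls.DfTableXlsWriter; // package assumed from the dbflute-core layout

public class LargeDataLosslessWriteSketch { // hypothetical class, not part of dbflute-core

    // With largeDataHandling(), cells longer than the limit are kept intact:
    // the test above reads the file back and finds it equal to the original data set.
    public void writeLossless(DfDataSet dataSet, File outputFile) {
        final DfTableXlsWriter writer = new DfTableXlsWriter(outputFile);
        writer.largeDataHandling().cellLengthLimit(5); // a limit of 5 only to force the large-data path
        writer.write(dataSet);
    }
}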
Use of org.dbflute.helper.dataset.DfDataSet in project dbflute-core by dbflute.
The class DfTableXlsWriterTest, method test_write_largeData_truncated.
public void test_write_largeData_truncated() throws Exception {
    // ## Arrange ##
    DfTableXlsReader reader = createTableXlsReader(prepareTestLargeDataXlsFile());
    DfDataSet baseSet = reader.read();
    String fileName = "output-table-xls-large-data-truncated.xls";
    String path = getTestCaseBuildDir().getCanonicalPath() + "/../" + fileName;
    File outputFile = new File(path);
    DfTableXlsWriter writer = new DfTableXlsWriter(outputFile).cellLengthLimit(5);
    // ## Act ##
    writer.write(baseSet);
    // ## Assert ##
    refresh();
    DfTableXlsReader outputReader = createTableXlsReader(outputFile);
    DfDataSet actualSet = outputReader.read();
    log(ln() + actualSet);
    String actualExp = actualSet.toString();
    assertNotSame(baseSet.toString(), actualExp);
    assertContains(actualExp, "...");
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_SHEET_NAME));
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_KEY_DELIMITER));
    assertFalse(actualExp.contains(DfTableXlsReader.LDATA_REF_PREFIX));
}
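By contrast, this test configures the limit without largeDataHandling(), so over-limit values come back shortened and the round trip no longer matches the original (the test checks for a "..." marker in the re-read data set). A hedged sketch of that configuration follows, with the same caveats as above about the assumed import package and the illustrative class and method names.

import java.io.File;

import org.dbflute.helper.dataset.DfDataSet;
import org.dbflute.helper.io.xls.DfTableXlsWriter; // package assumed from the dbflute-core layout

public class LargeDataTruncatedWriteSketch { // hypothetical class, not part of dbflute-core

    // Without largeDataHandling(), the same limit simply truncates the cell value,
    // so re-reading the file no longer reproduces the original data set.
    public void writeTruncated(DfDataSet dataSet, File outputFile) {
        final DfTableXlsWriter writer = new DfTableXlsWriter(outputFile).cellLengthLimit(5);
        writer.write(dataSet);
    }
}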