Use of org.dbflute.helper.dataset.DfDataColumn in project dbflute-core by dbflute.
The class DfTableXlsReader, method resolveLargeDataIfNeeds.
protected String resolveLargeDataIfNeeds(DfDataTable table, int columnIndex, Row row, String str) {
    if (str == null) {
        return null;
    }
    final String refPrefix = LDATA_REF_PREFIX;
    final String refSuffix = LDATA_REF_SUFFIX;
    if (_largeDataMap != null && str.startsWith(refPrefix) && str.endsWith(refSuffix)) {
        final ScopeInfo scopeInfo = Srl.extractScopeFirst(str, refPrefix, refSuffix);
        final String dataKey = scopeInfo.getContent();
        final DfDataColumn column = table.getColumn(columnIndex);
        final String columnTitle = table.getTableDbName() + "." + column.getColumnDbName();
        final Map<String, String> dataMap = _largeDataMap.get(columnTitle);
        if (dataMap != null) {
            final String largeData = dataMap.get(dataKey);
            if (largeData != null) {
                return largeData;
            } else {
                throwLargeDataReferenceDataNotFoundException(table, columnIndex, row, str, dataKey);
            }
        } else {
            throwLargeDataReferenceDataNotFoundException(table, columnIndex, row, str, dataKey);
        }
    }
    return str;
}
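The method replaces a reference marker in an xls cell with the large text that prepareLargeDataTable registered under the "TABLE.COLUMN" title and a data key; any non-reference value is returned as-is. A minimal standalone sketch of the same lookup, using hypothetical marker values (the real LDATA_REF_PREFIX and LDATA_REF_SUFFIX constants belong to DfTableXlsReader and are not shown in this snippet):

import java.util.LinkedHashMap;
import java.util.Map;

public class LargeDataLookupSketch {

    // hypothetical marker values; the real constants live in DfTableXlsReader
    private static final String REF_PREFIX = "ref(";
    private static final String REF_SUFFIX = ")";

    // columnTitle (e.g. MEMBER.MEMBER_NAME) -> dataKey -> large text
    private final Map<String, Map<String, String>> largeDataMap = new LinkedHashMap<>();

    public void register(String columnTitle, String dataKey, String largeData) {
        largeDataMap.computeIfAbsent(columnTitle, key -> new LinkedHashMap<>()).put(dataKey, largeData);
    }

    // returns the registered large data when the cell value is a reference, otherwise the value as-is
    public String resolve(String columnTitle, String cellValue) {
        if (cellValue == null || !cellValue.startsWith(REF_PREFIX) || !cellValue.endsWith(REF_SUFFIX)) {
            return cellValue;
        }
        final String dataKey = cellValue.substring(REF_PREFIX.length(), cellValue.length() - REF_SUFFIX.length());
        final Map<String, String> dataMap = largeDataMap.get(columnTitle);
        if (dataMap == null || !dataMap.containsKey(dataKey)) {
            throw new IllegalStateException("Not found the large data: " + columnTitle + ", " + dataKey);
        }
        return dataMap.get(dataKey);
    }

    public static void main(String[] args) {
        final LargeDataLookupSketch sketch = new LargeDataLookupSketch();
        sketch.register("MEMBER.MEMBER_NAME", "key1", "very long text...");
        System.out.println(sketch.resolve("MEMBER.MEMBER_NAME", "ref(key1)")); // very long text...
        System.out.println(sketch.resolve("MEMBER.MEMBER_NAME", "plain value")); // plain value
    }
}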
Use of org.dbflute.helper.dataset.DfDataColumn in project dbflute-core by dbflute.
The class DfTableXlsReader, method prepareLargeDataTable.
// -----------------------------------------------------
// Large Data
// ----------
protected void prepareLargeDataTable() {
    for (int i = 0; i < _workbook.getNumberOfSheets(); ++i) {
        final String sheetName = _workbook.getSheetName(i);
        if (!isLargeDataSheet(sheetName)) {
            continue;
        }
        final Sheet sheet = _workbook.getSheetAt(i);
        final String largeTableName = "LARGE_DATA"; // unused
        final DfDataTable table = setupTable(sheet, largeTableName, new DfDataTable(largeTableName));
        _largeDataMap = DfCollectionUtil.newLinkedHashMap();
        final Map<Integer, String> indexColumnTitleMap = DfCollectionUtil.newLinkedHashMap();
        for (int columnIndex = 0; columnIndex < table.getColumnSize(); columnIndex++) {
            final DfDataColumn column = table.getColumn(columnIndex);
            final String columnTitle = column.getColumnDbName();
            if (!columnTitle.contains(".")) { // should be e.g. MEMBER.MEMBER_NAME
                throwLargeDataInvalidColumnTitleException(sheetName, columnTitle);
            }
            Map<String, String> dataMap = _largeDataMap.get(columnTitle);
            if (dataMap == null) {
                dataMap = DfCollectionUtil.newLinkedHashMap();
            }
            _largeDataMap.put(columnTitle, dataMap);
            indexColumnTitleMap.put(columnIndex, columnTitle);
        }
        for (int rowIndex = 0; rowIndex < table.getRowSize(); rowIndex++) {
            final DfDataRow row = table.getRow(rowIndex);
            for (int columnIndex = 0; columnIndex < table.getColumnSize(); ++columnIndex) {
                final Object obj = row.getValue(columnIndex);
                if (obj == null) {
                    continue;
                }
                final String value = obj.toString(); // basically String, but just in case
                final String columnTitle = indexColumnTitleMap.get(columnIndex);
                final Map<String, String> dataMap = _largeDataMap.get(columnTitle);
                if (!value.contains(LDATA_KEY_DELIMITER)) { // should be e.g. key(df:delimiter){value}
                    throwLargeDataInvalidManagedDataException(sheetName, columnTitle, row, value);
                }
                final String dataKey = Srl.substringFirstFront(value, LDATA_KEY_DELIMITER);
                final String largeValue = Srl.substringFirstRear(value, LDATA_KEY_DELIMITER);
                final String unquotedValue = Srl.unquoteAnything(largeValue, LDATA_QUOTE_BEGIN, LDATA_QUOTE_END);
                final String existingValue = dataMap.get(dataKey);
                final String realValue = existingValue != null ? existingValue + unquotedValue : unquotedValue;
                dataMap.put(dataKey, realValue);
            }
        }
        break; // only one
    }
}
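Each cell in a large-data sheet carries a key and a value chunk, and chunks that share the same key are concatenated so one long text can span several cells. A small sketch of that parsing for a single column, assuming illustrative delimiter and quote markers (the real LDATA_KEY_DELIMITER, LDATA_QUOTE_BEGIN and LDATA_QUOTE_END constants are not shown in this snippet):

import java.util.LinkedHashMap;
import java.util.Map;

public class LargeDataSheetParseSketch {

    // hypothetical markers; the real constants are defined in DfTableXlsReader
    private static final String KEY_DELIMITER = "(df:delimiter)";
    private static final String QUOTE_BEGIN = "{";
    private static final String QUOTE_END = "}";

    // parses the cells of one large-data column into dataKey -> concatenated large value
    public Map<String, String> parseColumnCells(Iterable<String> cellValues) {
        final Map<String, String> dataMap = new LinkedHashMap<>();
        for (String value : cellValues) {
            final int delimiterIndex = value.indexOf(KEY_DELIMITER);
            if (delimiterIndex < 0) {
                throw new IllegalStateException("Invalid managed data, expected key(df:delimiter){value}: " + value);
            }
            final String dataKey = value.substring(0, delimiterIndex);
            String largeValue = value.substring(delimiterIndex + KEY_DELIMITER.length());
            if (largeValue.startsWith(QUOTE_BEGIN) && largeValue.endsWith(QUOTE_END)) {
                largeValue = largeValue.substring(QUOTE_BEGIN.length(), largeValue.length() - QUOTE_END.length());
            }
            // cells sharing the same key are concatenated, which lets one long text span several cells
            dataMap.merge(dataKey, largeValue, String::concat);
        }
        return dataMap;
    }
}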
Use of org.dbflute.helper.dataset.DfDataColumn in project dbflute-core by dbflute.
The class DfDtsModifiedState, method getSqlContext.
protected DfDtsSqlContext getSqlContext(DfDataRow row) {
    final DfDataTable table = row.getTable();
    final StringBuffer sb = new StringBuffer(100);
    final List<Object> argList = new ArrayList<Object>();
    final List<Class<?>> argTypeList = new ArrayList<Class<?>>();
    sb.append("update ");
    sb.append(table.getTableSqlName());
    sb.append(" set ");
    for (int i = 0; i < table.getColumnSize(); ++i) {
        final DfDataColumn column = table.getColumn(i);
        if (column.isWritable() && !column.isPrimaryKey()) {
            sb.append(column.getColumnSqlName());
            sb.append(" = ?, ");
            argList.add(row.getValue(i));
            argTypeList.add(column.getColumnType().getType());
        }
    }
    sb.setLength(sb.length() - 2);
    sb.append(" where ");
    for (int i = 0; i < table.getColumnSize(); ++i) {
        final DfDataColumn column = table.getColumn(i);
        if (column.isPrimaryKey()) {
            sb.append(column.getColumnSqlName());
            sb.append(" = ? and ");
            argList.add(row.getValue(i));
            argTypeList.add(column.getColumnType().getType());
        }
    }
    sb.setLength(sb.length() - 5);
    return createDtsSqlContext(sb.toString(), argList, argTypeList);
}
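For an illustrative MEMBER table whose primary key is MEMBER_ID and whose writable non-key columns are MEMBER_NAME and MEMBER_STATUS_CODE (table and column names are hypothetical), the builder above would produce a statement of this shape:

    update MEMBER set MEMBER_NAME = ?, MEMBER_STATUS_CODE = ? where MEMBER_ID = ?

The two setLength calls strip the trailing ", " after the set clause and the trailing " and " after the last primary-key condition, and argList/argTypeList hold the bind values and their types in the same order as the placeholders.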
Use of org.dbflute.helper.dataset.DfDataColumn in project dbflute-core by dbflute.
The class DfXlsDataHandlerImpl, method checkHeaderColumnIfNeeds.
protected void checkHeaderColumnIfNeeds(DfXlsDataResource resource, File file, DfDataTable dataTable, Map<String, DfColumnMeta> columnMetaMap) {
    final String dataDirectory = resource.getDataDirectory();
    if (!isCheckColumnDef(dataDirectory)) {
        return;
    }
    final List<String> columnDefNameList = new ArrayList<String>();
    for (int i = 0; i < dataTable.getColumnSize(); i++) { // all columns are target
        final DfDataColumn dataColumn = dataTable.getColumn(i);
        final String columnName = dataColumn.getColumnDbName();
        columnDefNameList.add(columnName);
    }
    // use columnMetaMap to check (not use DataTable's meta data here)
    // in older versions columnMetaMap was not required, but it is required now
    checkColumnDef(file, dataTable.getTableDbName(), columnDefNameList, columnMetaMap);
}
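checkColumnDef itself is not shown in this snippet; one plausible shape of such a check, comparing the xls header columns against the database meta data, could look like the following (purely illustrative, not DBFlute's actual implementation):

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class ColumnDefCheckSketch {

    // metaColumnNames stands in for columnMetaMap.keySet(): the column names known to the database meta data
    public void checkColumnDef(String tableDbName, List<String> columnDefNameList, Set<String> metaColumnNames) {
        final List<String> unknownList = new ArrayList<String>();
        for (String columnName : columnDefNameList) {
            if (!metaColumnNames.contains(columnName)) {
                unknownList.add(columnName);
            }
        }
        if (!unknownList.isEmpty()) {
            throw new IllegalStateException("Unknown columns in the xls header of " + tableDbName + ": " + unknownList);
        }
    }
}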
Use of org.dbflute.helper.dataset.DfDataColumn in project dbflute-core by dbflute.
The class DfDtsCreatedState, method getSqlContext.
protected DfDtsSqlContext getSqlContext(DfDataRow row) {
    final DfDataTable table = row.getTable();
    final StringBuffer sb = new StringBuffer(100);
    final List<Object> argList = new ArrayList<Object>();
    final List<Class<?>> argTypeList = new ArrayList<Class<?>>();
    sb.append("insert into ");
    sb.append(table.getTableSqlName());
    sb.append(" (");
    int writableColumnSize = 0;
    for (int i = 0; i < table.getColumnSize(); ++i) {
        final DfDataColumn column = table.getColumn(i);
        if (column.isWritable()) {
            ++writableColumnSize;
            sb.append(column.getColumnSqlName());
            sb.append(", ");
            argList.add(row.getValue(i));
            argTypeList.add(column.getColumnType().getType());
        }
    }
    sb.setLength(sb.length() - 2);
    sb.append(") values (");
    for (int i = 0; i < writableColumnSize; ++i) {
        sb.append("?, ");
    }
    sb.setLength(sb.length() - 2);
    sb.append(")");
    return createDtsSqlContext(sb.toString(), argList, argTypeList);
}
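With an illustrative MEMBER table whose writable columns are MEMBER_ID, MEMBER_NAME and MEMBER_STATUS_CODE (names are hypothetical), the generated statement would look like:

    insert into MEMBER (MEMBER_ID, MEMBER_NAME, MEMBER_STATUS_CODE) values (?, ?, ?)

The writableColumnSize counter keeps the number of placeholders equal to the number of listed columns, and the two setLength calls remove the trailing ", " after the column list and after the placeholder list.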