Use of org.dbflute.logic.jdbc.metadata.info.DfTableMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method doHelpTableDatePrecision:
/**
 * Extract date-time precisions for the specified tables and keep them
 * in the all-schema map (_datetimePrecisionAllMap), keyed by schema.
 * Extraction is best-effort: a runtime failure is logged and ignored.
 * @param tableList The list of table meta to extract precisions for. (NotNull)
 * @param unifiedSchema The unified schema the tables belong to. (NotNull)
 */
protected void doHelpTableDatePrecision(List<DfTableMeta> tableList, UnifiedSchema unifiedSchema) {
    final DfDatetimePrecisionExtractor precisionExtractor = createDatetimePrecisionExtractor(unifiedSchema);
    if (precisionExtractor == null) {
        return; // no extractor available for this schema, nothing to do
    }
    final Set<String> tableNameSet = new HashSet<String>();
    for (DfTableMeta meta : tableList) {
        tableNameSet.add(meta.getTableName());
    }
    try {
        if (_datetimePrecisionAllMap == null) {
            _datetimePrecisionAllMap = new LinkedHashMap<UnifiedSchema, Map<String, Map<String, Integer>>>();
        }
        final Map<String, Map<String, Integer>> existingMap = _datetimePrecisionAllMap.get(unifiedSchema);
        final Map<String, Map<String, Integer>> extractedMap = precisionExtractor.extractDatetimePrecisionMap(tableNameSet);
        if (existingMap == null) {
            _datetimePrecisionAllMap.put(unifiedSchema, extractedMap);
        } else {
            // basically no way, schema is unique but just in case: merge
            existingMap.putAll(extractedMap);
        }
    } catch (RuntimeException continued) {
        _log.info("Failed to extract date-time precisions: extractor=" + precisionExtractor, continued);
    }
}
Use of org.dbflute.logic.jdbc.metadata.info.DfTableMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method generateXML:
/**
 * Generates an XML database schema from JDBC meta data. <br>
 * Process order matters: the generated-table map is built first (it is used by
 * synonym and foreign-key handling), then synonyms are loaded, then the database
 * node is created and table/sequence/procedure/craft processes fill it in.
 * @throws SQLException When it fails to handle the SQL.
 */
protected void generateXML() throws SQLException {
    _log.info("...Getting DB connection");
    // try-with-resources: the connection is always closed, and an exception
    // thrown by close() no longer masks an exception from the body
    try (Connection conn = _dataSource.getConnection()) {
        _log.info("...Getting DB meta data");
        final DatabaseMetaData metaData = conn.getMetaData();
        final List<DfTableMeta> tableList = getTableList(metaData);

        // initialize the map of generated tables
        // this is used by synonym handling and foreign key handling
        // so this process should be before their processes
        _generatedTableMap = StringKeyMap.createAsCaseInsensitive();
        for (DfTableMeta meta : tableList) {
            _generatedTableMap.put(meta.getTableName(), meta);
        }

        // Load synonym information for merging additional meta data if it needs.
        loadSupplementarySynonymInfoIfNeeds();

        // This should be after loading synonyms so it is executed at this timing!
        // The property 'outOfGenerateTarget' is set here
        processSynonymTable(tableList);

        // The handler of foreign keys for generating.
        // It needs to check whether a reference table is generate-target or not.
        _foreignKeyExtractor.exceptForeignTableNotGenerated(_generatedTableMap);

        // Create database node. (The beginning of schema XML!)
        _databaseNode = _doc.createElement("database");
        _databaseNode.setAttribute("name", _dataSource.getSchema().getPureSchema()); // as main schema

        processTable(conn, metaData, tableList);
        final boolean additionalTableExists = setupAddtionalTableIfNeeds();
        if (tableList.isEmpty() && !additionalTableExists) {
            throwSchemaEmptyException();
        }
        processSequence(conn, metaData);
        if (isProcedureMetaEnabled()) {
            processProcedure(conn, metaData);
        }
        if (isCraftMetaEnabled()) {
            processCraftMeta(tableList);
        }
        _doc.appendChild(_databaseNode);
    }
}
Use of org.dbflute.logic.jdbc.metadata.info.DfTableMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method assertDuplicateTable:
/**
 * Assert that the table list contains no duplicate table names (case-insensitive).
 * The check is skipped when the littleAdjustment property suppresses the
 * other-schema same-name table limiter; otherwise duplicates cause an exception.
 * @param tableList The list of table meta to check. (NotNull)
 */
protected void assertDuplicateTable(List<DfTableMeta> tableList) {
    if (getLittleAdjustmentProperties().isSuppressOtherSchemaSameNameTableLimiter()) {
        return; // limiter suppressed: same-name tables are allowed
    }
    final Set<String> seenNameSet = StringSet.createAsCaseInsensitive();
    final Set<String> duplicateNameSet = StringSet.createAsCaseInsensitive();
    for (DfTableMeta meta : tableList) {
        final String tableName = meta.getTableName();
        if (!seenNameSet.add(tableName)) { // add() returns false when already present
            duplicateNameSet.add(tableName);
        }
    }
    if (!duplicateNameSet.isEmpty()) {
        throwTableDuplicateException(duplicateNameSet);
    }
}
Use of org.dbflute.logic.jdbc.metadata.info.DfTableMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method countDownRaceProcessTable:
/**
 * Process the table meta data concurrently using the count-down race framework.
 * Each runner opens its own connection, then pulls not-yet-claimed tables from the
 * shared sync set (_tableMetaDataSyncSet) under the race's lock object, so every
 * table is processed by exactly one runner.
 * @param tableList The list of table meta to process. (NotNull)
 * @param runnerCount The number of concurrent runners to fire.
 * @param fittingDs The data source used to create each runner's own connection. (NotNull)
 */
protected void countDownRaceProcessTable(final List<DfTableMeta> tableList, int runnerCount, final DfFittingDataSource fittingDs) {
final CountDownRace fireMan = new CountDownRace(runnerCount);
fireMan.readyGo(new CountDownRaceExecution() {
public void execute(CountDownRaceRunner resource) {
final Object lockObj = resource.getLockObj();
// for exception message
String currentTable = null;
Connection runnerConn = null;
try {
runnerConn = fittingDs.newConnection();
prepareThreadDataSource(fittingDs, runnerConn);
final DatabaseMetaData newMetaData = runnerConn.getMetaData();
for (DfTableMeta tableMeta : tableList) {
final String tableKey = tableMeta.getTableFullQualifiedName();
// claim the table under the shared lock so no two runners process the same one
synchronized (lockObj) {
if (_tableMetaDataSyncSet.contains(tableKey)) {
continue; // already claimed by another runner
}
_tableMetaDataSyncSet.add(tableKey);
}
currentTable = tableKey;
doProcessTable(runnerConn, newMetaData, tableMeta);
}
} catch (SQLException e) {
String msg = "Failed to get the table meta data: " + currentTable;
throw new IllegalStateException(msg, e);
} finally {
if (runnerConn != null) {
try {
runnerConn.close();
} catch (SQLException e) {
// ignored: best-effort close of this runner's own connection
}
}
// clear the thread-bound data source set up by prepareThreadDataSource()
DfDataSourceContext.clearDataSource();
}
}
// Binds a data source to the current thread (if none yet) whose handler always
// returns this runner's connection, so nested processes reuse it.
protected void prepareThreadDataSource(final DfFittingDataSource fittingDs, final Connection runnerConn) {
if (DfDataSourceContext.isExistDataSource()) {
return;
}
// wrapper presumably keeps the underlying connection open when close() is called
// (per its name) — the runner itself closes the real connection in its finally block
final Connection threadConn = new NotClosingConnectionWrapper(runnerConn);
DfDataSourceContext.setDataSource(new HandlingDataSourceWrapper(fittingDs, new DataSourceHandler() {
public Connection getConnection(DataSource dataSource) throws SQLException {
return threadConn;
}
}));
}
});
}
Use of org.dbflute.logic.jdbc.metadata.info.DfTableMeta in project dbflute-core by dbflute.
Class DfSchemaXmlSerializer, method doHelpTableComment:
/**
 * Extract table comments and column comments for the specified tables. <br>
 * Table comments are applied to the table meta directly; column comments are kept
 * in the all-schema map (_columnCommentAllMap), keyed by schema. Both extractions
 * are best-effort: a runtime failure is logged and ignored.
 * @param tableList The list of table meta to extract comments for. (NotNull)
 * @param unifiedSchema The unified schema the tables belong to. (NotNull)
 */
protected void doHelpTableComment(List<DfTableMeta> tableList, UnifiedSchema unifiedSchema) {
    final DfDbCommentExtractor commentExtractor = createDbCommentExtractor(unifiedSchema);
    if (commentExtractor == null) {
        return; // no extractor available for this schema, nothing to do
    }
    final Set<String> tableNameSet = new HashSet<String>();
    for (DfTableMeta meta : tableList) {
        tableNameSet.add(meta.getTableName());
    }
    try {
        final Map<String, UserTabComments> tableCommentMap = commentExtractor.extractTableComment(tableNameSet);
        for (DfTableMeta meta : tableList) {
            // *Synonym Processing is after loading synonyms.
            meta.acceptTableComment(tableCommentMap);
        }
    } catch (RuntimeException ignored) {
        _log.info("Failed to extract table comments: extractor=" + commentExtractor, ignored);
    }
    try {
        if (_columnCommentAllMap == null) {
            _columnCommentAllMap = new LinkedHashMap<UnifiedSchema, Map<String, Map<String, UserColComments>>>();
        }
        final Map<String, Map<String, UserColComments>> existingMap = _columnCommentAllMap.get(unifiedSchema);
        final Map<String, Map<String, UserColComments>> extractedMap = commentExtractor.extractColumnComment(tableNameSet);
        if (existingMap == null) {
            _columnCommentAllMap.put(unifiedSchema, extractedMap);
        } else {
            // basically no way, schema is unique but just in case: merge
            existingMap.putAll(extractedMap);
        }
    } catch (RuntimeException continued) {
        _log.info("Failed to extract column comments: extractor=" + commentExtractor, continued);
    }
}
Aggregations