Use of org.apache.drill.exec.store.AbstractSchema in project drill by axbaretto.
The class InfoSchemaRecordGenerator, method visitTables.
/**
 * Visits every table in the given schema and, for each table the visitor
 * accepts, visits each of the table's columns as well.
 *
 * @param schemaPath the path to the given schema
 * @param schema the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
  final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
  final List<String> names = Lists.newArrayList(schema.getTableNames());
  for (Pair<String, ? extends Table> entry : drillSchema.getTablesByNames(names)) {
    final String tableName = entry.getKey();
    final Table table = entry.getValue();
    // Skip the table entirely when the filter rejects it, or when visiting
    // the table itself reports that its fields should not be visited.
    if (!shouldVisitTable(schemaPath, tableName, table.getJdbcTableType())
        || !visitTable(schemaPath, tableName, table)) {
      continue;
    }
    final RelDataType rowType = table.getRowType(new JavaTypeFactoryImpl(DRILL_REL_DATATYPE_SYSTEM));
    for (RelDataTypeField field : rowType.getFieldList()) {
      if (shouldVisitColumn(schemaPath, tableName, field.getName())) {
        visitField(schemaPath, tableName, field);
      }
    }
  }
}
Use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.
The class MetastoreDropTableMetadataHandler, method getPlan.
/**
 * Builds the plan for an ANALYZE TABLE ... DROP command: removes all
 * Metastore metadata for the referenced table and returns a direct plan
 * describing the outcome.
 *
 * @param sqlNode the parsed ANALYZE TABLE DROP statement
 * @return a direct plan reporting success or failure
 * @throws ForemanSetupException if plan setup fails
 */
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ForemanSetupException {
  // Dropping table metadata is only meaningful when the Metastore is enabled.
  if (!context.getOptions().getOption(ExecConstants.METASTORE_ENABLED_VALIDATOR)) {
    throw UserException.validationError()
        .message("Running ANALYZE TABLE DROP command when Metastore is disabled (`metastore.enabled` is set to false)")
        .build(logger);
  }
  SqlDropTableMetadata dropCommand = unwrap(sqlNode, SqlDropTableMetadata.class);
  AbstractSchema drillSchema = SchemaUtilites.resolveToDrillSchema(
      config.getConverter().getDefaultSchema(), dropCommand.getSchemaPath());
  List<String> schemaPath = drillSchema.getSchemaPath();
  // First path segment names the storage plugin; the remaining segments,
  // joined with the schema separator, form the workspace name.
  TableInfo tableInfo = TableInfo.builder()
      .name(dropCommand.getName())
      .storagePlugin(schemaPath.get(0))
      .workspace(Strings.join(schemaPath.subList(1, schemaPath.size()), AbstractSchema.SCHEMA_SEPARATOR))
      .build();
  try {
    Tables tables = context.getMetastoreRegistry().get().tables();
    MetastoreTableInfo metastoreTableInfo = tables.basicRequests().metastoreTableInfo(tableInfo);
    if (!metastoreTableInfo.isExists()) {
      if (dropCommand.checkMetadataExistence()) {
        // Existence check requested: missing metadata is a validation error.
        throw UserException.validationError()
            .message("Metadata for table [%s] not found.", dropCommand.getName())
            .build(logger);
      }
      return DirectPlan.createDirectPlan(context, false,
          String.format("Metadata for table [%s] does not exist.", dropCommand.getName()));
    }
    // Remove every metadata type recorded for this table.
    tables.modify()
        .delete(Delete.builder()
            .metadataType(MetadataType.ALL)
            .filter(tableInfo.toFilter())
            .build())
        .execute();
  } catch (MetastoreException e) {
    logger.error("Error when dropping metadata for table {}", dropCommand.getName(), e);
    return DirectPlan.createDirectPlan(context, false, e.getMessage());
  }
  return DirectPlan.createDirectPlan(context, true,
      String.format("Metadata for table [%s] dropped.", dropCommand.getName()));
}
Use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.
The class SchemaHandler, method getWorkspaceSchema.
/**
 * Resolves the schema the given table belongs to and verifies that it is a
 * file-storage workspace schema containing a real (non-temporary) table.
 * Each failed check reports an error via {@code produceErrorResult}.
 *
 * @param tableSchema schema path components of the table
 * @param tableName name of the table
 * @return the resolved workspace schema
 */
public WorkspaceSchemaFactory.WorkspaceSchema getWorkspaceSchema(List<String> tableSchema, String tableName) {
  SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
  // Temporary tables are session-scoped and cannot be targeted here.
  AbstractSchema temporarySchema =
      SchemaUtilites.resolveToTemporarySchema(tableSchema, defaultSchema, context.getConfig());
  if (context.getSession().isTemporaryTable(temporarySchema, context.getConfig(), tableName)) {
    produceErrorResult(String.format("Indicated table [%s] is temporary table", tableName), true);
  }
  AbstractSchema drillSchema = SchemaUtilites.resolveToMutableDrillSchema(defaultSchema, tableSchema);
  Table table = SqlHandlerUtil.getTableFromSchema(drillSchema, tableName);
  boolean tableMissing = table == null || table.getJdbcTableType() != Schema.TableType.TABLE;
  if (tableMissing) {
    produceErrorResult(String.format("Table [%s] was not found", tableName), true);
  }
  if (!(drillSchema instanceof WorkspaceSchemaFactory.WorkspaceSchema)) {
    produceErrorResult(String.format("Table [`%s`.`%s`] must belong to file storage plugin",
        drillSchema.getFullSchemaName(), tableName), true);
  }
  // Guard the cast in case produceErrorResult did not abort execution.
  Preconditions.checkState(drillSchema instanceof WorkspaceSchemaFactory.WorkspaceSchema);
  return (WorkspaceSchemaFactory.WorkspaceSchema) drillSchema;
}
Use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.
The class ShowTablesHandler, method rewrite.
/**
 * Rewrite the parse tree as SELECT ... FROM INFORMATION_SCHEMA.`TABLES` ...
 *
 * @param sqlNode the parsed SHOW TABLES statement
 * @return an equivalent SELECT over INFORMATION_SCHEMA.`TABLES`
 * @throws ForemanSetupException if rewriting fails
 */
@Override
public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
  SqlShowTables node = unwrap(sqlNode, SqlShowTables.class);
  List<SqlNode> selectList = Arrays.asList(
      new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO),
      new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO));
  SqlNode fromClause = new SqlIdentifier(
      Arrays.asList(IS_SCHEMA_NAME, InfoSchemaTableType.TABLES.name()), SqlParserPos.ZERO);
  SchemaPlus schemaPlus;
  if (node.getDb() == null) {
    // If no schema is given in SHOW TABLES command, list tables from current schema
    schemaPlus = config.getConverter().getDefaultSchema();
  } else {
    List<String> schemaNames = node.getDb().names;
    schemaPlus = SchemaUtilites.findSchema(config.getConverter().getDefaultSchema(), schemaNames);
    if (schemaPlus == null) {
      throw UserException.validationError()
          .message("Invalid schema name [%s]", SchemaUtilites.getSchemaPath(schemaNames))
          .build(logger);
    }
  }
  if (SchemaUtilites.isRootSchema(schemaPlus)) {
    // If the default schema is a root schema, throw an error to select a default schema
    throw UserException.validationError()
        .message("No default schema selected. Select a schema using 'USE schema' command")
        .build(logger);
  }
  AbstractSchema drillSchema = SchemaUtilites.unwrapAsDrillSchemaInstance(schemaPlus);
  // Restrict the query to the resolved schema.
  SqlNode where = DrillParserUtil.createCondition(
      new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO),
      SqlStdOperatorTable.EQUALS,
      SqlLiteral.createCharString(drillSchema.getFullSchemaName(),
          Util.getDefaultCharset().name(), SqlParserPos.ZERO));
  SqlNode filter = null;
  if (node.getLikePattern() != null) {
    SqlNode likePattern = node.getLikePattern();
    SqlNode column = new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO);
    // wrap columns name values and condition in lower function if case insensitive
    if (!drillSchema.areTableNamesCaseSensitive() && likePattern instanceof SqlCharStringLiteral) {
      NlsString conditionString = ((SqlCharStringLiteral) likePattern).getNlsString();
      likePattern = SqlCharStringLiteral.createCharString(
          conditionString.getValue().toLowerCase(),
          conditionString.getCharsetName(),
          likePattern.getParserPosition());
      column = SqlStdOperatorTable.LOWER.createCall(SqlParserPos.ZERO, column);
    }
    filter = DrillParserUtil.createCondition(column, SqlStdOperatorTable.LIKE, likePattern);
  } else if (node.getWhereClause() != null) {
    filter = node.getWhereClause();
  }
  where = DrillParserUtil.createCondition(where, SqlStdOperatorTable.AND, filter);
  return new SqlSelect(SqlParserPos.ZERO, null, new SqlNodeList(selectList, SqlParserPos.ZERO),
      fromClause, where, null, null, null, null, null, null);
}
Use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.
The class AnalyzeTableHandler, method getPlan.
/**
 * Builds the physical plan for ANALYZE TABLE ... COMPUTE STATISTICS.
 * Validates that the target is a parquet-backed directory table, skips the
 * work when fresh statistics already exist, and otherwise converts the scan
 * into a Drill logical/physical plan with a stats writer on top.
 *
 * @param sqlNode the parsed ANALYZE TABLE statement
 * @return the physical plan computing statistics, or a direct plan when
 *         analysis is unsupported or not required
 */
@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException, ForemanSetupException {
final SqlAnalyzeTable sqlAnalyzeTable = unwrap(sqlNode, SqlAnalyzeTable.class);
verifyNoUnsupportedFunctions(sqlAnalyzeTable);
SqlNode tableRef = sqlAnalyzeTable.getTableRef();
// Build a plain SELECT over the analyzed columns; this becomes the scan
// whose output feeds the statistics computation.
SqlSelect scanSql = new SqlSelect(SqlParserPos.ZERO, /* position */
SqlNodeList.EMPTY, /* keyword list */
getColumnList(sqlAnalyzeTable), /* select list */
tableRef, /* from */
null, /* where */
null, /* group by */
null, /* having */
null, /* windowDecls */
null, /* orderBy */
null, /* offset */
null);
ConvertedRelNode convertedRelNode = validateAndConvert(rewrite(scanSql));
RelDataType validatedRowType = convertedRelNode.getValidatedRowType();
RelNode relScan = convertedRelNode.getConvertedNode();
DrillTableInfo drillTableInfo = DrillTableInfo.getTableInfoHolder(sqlAnalyzeTable.getTableRef(), config);
String tableName = drillTableInfo.tableName();
AbstractSchema drillSchema = SchemaUtilites.resolveToDrillSchema(config.getConverter().getDefaultSchema(), drillTableInfo.schemaPath());
Table table = SqlHandlerUtil.getTableFromSchema(drillSchema, tableName);
if (table == null) {
throw UserException.validationError().message("No table with given name [%s] exists in schema [%s]", tableName, drillSchema.getFullSchemaName()).build(logger);
} else if (!(table instanceof DrillTable)) {
// Views and non-Drill tables cannot be analyzed.
return DrillStatsTable.notSupported(context, tableName);
}
DrillTable drillTable = (DrillTable) table;
final Object selection = drillTable.getSelection();
if (!(selection instanceof FormatSelection)) {
return DrillStatsTable.notSupported(context, tableName);
}
// Do not support non-parquet tables
FormatSelection formatSelection = (FormatSelection) selection;
FormatPluginConfig formatConfig = formatSelection.getFormat();
// Accept the parquet format either directly or via a named format alias.
if (!((formatConfig instanceof ParquetFormatConfig) || ((formatConfig instanceof NamedFormatPluginConfig) && ((NamedFormatPluginConfig) formatConfig).getName().equals("parquet")))) {
return DrillStatsTable.notSupported(context, tableName);
}
// NOTE(review): this DrillFileSystem is never closed here — presumably its
// lifecycle is managed elsewhere or it is cheap to leak; confirm.
FileSystemPlugin plugin = (FileSystemPlugin) drillTable.getPlugin();
DrillFileSystem fs = new DrillFileSystem(plugin.getFormatPlugin(formatSelection.getFormat()).getFsConf());
Path selectionRoot = formatSelection.getSelection().getSelectionRoot();
// Only directory-based tables whose root matches the table name are supported.
if (!selectionRoot.toUri().getPath().endsWith(tableName) || !fs.getFileStatus(selectionRoot).isDirectory()) {
return DrillStatsTable.notSupported(context, tableName);
}
// Do not recompute statistics, if stale
Path statsFilePath = new Path(selectionRoot, DotDrillType.STATS.getEnding());
if (fs.exists(statsFilePath) && !isStatsStale(fs, statsFilePath)) {
return DrillStatsTable.notRequired(context, tableName);
}
// Convert the query to Drill Logical plan and insert a writer operator on top.
DrillRel drel = convertToDrel(relScan, drillSchema, tableName, sqlAnalyzeTable.getSamplePercent());
Prel prel = convertToPrel(drel, validatedRowType);
logAndSetTextPlan("Drill Physical", prel, logger);
PhysicalOperator pop = convertToPop(prel);
PhysicalPlan plan = convertToPlan(pop);
log("Drill Plan", plan, logger);
return plan;
}
Aggregations