Example 11 with AbstractSchema

use of org.apache.drill.exec.store.AbstractSchema in project drill by axbaretto.

the class InfoSchemaRecordGenerator method visitTables.

/**
 * Visit the tables in the given schema.
 * @param  schemaPath  the path to the given schema
 * @param  schema  the given schema
 */
public void visitTables(String schemaPath, SchemaPlus schema) {
    final AbstractSchema drillSchema = schema.unwrap(AbstractSchema.class);
    final List<String> tableNames = Lists.newArrayList(schema.getTableNames());
    for (Pair<String, ? extends Table> tableNameToTable : drillSchema.getTablesByNames(tableNames)) {
        final String tableName = tableNameToTable.getKey();
        final Table table = tableNameToTable.getValue();
        final TableType tableType = table.getJdbcTableType();
        // Visit the table and, if the visitor asks to descend into it, ...
        if (shouldVisitTable(schemaPath, tableName, tableType) && visitTable(schemaPath, tableName, table)) {
            // ... visit each of the table's fields.
            final RelDataType tableRow = table.getRowType(new JavaTypeFactoryImpl(DRILL_REL_DATATYPE_SYSTEM));
            for (RelDataTypeField field : tableRow.getFieldList()) {
                if (shouldVisitColumn(schemaPath, tableName, field.getName())) {
                    visitField(schemaPath, tableName, field);
                }
            }
        }
    }
}
Also used : RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) Table(org.apache.calcite.schema.Table) TableType(org.apache.calcite.schema.Schema.TableType) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RelDataType(org.apache.calcite.rel.type.RelDataType)
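For orientation, a minimal sketch (not part of the Drill sources) of how this visitor could be driven over every sub-schema of a root SchemaPlus; visitAllSubSchemas is a hypothetical helper and the schemaPath handling is simplified to the bare sub-schema name:

// Hypothetical helper, not in InfoSchemaRecordGenerator: walk each sub-schema of a
// root schema and visit its tables, using only Calcite's SchemaPlus API.
public void visitAllSubSchemas(SchemaPlus rootSchema) {
    for (String schemaName : rootSchema.getSubSchemaNames()) {
        SchemaPlus subSchema = rootSchema.getSubSchema(schemaName);
        if (subSchema != null) {
            // Real callers would pass the dotted full schema path here; the plain
            // sub-schema name is used only to keep the sketch short.
            visitTables(schemaName, subSchema);
        }
    }
}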

Example 12 with AbstractSchema

use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.

the class MetastoreDropTableMetadataHandler method getPlan.

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ForemanSetupException {
    if (!context.getOptions().getOption(ExecConstants.METASTORE_ENABLED_VALIDATOR)) {
        throw UserException.validationError().message("Running ANALYZE TABLE DROP command when Metastore is disabled (`metastore.enabled` is set to false)").build(logger);
    }
    SqlDropTableMetadata dropTableMetadata = unwrap(sqlNode, SqlDropTableMetadata.class);
    AbstractSchema drillSchema = SchemaUtilites.resolveToDrillSchema(config.getConverter().getDefaultSchema(), dropTableMetadata.getSchemaPath());
    List<String> schemaPath = drillSchema.getSchemaPath();
    String pluginName = schemaPath.get(0);
    String workspaceName = Strings.join(schemaPath.subList(1, schemaPath.size()), AbstractSchema.SCHEMA_SEPARATOR);
    TableInfo tableInfo = TableInfo.builder().name(dropTableMetadata.getName()).storagePlugin(pluginName).workspace(workspaceName).build();
    try {
        Tables tables = context.getMetastoreRegistry().get().tables();
        MetastoreTableInfo metastoreTableInfo = tables.basicRequests().metastoreTableInfo(tableInfo);
        if (!metastoreTableInfo.isExists()) {
            if (dropTableMetadata.checkMetadataExistence()) {
                throw UserException.validationError().message("Metadata for table [%s] not found.", dropTableMetadata.getName()).build(logger);
            }
            return DirectPlan.createDirectPlan(context, false, String.format("Metadata for table [%s] does not exist.", dropTableMetadata.getName()));
        }
        tables.modify().delete(Delete.builder().metadataType(MetadataType.ALL).filter(tableInfo.toFilter()).build()).execute();
    } catch (MetastoreException e) {
        logger.error("Error when dropping metadata for table {}", dropTableMetadata.getName(), e);
        return DirectPlan.createDirectPlan(context, false, e.getMessage());
    }
    return DirectPlan.createDirectPlan(context, true, String.format("Metadata for table [%s] dropped.", dropTableMetadata.getName()));
}
Also used : MetastoreTableInfo(org.apache.drill.metastore.components.tables.MetastoreTableInfo) SqlDropTableMetadata(org.apache.drill.exec.planner.sql.parser.SqlDropTableMetadata) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) MetastoreException(org.apache.drill.metastore.exceptions.MetastoreException) Tables(org.apache.drill.metastore.components.tables.Tables) TableInfo(org.apache.drill.metastore.metadata.TableInfo)
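To make the plugin/workspace split above concrete, here is an illustration (not from the Drill sources) using only the JDK; the values are examples and String.join stands in for the Strings.join call with AbstractSchema.SCHEMA_SEPARATOR:

// Illustration only: how a resolved schema path such as `dfs`.`tmp` is split into
// a storage plugin name and a workspace name.
java.util.List<String> schemaPath = java.util.Arrays.asList("dfs", "tmp");
String pluginName = schemaPath.get(0);                                               // "dfs"
String workspaceName = String.join(".", schemaPath.subList(1, schemaPath.size()));   // "tmp"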

Example 13 with AbstractSchema

use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.

the class SchemaHandler method getWorkspaceSchema.

public WorkspaceSchemaFactory.WorkspaceSchema getWorkspaceSchema(List<String> tableSchema, String tableName) {
    SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
    AbstractSchema temporarySchema = SchemaUtilites.resolveToTemporarySchema(tableSchema, defaultSchema, context.getConfig());
    if (context.getSession().isTemporaryTable(temporarySchema, context.getConfig(), tableName)) {
        produceErrorResult(String.format("Indicated table [%s] is temporary table", tableName), true);
    }
    AbstractSchema drillSchema = SchemaUtilites.resolveToMutableDrillSchema(defaultSchema, tableSchema);
    Table table = SqlHandlerUtil.getTableFromSchema(drillSchema, tableName);
    if (table == null || table.getJdbcTableType() != Schema.TableType.TABLE) {
        produceErrorResult(String.format("Table [%s] was not found", tableName), true);
    }
    if (!(drillSchema instanceof WorkspaceSchemaFactory.WorkspaceSchema)) {
        produceErrorResult(String.format("Table [`%s`.`%s`] must belong to file storage plugin", drillSchema.getFullSchemaName(), tableName), true);
    }
    Preconditions.checkState(drillSchema instanceof WorkspaceSchemaFactory.WorkspaceSchema);
    return (WorkspaceSchemaFactory.WorkspaceSchema) drillSchema;
}
Also used : Table(org.apache.calcite.schema.Table) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) SchemaPlus(org.apache.calcite.schema.SchemaPlus) WorkspaceSchemaFactory(org.apache.drill.exec.store.dfs.WorkspaceSchemaFactory)
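A hedged usage sketch (the caller, the handler instance and the logger are assumptions, not part of SchemaHandler): resolving the workspace schema for a table and reporting which schema it belongs to:

// Hypothetical caller: `handler` is any SchemaHandler instance and `logger` is an
// SLF4J logger assumed to be in scope; the table path and name are examples.
WorkspaceSchemaFactory.WorkspaceSchema wsSchema =
        handler.getWorkspaceSchema(java.util.Arrays.asList("dfs", "tmp"), "events");
logger.info("Table [events] belongs to schema [{}]", wsSchema.getFullSchemaName());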

Example 14 with AbstractSchema

use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.

the class ShowTablesHandler method rewrite.

/**
 * Rewrite the parse tree as SELECT ... FROM INFORMATION_SCHEMA.`TABLES` ...
 */
@Override
public SqlNode rewrite(SqlNode sqlNode) throws ForemanSetupException {
    SqlShowTables node = unwrap(sqlNode, SqlShowTables.class);
    List<SqlNode> selectList = Arrays.asList(new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO), new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO));
    SqlNode fromClause = new SqlIdentifier(Arrays.asList(IS_SCHEMA_NAME, InfoSchemaTableType.TABLES.name()), SqlParserPos.ZERO);
    SchemaPlus schemaPlus;
    if (node.getDb() != null) {
        List<String> schemaNames = node.getDb().names;
        schemaPlus = SchemaUtilites.findSchema(config.getConverter().getDefaultSchema(), schemaNames);
        if (schemaPlus == null) {
            throw UserException.validationError().message("Invalid schema name [%s]", SchemaUtilites.getSchemaPath(schemaNames)).build(logger);
        }
    } else {
        // If no schema is given in the SHOW TABLES command, list tables from the current default schema
        schemaPlus = config.getConverter().getDefaultSchema();
    }
    if (SchemaUtilites.isRootSchema(schemaPlus)) {
        // If the resolved schema is the root schema, ask the user to select a default schema first
        throw UserException.validationError().message("No default schema selected. Select a schema using 'USE schema' command").build(logger);
    }
    AbstractSchema drillSchema = SchemaUtilites.unwrapAsDrillSchemaInstance(schemaPlus);
    SqlNode where = DrillParserUtil.createCondition(new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO), SqlStdOperatorTable.EQUALS, SqlLiteral.createCharString(drillSchema.getFullSchemaName(), Util.getDefaultCharset().name(), SqlParserPos.ZERO));
    SqlNode filter = null;
    if (node.getLikePattern() != null) {
        SqlNode likePattern = node.getLikePattern();
        SqlNode column = new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO);
        // wrap the column reference and the LIKE pattern in LOWER() when table names are case-insensitive
        if (!drillSchema.areTableNamesCaseSensitive() && likePattern instanceof SqlCharStringLiteral) {
            NlsString conditionString = ((SqlCharStringLiteral) likePattern).getNlsString();
            likePattern = SqlCharStringLiteral.createCharString(conditionString.getValue().toLowerCase(), conditionString.getCharsetName(), likePattern.getParserPosition());
            column = SqlStdOperatorTable.LOWER.createCall(SqlParserPos.ZERO, column);
        }
        filter = DrillParserUtil.createCondition(column, SqlStdOperatorTable.LIKE, likePattern);
    } else if (node.getWhereClause() != null) {
        filter = node.getWhereClause();
    }
    where = DrillParserUtil.createCondition(where, SqlStdOperatorTable.AND, filter);
    return new SqlSelect(SqlParserPos.ZERO, null, new SqlNodeList(selectList, SqlParserPos.ZERO), fromClause, where, null, null, null, null, null, null);
}
Also used : SqlShowTables(org.apache.drill.exec.planner.sql.parser.SqlShowTables) SqlSelect(org.apache.calcite.sql.SqlSelect) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) SchemaPlus(org.apache.calcite.schema.SchemaPlus) NlsString(org.apache.calcite.util.NlsString) SqlNodeList(org.apache.calcite.sql.SqlNodeList) SqlCharStringLiteral(org.apache.calcite.sql.SqlCharStringLiteral) SqlIdentifier(org.apache.calcite.sql.SqlIdentifier) SqlNode(org.apache.calcite.sql.SqlNode)
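For readers who want the end result rather than the parse-tree plumbing, the rewritten query looks roughly like the string below (an illustration only; the schema name and LIKE pattern are examples, and for case-insensitive schemas both the TABLE_NAME column and the pattern are additionally wrapped in LOWER):

// Illustration only: approximate SQL produced for SHOW TABLES IN dfs.tmp LIKE '%events%'.
String rewritten = "SELECT TABLE_SCHEMA, TABLE_NAME "
        + "FROM INFORMATION_SCHEMA.`TABLES` "
        + "WHERE TABLE_SCHEMA = 'dfs.tmp' AND TABLE_NAME LIKE '%events%'";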

Example 15 with AbstractSchema

use of org.apache.drill.exec.store.AbstractSchema in project drill by apache.

the class AnalyzeTableHandler method getPlan.

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException, ForemanSetupException {
    final SqlAnalyzeTable sqlAnalyzeTable = unwrap(sqlNode, SqlAnalyzeTable.class);
    verifyNoUnsupportedFunctions(sqlAnalyzeTable);
    SqlNode tableRef = sqlAnalyzeTable.getTableRef();
    SqlSelect scanSql = new SqlSelect(
        SqlParserPos.ZERO,              /* position */
        SqlNodeList.EMPTY,              /* keyword list */
        getColumnList(sqlAnalyzeTable), /* select list */
        tableRef,                       /* from */
        null,                           /* where */
        null,                           /* group by */
        null,                           /* having */
        null,                           /* windowDecls */
        null,                           /* orderBy */
        null,                           /* offset */
        null);                          /* fetch */
    ConvertedRelNode convertedRelNode = validateAndConvert(rewrite(scanSql));
    RelDataType validatedRowType = convertedRelNode.getValidatedRowType();
    RelNode relScan = convertedRelNode.getConvertedNode();
    DrillTableInfo drillTableInfo = DrillTableInfo.getTableInfoHolder(sqlAnalyzeTable.getTableRef(), config);
    String tableName = drillTableInfo.tableName();
    AbstractSchema drillSchema = SchemaUtilites.resolveToDrillSchema(config.getConverter().getDefaultSchema(), drillTableInfo.schemaPath());
    Table table = SqlHandlerUtil.getTableFromSchema(drillSchema, tableName);
    if (table == null) {
        throw UserException.validationError().message("No table with given name [%s] exists in schema [%s]", tableName, drillSchema.getFullSchemaName()).build(logger);
    } else if (!(table instanceof DrillTable)) {
        return DrillStatsTable.notSupported(context, tableName);
    }
    DrillTable drillTable = (DrillTable) table;
    final Object selection = drillTable.getSelection();
    if (!(selection instanceof FormatSelection)) {
        return DrillStatsTable.notSupported(context, tableName);
    }
    // Do not support non-parquet tables
    FormatSelection formatSelection = (FormatSelection) selection;
    FormatPluginConfig formatConfig = formatSelection.getFormat();
    if (!((formatConfig instanceof ParquetFormatConfig) || ((formatConfig instanceof NamedFormatPluginConfig) && ((NamedFormatPluginConfig) formatConfig).getName().equals("parquet")))) {
        return DrillStatsTable.notSupported(context, tableName);
    }
    FileSystemPlugin plugin = (FileSystemPlugin) drillTable.getPlugin();
    DrillFileSystem fs = new DrillFileSystem(plugin.getFormatPlugin(formatSelection.getFormat()).getFsConf());
    Path selectionRoot = formatSelection.getSelection().getSelectionRoot();
    if (!selectionRoot.toUri().getPath().endsWith(tableName) || !fs.getFileStatus(selectionRoot).isDirectory()) {
        return DrillStatsTable.notSupported(context, tableName);
    }
    // Do not recompute statistics if an existing stats file is present and not stale
    Path statsFilePath = new Path(selectionRoot, DotDrillType.STATS.getEnding());
    if (fs.exists(statsFilePath) && !isStatsStale(fs, statsFilePath)) {
        return DrillStatsTable.notRequired(context, tableName);
    }
    // Convert the query to Drill Logical plan and insert a writer operator on top.
    DrillRel drel = convertToDrel(relScan, drillSchema, tableName, sqlAnalyzeTable.getSamplePercent());
    Prel prel = convertToPrel(drel, validatedRowType);
    logAndSetTextPlan("Drill Physical", prel, logger);
    PhysicalOperator pop = convertToPop(prel);
    PhysicalPlan plan = convertToPlan(pop);
    log("Drill Plan", plan, logger);
    return plan;
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) FileSystemPlugin(org.apache.drill.exec.store.dfs.FileSystemPlugin) PhysicalPlan(org.apache.drill.exec.physical.PhysicalPlan) DrillTable(org.apache.drill.exec.planner.logical.DrillTable) DrillStatsTable(org.apache.drill.exec.planner.common.DrillStatsTable) SqlAnalyzeTable(org.apache.drill.exec.planner.sql.parser.SqlAnalyzeTable) Table(org.apache.calcite.schema.Table) RelDataType(org.apache.calcite.rel.type.RelDataType) FormatSelection(org.apache.drill.exec.store.dfs.FormatSelection) NamedFormatPluginConfig(org.apache.drill.exec.store.dfs.NamedFormatPluginConfig) Prel(org.apache.drill.exec.planner.physical.Prel) SqlSelect(org.apache.calcite.sql.SqlSelect) RelNode(org.apache.calcite.rel.RelNode) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) AbstractSchema(org.apache.drill.exec.store.AbstractSchema) FormatPluginConfig(org.apache.drill.common.logical.FormatPluginConfig) PhysicalOperator(org.apache.drill.exec.physical.base.PhysicalOperator) DrillRel(org.apache.drill.exec.planner.logical.DrillRel) ParquetFormatConfig(org.apache.drill.exec.store.parquet.ParquetFormatConfig) SqlNode(org.apache.calcite.sql.SqlNode)
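The densest condition in getPlan is the parquet-format guard; a behavior-equivalent helper (not part of the Drill sources, name is illustrative) spells out the intent:

// Illustration only: same check as above, factored into a helper for readability.
// NamedFormatPluginConfig covers format plugins referenced by name, e.g. "parquet".
private static boolean isParquetFormat(FormatPluginConfig formatConfig) {
    return formatConfig instanceof ParquetFormatConfig
            || (formatConfig instanceof NamedFormatPluginConfig
                && "parquet".equals(((NamedFormatPluginConfig) formatConfig).getName()));
}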

Aggregations

AbstractSchema (org.apache.drill.exec.store.AbstractSchema): 18
SchemaPlus (org.apache.calcite.schema.SchemaPlus): 10
Table (org.apache.calcite.schema.Table): 6
RelDataType (org.apache.calcite.rel.type.RelDataType): 5
SqlIdentifier (org.apache.calcite.sql.SqlIdentifier): 4
SqlNode (org.apache.calcite.sql.SqlNode): 4
SqlSelect (org.apache.calcite.sql.SqlSelect): 4
DrillConfig (org.apache.drill.common.config.DrillConfig): 4
RelNode (org.apache.calcite.rel.RelNode): 3
SqlNodeList (org.apache.calcite.sql.SqlNodeList): 3
PhysicalPlan (org.apache.drill.exec.physical.PhysicalPlan): 3
PhysicalOperator (org.apache.drill.exec.physical.base.PhysicalOperator): 3
DrillRel (org.apache.drill.exec.planner.logical.DrillRel): 3
Prel (org.apache.drill.exec.planner.physical.Prel): 3
JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 2
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 2
TableType (org.apache.calcite.schema.Schema.TableType): 2
SqlCharStringLiteral (org.apache.calcite.sql.SqlCharStringLiteral): 2
NlsString (org.apache.calcite.util.NlsString): 2
ProjectAllowDupPrel (org.apache.drill.exec.planner.physical.ProjectAllowDupPrel): 2