Example 26 with SchemaPlus

Use of org.apache.calcite.schema.SchemaPlus in project drill by apache.

The getPlan method of the RefreshMetadataHandler class:

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException, ForemanSetupException {
    final SqlRefreshMetadata refreshTable = unwrap(sqlNode, SqlRefreshMetadata.class);
    try {
        final SchemaPlus schema = findSchema(config.getConverter().getDefaultSchema(), refreshTable.getSchemaPath());
        if (schema == null) {
            return direct(false, "Storage plugin or workspace does not exist [%s]", SchemaUtilites.SCHEMA_PATH_JOINER.join(refreshTable.getSchemaPath()));
        }
        final String tableName = refreshTable.getName();
        if (tableName.contains("*") || tableName.contains("?")) {
            return direct(false, "Glob path %s not supported for metadata refresh", tableName);
        }
        final Table table = schema.getTable(tableName);
        if (table == null) {
            return direct(false, "Table %s does not exist.", tableName);
        }
        if (!(table instanceof DrillTable)) {
            return notSupported(tableName);
        }
        final DrillTable drillTable = (DrillTable) table;
        final Object selection = drillTable.getSelection();
        if (!(selection instanceof FormatSelection)) {
            return notSupported(tableName);
        }
        FormatSelection formatSelection = (FormatSelection) selection;
        FormatPluginConfig formatConfig = formatSelection.getFormat();
        if (!((formatConfig instanceof ParquetFormatConfig) || ((formatConfig instanceof NamedFormatPluginConfig) && ((NamedFormatPluginConfig) formatConfig).name.equals("parquet")))) {
            return notSupported(tableName);
        }
        FileSystemPlugin plugin = (FileSystemPlugin) drillTable.getPlugin();
        DrillFileSystem fs = new DrillFileSystem(plugin.getFormatPlugin(formatSelection.getFormat()).getFsConf());
        String selectionRoot = formatSelection.getSelection().selectionRoot;
        if (!fs.getFileStatus(new Path(selectionRoot)).isDirectory()) {
            return notSupported(tableName);
        }
        if (!(formatConfig instanceof ParquetFormatConfig)) {
            formatConfig = new ParquetFormatConfig();
        }
        Metadata.createMeta(fs, selectionRoot, (ParquetFormatConfig) formatConfig);
        return direct(true, "Successfully updated metadata for table %s.", tableName);
    } catch (Exception e) {
        logger.error("Failed to update metadata for table '{}'", refreshTable.getName(), e);
        return DirectPlan.createDirectPlan(context, false, String.format("Error: %s", e.getMessage()));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileSystemPlugin (org.apache.drill.exec.store.dfs.FileSystemPlugin), Table (org.apache.calcite.schema.Table), DrillTable (org.apache.drill.exec.planner.logical.DrillTable), SchemaPlus (org.apache.calcite.schema.SchemaPlus), FormatSelection (org.apache.drill.exec.store.dfs.FormatSelection), SqlRefreshMetadata (org.apache.drill.exec.planner.sql.parser.SqlRefreshMetadata), IOException (java.io.IOException), ValidationException (org.apache.calcite.tools.ValidationException), ForemanSetupException (org.apache.drill.exec.work.foreman.ForemanSetupException), RelConversionException (org.apache.calcite.tools.RelConversionException), NamedFormatPluginConfig (org.apache.drill.exec.store.dfs.NamedFormatPluginConfig), DrillFileSystem (org.apache.drill.exec.store.dfs.DrillFileSystem), FormatPluginConfig (org.apache.drill.common.logical.FormatPluginConfig), ParquetFormatConfig (org.apache.drill.exec.store.parquet.ParquetFormatConfig)
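
The SchemaPlus work in this handler is a schema walk followed by a table lookup and a type check. Below is a minimal sketch of that pattern using only Calcite's public SchemaPlus API; the explicit path walk stands in for Drill's SchemaUtilites/findSchema helper, which is assumed here rather than shown.

import java.util.List;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.Table;

// Sketch only: walk a dotted schema path and fetch a table, mirroring the
// lookup-then-check flow in getPlan() above. A null return corresponds to the
// points where the handler emits its "does not exist" direct plan.
static Table lookupTable(SchemaPlus root, List<String> schemaPath, String tableName) {
    SchemaPlus schema = root;
    for (String name : schemaPath) {
        schema = schema.getSubSchema(name);
        if (schema == null) {
            return null;
        }
    }
    return schema.getTable(tableName);
}

The instanceof narrowing that follows in the handler (DrillTable, FormatSelection, ParquetFormatConfig) is Drill-specific refinement on top of the generic Table returned by SchemaPlus.getTable.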

Example 27 with SchemaPlus

Use of org.apache.calcite.schema.SchemaPlus in project drill by apache.

The getPlan method of the ShowFileHandler class:

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException {
    SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb();
    DrillFileSystem fs = null;
    String defaultLocation = null;
    String fromDir = "./";
    SchemaPlus defaultSchema = config.getConverter().getDefaultSchema();
    SchemaPlus drillSchema = defaultSchema;
    // SHOW FILES can be used without a FROM clause, in which case we list the files in the default schema
    if (from != null) {
        // The FROM clause may name just a schema or a schema plus a trailing table/directory name,
        // so first check whether the full path is itself a schema
        drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names);
        if (drillSchema == null) {
            // The full FROM clause is not a schema; retry without its last segment and treat that segment as a directory.
            drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1));
            fromDir = fromDir + from.names.get((from.names.size() - 1));
        }
        if (drillSchema == null) {
            throw UserException.validationError().message("Invalid FROM/IN clause [%s]", from.toString()).build(logger);
        }
    }
    WorkspaceSchema wsSchema;
    try {
        wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema();
    } catch (ClassCastException e) {
        throw UserException.validationError().message("SHOW FILES is supported in workspace type schema only. Schema [%s] is not a workspace schema.", SchemaUtilites.getSchemaPath(drillSchema)).build(logger);
    }
    // Get the file system object
    fs = wsSchema.getFS();
    // Get the default path
    defaultLocation = wsSchema.getDefaultLocation();
    List<ShowFilesCommandResult> rows = new ArrayList<>();
    for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) {
        ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(), !fileStatus.isDir(), fileStatus.getLen(), fileStatus.getOwner(), fileStatus.getGroup(), fileStatus.getPermission().toString(), fileStatus.getAccessTime(), fileStatus.getModificationTime());
        rows.add(result);
    }
    return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(), ShowFilesCommandResult.class);
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), WorkspaceSchema (org.apache.drill.exec.store.dfs.WorkspaceSchemaFactory.WorkspaceSchema), SchemaPlus (org.apache.calcite.schema.SchemaPlus), ArrayList (java.util.ArrayList), SqlIdentifier (org.apache.calcite.sql.SqlIdentifier), SqlShowFiles (org.apache.drill.exec.planner.sql.parser.SqlShowFiles), DrillFileSystem (org.apache.drill.exec.store.dfs.DrillFileSystem), AbstractSchema (org.apache.drill.exec.store.AbstractSchema)
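
The workspace check above hinges on SchemaPlus.unwrap, which exposes the implementation object behind Calcite's schema wrapper. A hedged sketch of that check, reusing the Drill classes already listed for this example; in the Calcite versions Drill builds against, unwrap throws ClassCastException when the schema is not backed by the requested type, which is why the handler catches that exception rather than testing for null.

// Sketch only: unwrap the Drill schema implementation and confirm its default
// sub-schema is a dfs workspace before listing files. unwrap itself may throw
// ClassCastException for non-Drill schemas, as in the handler above.
AbstractSchema impl = drillSchema.unwrap(AbstractSchema.class);
if (!(impl.getDefaultSchema() instanceof WorkspaceSchema)) {
    throw UserException.validationError()
        .message("SHOW FILES is supported in workspace type schema only.")
        .build(logger);
}
WorkspaceSchema ws = (WorkspaceSchema) impl.getDefaultSchema();
DrillFileSystem fs = ws.getFS();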

Example 28 with SchemaPlus

Use of org.apache.calcite.schema.SchemaPlus in project drill by apache.

The rewrite method of the ShowTablesHandler class:

/** Rewrite the parse tree as SELECT ... FROM INFORMATION_SCHEMA.`TABLES` ... */
@Override
public SqlNode rewrite(SqlNode sqlNode) throws RelConversionException, ForemanSetupException {
    SqlShowTables node = unwrap(sqlNode, SqlShowTables.class);
    List<SqlNode> selectList = Lists.newArrayList();
    SqlNode fromClause;
    SqlNode where;
    // create select columns
    selectList.add(new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO));
    selectList.add(new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO));
    fromClause = new SqlIdentifier(ImmutableList.of(IS_SCHEMA_NAME, TAB_TABLES), SqlParserPos.ZERO);
    final SqlIdentifier db = node.getDb();
    String tableSchema;
    if (db != null) {
        tableSchema = db.toString();
    } else {
        // If no schema is given in the SHOW TABLES command, list tables from the current schema
        SchemaPlus schema = config.getConverter().getDefaultSchema();
        if (SchemaUtilites.isRootSchema(schema)) {
            // If the default schema is the root schema, ask the user to select a default schema first
            throw UserException.validationError().message("No default schema selected. Select a schema using 'USE schema' command").build(logger);
        }
        final AbstractSchema drillSchema = SchemaUtilites.unwrapAsDrillSchemaInstance(schema);
        tableSchema = drillSchema.getFullSchemaName();
    }
    final String charset = Util.getDefaultCharset().name();
    where = DrillParserUtil.createCondition(new SqlIdentifier(SHRD_COL_TABLE_SCHEMA, SqlParserPos.ZERO), SqlStdOperatorTable.EQUALS, SqlLiteral.createCharString(tableSchema, charset, SqlParserPos.ZERO));
    SqlNode filter = null;
    final SqlNode likePattern = node.getLikePattern();
    if (likePattern != null) {
        filter = DrillParserUtil.createCondition(new SqlIdentifier(SHRD_COL_TABLE_NAME, SqlParserPos.ZERO), SqlStdOperatorTable.LIKE, likePattern);
    } else if (node.getWhereClause() != null) {
        filter = node.getWhereClause();
    }
    where = DrillParserUtil.createCondition(where, SqlStdOperatorTable.AND, filter);
    return new SqlSelect(SqlParserPos.ZERO, null, new SqlNodeList(selectList, SqlParserPos.ZERO), fromClause, where, null, null, null, null, null, null);
}
Also used: SqlShowTables (org.apache.drill.exec.planner.sql.parser.SqlShowTables), SqlSelect (org.apache.calcite.sql.SqlSelect), AbstractSchema (org.apache.drill.exec.store.AbstractSchema), SchemaPlus (org.apache.calcite.schema.SchemaPlus), SqlNodeList (org.apache.calcite.sql.SqlNodeList), SqlIdentifier (org.apache.calcite.sql.SqlIdentifier), SqlNode (org.apache.calcite.sql.SqlNode)
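
DrillParserUtil.createCondition is a small Drill convenience; the TABLE_SCHEMA predicate it produces can be built with Calcite's public SQL-node API alone. A sketch follows, where the schema name 'dfs.tmp' is only an illustrative value and the two-argument createCharString (default charset) replaces the charset-aware overload used above.

import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlLiteral;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.fun.SqlStdOperatorTable;
import org.apache.calcite.sql.parser.SqlParserPos;

// Sketch only: build TABLE_SCHEMA = 'dfs.tmp' as a SqlNode, roughly what the
// first createCondition call above contributes to the WHERE clause.
SqlNode column = new SqlIdentifier("TABLE_SCHEMA", SqlParserPos.ZERO);
SqlNode value = SqlLiteral.createCharString("dfs.tmp", SqlParserPos.ZERO);
SqlNode where = SqlStdOperatorTable.EQUALS.createCall(SqlParserPos.ZERO, column, value);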

Example 29 with SchemaPlus

Use of org.apache.calcite.schema.SchemaPlus in project drill by apache.

The registerSchemas method of the HiveSchemaFactory class:

@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) throws IOException {
    DrillHiveMetaStoreClient mClientForSchemaTree = processUserMetastoreClient;
    if (isDrillImpersonationEnabled) {
        try {
            mClientForSchemaTree = metaStoreClientLoadingCache.get(schemaConfig.getUserName());
        } catch (final ExecutionException e) {
            throw new IOException("Failure setting up Hive metastore client.", e);
        }
    }
    HiveSchema schema = new HiveSchema(schemaConfig, mClientForSchemaTree, schemaName);
    SchemaPlus hPlus = parent.add(schemaName, schema);
    schema.setHolder(hPlus);
}
Also used: DrillHiveMetaStoreClient (org.apache.drill.exec.store.hive.DrillHiveMetaStoreClient), SchemaPlus (org.apache.calcite.schema.SchemaPlus), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException)
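
The registration step itself is plain Calcite: parent.add returns the SchemaPlus wrapper that Calcite places around the registered schema, and that wrapper is what HiveSchema keeps via setHolder so it can later resolve sibling and child schemas. A standalone sketch against Calcite's public API, where Frameworks.createRootSchema and Calcite's own impl.AbstractSchema stand in for Drill's plugin wiring:

import org.apache.calcite.schema.Schema;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.tools.Frameworks;

// Sketch only: register a child schema and capture the SchemaPlus wrapper
// Calcite returns, analogous to parent.add(schemaName, schema) above.
SchemaPlus root = Frameworks.createRootSchema(false);
// empty placeholder schema; Drill would pass its HiveSchema here
Schema hive = new AbstractSchema();
SchemaPlus hPlus = root.add("hive", hive);
// the child schema is now reachable from the root by name
SchemaPlus found = root.getSubSchema("hive");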

Example 30 with SchemaPlus

Use of org.apache.calcite.schema.SchemaPlus in project drill by apache.

The getPlan method of the DescribeSchemaHandler class:

@Override
public PhysicalPlan getPlan(SqlNode sqlNode) {
    SqlIdentifier schema = ((SqlDescribeSchema) sqlNode).getSchema();
    SchemaPlus drillSchema = SchemaUtilites.findSchema(config.getConverter().getDefaultSchema(), schema.names);
    if (drillSchema != null) {
        StoragePlugin storagePlugin;
        try {
            storagePlugin = context.getStorage().getPlugin(schema.names.get(0));
        } catch (ExecutionSetupException e) {
            throw new DrillRuntimeException("Failure while retrieving storage plugin", e);
        }
        String properties;
        try {
            final Map configMap = mapper.convertValue(storagePlugin.getConfig(), Map.class);
            if (storagePlugin instanceof FileSystemPlugin) {
                transformWorkspaces(schema.names, configMap);
            }
            properties = mapper.writeValueAsString(configMap);
        } catch (JsonProcessingException e) {
            throw new DrillRuntimeException("Error while trying to convert storage config to json string", e);
        }
        return DirectPlan.createDirectPlan(context, new DescribeSchemaResult(Joiner.on(".").join(schema.names), properties));
    }
    throw UserException.validationError().message(String.format("Invalid schema name [%s]", Joiner.on(".").join(schema.names))).build(logger);
}
Also used: ExecutionSetupException (org.apache.drill.common.exceptions.ExecutionSetupException), FileSystemPlugin (org.apache.drill.exec.store.dfs.FileSystemPlugin), SchemaPlus (org.apache.calcite.schema.SchemaPlus), SerializableString (com.fasterxml.jackson.core.SerializableString), SqlIdentifier (org.apache.calcite.sql.SqlIdentifier), DrillRuntimeException (org.apache.drill.common.exceptions.DrillRuntimeException), Map (java.util.Map), JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException), SqlDescribeSchema (org.apache.drill.exec.planner.sql.parser.SqlDescribeSchema), StoragePlugin (org.apache.drill.exec.store.StoragePlugin)
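
Here SchemaPlus only validates that the schema path resolves; the descriptive output comes from a Jackson round-trip over the storage plugin config. A hedged sketch of that round-trip (ObjectMapper and TypeReference are standard Jackson; storagePlugin.getConfig() is the Drill call shown above):

import java.util.Map;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch only: convert the plugin config bean into a mutable Map (so entries
// such as workspaces can be adjusted) and render it back as JSON, mirroring
// the convertValue/writeValueAsString pair in getPlan() above.
// writeValueAsString throws JsonProcessingException, which the handler catches.
ObjectMapper mapper = new ObjectMapper();
Map<String, Object> configMap = mapper.convertValue(
        storagePlugin.getConfig(), new TypeReference<Map<String, Object>>() { });
String properties = mapper.writeValueAsString(configMap);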

Aggregations

SchemaPlus (org.apache.calcite.schema.SchemaPlus): 33 usages
Table (org.apache.calcite.schema.Table): 8 usages
JavaTypeFactory (org.apache.calcite.adapter.java.JavaTypeFactory): 6 usages
JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 6 usages
DruidOperatorTable (io.druid.sql.calcite.planner.DruidOperatorTable): 5 usages
PlannerConfig (io.druid.sql.calcite.planner.PlannerConfig): 5 usages
PlannerFactory (io.druid.sql.calcite.planner.PlannerFactory): 5 usages
StreamableTable (org.apache.calcite.schema.StreamableTable): 5 usages
SqlIdentifier (org.apache.calcite.sql.SqlIdentifier): 4 usages
SqlNode (org.apache.calcite.sql.SqlNode): 4 usages
AbstractSchema (org.apache.drill.exec.store.AbstractSchema): 4 usages
QueryPlanner (org.apache.storm.sql.planner.trident.QueryPlanner): 4 usages
TridentRel (org.apache.storm.sql.planner.trident.rel.TridentRel): 4 usages
Before (org.junit.Before): 4 usages
IOException (java.io.IOException): 3 usages
ArrayList (java.util.ArrayList): 3 usages
CompilerUtil (org.apache.storm.sql.compiler.CompilerUtil): 3 usages
CalciteCatalogReader (org.apache.calcite.prepare.CalciteCatalogReader): 2 usages
UserSession (org.apache.drill.exec.rpc.user.UserSession): 2 usages
DrillFileSystem (org.apache.drill.exec.store.dfs.DrillFileSystem): 2 usages