Example 6 with TableType

use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.

the class GetTableTypesOperation method runInternal.

@Override
public void runInternal() throws HiveSQLException {
    setState(OperationState.RUNNING);
    LOG.info("Fetching table type metadata");
    if (isAuthV2Enabled()) {
        authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null);
    }
    try {
        for (TableType type : TableType.values()) {
            String tableType = tableTypeMapping.mapToClientType(type.toString());
            rowSet.addRow(new String[] { tableType });
            if (LOG.isDebugEnabled()) {
                String debugMessage = getDebugMessage("table type", RESULT_SET_SCHEMA);
                LOG.debug(debugMessage, tableType);
            }
        }
        if (LOG.isDebugEnabled() && rowSet.numRows() == 0) {
            LOG.debug("No table type metadata has been returned.");
        }
        setState(OperationState.FINISHED);
        LOG.info("Fetching table type metadata has been successfully finished");
    } catch (Exception e) {
        setState(OperationState.ERROR);
        throw new HiveSQLException(e);
    }
}
Also used : TableType(org.apache.hadoop.hive.metastore.TableType) HiveSQLException(org.apache.hive.service.cli.HiveSQLException)
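
For orientation, here is a minimal standalone sketch of the same pattern: iterating the TableType enum and emitting one row per constant. The identity mapping below is an assumption standing in for Hive's tableTypeMapping, whose actual output depends on the HiveServer2 table type mapping configured.

import org.apache.hadoop.hive.metastore.TableType;

public class ListTableTypes {
    public static void main(String[] args) {
        // TableType.values() covers constants such as MANAGED_TABLE,
        // EXTERNAL_TABLE and VIRTUAL_VIEW (the exact set varies by Hive version).
        for (TableType type : TableType.values()) {
            // Identity mapping; the real code calls tableTypeMapping.mapToClientType(...).
            String clientType = type.toString();
            System.out.println(clientType);
        }
    }
}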

Example 7 with TableType

use of org.apache.hadoop.hive.metastore.TableType in project presto by prestodb.

the class FileHiveMetastore method addPartitions.

@Override
public synchronized void addPartitions(String databaseName, String tableName, List<Partition> partitions) {
    requireNonNull(databaseName, "databaseName is null");
    requireNonNull(tableName, "tableName is null");
    requireNonNull(partitions, "partitions is null");
    Table table = getRequiredTable(databaseName, tableName);
    TableType tableType = TableType.valueOf(table.getTableType());
    checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType), "Invalid table type: %s", tableType);
    try {
        Map<Path, byte[]> schemaFiles = new LinkedHashMap<>();
        for (Partition partition : partitions) {
            verifiedPartition(table, partition);
            Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
            Path schemaPath = new Path(partitionMetadataDirectory, PRESTO_SCHEMA_FILE_NAME);
            if (metadataFileSystem.exists(schemaPath)) {
                throw new PrestoException(HIVE_METASTORE_ERROR, "Partition already exists");
            }
            byte[] schemaJson = partitionCodec.toJsonBytes(new PartitionMetadata(table, partition));
            schemaFiles.put(schemaPath, schemaJson);
        }
        Set<Path> createdFiles = new LinkedHashSet<>();
        try {
            for (Entry<Path, byte[]> entry : schemaFiles.entrySet()) {
                try (OutputStream outputStream = metadataFileSystem.create(entry.getKey())) {
                    createdFiles.add(entry.getKey());
                    outputStream.write(entry.getValue());
                } catch (IOException e) {
                    throw new PrestoException(HIVE_METASTORE_ERROR, "Could not write partition schema", e);
                }
            }
        } catch (Throwable e) {
            for (Path createdFile : createdFiles) {
                try {
                    metadataFileSystem.delete(createdFile, false);
                } catch (IOException ignored) {
                }
            }
            throw e;
        }
    } catch (IOException e) {
        throw new PrestoException(HIVE_METASTORE_ERROR, e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) LinkedHashSet(java.util.LinkedHashSet) Partition(com.facebook.presto.hive.metastore.Partition) Table(com.facebook.presto.hive.metastore.Table) TableType(org.apache.hadoop.hive.metastore.TableType) OutputStream(java.io.OutputStream) PrestoException(com.facebook.presto.spi.PrestoException) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap)
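
The guard at the top of this method is a reusable pattern: parse the metastore's stored string back into the enum, then restrict it to the kinds of tables that can hold partitions. A minimal sketch of that guard follows; the helper name requirePartitionable is hypothetical.

import java.util.EnumSet;
import org.apache.hadoop.hive.metastore.TableType;
import static org.apache.hadoop.hive.metastore.TableType.EXTERNAL_TABLE;
import static org.apache.hadoop.hive.metastore.TableType.MANAGED_TABLE;

final class TableTypeChecks {
    private TableTypeChecks() {
    }

    // Rejects types such as VIRTUAL_VIEW, mirroring the checkArgument guard
    // in addPartitions above. TableType.valueOf itself throws
    // IllegalArgumentException if the stored string is not a known constant.
    static TableType requirePartitionable(String storedType) {
        TableType tableType = TableType.valueOf(storedType);
        if (!EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType)) {
            throw new IllegalArgumentException("Invalid table type: " + tableType);
        }
        return tableType;
    }
}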

Example 8 with TableType

use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.

the class DDLTask method showTablesOrViews.

/**
 * Write a list of the tables/views in the database to a file.
 *
 * @param db
 *          The database in context.
 * @param showDesc
 *        A ShowTablesDesc for tables or views we're interested in.
 * @return 0 when execution succeeds, a value above 0 when it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showTablesOrViews(Hive db, ShowTablesDesc showDesc) throws HiveException {
    // get the tables/views for the desired pattern - populate the output stream
    List<String> tablesOrViews = null;
    String dbName = showDesc.getDbName();
    // if null, all tables/views are returned
    String pattern = showDesc.getPattern();
    String resultsFile = showDesc.getResFile();
    // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs
    TableType type = showDesc.getType();
    if (!db.databaseExists(dbName)) {
        throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName);
    }
    LOG.debug("pattern: {}", pattern);
    tablesOrViews = db.getTablesByType(dbName, pattern, type);
    LOG.debug("results : {}", tablesOrViews.size());
    // write the results in the file
    DataOutputStream outStream = null;
    try {
        Path resFile = new Path(resultsFile);
        FileSystem fs = resFile.getFileSystem(conf);
        outStream = fs.create(resFile);
        SortedSet<String> sortedSet = new TreeSet<String>(tablesOrViews);
        formatter.showTables(outStream, sortedSet);
        outStream.close();
        outStream = null;
    } catch (Exception e) {
        throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName);
    } finally {
        IOUtils.closeStream(outStream);
    }
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) TableType(org.apache.hadoop.hive.metastore.TableType) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) TreeSet(java.util.TreeSet) FileSystem(org.apache.hadoop.fs.FileSystem) AlreadyExistsException(org.apache.hadoop.hive.metastore.api.AlreadyExistsException) InvalidOperationException(org.apache.hadoop.hive.metastore.api.InvalidOperationException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) NoSuchObjectException(org.apache.hadoop.hive.metastore.api.NoSuchObjectException) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) URISyntaxException(java.net.URISyntaxException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) InvalidObjectException(org.apache.hadoop.hive.metastore.api.InvalidObjectException) SQLException(java.sql.SQLException) FileNotFoundException(java.io.FileNotFoundException) HiveAuthzPluginException(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException) InvalidTableException(org.apache.hadoop.hive.ql.metadata.InvalidTableException)
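
Stripped of the DDLTask plumbing, the core of this method is a single metastore call followed by a sort. A minimal sketch, assuming an initialized org.apache.hadoop.hive.ql.metadata.Hive handle; the class name ShowViewsSketch and the database name are illustrative.

import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;

final class ShowViewsSketch {
    // A null pattern returns all names; the TableType argument narrows the
    // result, and a null type would return tables and views alike, matching
    // the comments in showTablesOrViews above.
    static SortedSet<String> sortedViewNames(Hive db, String dbName) throws HiveException {
        List<String> views = db.getTablesByType(dbName, null, TableType.VIRTUAL_VIEW);
        // DDLTask sorts before handing the names to the formatter.
        return new TreeSet<>(views);
    }
}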

Example 9 with TableType

use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.

the class ShowTablesAnalyzer method analyzeInternal.

@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
    if (root.getChildCount() > 4) {
        throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(root.toStringTree()));
    }
    ctx.setResFile(ctx.getLocalTmpPath());
    String dbName = SessionState.get().getCurrentDatabase();
    String tableNames = null;
    TableType tableTypeFilter = null;
    boolean isExtended = false;
    for (int i = 0; i < root.getChildCount(); i++) {
        ASTNode child = (ASTNode) root.getChild(i);
        if (child.getType() == HiveParser.TOK_FROM) {
            // Specifies a DB
            dbName = unescapeIdentifier(root.getChild(++i).getText());
            db.validateDatabaseExists(dbName);
        } else if (child.getType() == HiveParser.TOK_TABLE_TYPE) {
            // Filter on table type
            String tableType = unescapeIdentifier(child.getChild(0).getText());
            if (!"table_type".equalsIgnoreCase(tableType)) {
                throw new SemanticException("SHOW TABLES statement only allows equality filter on table_type value");
            }
            tableTypeFilter = TableType.valueOf(unescapeSQLString(child.getChild(1).getText()));
        } else if (child.getType() == HiveParser.KW_EXTENDED) {
            // Include table type
            isExtended = true;
        } else {
            // Uses a pattern
            tableNames = unescapeSQLString(child.getText());
        }
    }
    inputs.add(new ReadEntity(getDatabase(dbName)));
    ShowTablesDesc desc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended);
    Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
    rootTasks.add(task);
    task.setFetchSource(true);
    setFetchTask(createFetchTask(desc.getSchema()));
}
Also used : ReadEntity(org.apache.hadoop.hive.ql.hooks.ReadEntity) TableType(org.apache.hadoop.hive.metastore.TableType) DDLWork(org.apache.hadoop.hive.ql.ddl.DDLWork) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
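
The table_type filter value arrives as a quoted SQL literal (hence unescapeSQLString) and is converted with TableType.valueOf, which is case-sensitive and throws IllegalArgumentException for unknown names. A defensive version of that conversion, as a sketch; the helper name parseTableTypeFilter is hypothetical.

import org.apache.hadoop.hive.metastore.TableType;

final class TableTypeFilters {
    // Wraps TableType.valueOf so an unknown literal produces a clearer error
    // than the bare IllegalArgumentException thrown by Enum.valueOf.
    static TableType parseTableTypeFilter(String literal) {
        try {
            return TableType.valueOf(literal);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException(
                "Unknown table type '" + literal + "'; expected one of "
                    + java.util.Arrays.toString(TableType.values()), e);
        }
    }
}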

Example 10 with TableType

use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.

the class HiveStrictManagedMigration method processTable.

boolean processTable(Database dbObj, String tableName, boolean modifyLocation) {
    try {
        String dbName = dbObj.getName();
        LOG.debug("Processing table {}", getQualifiedName(dbName, tableName));
        Table tableObj = hms.get().getTable(dbName, tableName);
        TableType tableType = TableType.valueOf(tableObj.getTableType());
        TableMigrationOption migrationOption = runOptions.migrationOption;
        if (migrationOption == TableMigrationOption.AUTOMATIC) {
            migrationOption = determineMigrationTypeAutomatically(tableObj, tableType, ownerName, conf, hms.get(), null);
        }
        boolean failedValidationCheck = migrateTable(tableObj, tableType, migrationOption, runOptions.dryRun, hiveUpdater.get(), hms.get(), conf);
        if (failedValidationCheck) {
            this.failedValidationChecks.set(true);
            return true;
        }
        String tablePathString = tableObj.getSd().getLocation();
        if (StringUtils.isEmpty(tablePathString)) {
            // Databases such as sysdb / information_schema have a null location
            // and should not be moved.
            return true;
        }
        Path tablePath = new Path(tablePathString);
        boolean shouldMoveTable = modifyLocation && ((MANAGED_TABLE.name().equals(tableObj.getTableType()) && runOptions.shouldModifyManagedTableLocation) || (EXTERNAL_TABLE.name().equals(tableObj.getTableType()) && runOptions.shouldMoveExternal));
        if (shouldMoveTable && shouldModifyTableLocation(dbObj, tableObj)) {
            Path newTablePath = wh.get().getDnsPath(new Path(getDefaultDbPathManagedOrExternal(dbName), MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
            moveTableData(dbObj, tableObj, newTablePath);
            if (!runOptions.dryRun) {
                // File ownership/permission checks should be done on the new table path.
                tablePath = newTablePath;
            }
        }
        if (MANAGED_TABLE.equals(tableType)) {
            if (runOptions.shouldModifyManagedTableOwner || runOptions.shouldModifyManagedTablePermissions) {
                FileSystem fs = tablePath.getFileSystem(conf);
                if (isHdfs(fs)) {
                    // TODO: what about partitions not in the default location?
                    checkAndSetFileOwnerPermissions(fs, tablePath, ownerName, groupName, dirPerms, filePerms, runOptions.dryRun, true);
                }
            }
        }
    } catch (Exception ex) {
        LOG.error("Error processing table " + getQualifiedName(dbObj.getName(), tableName), ex);
        return false;
    }
    return true;
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hive.metastore.api.Table) TableType(org.apache.hadoop.hive.metastore.TableType) FileSystem(org.apache.hadoop.fs.FileSystem) MetaException(org.apache.hadoop.hive.metastore.api.MetaException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) TException(org.apache.thrift.TException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
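
Note the two comparison styles inside processTable: the move decision compares the raw metastore string against the enum name (MANAGED_TABLE.name().equals(...)), while the ownership block compares parsed enums (MANAGED_TABLE.equals(tableType)). A minimal sketch of the trade-off, with tableObj standing in for the metastore Table from the example above:

import org.apache.hadoop.hive.metastore.TableType;
import org.apache.hadoop.hive.metastore.api.Table;

final class TableTypeComparisons {
    static boolean isManaged(Table tableObj) {
        String stored = tableObj.getTableType();            // e.g. "MANAGED_TABLE"
        // The name() comparison tolerates unknown strings (the result is simply
        // false); TableType.valueOf would throw IllegalArgumentException for
        // them, which makes the name() form the safer guard before parsing.
        return TableType.MANAGED_TABLE.name().equals(stored);
    }
}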

Aggregations

TableType (org.apache.hadoop.hive.metastore.TableType) 10
Path (org.apache.hadoop.fs.Path) 5
TableAlreadyExistsException (com.facebook.presto.hive.TableAlreadyExistsException) 3
SchemaTableName (com.facebook.presto.spi.SchemaTableName) 3
File (java.io.File) 3
IOException (java.io.IOException) 3
Table (org.apache.hadoop.hive.metastore.api.Table) 3
PrestoException (com.facebook.presto.spi.PrestoException) 2
ExecutionException (java.util.concurrent.ExecutionException) 2
FileSystem (org.apache.hadoop.fs.FileSystem) 2
MetaException (org.apache.hadoop.hive.metastore.api.MetaException) 2
PrincipalPrivilegeSet (org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet) 2
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 2
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 2
Partition (com.facebook.presto.hive.metastore.Partition) 1
Table (com.facebook.presto.hive.metastore.Table) 1
ImmutableList (com.google.common.collect.ImmutableList) 1
DataOutputStream (java.io.DataOutputStream) 1
FileNotFoundException (java.io.FileNotFoundException) 1
OutputStream (java.io.OutputStream) 1