Use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.
The class GetTableTypesOperation, method runInternal:
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  LOG.info("Fetching table type metadata");
  if (isAuthV2Enabled()) {
    authorizeMetaGets(HiveOperationType.GET_TABLETYPES, null);
  }
  try {
    for (TableType type : TableType.values()) {
      String tableType = tableTypeMapping.mapToClientType(type.toString());
      rowSet.addRow(new String[] { tableType });
      if (LOG.isDebugEnabled()) {
        String debugMessage = getDebugMessage("table type", RESULT_SET_SCHEMA);
        LOG.debug(debugMessage, tableType);
      }
    }
    if (LOG.isDebugEnabled() && rowSet.numRows() == 0) {
      LOG.debug("No table type metadata has been returned.");
    }
    setState(OperationState.FINISHED);
    LOG.info("Fetching table type metadata has been successfully finished");
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
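The result set is filled with one row per TableType constant, each translated through the session's TableTypeMapping. A minimal sketch of that loop, assuming an identity mapping for illustration rather than Hive's actual mapping strategy:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.TableType;

public class TableTypeListing {
  // Hypothetical stand-in for the session's TableTypeMapping; the real
  // client-type names depend on the configured mapping, so the identity
  // mapping here is only an assumption for illustration.
  static String mapToClientType(String metastoreType) {
    return metastoreType;
  }

  public static void main(String[] args) {
    // Mirrors the loop in runInternal(): one result row per metastore table type.
    List<String> rows = new ArrayList<>();
    for (TableType type : TableType.values()) {
      rows.add(mapToClientType(type.toString()));
    }
    rows.forEach(System.out::println);
  }
}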
Use of org.apache.hadoop.hive.metastore.TableType in project presto by prestodb.
The class FileHiveMetastore, method addPartitions:
@Override
public synchronized void addPartitions(String databaseName, String tableName, List<Partition> partitions) {
  requireNonNull(databaseName, "databaseName is null");
  requireNonNull(tableName, "tableName is null");
  requireNonNull(partitions, "partitions is null");
  Table table = getRequiredTable(databaseName, tableName);
  TableType tableType = TableType.valueOf(table.getTableType());
  checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType), "Invalid table type: %s", tableType);
  try {
    Map<Path, byte[]> schemaFiles = new LinkedHashMap<>();
    for (Partition partition : partitions) {
      verifiedPartition(table, partition);
      Path partitionMetadataDirectory = getPartitionMetadataDirectory(table, partition.getValues());
      Path schemaPath = new Path(partitionMetadataDirectory, PRESTO_SCHEMA_FILE_NAME);
      if (metadataFileSystem.exists(schemaPath)) {
        throw new PrestoException(HIVE_METASTORE_ERROR, "Partition already exists");
      }
      byte[] schemaJson = partitionCodec.toJsonBytes(new PartitionMetadata(table, partition));
      schemaFiles.put(schemaPath, schemaJson);
    }
    Set<Path> createdFiles = new LinkedHashSet<>();
    try {
      for (Entry<Path, byte[]> entry : schemaFiles.entrySet()) {
        try (OutputStream outputStream = metadataFileSystem.create(entry.getKey())) {
          createdFiles.add(entry.getKey());
          outputStream.write(entry.getValue());
        } catch (IOException e) {
          throw new PrestoException(HIVE_METASTORE_ERROR, "Could not write partition schema", e);
        }
      }
    } catch (Throwable e) {
      for (Path createdFile : createdFiles) {
        try {
          metadataFileSystem.delete(createdFile, false);
        } catch (IOException ignored) {
        }
      }
      throw e;
    }
  } catch (IOException e) {
    throw new PrestoException(HIVE_METASTORE_ERROR, e);
  }
}
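Before any partition metadata is written, the stored table type string is parsed back into the TableType enum and restricted to managed and external tables. A small sketch of that guard in isolation; the class and method names below are illustrative, not part of Presto:

import static org.apache.hadoop.hive.metastore.TableType.EXTERNAL_TABLE;
import static org.apache.hadoop.hive.metastore.TableType.MANAGED_TABLE;

import java.util.EnumSet;
import org.apache.hadoop.hive.metastore.TableType;

public class PartitionableTableCheck {
  // Sketch of the guard used in addPartitions(): only managed and external
  // tables may receive partitions. The string comes straight from the stored
  // table metadata, so valueOf() throws IllegalArgumentException for a value
  // that is not an enum constant; the caller decides how to surface that.
  static TableType requirePartitionable(String storedTableType) {
    TableType tableType = TableType.valueOf(storedTableType);
    if (!EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE).contains(tableType)) {
      throw new IllegalArgumentException("Invalid table type: " + tableType);
    }
    return tableType;
  }

  public static void main(String[] args) {
    System.out.println(requirePartitionable("MANAGED_TABLE")); // accepted
    System.out.println(requirePartitionable("VIRTUAL_VIEW"));  // throws
  }
}

Because the check runs before the schema files are built, an unexpected table type fails fast and no partition metadata is created.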
Use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.
The class DDLTask, method showTablesOrViews:
/**
 * Write a list of the tables/views in the database to a file.
 *
 * @param db
 *          The database in context.
 * @param showDesc
 *          A ShowTablesDesc for tables or views we're interested in.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int showTablesOrViews(Hive db, ShowTablesDesc showDesc) throws HiveException {
  // get the tables/views for the desired pattern - populate the output stream
  List<String> tablesOrViews = null;
  String dbName = showDesc.getDbName();
  // if null, all tables/views are returned
  String pattern = showDesc.getPattern();
  String resultsFile = showDesc.getResFile();
  // null for tables, VIRTUAL_VIEW for views, MATERIALIZED_VIEW for MVs
  TableType type = showDesc.getType();
  if (!db.databaseExists(dbName)) {
    throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, dbName);
  }
  LOG.debug("pattern: {}", pattern);
  tablesOrViews = db.getTablesByType(dbName, pattern, type);
  LOG.debug("results : {}", tablesOrViews.size());
  // write the results in the file
  DataOutputStream outStream = null;
  try {
    Path resFile = new Path(resultsFile);
    FileSystem fs = resFile.getFileSystem(conf);
    outStream = fs.create(resFile);
    SortedSet<String> sortedSet = new TreeSet<String>(tablesOrViews);
    formatter.showTables(outStream, sortedSet);
    outStream.close();
    outStream = null;
  } catch (Exception e) {
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR, "in database " + dbName);
  } finally {
    IOUtils.closeStream(outStream);
  }
  return 0;
}
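Here the TableType argument acts as an optional filter: null returns every table and view, while VIRTUAL_VIEW or MATERIALIZED_VIEW narrows the listing. The metastore applies the filter server-side in getTablesByType; the sketch below, on made-up in-memory data, only illustrates those semantics:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.hadoop.hive.metastore.TableType;

public class ShowTablesFilter {
  // Illustrates the filter semantics of showTablesOrViews(): a null TableType
  // means "all tables and views", otherwise only names whose stored type
  // matches are returned. The sample data below is invented.
  static List<String> filterByType(Map<String, TableType> tables, TableType type) {
    return tables.entrySet().stream()
        .filter(e -> type == null || e.getValue() == type)
        .map(Map.Entry::getKey)
        .sorted()
        .collect(Collectors.toList());
  }

  public static void main(String[] args) {
    Map<String, TableType> tables = Map.of(
        "orders", TableType.MANAGED_TABLE,
        "orders_v", TableType.VIRTUAL_VIEW,
        "orders_mv", TableType.MATERIALIZED_VIEW);
    System.out.println(filterByType(tables, null));                   // all three names
    System.out.println(filterByType(tables, TableType.VIRTUAL_VIEW)); // views only
  }
}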
Use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.
The class ShowTablesAnalyzer, method analyzeInternal:
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  if (root.getChildCount() > 4) {
    throw new SemanticException(ErrorMsg.INVALID_AST_TREE.getMsg(root.toStringTree()));
  }
  ctx.setResFile(ctx.getLocalTmpPath());
  String dbName = SessionState.get().getCurrentDatabase();
  String tableNames = null;
  TableType tableTypeFilter = null;
  boolean isExtended = false;
  for (int i = 0; i < root.getChildCount(); i++) {
    ASTNode child = (ASTNode) root.getChild(i);
    if (child.getType() == HiveParser.TOK_FROM) {
      // Specifies a DB
      dbName = unescapeIdentifier(root.getChild(++i).getText());
      db.validateDatabaseExists(dbName);
    } else if (child.getType() == HiveParser.TOK_TABLE_TYPE) {
      // Filter on table type
      String tableType = unescapeIdentifier(child.getChild(0).getText());
      if (!"table_type".equalsIgnoreCase(tableType)) {
        throw new SemanticException("SHOW TABLES statement only allows equality filter on table_type value");
      }
      tableTypeFilter = TableType.valueOf(unescapeSQLString(child.getChild(1).getText()));
    } else if (child.getType() == HiveParser.KW_EXTENDED) {
      // Include table type
      isExtended = true;
    } else {
      // Uses a pattern
      tableNames = unescapeSQLString(child.getText());
    }
  }
  inputs.add(new ReadEntity(getDatabase(dbName)));
  ShowTablesDesc desc = new ShowTablesDesc(ctx.getResFile(), dbName, tableNames, tableTypeFilter, isExtended);
  Task<DDLWork> task = TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc));
  rootTasks.add(task);
  task.setFetchSource(true);
  setFetchTask(createFetchTask(desc.getSchema()));
}
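The table_type literal from the SHOW TABLES filter is handed straight to TableType.valueOf, so it must match an enum constant name exactly. A sketch of that conversion with a friendlier error message; the helper and class names are made up for illustration:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.TableType;

public class TableTypeFilterParsing {
  // analyzeInternal() calls TableType.valueOf() directly, so the literal has
  // to be an exact enum constant name such as 'MANAGED_TABLE'. This variant
  // additionally reports the allowed values when the literal is invalid.
  static TableType parseTableTypeLiteral(String literal) {
    try {
      return TableType.valueOf(literal);
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(
          "Unknown table type '" + literal + "', expected one of "
              + Arrays.toString(TableType.values()), e);
    }
  }

  public static void main(String[] args) {
    System.out.println(parseTableTypeLiteral("EXTERNAL_TABLE"));
    System.out.println(parseTableTypeLiteral("external")); // throws with a helpful message
  }
}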
Use of org.apache.hadoop.hive.metastore.TableType in project hive by apache.
The class HiveStrictManagedMigration, method processTable:
boolean processTable(Database dbObj, String tableName, boolean modifyLocation) {
  try {
    String dbName = dbObj.getName();
    LOG.debug("Processing table {}", getQualifiedName(dbName, tableName));
    Table tableObj = hms.get().getTable(dbName, tableName);
    TableType tableType = TableType.valueOf(tableObj.getTableType());
    TableMigrationOption migrationOption = runOptions.migrationOption;
    if (migrationOption == TableMigrationOption.AUTOMATIC) {
      migrationOption = determineMigrationTypeAutomatically(tableObj, tableType, ownerName, conf, hms.get(), null);
    }
    boolean failedValidationCheck = migrateTable(tableObj, tableType, migrationOption, runOptions.dryRun, hiveUpdater.get(), hms.get(), conf);
    if (failedValidationCheck) {
      this.failedValidationChecks.set(true);
      return true;
    }
    String tablePathString = tableObj.getSd().getLocation();
    if (StringUtils.isEmpty(tablePathString)) {
      // encounter sysdb / information_schema databases. These should not be moved, they have null location.
      return true;
    }
    Path tablePath = new Path(tablePathString);
    boolean shouldMoveTable = modifyLocation
        && ((MANAGED_TABLE.name().equals(tableObj.getTableType()) && runOptions.shouldModifyManagedTableLocation)
            || (EXTERNAL_TABLE.name().equals(tableObj.getTableType()) && runOptions.shouldMoveExternal));
    if (shouldMoveTable && shouldModifyTableLocation(dbObj, tableObj)) {
      Path newTablePath = wh.get().getDnsPath(
          new Path(getDefaultDbPathManagedOrExternal(dbName), MetaStoreUtils.encodeTableName(tableName.toLowerCase())));
      moveTableData(dbObj, tableObj, newTablePath);
      if (!runOptions.dryRun) {
        // File ownership/permission checks should be done on the new table path.
        tablePath = newTablePath;
      }
    }
    if (MANAGED_TABLE.equals(tableType)) {
      if (runOptions.shouldModifyManagedTableOwner || runOptions.shouldModifyManagedTablePermissions) {
        FileSystem fs = tablePath.getFileSystem(conf);
        if (isHdfs(fs)) {
          // TODO: what about partitions not in the default location?
          checkAndSetFileOwnerPermissions(fs, tablePath, ownerName, groupName, dirPerms, filePerms, runOptions.dryRun, true);
        }
      }
    }
  } catch (Exception ex) {
    LOG.error("Error processing table " + getQualifiedName(dbObj.getName(), tableName), ex);
    return false;
  }
  return true;
}
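The method checks the table type in two equivalent ways: against the raw metastore string via MANAGED_TABLE.name() and against the parsed enum via MANAGED_TABLE.equals(tableType). A small sketch showing that both checks agree once the stored string has been converted with valueOf; the stored value below is made up:

import static org.apache.hadoop.hive.metastore.TableType.MANAGED_TABLE;

import org.apache.hadoop.hive.metastore.TableType;

public class TableTypeComparisons {
  // processTable() mixes two styles of check on the same table: comparing the
  // raw metastore string against MANAGED_TABLE.name(), and comparing the
  // parsed TableType enum value. Both yield the same answer.
  public static void main(String[] args) {
    String storedType = "MANAGED_TABLE";        // assumed value of Table.getTableType()
    TableType tableType = TableType.valueOf(storedType);

    boolean byName = MANAGED_TABLE.name().equals(storedType); // string comparison
    boolean byEnum = MANAGED_TABLE.equals(tableType);         // enum comparison

    System.out.println(byName + " " + byEnum);  // both true for a managed table
  }
}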