Use of org.apache.hadoop.hive.metastore.IMetaStoreClient in project hive by apache.
The class GetColumnsOperation, method runInternal:
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    String schemaPattern = convertSchemaPattern(schemaName);
    String tablePattern = convertIdentifierPattern(tableName, true);
    Pattern columnPattern = null;
    if (columnName != null) {
      columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
    }
    List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
    Collections.sort(dbNames);
    Map<String, List<String>> db2Tabs = new HashMap<>();
    for (String dbName : dbNames) {
      List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
      Collections.sort(tableNames);
      db2Tabs.put(dbName, tableNames);
    }
    if (isAuthV2Enabled()) {
      List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName
          + ", tablePattern : " + tableName;
      authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
    }
    int maxBatchSize = SessionState.get().getConf().getIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
    for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
      String dbName = dbTabs.getKey();
      List<String> tableNames = dbTabs.getValue();
      for (Table table : new TableIterable(metastoreClient, dbName, tableNames, maxBatchSize)) {
        TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
        List<SQLPrimaryKey> primaryKeys =
            metastoreClient.getPrimaryKeys(new PrimaryKeysRequest(dbName, table.getTableName()));
        Set<String> pkColNames = new HashSet<>();
        for (SQLPrimaryKey key : primaryKeys) {
          pkColNames.add(key.getColumn_name().toLowerCase());
        }
        for (ColumnDescriptor column : schema.getColumnDescriptors()) {
          if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
            continue;
          }
          Object[] rowData = new Object[] {
              null, // TABLE_CAT
              table.getDbName(), // TABLE_SCHEM
              table.getTableName(), // TABLE_NAME
              column.getName(), // COLUMN_NAME
              column.getType().toJavaSQLType(), // DATA_TYPE
              column.getTypeName(), // TYPE_NAME
              column.getTypeDescriptor().getColumnSize(), // COLUMN_SIZE
              null, // BUFFER_LENGTH, unused
              column.getTypeDescriptor().getDecimalDigits(), // DECIMAL_DIGITS
              column.getType().getNumPrecRadix(), // NUM_PREC_RADIX
              pkColNames.contains(column.getName().toLowerCase())
                  ? DatabaseMetaData.columnNoNulls
                  : DatabaseMetaData.columnNullable, // NULLABLE
              column.getComment(), // REMARKS
              null, // COLUMN_DEF
              null, // SQL_DATA_TYPE
              null, // SQL_DATETIME_SUB
              null, // CHAR_OCTET_LENGTH
              column.getOrdinalPosition(), // ORDINAL_POSITION
              pkColNames.contains(column.getName().toLowerCase()) ? "NO" : "YES", // IS_NULLABLE
              null, // SCOPE_CATALOG
              null, // SCOPE_SCHEMA
              null, // SCOPE_TABLE
              null, // SOURCE_DATA_TYPE
              "NO" // IS_AUTO_INCREMENT
          };
          rowSet.addRow(rowData);
        }
      }
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
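For readers who want to try the same list-databases, list-tables, fetch-schema sequence outside of HiveServer2, here is a minimal standalone sketch. It is not part of the Hive sources; the class name ListAllColumns is made up, and it assumes a hive-site.xml on the classpath that points HiveConf at a running metastore:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class ListAllColumns {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // "*" matches every database and table, mirroring the converted
      // schema and table patterns used by GetColumnsOperation above.
      for (String db : client.getDatabases("*")) {
        for (String tbl : client.getTables(db, "*")) {
          List<FieldSchema> cols = client.getSchema(db, tbl);
          for (FieldSchema col : cols) {
            System.out.println(db + "." + tbl + "." + col.getName() + " : " + col.getType());
          }
        }
      }
    } finally {
      // Always release the Thrift connection to the metastore.
      client.close();
    }
  }
}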
Use of org.apache.hadoop.hive.metastore.IMetaStoreClient in project hive by apache.
The class GetCrossReferenceOperation, method runInternal:
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    ForeignKeysRequest fkReq = new ForeignKeysRequest(parentSchemaName, parentTableName,
        foreignSchemaName, foreignTableName);
    List<SQLForeignKey> fks = metastoreClient.getForeignKeys(fkReq);
    if (fks == null) {
      return;
    }
    for (SQLForeignKey fk : fks) {
      rowSet.addRow(new Object[] {
          parentCatalogName, // PKTABLE_CAT
          fk.getPktable_db(), // PKTABLE_SCHEM
          fk.getPktable_name(), // PKTABLE_NAME
          fk.getPkcolumn_name(), // PKCOLUMN_NAME
          foreignCatalogName, // FKTABLE_CAT
          fk.getFktable_db(), // FKTABLE_SCHEM
          fk.getFktable_name(), // FKTABLE_NAME
          fk.getFkcolumn_name(), // FKCOLUMN_NAME
          fk.getKey_seq(), // KEY_SEQ
          fk.getUpdate_rule(), // UPDATE_RULE
          fk.getDelete_rule(), // DELETE_RULE
          fk.getFk_name(), // FK_NAME
          fk.getPk_name(), // PK_NAME
          0 // DEFERRABILITY
      });
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
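A self-contained sketch of the same getForeignKeys call outside the operation class. It is not from the Hive sources; sales_db and orders are placeholder names, the class name is made up, and passing null on the parent side is assumed (as in JDBC getImportedKeys) to match any referenced table:

import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;

public class PrintForeignKeys {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // Parent db/table = the referenced (PK) side; foreign db/table = the
      // referencing (FK) side. Here: every key imported by sales_db.orders.
      ForeignKeysRequest req = new ForeignKeysRequest(null, null, "sales_db", "orders");
      List<SQLForeignKey> fks = client.getForeignKeys(req);
      if (fks != null) {
        for (SQLForeignKey fk : fks) {
          System.out.println(fk.getFk_name() + ": "
              + fk.getFktable_db() + "." + fk.getFktable_name() + "." + fk.getFkcolumn_name()
              + " -> "
              + fk.getPktable_db() + "." + fk.getPktable_name() + "." + fk.getPkcolumn_name());
        }
      }
    } finally {
      client.close();
    }
  }
}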
Use of org.apache.hadoop.hive.metastore.IMetaStoreClient in project hive by apache.
The class GetFunctionsOperation, method runInternal:
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  if (isAuthV2Enabled()) {
    // Get databases matching the schema pattern.
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    String schemaPattern = convertSchemaPattern(schemaName);
    List<String> matchingDbs;
    try {
      matchingDbs = metastoreClient.getDatabases(schemaPattern);
    } catch (TException e) {
      setState(OperationState.ERROR);
      throw new HiveSQLException(e);
    }
    // Authorize this call on the schema objects.
    List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs);
    String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
    authorizeMetaGets(HiveOperationType.GET_FUNCTIONS, privObjs, cmdStr);
  }
  try {
    if ((null == catalogName || "".equals(catalogName)) && (null == schemaName || "".equals(schemaName))) {
      Set<String> functionNames =
          FunctionRegistry.getFunctionNames(CLIServiceUtils.patternToRegex(functionName));
      for (String functionName : functionNames) {
        FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(functionName);
        Object[] rowData = new Object[] {
            null, // FUNCTION_CAT
            null, // FUNCTION_SCHEM
            functionInfo.getDisplayName(), // FUNCTION_NAME
            "", // REMARKS
            (functionInfo.isGenericUDTF()
                ? DatabaseMetaData.functionReturnsTable
                : DatabaseMetaData.functionNoTable), // FUNCTION_TYPE
            functionInfo.getClass().getCanonicalName() // SPECIFIC_NAME
        };
        rowSet.addRow(rowData);
      }
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
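Since this operation only consults the metastore for authorization and reads the function list from FunctionRegistry, a small local sketch can exercise the registry directly. This is not from the Hive sources; the class name is made up, and it assumes hive-exec on the classpath with the same FunctionRegistry signatures used in the snippet above:

import java.sql.DatabaseMetaData;

import org.apache.hadoop.hive.ql.exec.FunctionInfo;
import org.apache.hadoop.hive.ql.exec.FunctionRegistry;

public class ListDateFunctions {
  public static void main(String[] args) throws Exception {
    // getFunctionNames takes a regex, which is why the snippet above first
    // converts the SQL LIKE pattern with CLIServiceUtils.patternToRegex.
    for (String name : FunctionRegistry.getFunctionNames(".*date.*")) {
      FunctionInfo info = FunctionRegistry.getFunctionInfo(name);
      int type = info.isGenericUDTF()
          ? DatabaseMetaData.functionReturnsTable
          : DatabaseMetaData.functionNoTable;
      System.out.println(info.getDisplayName() + " (FUNCTION_TYPE=" + type + ")");
    }
  }
}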
Use of org.apache.hadoop.hive.metastore.IMetaStoreClient in project hive by apache.
The class GetTablesOperation, method runInternal:
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    String schemaPattern = convertSchemaPattern(schemaName);
    List<String> matchingDbs = metastoreClient.getDatabases(schemaPattern);
    if (isAuthV2Enabled()) {
      List<HivePrivilegeObject> privObjs = HivePrivilegeObjectUtils.getHivePrivDbObjects(matchingDbs);
      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName;
      authorizeMetaGets(HiveOperationType.GET_TABLES, privObjs, cmdStr);
    }
    String tablePattern = convertIdentifierPattern(tableName, true);
    for (TableMeta tableMeta : metastoreClient.getTableMeta(schemaPattern, tablePattern, tableTypeList)) {
      rowSet.addRow(new Object[] {
          DEFAULT_HIVE_CATALOG, // TABLE_CAT
          tableMeta.getDbName(), // TABLE_SCHEM
          tableMeta.getTableName(), // TABLE_NAME
          tableTypeMapping.mapToClientType(tableMeta.getTableType()), // TABLE_TYPE
          tableMeta.getComments(), // REMARKS
          null, // TYPE_CAT
          null, // TYPE_SCHEM
          null, // TYPE_NAME
          null, // SELF_REFERENCING_COL_NAME
          null // REF_GENERATION
      });
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
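A hedged sketch (not from the Hive sources; the class name is made up) of the underlying getTableMeta call: a single round trip that returns database, table name, table type, and comment for everything matching the patterns:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.TableMeta;

public class ListManagedTables {
  public static void main(String[] args) throws Exception {
    IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
    try {
      // "*" patterns match all databases and tables; the third argument
      // restricts results to the given table types.
      List<TableMeta> metas = client.getTableMeta("*", "*", Arrays.asList("MANAGED_TABLE"));
      for (TableMeta meta : metas) {
        System.out.println(meta.getDbName() + "." + meta.getTableName()
            + " [" + meta.getTableType() + "] " + meta.getComments());
      }
    } finally {
      client.close();
    }
  }
}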
Use of org.apache.hadoop.hive.metastore.IMetaStoreClient in project storm by apache.
The class HiveSetupUtil, method createDbAndTable:
public static void createDbAndTable(HiveConf conf, String databaseName, String tableName,
    List<String> partVals, String[] colNames, String[] colTypes, String[] partNames,
    String dbLocation) throws Exception {
  IMetaStoreClient client = new HiveMetaStoreClient(conf);
  try {
    Database db = new Database();
    db.setName(databaseName);
    db.setLocationUri(dbLocation);
    client.createDatabase(db);
    Table tbl = new Table();
    tbl.setDbName(databaseName);
    tbl.setTableName(tableName);
    tbl.setTableType(TableType.MANAGED_TABLE.toString());
    StorageDescriptor sd = new StorageDescriptor();
    sd.setCols(getTableColumns(colNames, colTypes));
    sd.setNumBuckets(1);
    sd.setLocation(dbLocation + Path.SEPARATOR + tableName);
    if (partNames != null && partNames.length != 0) {
      tbl.setPartitionKeys(getPartitionKeys(partNames));
    }
    tbl.setSd(sd);
    sd.setBucketCols(new ArrayList<String>(2));
    sd.setSerdeInfo(new SerDeInfo());
    sd.getSerdeInfo().setName(tbl.getTableName());
    sd.getSerdeInfo().setParameters(new HashMap<String, String>());
    sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1");
    sd.getSerdeInfo().setSerializationLib(OrcSerde.class.getName());
    sd.setInputFormat(OrcInputFormat.class.getName());
    sd.setOutputFormat(OrcOutputFormat.class.getName());
    Map<String, String> tableParams = new HashMap<String, String>();
    tbl.setParameters(tableParams);
    client.createTable(tbl);
    try {
      if (partVals != null && partVals.size() > 0) {
        addPartition(client, tbl, partVals);
      }
    } catch (AlreadyExistsException e) {
      // Ignore: the partition already exists.
    }
  } finally {
    client.close();
  }
}
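A hypothetical call site for the helper above. The database, table, column layout, and warehouse path are made up for illustration, and HiveSetupUtil is assumed to be importable (it lives in Storm's Hive test utilities):

import java.util.Arrays;

import org.apache.hadoop.hive.conf.HiveConf;

public class HiveSetupExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Creates test_db.user_events as an ORC-backed managed table with one
    // partition key "dt" and one pre-created partition dt=2024-01-01.
    HiveSetupUtil.createDbAndTable(conf,
        "test_db",                       // databaseName
        "user_events",                   // tableName
        Arrays.asList("2024-01-01"),     // partVals: one value per partition key
        new String[] { "id", "name" },   // colNames
        new String[] { "int", "string" },// colTypes
        new String[] { "dt" },           // partNames
        "/tmp/warehouse/test_db");       // dbLocation
  }
}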