Use of org.apache.hive.service.cli.TableSchema in project hive by apache.
The class OperationManager, method getOperationLogRowSet.
public RowSet getOperationLogRowSet(OperationHandle opHandle, FetchOrientation orientation,
    long maxRows, HiveConf hConf) throws HiveSQLException {
  TableSchema tableSchema = new TableSchema(getLogSchema());
  RowSet rowSet =
      RowSetFactory.create(tableSchema, getOperation(opHandle).getProtocolVersion(), false);
  if (!hConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
    LOG.warn("Try to get operation log when hive.server2.logging.operation.enabled is false,"
        + " no log will be returned.");
    return rowSet;
  }
  // get the OperationLog object from the operation
  OperationLog operationLog = getOperation(opHandle).getOperationLog();
  if (operationLog == null) {
    throw new HiveSQLException("Couldn't find log associated with operation handle: " + opHandle);
  }
  // read logs
  List<String> logs;
  try {
    logs = operationLog.readOperationLog(isFetchFirst(orientation), maxRows);
  } catch (SQLException e) {
    throw new HiveSQLException(e.getMessage(), e.getCause());
  }
  // convert logs to RowSet
  for (String log : logs) {
    rowSet.addRow(new String[] { log });
  }
  return rowSet;
}
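The getLogSchema() helper is not shown in this excerpt. A minimal sketch of what it plausibly looks like, assuming the metastore Thrift types Schema and FieldSchema; the column name "operation_log" is an assumption, not confirmed by the excerpt:

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Schema;

// Hypothetical reconstruction of getLogSchema(): a one-column schema so
// that each log line maps onto a single-cell row in the RowSet above.
private Schema getLogSchema() {
  Schema schema = new Schema();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("operation_log");  // assumed column name
  fieldSchema.setType("string");
  schema.addToFieldSchemas(fieldSchema);
  return schema;
}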
Use of org.apache.hive.service.cli.TableSchema in project hive by apache.
The class GetColumnsOperation, method runInternal.
@Override
public void runInternal() throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient();
    String schemaPattern = convertSchemaPattern(schemaName);
    String tablePattern = convertIdentifierPattern(tableName, true);
    Pattern columnPattern = null;
    if (columnName != null) {
      columnPattern = Pattern.compile(convertIdentifierPattern(columnName, false));
    }
    List<String> dbNames = metastoreClient.getDatabases(schemaPattern);
    Collections.sort(dbNames);
    Map<String, List<String>> db2Tabs = new HashMap<>();
    for (String dbName : dbNames) {
      List<String> tableNames = metastoreClient.getTables(dbName, tablePattern);
      Collections.sort(tableNames);
      db2Tabs.put(dbName, tableNames);
    }
    if (isAuthV2Enabled()) {
      List<HivePrivilegeObject> privObjs = getPrivObjs(db2Tabs);
      String cmdStr = "catalog : " + catalogName + ", schemaPattern : " + schemaName
          + ", tablePattern : " + tableName;
      authorizeMetaGets(HiveOperationType.GET_COLUMNS, privObjs, cmdStr);
    }
    int maxBatchSize = SessionState.get().getConf().getIntVar(ConfVars.METASTORE_BATCH_RETRIEVE_MAX);
    for (Entry<String, List<String>> dbTabs : db2Tabs.entrySet()) {
      String dbName = dbTabs.getKey();
      List<String> tableNames = dbTabs.getValue();
      for (Table table : new TableIterable(metastoreClient, dbName, tableNames, maxBatchSize)) {
        TableSchema schema = new TableSchema(metastoreClient.getSchema(dbName, table.getTableName()));
        List<SQLPrimaryKey> primaryKeys =
            metastoreClient.getPrimaryKeys(new PrimaryKeysRequest(dbName, table.getTableName()));
        Set<String> pkColNames = new HashSet<>();
        for (SQLPrimaryKey key : primaryKeys) {
          pkColNames.add(key.getColumn_name().toLowerCase());
        }
        for (ColumnDescriptor column : schema.getColumnDescriptors()) {
          if (columnPattern != null && !columnPattern.matcher(column.getName()).matches()) {
            continue;
          }
          Object[] rowData = new Object[] {
              null,                                           // TABLE_CAT
              table.getDbName(),                              // TABLE_SCHEM
              table.getTableName(),                           // TABLE_NAME
              column.getName(),                               // COLUMN_NAME
              column.getType().toJavaSQLType(),               // DATA_TYPE
              column.getTypeName(),                           // TYPE_NAME
              column.getTypeDescriptor().getColumnSize(),     // COLUMN_SIZE
              null,                                           // BUFFER_LENGTH, unused
              column.getTypeDescriptor().getDecimalDigits(),  // DECIMAL_DIGITS
              column.getType().getNumPrecRadix(),             // NUM_PREC_RADIX
              pkColNames.contains(column.getName().toLowerCase())
                  ? DatabaseMetaData.columnNoNulls
                  : DatabaseMetaData.columnNullable,          // NULLABLE
              column.getComment(),                            // REMARKS
              null,                                           // COLUMN_DEF
              null,                                           // SQL_DATA_TYPE
              null,                                           // SQL_DATETIME_SUB
              null,                                           // CHAR_OCTET_LENGTH
              column.getOrdinalPosition(),                    // ORDINAL_POSITION
              pkColNames.contains(column.getName().toLowerCase()) ? "NO" : "YES",  // IS_NULLABLE
              null,                                           // SCOPE_CATALOG
              null,                                           // SCOPE_SCHEMA
              null,                                           // SCOPE_TABLE
              null,                                           // SOURCE_DATA_TYPE
              "NO"                                            // IS_AUTO_INCREMENT
          };
          rowSet.addRow(rowData);
        }
      }
    }
    setState(OperationState.FINISHED);
  } catch (Exception e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException(e);
  }
}
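The rowData array above follows the column order that java.sql.DatabaseMetaData.getColumns() mandates, which is why the unused JDBC columns are padded with nulls. A minimal client-side sketch of how these rows surface through plain JDBC; the connection URL and the patterns are placeholders:

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class GetColumnsExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default")) {
      DatabaseMetaData meta = conn.getMetaData();
      // catalog=null, schemaPattern="default", tableNamePattern="%", columnNamePattern="%"
      try (ResultSet rs = meta.getColumns(null, "default", "%", "%")) {
        while (rs.next()) {
          System.out.printf("%s.%s.%s : %s (nullable=%s)%n",
              rs.getString("TABLE_SCHEM"),
              rs.getString("TABLE_NAME"),
              rs.getString("COLUMN_NAME"),
              rs.getString("TYPE_NAME"),
              rs.getString("IS_NULLABLE"));
        }
      }
    }
  }
}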
Use of org.apache.hive.service.cli.TableSchema in project hive by apache.
The class HiveQueryResultSet, method retrieveSchema.
/**
 * Retrieve schema from the server.
 */
private void retrieveSchema() throws SQLException {
  try {
    TGetResultSetMetadataReq metadataReq = new TGetResultSetMetadataReq(stmtHandle);
    // TODO need session handle
    TGetResultSetMetadataResp metadataResp = client.GetResultSetMetadata(metadataReq);
    Utils.verifySuccess(metadataResp.getStatus());
    StringBuilder namesSb = new StringBuilder();
    StringBuilder typesSb = new StringBuilder();
    TTableSchema schema = metadataResp.getSchema();
    if (schema == null || !schema.isSetColumns()) {
      // TODO: should probably throw an exception here.
      return;
    }
    setSchema(new TableSchema(schema));
    List<TColumnDesc> columns = schema.getColumns();
    for (int pos = 0; pos < schema.getColumnsSize(); pos++) {
      if (pos != 0) {
        namesSb.append(",");
        typesSb.append(",");
      }
      String columnName = columns.get(pos).getColumnName();
      columnNames.add(columnName);
      normalizedColumnNames.add(columnName.toLowerCase());
      TPrimitiveTypeEntry primitiveTypeEntry =
          columns.get(pos).getTypeDesc().getTypes().get(0).getPrimitiveEntry();
      String columnTypeName = TYPE_NAMES.get(primitiveTypeEntry.getType());
      columnTypes.add(columnTypeName);
      columnAttributes.add(getColumnAttributes(primitiveTypeEntry));
    }
  } catch (SQLException eS) {
    // rethrow the SQLException as is
    throw eS;
  } catch (Exception ex) {
    ex.printStackTrace();
    throw new SQLException("Could not create ResultSet: " + ex.getMessage(), ex);
  }
}
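On the application side, the column names and types cached here are what back the standard ResultSetMetaData accessors. A minimal usage sketch; the connection URL and the query are placeholders:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.Statement;

public class ResultSetMetadataExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SELECT * FROM src")) {
      ResultSetMetaData md = rs.getMetaData();
      // Each call below is served from the names/types retrieveSchema() cached.
      for (int i = 1; i <= md.getColumnCount(); i++) {
        System.out.println(md.getColumnName(i) + " : " + md.getColumnTypeName(i));
      }
    }
  }
}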
Use of org.apache.hive.service.cli.TableSchema in project hive by apache.
The class SQLOperation, method prepare.
/**
 * Compile the query and extract metadata.
 *
 * @throws HiveSQLException
 */
public void prepare(QueryState queryState) throws HiveSQLException {
  setState(OperationState.RUNNING);
  try {
    driver = DriverFactory.newDriver(queryState, getParentSession().getUserName(), queryInfo);
    // queryTimeout == 0 means no timeout
    if (queryTimeout > 0) {
      timeoutExecutor = new ScheduledThreadPoolExecutor(1);
      Runnable timeoutTask = new Runnable() {
        @Override
        public void run() {
          try {
            String queryId = queryState.getQueryId();
            LOG.info("Query timed out after: " + queryTimeout
                + " seconds. Cancelling the execution now: " + queryId);
            SQLOperation.this.cancel(OperationState.TIMEDOUT);
          } catch (HiveSQLException e) {
            LOG.error("Error cancelling the query after timeout: " + queryTimeout + " seconds", e);
          } finally {
            // Stop the timeout executor; this task only ever runs once.
            timeoutExecutor.shutdown();
          }
        }
      };
      timeoutExecutor.schedule(timeoutTask, queryTimeout, TimeUnit.SECONDS);
    }
    queryInfo.setQueryDisplay(driver.getQueryDisplay());
    // set the operation handle information in Driver, so that thrift API users
    // can use the operation handle they receive, to look up query information in
    // Yarn ATS
    String guid64 = Base64.encodeBase64URLSafeString(
        getHandle().getHandleIdentifier().toTHandleIdentifier().getGuid()).trim();
    driver.setOperationId(guid64);
    // In Hive server mode, we are not able to retry in the FetchTask
    // case, when calling fetch queries since execute() has returned.
    // For now, we disable the test attempts.
    response = driver.compileAndRespond(statement);
    if (0 != response.getResponseCode()) {
      throw toSQLException("Error while compiling statement", response);
    }
    mResultSchema = driver.getSchema();
    // "explain" is an exception for now
    if (driver.getPlan().getFetchTask() != null) {
      // Schema has to be set
      if (mResultSchema == null || !mResultSchema.isSetFieldSchemas()) {
        throw new HiveSQLException("Error compiling query: Schema and FieldSchema "
            + "should be set when query plan has a FetchTask");
      }
      resultSchema = new TableSchema(mResultSchema);
      setHasResultSet(true);
    } else {
      setHasResultSet(false);
    }
    // TODO explain should use a FetchTask for reading
    for (Task<? extends Serializable> task : driver.getPlan().getRootTasks()) {
      if (task.getClass() == ExplainTask.class) {
        resultSchema = new TableSchema(mResultSchema);
        setHasResultSet(true);
        break;
      }
    }
  } catch (HiveSQLException e) {
    setState(OperationState.ERROR);
    throw e;
  } catch (Throwable e) {
    setState(OperationState.ERROR);
    throw new HiveSQLException("Error running query: " + e.toString(), e);
  }
}
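Once prepare() succeeds, the resulting TableSchema can be walked with the same ColumnDescriptor accessors that GetColumnsOperation uses above. A small helper, hypothetical and for illustration only:

import org.apache.hive.service.cli.ColumnDescriptor;
import org.apache.hive.service.cli.TableSchema;

// Hypothetical helper: dump the compiled result schema column by column,
// using only accessors that also appear in GetColumnsOperation above.
static void printSchema(TableSchema schema) {
  for (ColumnDescriptor column : schema.getColumnDescriptors()) {
    System.out.printf("%d: %s %s%n",
        column.getOrdinalPosition(), column.getName(), column.getTypeName());
  }
}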
Use of org.apache.hive.service.cli.TableSchema in project hive by apache.
The class ThriftCLIService, method GetResultSetMetadata.
@Override
public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) throws TException {
  TGetResultSetMetadataResp resp = new TGetResultSetMetadataResp();
  try {
    TableSchema schema =
        cliService.getResultSetMetadata(new OperationHandle(req.getOperationHandle()));
    resp.setSchema(schema.toTTableSchema());
    resp.setStatus(OK_STATUS);
  } catch (Exception e) {
    LOG.warn("Error getting result set metadata: ", e);
    resp.setStatus(HiveSQLException.toTStatus(e));
  }
  return resp;
}
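toTTableSchema() here is the inverse of the TableSchema(TTableSchema) constructor used in HiveQueryResultSet.retrieveSchema() above; together the two carry a schema across the Thrift boundary. A minimal round-trip sketch; the Thrift package name varies between Hive versions, so the import is an assumption:

import org.apache.hive.service.cli.TableSchema;
import org.apache.hive.service.rpc.thrift.TTableSchema;  // org.apache.hive.service.cli.thrift in older releases

// Server side serializes, client side reconstructs: the two conversions
// shown on this page form a round trip over Thrift.
static TableSchema roundTrip(TableSchema serverSide) {
  TTableSchema wireForm = serverSide.toTTableSchema();  // server -> wire
  return new TableSchema(wireForm);                     // wire -> client
}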