Example usage of org.apache.hadoop.hive.metastore.api.Schema in the Apache Hive project:
class TestCliDriverMethods, method testThatCliDriverPrintsHeaderForCommandsWithSchema.
// If the command has an associated schema, make sure it gets printed to use
public void testThatCliDriverPrintsHeaderForCommandsWithSchema() throws CommandNeedRetryException {
  String expectedHeader = "FlightOfTheConchords";

  // Build a single-column schema and stub the mock to return it.
  List<FieldSchema> fields = new ArrayList<FieldSchema>();
  fields.add(new FieldSchema(expectedHeader, "type", "comment"));
  Schema mockSchema = mock(Schema.class);
  when(mockSchema.getFieldSchemas()).thenReturn(fields);

  PrintStream mockOut = headerPrintingTestDriver(mockSchema);

  // The driver must have printed the column name exactly once as the header.
  verify(mockOut, times(1)).print(expectedHeader);
}
Example usage of org.apache.hadoop.hive.metastore.api.Schema in the Apache Hive project:
class Driver, method getSchema.
/**
* Get a Schema with fields represented with native Hive types
*/
/**
 * Get a Schema with fields represented with native Hive types.
 * Never returns null: falls back to an empty Schema when no field
 * information can be derived from the semantic analyzer.
 */
private static Schema getSchema(BaseSemanticAnalyzer sem, HiveConf conf) {
  Schema schema = deriveSchema(sem, conf);
  if (schema == null) {
    schema = new Schema();
  }
  LOG.info("Returning Hive schema: " + schema);
  return schema;
}

/**
 * Derives the result schema from the analyzer's result schema or, failing
 * that, from its fetch task's deserializer. Returns null when no schema
 * information is available.
 */
private static Schema deriveSchema(BaseSemanticAnalyzer sem, HiveConf conf) {
  // give up.
  if (sem == null) {
    // can't get any info without a plan
    return null;
  }

  List<FieldSchema> resultSchema = sem.getResultSchema();
  if (resultSchema != null) {
    return new Schema(resultSchema, null);
  }

  FetchTask fetchTask = sem.getFetchTask();
  if (fetchTask == null) {
    return null;
  }

  // deserializer.
  TableDesc tableDesc = fetchTask.getTblDesc();
  if (tableDesc == null
      && fetchTask.getWork() != null
      && fetchTask.getWork().getPartDesc() != null
      && !fetchTask.getWork().getPartDesc().isEmpty()) {
    // No table descriptor on the task itself; borrow the first partition's.
    tableDesc = fetchTask.getWork().getPartDesc().get(0).getTableDesc();
  }
  if (tableDesc == null) {
    LOG.info("No returning schema.");
    return null;
  }

  String tableName = "result";
  List<FieldSchema> fields = null;
  try {
    fields = MetaStoreUtils.getFieldsFromDeserializer(tableName, tableDesc.getDeserializer(conf));
  } catch (Exception e) {
    LOG.warn("Error getting schema: " + org.apache.hadoop.util.StringUtils.stringifyException(e));
  }
  return fields == null ? null : new Schema(fields, null);
}
Aggregations