Usage example of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in project hive by apache, taken from the method testPrintTypeCompatibility of the class TestFunctionRegistry.
/**
 * Diagnostic helper that prints, for every ordered pair drawn from a fixed
 * list of Hive type names, the common type FunctionRegistry would use to
 * compare them. Deliberately disabled via the unconditional early return
 * below; remove the guard to regenerate the compatibility table on stdout.
 */
public void testPrintTypeCompatibility() {
    if (true) {
        return;
    }
    String[] typeStrings = { "void", "boolean", "tinyint", "smallint", "int", "bigint", "float", "double", "string", "timestamp", "date", "binary", "decimal", "varchar(10)", "varchar(5)" };
    for (String leftName : typeStrings) {
        TypeInfo leftType;
        try {
            leftType = TypeInfoUtils.getTypeInfoFromTypeString(leftName);
        } catch (Exception err) {
            System.out.println(err);
            System.out.println("Unable to get TypeInfo for " + leftName + ", skipping ...");
            continue;
        }
        for (String rightName : typeStrings) {
            TypeInfo common = null;
            try {
                TypeInfo rightType = TypeInfoUtils.getTypeInfoFromTypeString(rightName);
                try {
                    common = FunctionRegistry.getCommonClassForComparison(leftType, rightType);
                } catch (Exception err) {
                    // Report but keep going: the table should cover every pair.
                    System.out.println("Failed to get common class for " + leftType + ", " + rightType + ": " + err);
                    err.printStackTrace();
                }
                System.out.println(leftName + " - " + rightName + ": " + common);
            } catch (Exception err) {
                System.out.println(err);
                System.out.println("Unable to get TypeInfo for " + rightName + ", skipping ...");
            }
        }
    }
}
Usage example of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in project hive by apache, taken from the method createFakeVrbCtx of the class LlapInputFormat.
/**
 * Builds a minimal VectorizedRowBatchCtx from a MapWork. This mirrors the
 * Vectorizer setup code, minus its validation: it collects the non-virtual
 * columns (plus ROWID, if present) from the TableScan schema and derives the
 * partition column count from the first partition descriptor.
 *
 * @param mapWork the map-side work whose TableScan schema supplies the columns
 * @return a batch context covering the scan's data columns and virtual columns
 * @throws HiveException if the TableScan operator cannot be located
 */
static VectorizedRowBatchCtx createFakeVrbCtx(MapWork mapWork) throws HiveException {
    RowSchema rowSchema = findTsOp(mapWork).getSchema();
    int signatureSize = rowSchema.getSignature().size();
    final List<String> columnNames = new ArrayList<String>(signatureSize);
    final List<TypeInfo> columnTypes = new ArrayList<TypeInfo>(signatureSize);
    boolean rowIdPresent = false;
    for (ColumnInfo info : rowSchema.getSignature()) {
        String name = info.getInternalName();
        if (VirtualColumn.ROWID.getName().equals(name)) {
            // ROWID stays in the column list and is also reported as a virtual column.
            rowIdPresent = true;
        } else if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(name)) {
            // Every other virtual column is excluded from the batch context.
            continue;
        }
        columnNames.add(name);
        columnTypes.add(TypeInfoUtils.getTypeInfoFromTypeString(info.getTypeName()));
    }
    // Derive the partition column count from the first partition descriptor.
    // Note - like vectorizer, this assumes partition columns go after data columns.
    int partitionColumnCount = 0;
    Iterator<Path> pathIter = mapWork.getPathToAliases().keySet().iterator();
    if (pathIter.hasNext()) {
        PartitionDesc partDesc = mapWork.getPathToPartitionInfo().get(pathIter.next());
        if (partDesc != null) {
            LinkedHashMap<String, String> partSpec = partDesc.getPartSpec();
            if (partSpec != null && !partSpec.isEmpty()) {
                partitionColumnCount = partSpec.size();
            }
        }
    }
    final VirtualColumn[] virtualColumns = rowIdPresent ? new VirtualColumn[] { VirtualColumn.ROWID } : new VirtualColumn[0];
    return new VectorizedRowBatchCtx(columnNames.toArray(new String[columnNames.size()]), columnTypes.toArray(new TypeInfo[columnTypes.size()]), null, null, partitionColumnCount, virtualColumns.length, virtualColumns, new String[0], null);
}
Usage example of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in project hive by apache, taken from the method getDefaultValue of the class BaseSemanticAnalyzer.
/**
 * Validates the DEFAULT value expression from the AST and returns it as a
 * string suitable for storage in the metastore.
 *
 * @param defaultValueAST AST node holding the default value expression
 * @param typeChild AST node holding the column's declared type
 * @return the default value rendered as an expression string
 * @throws SemanticException if the expression cannot be built, exceeds the
 *         maximum length, does not exactly match the column type, or is not a
 *         constant/function expression
 */
private static String getDefaultValue(ASTNode defaultValueAST, ASTNode typeChild) throws SemanticException {
    // Build an expression descriptor from the default-value AST.
    TypeCheckCtx ctx = new TypeCheckCtx(null);
    ExprNodeDesc defaultExpr = TypeCheckProcFactory.genExprNode(defaultValueAST, ctx).get(defaultValueAST);
    if (defaultExpr == null) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value!"));
    }
    // String form of the default value, as it will be stored in the metastore.
    String defaultText = defaultExpr.getExprString();
    final int DEFAULT_MAX_LEN = 255;
    if (defaultText.length() > DEFAULT_MAX_LEN) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value: " + defaultText + " .Maximum character length allowed is " + DEFAULT_MAX_LEN + " ."));
    }
    // The default value's type must exactly equal the declared column type.
    TypeInfo defaultType = defaultExpr.getTypeInfo();
    TypeInfo columnType = TypeInfoUtils.getTypeInfoFromTypeString(getTypeStringFromAST(typeChild));
    if (!defaultType.equals(columnType)) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid type: " + defaultType.getTypeName() + " for default value: " + defaultText + ". Please make sure that the type is compatible with column type: " + columnType.getTypeName()));
    }
    // Reject expressions Hive does not permit in a DEFAULT clause.
    if (!isDefaultValueAllowed(defaultExpr)) {
        throw new SemanticException(ErrorMsg.INVALID_CSTR_SYNTAX.getMsg("Invalid Default value: " + defaultText + ". DEFAULT only allows constant or function expressions"));
    }
    return defaultText;
}
Usage example of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in project hive by apache, taken from the method getColumnTypes of the class ColumnStatsSemanticAnalyzer.
/**
 * Resolves the type string for each requested column from the table schema.
 * Columns whose type is not primitive are logged with a warning and removed
 * from {@code colNames}, so on return {@code colNames} and the returned list
 * are parallel.
 *
 * @param colNames requested column names; mutated in place to drop columns
 *                 with non-primitive types
 * @return the type strings of the remaining (primitive) columns
 * @throws SemanticException declared for interface consistency with callers
 */
private List<String> getColumnTypes(List<String> colNames) throws SemanticException {
    List<String> colTypes = new ArrayList<String>();
    List<FieldSchema> cols = tbl.getCols();
    // Iterate over a snapshot so removals from colNames cannot disturb iteration.
    List<String> copyColNames = new ArrayList<>(colNames);
    for (String colName : copyColNames) {
        for (FieldSchema col : cols) {
            if (colName.equalsIgnoreCase(col.getName())) {
                String type = col.getType();
                TypeInfo typeInfo = TypeInfoUtils.getTypeInfoFromTypeString(type);
                if (typeInfo.getCategory() != ObjectInspector.Category.PRIMITIVE) {
                    // Column statistics are only supported on primitive types.
                    logTypeWarning(colName, type);
                    colNames.remove(colName);
                } else {
                    colTypes.add(type);
                }
                // Stop at the first match: without this break, a duplicate or
                // case-variant schema entry would add the type twice (or
                // re-trigger removal), desynchronizing colTypes from colNames.
                break;
            }
        }
    }
    return colTypes;
}
Usage example of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfoFromTypeString in project hive by apache, taken from the method analyzeCreateMacro of the class MacroSemanticAnalyzer.
/**
 * Analyzes a CREATE TEMPORARY MACRO statement: validates the macro name,
 * collects the declared parameters, checks that the body references exactly
 * the declared parameter set, type-checks the body expression, and schedules
 * a task that creates the macro.
 *
 * @param ast root of the CREATE MACRO AST
 * @throws SemanticException if the name is qualified, a declared parameter is
 *         unused (or an undeclared name is referenced), a parameter name is
 *         duplicated, or the body expression fails analysis
 */
@SuppressWarnings("unchecked")
private void analyzeCreateMacro(ASTNode ast) throws SemanticException {
    String functionName = ast.getChild(0).getText();
    // Temp macros are not allowed to have qualified names.
    if (FunctionUtils.isQualifiedFunctionName(functionName)) {
        throw new SemanticException("Temporary macro cannot be created with a qualified name.");
    }
    List<FieldSchema> arguments = BaseSemanticAnalyzer.getColumns((ASTNode) ast.getChild(1), true);
    boolean isNoArgumentMacro = arguments.isEmpty();
    RowResolver rowResolver = new RowResolver();
    ArrayList<String> macroColNames = new ArrayList<String>(arguments.size());
    ArrayList<TypeInfo> macroColTypes = new ArrayList<TypeInfo>(arguments.size());
    final Set<String> actualColumnNames = new HashSet<String>();
    if (!isNoArgumentMacro) {
        // Walk the body expression to record which parameters are actually used.
        Node expression = (Node) ast.getChild(2);
        PreOrderWalker walker = new PreOrderWalker(new Dispatcher() {
            @Override
            public Object dispatch(Node nd, Stack<Node> stack, Object... nodeOutputs) throws SemanticException {
                if (nd instanceof ASTNode) {
                    ASTNode node = (ASTNode) nd;
                    if (node.getType() == HiveParser.TOK_TABLE_OR_COL) {
                        actualColumnNames.add(node.getChild(0).getText());
                    }
                }
                return null;
            }
        });
        walker.startWalking(Collections.singletonList(expression), null);
    }
    // Register every declared parameter with the row resolver so the body can
    // reference it, and remember names/types for the macro descriptor.
    for (FieldSchema argument : arguments) {
        TypeInfo colType = TypeInfoUtils.getTypeInfoFromTypeString(argument.getType());
        rowResolver.put("", argument.getName(), new ColumnInfo(argument.getName(), colType, "", false));
        macroColNames.add(argument.getName());
        macroColTypes.add(colType);
    }
    // The referenced names must match the declared parameter set exactly.
    Set<String> expectedColumnNames = new LinkedHashSet<String>(macroColNames);
    if (!expectedColumnNames.equals(actualColumnNames)) {
        throw new SemanticException("Expected columns " + expectedColumnNames + " but found " + actualColumnNames);
    }
    if (expectedColumnNames.size() != macroColNames.size()) {
        throw new SemanticException("At least one parameter name was used more than once " + macroColNames);
    }
    SemanticAnalyzer sa = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CBO_ENABLED) ? new CalcitePlanner(queryState) : new SemanticAnalyzer(queryState);
    // For a no-argument macro child(1) is already the body expression;
    // otherwise child(1) is the parameter list and child(2) is the body.
    ExprNodeDesc body;
    if (isNoArgumentMacro) {
        body = sa.genExprNodeDesc((ASTNode) ast.getChild(1), rowResolver);
    } else {
        body = sa.genExprNodeDesc((ASTNode) ast.getChild(2), rowResolver);
    }
    CreateMacroDesc desc = new CreateMacroDesc(functionName, macroColNames, macroColTypes, body);
    rootTasks.add(TaskFactory.get(new FunctionWork(desc)));
    addEntities();
}
Aggregations