Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString in project hive by apache.
From class TestLazyAccumuloMap, method testMixedSerializationMap.
@Test
public void testMixedSerializationMap() throws SerDeException, IOException {
    AccumuloHiveRow row = new AccumuloHiveRow("row");
    row.add(new Text("cf1"), new Text(toBytes(1)), "2".getBytes());
    row.add(new Text("cf1"), new Text(toBytes(2)), "4".getBytes());
    row.add(new Text("cf1"), new Text(toBytes(3)), "6".getBytes());
    HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, ColumnEncoding.BINARY,
        ColumnEncoding.STRING, "column",
        TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo).toString());
    // Map of Integer to Integer
    Text nullSequence = new Text("\\N");
    ObjectInspector oi = LazyFactory.createLazyObjectInspector(
        TypeInfoUtils.getTypeInfosFromTypeString("map<int,int>").get(0),
        new byte[] { (byte) 1, (byte) 2 }, 0, nullSequence, false, (byte) 0);
    LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi);
    map.init(row, mapping);
    Assert.assertEquals(3, map.getMapSize());
    Object o = map.getMapValueElement(new IntWritable(1));
    Assert.assertNotNull(o);
    Assert.assertEquals(new IntWritable(2), ((LazyInteger) o).getWritableObject());
    o = map.getMapValueElement(new IntWritable(2));
    Assert.assertNotNull(o);
    Assert.assertEquals(new IntWritable(4), ((LazyInteger) o).getWritableObject());
    o = map.getMapValueElement(new IntWritable(3));
    Assert.assertNotNull(o);
    Assert.assertEquals(new IntWritable(6), ((LazyInteger) o).getWritableObject());
}
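As a reference for what the parser produces, here is a minimal sketch (not part of the test above, and assuming the same imports) of inspecting the TypeInfo parsed from the type string used in this test:

List<TypeInfo> typeInfos = TypeInfoUtils.getTypeInfosFromTypeString("map<int,int>");
// A single top-level type was given, so the list holds one entry.
MapTypeInfo mapTypeInfo = (MapTypeInfo) typeInfos.get(0);
System.out.println(mapTypeInfo.getMapKeyTypeInfo());   // prints: int
System.out.println(mapTypeInfo.getMapValueTypeInfo()); // prints: int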
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString in project hive by apache.
From class TestLazyAccumuloMap, method testStringMapWithProjection.
@Test
public void testStringMapWithProjection() throws SerDeException {
    AccumuloHiveRow row = new AccumuloHiveRow("row");
    row.add("cf1", "foo", "bar".getBytes());
    row.add("cf1", "bar", "foo".getBytes());
    row.add("cf2", "foo1", "bar1".getBytes());
    row.add("cf3", "bar1", "foo1".getBytes());
    HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, ColumnEncoding.STRING,
        ColumnEncoding.STRING, "column",
        TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo).toString());
    // Map of String to String
    Text nullSequence = new Text("\\N");
    ObjectInspector oi = LazyFactory.createLazyObjectInspector(
        TypeInfoUtils.getTypeInfosFromTypeString("map<string,string>").get(0),
        new byte[] { (byte) 1, (byte) 2 }, 0, nullSequence, false, (byte) 0);
    LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi);
    map.init(row, mapping);
    // Only the two "cf1" entries are projected into the map; "cf2" and "cf3" are ignored.
    Assert.assertEquals(2, map.getMapSize());
    Object o = map.getMapValueElement(new Text("foo"));
    Assert.assertNotNull(o);
    Assert.assertEquals(new Text("bar"), ((LazyString) o).getWritableObject());
    o = map.getMapValueElement(new Text("bar"));
    Assert.assertNotNull(o);
    Assert.assertEquals(new Text("foo"), ((LazyString) o).getWritableObject());
}
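A hedged aside on the inspector construction above: the two separator bytes, (byte) 1 and (byte) 2, delimit map entries and key/value pairs in the lazy serialized form, and the resulting inspector exposes the key and value inspectors separately. A small illustrative sketch, assuming the oi built in the test:

LazyMapObjectInspector mapOI = (LazyMapObjectInspector) oi;
// For this map<string,string>, both calls return lazy string inspectors.
ObjectInspector keyOI = mapOI.getMapKeyObjectInspector();
ObjectInspector valueOI = mapOI.getMapValueObjectInspector();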
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString in project hive by apache.
From class ColumnarStorageBench, method getArrayWritableObjectInspector.
private ObjectInspector getArrayWritableObjectInspector(final String columnTypes) {
    List<TypeInfo> columnTypeList = TypeInfoUtils.getTypeInfosFromTypeString(columnTypes);
    List<String> columnNameList = Arrays.asList(getColumnNames(columnTypes).split(","));
    StructTypeInfo rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNameList, columnTypeList);
    return new ArrayWritableObjectInspector(rowTypeInfo);
}
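For illustration, a standalone sketch of the same pattern with hypothetical column names (the getColumnNames helper is specific to the benchmark, so the names are supplied directly here):

List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString("int,string");
List<String> columnNames = Arrays.asList("id", "name"); // hypothetical names
StructTypeInfo rowTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(columnNames, columnTypes);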
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString in project hive by apache.
From class MapWork, method checkVectorizerSupportedTypes.
private boolean checkVectorizerSupportedTypes(boolean hasLlap) {
    for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : aliasToWork.entrySet()) {
        final String alias = entry.getKey();
        Operator<? extends OperatorDesc> op = entry.getValue();
        PartitionDesc partitionDesc = aliasToPartnInfo.get(alias);
        if (op instanceof TableScanOperator && partitionDesc != null && partitionDesc.getTableDesc() != null) {
            final TableScanOperator tsOp = (TableScanOperator) op;
            final List<String> readColumnNames = tsOp.getNeededColumns();
            final Properties props = partitionDesc.getTableDesc().getProperties();
            final List<TypeInfo> typeInfos = TypeInfoUtils.getTypeInfosFromTypeString(
                props.getProperty(serdeConstants.LIST_COLUMN_TYPES));
            final List<String> allColumnTypes = TypeInfoUtils.getTypeStringsFromTypeInfo(typeInfos);
            final List<String> allColumnNames = Utilities.getColumnNames(props);
            hasLlap = Utilities.checkVectorizerSupportedTypes(readColumnNames, allColumnNames, allColumnTypes);
        }
    }
    return hasLlap;
}
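The LIST_COLUMN_TYPES property read above holds the table's colon-delimited columns.types value, and the code passes it to getTypeInfosFromTypeString unmodified. A short sketch with a hypothetical value:

// "columns.types" stores types colon-delimited, e.g. "int:string:array<double>".
List<TypeInfo> typeInfos = TypeInfoUtils.getTypeInfosFromTypeString("int:string:array<double>");
// typeInfos now holds three entries: int, string, and array<double>.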
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString in project hive by apache.
From class RegexSerDe, method initialize.
@Override
public void initialize(Configuration conf, Properties tbl) throws SerDeException {
    // We can get the table definition from tbl.
    // Read the configuration parameters.
    inputRegex = tbl.getProperty(INPUT_REGEX);
    String columnNameProperty = tbl.getProperty(serdeConstants.LIST_COLUMNS);
    String columnTypeProperty = tbl.getProperty(serdeConstants.LIST_COLUMN_TYPES);
    boolean inputRegexIgnoreCase = "true".equalsIgnoreCase(tbl.getProperty(INPUT_REGEX_CASE_SENSITIVE));
    // The output format string is no longer supported; warn the user of the deprecation.
    if (null != tbl.getProperty("output.format.string")) {
        LOG.warn("output.format.string has been deprecated");
    }
    // Parse the configuration parameters.
    if (inputRegex != null) {
        inputPattern = Pattern.compile(inputRegex, Pattern.DOTALL + (inputRegexIgnoreCase ? Pattern.CASE_INSENSITIVE : 0));
    } else {
        inputPattern = null;
        throw new SerDeException("This table does not have serde property \"input.regex\"!");
    }
    final String columnNameDelimiter = tbl.containsKey(serdeConstants.COLUMN_NAME_DELIMITER)
        ? tbl.getProperty(serdeConstants.COLUMN_NAME_DELIMITER)
        : String.valueOf(SerDeUtils.COMMA);
    List<String> columnNames = Arrays.asList(columnNameProperty.split(columnNameDelimiter));
    columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
    assert columnNames.size() == columnTypes.size();
    numColumns = columnNames.size();
    /* Constructing the row ObjectInspector:
     * the row consists of some set of primitive columns, and each column will
     * be a Java object of primitive type.
     */
    List<ObjectInspector> columnOIs = new ArrayList<ObjectInspector>(columnNames.size());
    for (int c = 0; c < numColumns; c++) {
        TypeInfo typeInfo = columnTypes.get(c);
        if (typeInfo instanceof PrimitiveTypeInfo) {
            PrimitiveTypeInfo pti = (PrimitiveTypeInfo) columnTypes.get(c);
            AbstractPrimitiveJavaObjectInspector oi = PrimitiveObjectInspectorFactory.getPrimitiveJavaObjectInspector(pti);
            columnOIs.add(oi);
        } else {
            throw new SerDeException(getClass().getName() + " doesn't allow column [" + c + "] named "
                + columnNames.get(c) + " with type " + columnTypes.get(c));
        }
    }
    // StandardStruct uses ArrayList to store the row.
    rowOI = ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, columnOIs,
        Lists.newArrayList(Splitter.on('\0').split(tbl.getProperty("columns.comments"))));
    row = new ArrayList<Object>(numColumns);
    // Construct the row object, etc., which will be reused for all rows.
    for (int c = 0; c < numColumns; c++) {
        row.add(null);
    }
    outputFields = new Object[numColumns];
    outputRowText = new Text();
}
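To make the properties this initialize() expects concrete, here is a hedged sketch of a minimal setup; the regex, column names, and comment values are hypothetical, and INPUT_REGEX corresponds to the "input.regex" key named in the exception message above:

Properties tbl = new Properties();
tbl.setProperty("input.regex", "(\\d+)\\s+(\\w+)"); // hypothetical pattern
tbl.setProperty(serdeConstants.LIST_COLUMNS, "id,name");
tbl.setProperty(serdeConstants.LIST_COLUMN_TYPES, "int:string");
// initialize() splits this property on '\0'; one comment per column (hypothetical values).
tbl.setProperty("columns.comments", "row id\0row name");
RegexSerDe serde = new RegexSerDe();
serde.initialize(new Configuration(), tbl);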