Use of org.apache.hadoop.io.ArrayWritable in project carbondata by apache.
The class CarbonArrayInspector, method getList.
@Override
public List<?> getList(final Object data) {
  if (data == null) {
    return null;
  }
  if (data instanceof ArrayWritable) {
    final Writable[] listContainer = ((ArrayWritable) data).get();
    if (listContainer == null || listContainer.length == 0) {
      return null;
    }
    final Writable subObj = listContainer[0];
    if (subObj == null) {
      return null;
    }
    // The outer ArrayWritable is a single-element container; the inner
    // ArrayWritable holds the actual list elements.
    final Writable[] array = ((ArrayWritable) subObj).get();
    // Arrays.asList returns a fixed-size view backed by the array, so adding
    // to it throws UnsupportedOperationException; copy into a mutable list.
    return new ArrayList<>(Arrays.asList(array));
  }
  throw new UnsupportedOperationException("Cannot inspect " + data.getClass().getCanonicalName());
}
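For illustration, a minimal sketch of the two-level layout this method expects; the values are made up, but the wrapping follows the code above (an outer single-element ArrayWritable whose one entry is the inner ArrayWritable carrying the list elements):

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

// The list [1, 2, 3] in the layout getList unwraps: the inner ArrayWritable
// holds the elements, the outer one is just a single-element container.
ArrayWritable inner = new ArrayWritable(IntWritable.class,
    new Writable[] { new IntWritable(1), new IntWritable(2), new IntWritable(3) });
ArrayWritable outer = new ArrayWritable(ArrayWritable.class, new Writable[] { inner });
// getList(outer) returns a List<Writable> holding the three IntWritables.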
Use of org.apache.hadoop.io.ArrayWritable in project carbondata by apache.
The class CarbonHiveRecordReader, method createArray.
private ArrayWritable createArray(Object obj, ListObjectInspector inspector) throws SerDeException {
  List<?> sourceArray = inspector.getList(obj);
  ObjectInspector subInspector = inspector.getListElementObjectInspector();
  List<Writable> array = new ArrayList<>();
  if (sourceArray != null) {
    for (Object curObj : sourceArray) {
      Writable newObj = createObject(curObj, subInspector);
      if (newObj != null) {
        array.add(newObj);
      }
    }
  }
  if (!array.isEmpty()) {
    // Type the inner array by the first element's class, then wrap it in a
    // single-element outer array.
    ArrayWritable subArray =
        new ArrayWritable(array.get(0).getClass(), array.toArray(new Writable[0]));
    return new ArrayWritable(Writable.class, new Writable[] { subArray });
  }
  return null;
}
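The wrapping at the end is worth spelling out. A hedged sketch of the structure a non-empty result carries, assuming createObject (not shown here) has produced Text values:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

// Converted elements go into an inner ArrayWritable typed by the first
// element's class; that inner array becomes the single element of an outer
// ArrayWritable of Writable.class.
Writable[] converted = { new Text("a"), new Text("b") };            // from createObject
ArrayWritable subArray = new ArrayWritable(Text.class, converted);  // inner
ArrayWritable result = new ArrayWritable(Writable.class, new Writable[] { subArray });

This is the same layout that CarbonArrayInspector.getList above unwraps by reading listContainer[0].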
Use of org.apache.hadoop.io.ArrayWritable in project carbondata by apache.
The class CarbonHiveRecordReader, method initialize.
public void initialize(InputSplit inputSplit, Configuration conf) throws IOException {
  // The input split can contain a single HDFS block or multiple blocks, so first
  // get all the blocks and then set them in the query model.
  List<CarbonHiveInputSplit> splitList;
  if (inputSplit instanceof CarbonHiveInputSplit) {
    splitList = new ArrayList<>(1);
    splitList.add((CarbonHiveInputSplit) inputSplit);
  } else {
    throw new RuntimeException("unsupported input split type: " + inputSplit);
  }
  List<TableBlockInfo> tableBlockInfoList = CarbonHiveInputSplit.createBlocks(splitList);
  queryModel.setTableBlockInfos(tableBlockInfoList);
  readSupport.initialize(queryModel.getProjectionColumns(), queryModel.getAbsoluteTableIdentifier());
  try {
    carbonIterator = new ChunkRowIterator(queryExecutor.execute(queryModel));
  } catch (QueryExecutionException e) {
    throw new IOException(e.getMessage(), e.getCause());
  }
  if (valueObj == null) {
    valueObj = new ArrayWritable(Writable.class, new Writable[queryModel.getProjectionColumns().length]);
  }
  final TypeInfo rowTypeInfo;
  final List<String> columnNames;
  List<TypeInfo> columnTypes;
  // Get column names and sort order
  final String colIds = conf.get("hive.io.file.readcolumn.ids");
  final String columnNameProperty = conf.get("hive.io.file.readcolumn.names");
  final String columnTypeProperty = conf.get(serdeConstants.LIST_COLUMN_TYPES);
  if (columnNameProperty.length() == 0) {
    columnNames = new ArrayList<String>();
  } else {
    columnNames = Arrays.asList(columnNameProperty.split(","));
  }
  if (columnTypeProperty.length() == 0) {
    columnTypes = new ArrayList<TypeInfo>();
  } else {
    columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
  }
  // Keep only the types of the columns Hive actually reads (projection pruning).
  String[] arraySelectedColId = colIds.split(",");
  List<TypeInfo> reqColTypes = new ArrayList<TypeInfo>();
  for (String anArrayColId : arraySelectedColId) {
    reqColTypes.add(columnTypes.get(Integer.parseInt(anArrayColId)));
  }
  // Create row related objects
  rowTypeInfo = TypeInfoFactory.getStructTypeInfo(columnNames, reqColTypes);
  this.objInspector = new CarbonObjectInspector((StructTypeInfo) rowTypeInfo);
}
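For illustration, a standalone sketch of the projection-pruning step, using made-up property values; hive.io.file.readcolumn.ids and columns.types (serdeConstants.LIST_COLUMN_TYPES) are the standard Hive settings the method reads:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

// Hypothetical three-column table from which Hive reads columns 0 and 2.
String colIds = "0,2";                            // hive.io.file.readcolumn.ids
String columnTypeProperty = "int,string,double";  // columns.types

List<TypeInfo> columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
List<TypeInfo> reqColTypes = new ArrayList<>();
for (String colId : colIds.split(",")) {
  reqColTypes.add(columnTypes.get(Integer.parseInt(colId)));
}
// reqColTypes now holds [int, double], the pruned schema used to build the
// struct type for the row object inspector.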
Use of org.apache.hadoop.io.ArrayWritable in project carbondata by apache.
The class CarbonHiveSerDe, method createArray.
private ArrayWritable createArray(Object obj, ListObjectInspector inspector) throws SerDeException {
  List<?> sourceArray = inspector.getList(obj);
  ObjectInspector subInspector = inspector.getListElementObjectInspector();
  List<Writable> array = new ArrayList<>();
  if (sourceArray != null) {
    for (Object curObj : sourceArray) {
      Writable newObj = createObject(curObj, subInspector);
      if (newObj != null) {
        array.add(newObj);
      }
    }
  }
  if (!array.isEmpty()) {
    // Type the inner array by the first element's class, then wrap it in a
    // single-element outer array.
    ArrayWritable subArray =
        new ArrayWritable(array.get(0).getClass(), array.toArray(new Writable[0]));
    return new ArrayWritable(Writable.class, new Writable[] { subArray });
  }
  return null;
}
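This method is identical to the record reader's createArray above; both produce the layout that CarbonArrayInspector.getList reads back. A round-trip sketch under that assumption:

import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

// Write side:  elements -> inner ArrayWritable -> single-element outer array.
// Read side:   getList unwraps outer.get()[0] and returns the inner elements.
ArrayWritable written = new ArrayWritable(Writable.class, new Writable[] {
    new ArrayWritable(IntWritable.class, new Writable[] { new IntWritable(7) }) });
Writable[] elements = ((ArrayWritable) written.get()[0]).get();  // [IntWritable(7)]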
Use of org.apache.hadoop.io.ArrayWritable in project carbondata by apache.
The class CarbonObjectInspector, method getStructFieldData.
@Override
public Object getStructFieldData(final Object data, final StructField fieldRef) {
  if (data == null) {
    return null;
  }
  if (data instanceof ArrayWritable) {
    final ArrayWritable arr = (ArrayWritable) data;
    return arr.get()[((StructFieldImpl) fieldRef).getIndex()];
  }
  // Fall back to plain object arrays and lists; anything else is returned as-is.
  boolean isArray = data instanceof Object[];
  if (!isArray && !(data instanceof List)) {
    return data;
  }
  int listSize = isArray ? ((Object[]) data).length : ((List) data).size();
  int fieldID = fieldRef.getFieldID();
  return fieldID >= listSize ? null
      : (isArray ? ((Object[]) data)[fieldID] : ((List) data).get(fieldID));
}
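A usage sketch, assuming objInspector is the CarbonObjectInspector built in initialize above for a hypothetical struct a:int,b:string; getStructFieldRef comes from Hive's StructObjectInspector API:

import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.io.ArrayWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

// A row with two fields; for ArrayWritable rows the method indexes the
// backing Writable[] by the struct field's position.
ArrayWritable row = new ArrayWritable(Writable.class,
    new Writable[] { new IntWritable(42), new Text("carbon") });
StructField bField = objInspector.getStructFieldRef("b");
Object value = objInspector.getStructFieldData(row, bField);  // Text("carbon")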