Use of io.prestosql.spi.type.Type in project hetu-core by openlookeng:
class LogicalPart, method writeObject.
/**
 * Custom serialization hook: writes the default serializable state, then the
 * {@code types} list as JSON-encoded type signatures (Type itself is not
 * directly serializable here).
 *
 * @param out the stream to write this object's state to
 * @throws IOException if writing to the stream fails
 */
private void writeObject(ObjectOutputStream out) throws IOException {
    out.defaultWriteObject();
    // Record the count first so readObject knows how many signatures to read back.
    out.writeInt(types.size());
    for (Type elementType : types) {
        String signatureJson = TYPE_SIGNATURE_JSON_CODEC.toJson(elementType.getTypeSignature());
        out.writeUTF(signatureJson);
    }
}
Use of io.prestosql.spi.type.Type in project hetu-core by openlookeng:
class LogicalPart, method partitionPage.
/**
 * Splits {@code page} into sub-pages keyed by the string form of the first
 * partition column's value at each position.
 * <p>
 * NOTE(review): only the FIRST entry of {@code partitionedBy} is used for
 * partitioning (see {@code partitionChannels.get(0)} below); additional
 * partition columns are resolved but ignored — confirm this is intentional.
 *
 * @param page the page to partition
 * @param partitionedBy names of the partitioning columns (must be non-empty)
 * @param columns all column handles of the table, searched by name
 * @param typeManager used to resolve the partition column's Type
 * @return map from partition-key string (or null for null keys) to the sub-page
 *         containing exactly the positions with that key
 */
static Map<String, Page> partitionPage(Page page, List<String> partitionedBy, List<MemoryColumnHandle> columns, TypeManager typeManager) {
// derive the column handles that correspond to the partitionedBy list
List<MemoryColumnHandle> partitionChannels = new ArrayList<>(partitionedBy.size());
for (String name : partitionedBy) {
for (MemoryColumnHandle handle : columns) {
if (handle.getColumnName().equals(name)) {
partitionChannels.add(handle);
// stop at the first match; only the first handle per name is ever used
break;
}
}
}
// build the partitions from the first partition column only
Map<String, Page> partitions = new HashMap<>();
MemoryColumnHandle partitionColumnHandle = partitionChannels.get(0);
Block block = page.getBlock(partitionColumnHandle.getColumnIndex());
Type type = partitionColumnHandle.getType(typeManager);
// group positions by their native partition-key value (null allowed as a key)
Map<Object, ArrayList<Integer>> uniqueValues = new HashMap<>();
for (int i = 0; i < page.getPositionCount(); i++) {
Object value = getNativeValue(type, block, i);
uniqueValues.computeIfAbsent(value, ignored -> new ArrayList<>()).add(i);
}
for (Map.Entry<Object, ArrayList<Integer>> valueAndPosition : uniqueValues.entrySet()) {
int[] retainedPositions = valueAndPosition.getValue().stream().mapToInt(i -> i).toArray();
Object valueKey = valueAndPosition.getKey();
Page subPage = page.getPositions(retainedPositions, 0, retainedPositions.length);
// NOTE: a null partition key is allowed here in the map,
// but when this partition map is sent to the coordinator via MemoryDataFragment
// the JSON serializer silently DROPS null map keys.
// Therefore, during scheduling, if the query predicate is for null
// we MUST NOT do any partition filtering, because the partition map
// the coordinator has is missing null partitions:
// the coordinator must schedule all splits if the query predicate is null.
// See: MemorySplitManager#getSplits
//
// The alternative — using an empty string as the null key so JSON could
// carry it to the coordinator — would conflict with actual empty-string values.
partitions.put(valueKey == null ? null : valueKey.toString(), subPage);
}
return partitions;
}
Use of io.prestosql.spi.type.Type in project hetu-core by openlookeng:
class SortBuffer, method appendPositionTo.
/**
 * Copies the row at {@code position} from {@code page} into {@code pageBuilder},
 * one channel at a time, using each channel's declared type to append the value.
 *
 * @param page source page to read the row from
 * @param position row index within {@code page}
 * @param pageBuilder destination builder; must have the same channel layout
 */
public static void appendPositionTo(Page page, int position, PageBuilder pageBuilder) {
    pageBuilder.declarePosition();
    int channelCount = page.getChannelCount();
    for (int channel = 0; channel < channelCount; channel++) {
        Type channelType = pageBuilder.getType(channel);
        // append the source value (or null) for this channel into the builder
        channelType.appendTo(page.getBlock(channel), position, pageBuilder.getBlockBuilder(channel));
    }
}
Use of io.prestosql.spi.type.Type in project hetu-core by openlookeng:
class OrcRecordReader, method createColumnReaders.
/**
 * Builds one ColumnReader per ORC column, pairing each column with its read
 * type by index. When row-data caching is enabled, each reader is wrapped in a
 * caching stream reader backed by the cache store.
 *
 * @param columns ORC columns to read, positionally aligned with {@code readTypes}
 * @param readTypes target types, one per column
 * @param systemMemoryContext memory accounting context passed to each reader
 * @param blockFactory factory for nested block creation / load callbacks
 * @param orcCacheStore source of the row-data cache when caching is enabled
 * @param orcCacheProperties controls whether row-data caching is applied
 * @return readers indexed identically to {@code columns}
 * @throws OrcCorruptionException if a reader cannot be created for a column
 */
private ColumnReader[] createColumnReaders(List<OrcColumn> columns, List<Type> readTypes, AggregatedMemoryContext systemMemoryContext, OrcBlockFactory blockFactory, OrcCacheStore orcCacheStore, OrcCacheProperties orcCacheProperties) throws OrcCorruptionException {
    int columnCount = columns.size();
    ColumnReader[] readers = new ColumnReader[columnCount];
    for (int i = 0; i < columnCount; i++) {
        // effectively-final copy of the loop index for capture by the lambda below
        int columnIndex = i;
        OrcColumn column = columns.get(columnIndex);
        ColumnReader reader = createColumnReader(
                readTypes.get(columnIndex),
                column,
                systemMemoryContext,
                blockFactory.createNestedBlockFactory(block -> blockLoaded(columnIndex, block)));
        if (orcCacheProperties.isRowDataCacheEnabled()) {
            reader = ColumnReaders.wrapWithCachingStreamReader(reader, column, orcCacheStore.getRowDataCache());
        }
        readers[columnIndex] = reader;
    }
    return readers;
}
Use of io.prestosql.spi.type.Type in project hetu-core by openlookeng:
class ParquetReader, method readMap.
/**
 * Reads a Parquet MAP group into a single map Block.
 * <p>
 * Child 0 is the key column, child 1 is the value column. Entry offsets and
 * map-level nulls are derived from the KEY column's definition/repetition
 * levels, which also carry over to the returned chunk.
 *
 * @param field group field of map type (must have exactly two type parameters)
 * @return a ColumnChunk wrapping the assembled map block
 * @throws IOException if reading either child column fails
 */
private ColumnChunk readMap(GroupField field) throws IOException {
    List<Type> typeParameters = field.getType().getTypeParameters();
    checkArgument(typeParameters.size() == 2, "Maps must have two type parameters, found %s", typeParameters.size());
    // read the key chunk first; its levels drive offset/null computation
    ColumnChunk keyChunk = readColumnChunk(field.getChildren().get(0).get());
    Block keyBlock = keyChunk.getBlock();
    Block valueBlock = readColumnChunk(field.getChildren().get(1).get()).getBlock();
    IntList offsets = new IntArrayList();
    BooleanList valueIsNull = new BooleanArrayList();
    calculateCollectionOffsets(field, offsets, valueIsNull, keyChunk.getDefinitionLevels(), keyChunk.getRepetitionLevels());
    Block mapBlock = ((MapType) field.getType()).createBlockFromKeyValue(
            Optional.of(valueIsNull.toBooleanArray()),
            offsets.toIntArray(),
            keyBlock,
            valueBlock);
    return new ColumnChunk(mapBlock, keyChunk.getDefinitionLevels(), keyChunk.getRepetitionLevels());
}
Aggregations