Use of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable in project presto by prestodb.
Class ColumnarBinaryHiveRecordCursor, method parseCharColumn.
private void parseCharColumn(int column) {
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    } else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseCharColumn(column, bytes, start, length);
    }
}
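For readers unfamiliar with the Hive columnar serde types used above, the following is a minimal, self-contained sketch (not Presto code) of how a BytesRefArrayWritable row and its BytesRefWritable fields are populated and read back. The class name and buffer contents are illustrative assumptions; only the serde API calls (set, unCheckedGet, getData, getStart, getLength) correspond to the snippets on this page.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hive.serde2.columnar.BytesRefArrayWritable;
import org.apache.hadoop.hive.serde2.columnar.BytesRefWritable;

public class BytesRefWritableExample {
    public static void main(String[] args) throws IOException {
        // Build a two-field row; each field references a slice of a shared buffer,
        // much like an RCFile reader hands rows to the record cursor
        byte[] buffer = "hello world".getBytes(StandardCharsets.UTF_8);
        BytesRefArrayWritable row = new BytesRefArrayWritable(2);
        row.set(0, new BytesRefWritable(buffer, 0, 5)); // "hello"
        row.set(1, new BytesRefWritable(buffer, 6, 5)); // "world"

        // Read a field back the same way the cursor methods above do:
        // the writable only references a range of the backing array,
        // so start and length must be honored when decoding
        BytesRefWritable field = row.unCheckedGet(1);
        byte[] bytes = field.getData(); // declared to throw IOException
        int start = field.getStart();
        int length = field.getLength();
        System.out.println(new String(bytes, start, length, StandardCharsets.UTF_8)); // prints "world"
    }
}

This is also why every snippet passes (bytes, start, length) to the four-argument overload instead of copying the field: the cursor decodes directly out of the shared row buffer.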
Use of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable in project presto by prestodb.
Class ColumnarBinaryHiveRecordCursor, method parseBooleanColumn.
private void parseBooleanColumn(int column) {
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    } else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseBooleanColumn(column, bytes, start, length);
    }
}
Use of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable in project presto by prestodb.
Class ColumnarBinaryHiveRecordCursor, method parseDecimalColumn.
private void parseDecimalColumn(int column) {
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    } else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseDecimalColumn(column, bytes, start, length);
    }
}
Use of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable in project presto by prestodb.
Class ColumnarTextHiveRecordCursor, method parseStringColumn.
private void parseStringColumn(int column) {
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    } else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseStringColumn(column, bytes, start, length);
    }
}
Use of org.apache.hadoop.hive.serde2.columnar.BytesRefWritable in project presto by prestodb.
Class ColumnarTextHiveRecordCursor, method parseLongColumn.
private void parseLongColumn(int column) {
    loaded[column] = true;
    if (hiveColumnIndexes[column] >= value.size()) {
        // this partition may contain fewer fields than what's declared in the schema
        // this happens when additional columns are added to the hive table after a partition has been created
        nulls[column] = true;
    } else {
        BytesRefWritable fieldData = value.unCheckedGet(hiveColumnIndexes[column]);
        byte[] bytes;
        try {
            bytes = fieldData.getData();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
        int start = fieldData.getStart();
        int length = fieldData.getLength();
        parseLongColumn(column, bytes, start, length);
    }
}
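The four-argument overloads invoked above are not part of these snippets. As a rough illustration of the kind of decoding the text cursor's parseLongColumn(column, bytes, start, length) overload must perform, here is a hypothetical helper that parses an ASCII-encoded long from a byte range. It is a sketch under the assumption that the text format stores the number as ASCII digits; it is not the actual Presto method.

// Illustrative sketch only: decode an ASCII-encoded long from bytes[start, start + length).
// This mirrors the (bytes, start, length) calling convention used by the cursor methods above,
// but is not the real Presto implementation (which also handles overflow and Hive null markers).
private static long parseAsciiLong(byte[] bytes, int start, int length) {
    if (length == 0) {
        throw new NumberFormatException("empty field");
    }
    int i = start;
    int end = start + length;
    boolean negative = bytes[i] == '-';
    if (negative) {
        i++;
    }
    long value = 0;
    for (; i < end; i++) {
        byte b = bytes[i];
        if (b < '0' || b > '9') {
            throw new NumberFormatException("invalid digit: " + (char) b);
        }
        value = value * 10 + (b - '0');
    }
    return negative ? -value : value;
}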