Use of org.apache.hadoop.io.FloatWritable in project hive by apache.
The class TypedBytesRecordReader, method write.
private void write(int pos, Writable inpw) throws IOException {
  String typ = columnTypes.get(pos);
  Writable w = (Writable) converters.get(pos).convert(inpw);
  if (typ.equalsIgnoreCase(serdeConstants.BOOLEAN_TYPE_NAME)) {
    tbOut.writeBoolean((BooleanWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.TINYINT_TYPE_NAME)) {
    tbOut.writeByte((ByteWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.SMALLINT_TYPE_NAME)) {
    tbOut.writeShort((ShortWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.INT_TYPE_NAME)) {
    tbOut.writeInt((IntWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) {
    tbOut.writeLong((LongWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME)) {
    tbOut.writeFloat((FloatWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) {
    tbOut.writeDouble((DoubleWritable) w);
  } else if (typ.equalsIgnoreCase(serdeConstants.STRING_TYPE_NAME)) {
    tbOut.writeText((Text) w);
  } else {
    assert false;
  }
}
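For context, FloatWritable is a thin Writable wrapper around a Java float, and the writeFloat branch above hands that wrapper to Hive's TypedBytes output. A minimal, self-contained sketch of the underlying Writable round trip (the class and variable names here are illustrative, not part of the Hive code):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.io.FloatWritable;

public class FloatWritableRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize a FloatWritable to raw bytes using the Writable contract.
    FloatWritable in = new FloatWritable(-0.01f);
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    in.write(new DataOutputStream(bos));

    // Deserialize the same bytes back into a fresh FloatWritable.
    FloatWritable out = new FloatWritable();
    out.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

    System.out.println(out.get());   // prints -0.01
  }
}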
Use of org.apache.hadoop.io.FloatWritable in project hive by apache.
The class TypedBytesRecordReader, method next.
public int next(Writable data) throws IOException {
  int pos = 0;
  barrStr.reset();
  while (true) {
    Type type = tbIn.readTypeCode();
    // it was an empty stream
    if (type == null) {
      return -1;
    }
    if (type == Type.ENDOFRECORD) {
      tbOut.writeEndOfRecord();
      if (barrStr.getLength() > 0) {
        ((BytesWritable) data).set(barrStr.getData(), 0, barrStr.getLength());
      }
      return barrStr.getLength();
    }
    if (pos >= row.size()) {
      Writable wrt = allocateWritable(type);
      assert pos == row.size();
      assert pos == rowTypeName.size();
      row.add(wrt);
      rowTypeName.add(type.name());
      String typeName = typedBytesToTypeName.get(type);
      PrimitiveTypeInfo srcTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(typeName);
      srcOIns.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(srcTypeInfo));
      converters.add(ObjectInspectorConverters.getConverter(srcOIns.get(pos), dstOIns.get(pos)));
    } else {
      if (!rowTypeName.get(pos).equals(type.name())) {
        throw new RuntimeException("datatype of row changed from " + rowTypeName.get(pos) + " to " + type.name());
      }
    }
    Writable w = row.get(pos);
    switch (type) {
      case BYTE:
        tbIn.readByte((ByteWritable) w);
        break;
      case BOOL:
        tbIn.readBoolean((BooleanWritable) w);
        break;
      case INT:
        tbIn.readInt((IntWritable) w);
        break;
      case SHORT:
        tbIn.readShort((ShortWritable) w);
        break;
      case LONG:
        tbIn.readLong((LongWritable) w);
        break;
      case FLOAT:
        tbIn.readFloat((FloatWritable) w);
        break;
      case DOUBLE:
        tbIn.readDouble((DoubleWritable) w);
        break;
      case STRING:
        tbIn.readText((Text) w);
        break;
      default:
        // should never come here
        assert false;
    }
    write(pos, w);
    pos++;
  }
}
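The converters list populated in next() comes from Hive's ObjectInspectorConverters, which is also what routes a FloatWritable into the matching branch of write(). A minimal sketch of that conversion step in isolation, assuming the standard serde2 factory fields (the float-to-string pairing is chosen only for illustration):

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.FloatWritable;

public class FloatConverterSketch {
  public static void main(String[] args) {
    // Source: a writable float; destination: a writable string.
    ObjectInspector srcOI = PrimitiveObjectInspectorFactory.writableFloatObjectInspector;
    ObjectInspector dstOI = PrimitiveObjectInspectorFactory.writableStringObjectInspector;
    ObjectInspectorConverters.Converter conv = ObjectInspectorConverters.getConverter(srcOI, dstOI);

    // The converter re-wraps the value in the destination's writable type (Text here).
    Object converted = conv.convert(new FloatWritable(-0.01f));
    System.out.println(converted);   // expected to print -0.01
  }
}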
Use of org.apache.hadoop.io.FloatWritable in project hive by apache.
The class TestDruidSerDe, method testDruidObjectSerializerwithNullTimestamp.
@Test
public void testDruidObjectSerializerwithNullTimestamp() throws Exception {
  // Create, initialize, and test the SerDe
  DruidSerDe serDe = new DruidSerDe();
  Configuration conf = new Configuration();
  Properties tbl;
  // Mixed source (all types)
  tbl = createPropertiesSource(COLUMN_NAMES, COLUMN_TYPES);
  serDe.initialize(conf, tbl, null);
  Object[] row = new Object[] {
      null,
      new Text("dim1_val"),
      new HiveCharWritable(new HiveChar("dim2_v", 6)),
      new HiveVarcharWritable(new HiveVarchar("dim3_val", 8)),
      new DoubleWritable(10669.3D),
      new FloatWritable(10669.45F),
      new LongWritable(1113939),
      new IntWritable(1112123),
      new ShortWritable((short) 12),
      new ByteWritable((byte) 0),
      null // granularity
  };
  expectedEx.expect(NullPointerException.class);
  expectedEx.expectMessage("Timestamp column cannot have null value");
  // should fail as timestamp is null
  serializeObject(tbl, serDe, row, DRUID_WRITABLE);
}
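The row above carries its float column as new FloatWritable(10669.45F); during serialization a SerDe typically unwraps such a value through a FloatObjectInspector. A standalone sketch of that unwrapping (not the DruidSerDe code path itself, and assuming the standard primitive inspector factory; the class name is illustrative):

import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.io.FloatWritable;

public class UnwrapFloatSketch {
  public static void main(String[] args) {
    FloatObjectInspector floatOI = PrimitiveObjectInspectorFactory.writableFloatObjectInspector;
    // get(...) extracts the primitive float carried inside the FloatWritable.
    float value = floatOI.get(new FloatWritable(10669.45F));
    System.out.println(value);   // prints 10669.45
  }
}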
Use of org.apache.hadoop.io.FloatWritable in project hive by apache.
The class TestHBaseSerDe, method testHBaseSerDeWithTimestamp.
@Test
public void testHBaseSerDeWithTimestamp() throws SerDeException {
  // Create the SerDe
  HBaseSerDe serDe = new HBaseSerDe();
  Configuration conf = new Configuration();
  Properties tbl = createPropertiesI_I();
  long putTimestamp = 1;
  tbl.setProperty(HBaseSerDe.HBASE_PUT_TIMESTAMP, Long.toString(putTimestamp));
  serDe.initialize(conf, tbl, null);
  byte[] cfa = "cola".getBytes();
  byte[] cfb = "colb".getBytes();
  byte[] cfc = "colc".getBytes();
  byte[] qualByte = "byte".getBytes();
  byte[] qualShort = "short".getBytes();
  byte[] qualInt = "int".getBytes();
  byte[] qualLong = "long".getBytes();
  byte[] qualFloat = "float".getBytes();
  byte[] qualDouble = "double".getBytes();
  byte[] qualString = "string".getBytes();
  byte[] qualBool = "boolean".getBytes();
  byte[] rowKey = Bytes.toBytes("test-row1");
  // Data
  List<Cell> kvs = new ArrayList<Cell>();
  kvs.add(new KeyValue(rowKey, cfa, qualByte, Bytes.toBytes("123")));
  kvs.add(new KeyValue(rowKey, cfb, qualShort, Bytes.toBytes("456")));
  kvs.add(new KeyValue(rowKey, cfc, qualInt, Bytes.toBytes("789")));
  kvs.add(new KeyValue(rowKey, cfa, qualLong, Bytes.toBytes("1000")));
  kvs.add(new KeyValue(rowKey, cfb, qualFloat, Bytes.toBytes("-0.01")));
  kvs.add(new KeyValue(rowKey, cfc, qualDouble, Bytes.toBytes("5.3")));
  kvs.add(new KeyValue(rowKey, cfa, qualString, Bytes.toBytes("Hadoop, HBase, and Hive")));
  kvs.add(new KeyValue(rowKey, cfb, qualBool, Bytes.toBytes("true")));
  Collections.sort(kvs, KeyValue.COMPARATOR);
  Result r = Result.create(kvs);
  Put p = new Put(rowKey, putTimestamp);
  p.addColumn(cfa, qualByte, Bytes.toBytes("123"));
  p.addColumn(cfb, qualShort, Bytes.toBytes("456"));
  p.addColumn(cfc, qualInt, Bytes.toBytes("789"));
  p.addColumn(cfa, qualLong, Bytes.toBytes("1000"));
  p.addColumn(cfb, qualFloat, Bytes.toBytes("-0.01"));
  p.addColumn(cfc, qualDouble, Bytes.toBytes("5.3"));
  p.addColumn(cfa, qualString, Bytes.toBytes("Hadoop, HBase, and Hive"));
  p.addColumn(cfb, qualBool, Bytes.toBytes("true"));
  Object[] expectedFieldsData = {
      new Text("test-row1"),
      new ByteWritable((byte) 123),
      new ShortWritable((short) 456),
      new IntWritable(789),
      new LongWritable(1000),
      new FloatWritable(-0.01F),
      new DoubleWritable(5.3),
      new Text("Hadoop, HBase, and Hive"),
      new BooleanWritable(true)
  };
  deserializeAndSerialize(serDe, r, p, expectedFieldsData);
}
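Note that this test stores the float cell as the UTF-8 string "-0.01" rather than as four IEEE-754 bytes, which is why the expected field is new FloatWritable(-0.01F). A minimal sketch of decoding such a string-encoded cell value back into a FloatWritable, assuming HBase's Bytes utility (the class name is illustrative):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.FloatWritable;

public class StringEncodedFloatSketch {
  public static void main(String[] args) {
    // The cell value as written by the test: the characters "-0.01", not binary float bytes.
    byte[] cellValue = Bytes.toBytes("-0.01");

    // Decode back to text, then parse into the Writable wrapper.
    FloatWritable fw = new FloatWritable(Float.parseFloat(Bytes.toString(cellValue)));
    System.out.println(fw.get());   // prints -0.01
  }
}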
Use of org.apache.hadoop.io.FloatWritable in project hive by apache.
The class TestHBaseSerDe, method testHBaseSerDeII.
@Test
public void testHBaseSerDeII() throws SerDeException {
  byte[] cfa = "cfa".getBytes();
  byte[] cfb = "cfb".getBytes();
  byte[] cfc = "cfc".getBytes();
  byte[] qualByte = "byte".getBytes();
  byte[] qualShort = "short".getBytes();
  byte[] qualInt = "int".getBytes();
  byte[] qualLong = "long".getBytes();
  byte[] qualFloat = "float".getBytes();
  byte[] qualDouble = "double".getBytes();
  byte[] qualString = "string".getBytes();
  byte[] qualBool = "boolean".getBytes();
  byte[] rowKey = Bytes.toBytes("test-row-2");
  // Data
  List<Cell> kvs = new ArrayList<Cell>();
  kvs.add(new KeyValue(rowKey, cfa, qualByte, new byte[] { Byte.MIN_VALUE }));
  kvs.add(new KeyValue(rowKey, cfb, qualShort, Bytes.toBytes(Short.MIN_VALUE)));
  kvs.add(new KeyValue(rowKey, cfc, qualInt, Bytes.toBytes(Integer.MIN_VALUE)));
  kvs.add(new KeyValue(rowKey, cfa, qualLong, Bytes.toBytes(Long.MIN_VALUE)));
  kvs.add(new KeyValue(rowKey, cfb, qualFloat, Bytes.toBytes(Float.MIN_VALUE)));
  kvs.add(new KeyValue(rowKey, cfc, qualDouble, Bytes.toBytes(Double.MAX_VALUE)));
  kvs.add(new KeyValue(rowKey, cfa, qualString, Bytes.toBytes("Hadoop, HBase, and Hive Again!")));
  kvs.add(new KeyValue(rowKey, cfb, qualBool, Bytes.toBytes(false)));
  // When using only HBase 2, we could change this to
  // Collections.sort(kvs, CellComparator.COMPARATOR);
  Collections.sort(kvs, KeyValue.COMPARATOR);
  Result r = Result.create(kvs);
  Put p = new Put(rowKey);
  p.addColumn(cfa, qualByte, new byte[] { Byte.MIN_VALUE });
  p.addColumn(cfb, qualShort, Bytes.toBytes(Short.MIN_VALUE));
  p.addColumn(cfc, qualInt, Bytes.toBytes(Integer.MIN_VALUE));
  p.addColumn(cfa, qualLong, Bytes.toBytes(Long.MIN_VALUE));
  p.addColumn(cfb, qualFloat, Bytes.toBytes(Float.MIN_VALUE));
  p.addColumn(cfc, qualDouble, Bytes.toBytes(Double.MAX_VALUE));
  p.addColumn(cfa, qualString, Bytes.toBytes("Hadoop, HBase, and Hive Again!"));
  p.addColumn(cfb, qualBool, Bytes.toBytes(false));
  Object[] expectedFieldsData = {
      new Text("test-row-2"),
      new ByteWritable(Byte.MIN_VALUE),
      new ShortWritable(Short.MIN_VALUE),
      new IntWritable(Integer.MIN_VALUE),
      new LongWritable(Long.MIN_VALUE),
      new FloatWritable(Float.MIN_VALUE),
      new DoubleWritable(Double.MAX_VALUE),
      new Text("Hadoop, HBase, and Hive Again!"),
      new BooleanWritable(false)
  };
  // Create, initialize, and test the SerDe
  HBaseSerDe serDe = new HBaseSerDe();
  Configuration conf = new Configuration();
  Properties tbl = createPropertiesII_I();
  serDe.initialize(conf, tbl, null);
  deserializeAndSerialize(serDe, r, p, expectedFieldsData);
  serDe = new HBaseSerDe();
  conf = new Configuration();
  tbl = createPropertiesII_II();
  serDe.initialize(conf, tbl, null);
  deserializeAndSerialize(serDe, r, p, expectedFieldsData);
  serDe = new HBaseSerDe();
  conf = new Configuration();
  tbl = createPropertiesII_III();
  serDe.initialize(conf, tbl, null);
  deserializeAndSerialize(serDe, r, p, expectedFieldsData);
}
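Unlike the previous test, this one stores the float column in its 4-byte binary form via Bytes.toBytes(Float.MIN_VALUE). A minimal sketch of the corresponding binary round trip into a FloatWritable, again assuming HBase's Bytes utility (the class name is illustrative):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.FloatWritable;

public class BinaryEncodedFloatSketch {
  public static void main(String[] args) {
    // 4-byte big-endian IEEE-754 encoding, as used for the qualFloat cell above.
    byte[] cellValue = Bytes.toBytes(Float.MIN_VALUE);

    // Bytes.toFloat reverses the encoding; wrap the result for Writable-based code.
    FloatWritable fw = new FloatWritable(Bytes.toFloat(cellValue));
    System.out.println(fw.get() == Float.MIN_VALUE);   // prints true
  }
}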