Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.timestampTypeInfo in project hive by apache.
From the class TestGenericUDFOPMinus, method testTimestampMinusIntervalDayTime:
@Test
public void testTimestampMinusIntervalDayTime() throws Exception {
  GenericUDFOPMinus udf = new GenericUDFOPMinus();
  TimestampWritable left = new TimestampWritable(Timestamp.valueOf("2001-01-02 2:3:4.567"));
  HiveIntervalDayTimeWritable right =
      new HiveIntervalDayTimeWritable(HiveIntervalDayTime.valueOf("1 2:3:4.567"));
  ObjectInspector[] inputOIs = {
      PrimitiveObjectInspectorFactory.writableTimestampObjectInspector,
      PrimitiveObjectInspectorFactory.writableHiveIntervalDayTimeObjectInspector
  };
  DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
  PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
  Assert.assertEquals(TypeInfoFactory.timestampTypeInfo, oi.getTypeInfo());
  TimestampWritable res = (TimestampWritable) udf.evaluate(args);
  Assert.assertEquals(Timestamp.valueOf("2001-01-01 00:00:00"), res.getTimestamp());
}
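For contrast, here is a minimal sketch (not taken from the Hive test suite) of the neighbouring case, written under the assumption that GenericUDFOPMinus types a timestamp-minus-timestamp expression as interval_day_time, as Hive's interval arithmetic rules suggest; the input values and expected interval are illustrative:

GenericUDFOPMinus udf = new GenericUDFOPMinus();
TimestampWritable left = new TimestampWritable(Timestamp.valueOf("2001-01-02 02:03:04.567"));
TimestampWritable right = new TimestampWritable(Timestamp.valueOf("2001-01-01 00:00:00"));
ObjectInspector[] inputOIs = {
    PrimitiveObjectInspectorFactory.writableTimestampObjectInspector,
    PrimitiveObjectInspectorFactory.writableTimestampObjectInspector
};
DeferredObject[] args = { new DeferredJavaObject(left), new DeferredJavaObject(right) };
PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs);
// Assumption: two timestamp operands resolve to an interval_day_time result type.
Assert.assertEquals(TypeInfoFactory.intervalDayTimeTypeInfo, oi.getTypeInfo());
HiveIntervalDayTimeWritable res = (HiveIntervalDayTimeWritable) udf.evaluate(args);
Assert.assertEquals(HiveIntervalDayTime.valueOf("1 2:3:4.567"), res.getHiveIntervalDayTime());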
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.timestampTypeInfo in project hive by apache.
From the class DruidSerDe, method inferSchema:
/* Select query */
private void inferSchema(SelectQuery query, List<String> columnNames,
    List<PrimitiveTypeInfo> columnTypes, String address) throws SerDeException {
  // Timestamp column
  columnNames.add(DruidTable.DEFAULT_TIMESTAMP_COLUMN);
  columnTypes.add(TypeInfoFactory.timestampTypeInfo);
  // Dimension columns
  for (DimensionSpec ds : query.getDimensions()) {
    columnNames.add(ds.getOutputName());
    columnTypes.add(TypeInfoFactory.stringTypeInfo);
  }
  // The type for metric columns is not explicit in the query, thus in this case
  // we need to emit a metadata query to know their type
  SegmentMetadataQueryBuilder builder = new Druids.SegmentMetadataQueryBuilder();
  builder.dataSource(query.getDataSource());
  builder.merge(true);
  builder.analysisTypes();
  SegmentMetadataQuery metadataQuery = builder.build();
  // Execute query in Druid
  SegmentAnalysis schemaInfo;
  try {
    schemaInfo = submitMetadataRequest(address, metadataQuery);
  } catch (IOException e) {
    throw new SerDeException(e);
  }
  if (schemaInfo == null) {
    throw new SerDeException("Connected to Druid but could not retrieve datasource information");
  }
  for (String metric : query.getMetrics()) {
    columnNames.add(metric);
    columnTypes.add(DruidSerDeUtils.convertDruidToHiveType(schemaInfo.getColumns().get(metric).getType()));
  }
}
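The metric type conversion itself is delegated to DruidSerDeUtils.convertDruidToHiveType, which is not shown above. A rough sketch of what such a Druid-to-Hive mapping can look like follows; the helper name toHiveType and the exact cases are illustrative assumptions, and the authoritative mapping lives in DruidSerDeUtils:

// Illustrative only: a plausible mapping from Druid column types to Hive primitive types.
// The real conversion is DruidSerDeUtils.convertDruidToHiveType; its cases may differ.
private static PrimitiveTypeInfo toHiveType(String druidType) {
  switch (druidType.toUpperCase()) {
    case "LONG":
      return TypeInfoFactory.longTypeInfo;
    case "FLOAT":
      return TypeInfoFactory.floatTypeInfo;
    default:
      // Anything unrecognized is treated as a string column.
      return TypeInfoFactory.stringTypeInfo;
  }
}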
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.timestampTypeInfo in project hive by apache.
From the class TestMapJoinOperator, method doTestString:
public void doTestString(long seed, VectorMapJoinVariation vectorMapJoinVariation) throws Exception {
  int rowCount = 10000;
  HiveConf hiveConf = new HiveConf();
  String[] bigTableColumnNames = new String[] { "b1" };
  TypeInfo[] bigTableTypeInfos = new TypeInfo[] { TypeInfoFactory.stringTypeInfo };
  int[] bigTableKeyColumnNums = new int[] { 0 };
  String[] smallTableValueColumnNames = new String[] { "sv1", "sv2" };
  TypeInfo[] smallTableValueTypeInfos =
      new TypeInfo[] { TypeInfoFactory.dateTypeInfo, TypeInfoFactory.timestampTypeInfo };
  int[] bigTableRetainColumnNums = new int[] { 0 };
  int[] smallTableRetainKeyColumnNums = new int[] {};
  int[] smallTableRetainValueColumnNums = new int[] { 0, 1 };
  SmallTableGenerationParameters smallTableGenerationParameters = new SmallTableGenerationParameters();
  // ----------------------------------------------------------------------------------------------
  MapJoinTestDescription testDesc = new MapJoinTestDescription(
      hiveConf, vectorMapJoinVariation,
      bigTableColumnNames, bigTableTypeInfos, bigTableKeyColumnNums,
      smallTableValueColumnNames, smallTableValueTypeInfos,
      bigTableRetainColumnNums, smallTableRetainKeyColumnNums, smallTableRetainValueColumnNums,
      smallTableGenerationParameters);
  // Prepare data. Good for ANY implementation variation.
  MapJoinTestData testData = new MapJoinTestData(rowCount, testDesc, seed, seed * 10);
  executeTest(testDesc, testData);
}
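A caller typically sweeps every join variation with a fixed seed so that generated data, and any failure, is reproducible. A hypothetical driver, assuming VectorMapJoinVariation is an enum (its values() call is the only thing assumed here):

// Hypothetical driver: exercise the string map-join test for every variation.
long seed = 1234L;  // any fixed seed keeps the generated data reproducible
for (VectorMapJoinVariation vectorMapJoinVariation : VectorMapJoinVariation.values()) {
  doTestString(seed, vectorMapJoinVariation);
}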
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.timestampTypeInfo in project hive by apache.
From the class TestVectorGenericDateExpressions, method testDateAddColScalar:
@Test
public void testDateAddColScalar() throws HiveException {
  for (PrimitiveCategory colType1 : dateTimestampStringTypes) {
    testDateAddColScalar(colType1, true);
  }
  VectorExpression udf = new VectorUDFDateAddColScalar(0, 0, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.stringTypeInfo, TypeInfoFactory.timestampTypeInfo });
  udf.transientInit();
  VectorizedRowBatch batch = new VectorizedRowBatch(2, 1);
  batch.cols[0] = new BytesColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
  byte[] bytes = "error".getBytes(utf8);
  bcv.vector[0] = bytes;
  bcv.start[0] = 0;
  bcv.length[0] = bytes.length;
  udf.evaluate(batch);
  Assert.assertEquals(batch.cols[1].isNull[0], true);
}
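The tail of the test feeds the unparseable string "error" into the input column and expects the output column to be nulled out. For comparison, a minimal sketch of the same expression wiring with a parseable date string; the intTypeInfo scalar type and the non-null expectation are assumptions of this sketch, not assertions copied from the Hive test:

// Illustrative sketch: same VectorUDFDateAddColScalar wiring, but with a well-formed date.
VectorExpression add = new VectorUDFDateAddColScalar(0, 0, 1);
add.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.stringTypeInfo, TypeInfoFactory.intTypeInfo });
add.transientInit();
VectorizedRowBatch b = new VectorizedRowBatch(2, 1);
b.cols[0] = new BytesColumnVector(1);
b.cols[1] = new LongColumnVector(1);
BytesColumnVector in = (BytesColumnVector) b.cols[0];
byte[] day = "2001-01-01".getBytes(utf8);
in.vector[0] = day;
in.start[0] = 0;
in.length[0] = day.length;
add.evaluate(b);
// Assumption: a parseable input date produces a non-null output in column 1.
Assert.assertFalse(b.cols[1].isNull[0]);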
Use of org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory.timestampTypeInfo in project hive by apache.
From the class TestVectorGenericDateExpressions, method testDateDiffColScalar:
@Test
public void testDateDiffColScalar() throws HiveException {
  for (PrimitiveCategory colType1 : dateTimestampStringTypes) {
    for (PrimitiveCategory scalarType2 : dateTimestampStringTypes) {
      LongColumnVector date1 = newRandomLongColumnVector(10000, size);
      LongColumnVector output = new LongColumnVector(size);
      VectorizedRowBatch batch = new VectorizedRowBatch(2, size);
      batch.cols[0] = castTo(date1, colType1);
      batch.cols[1] = output;
      long scalar2 = newRandom(1000);
      validateDateDiff(batch, date1, scalar2, colType1, scalarType2);
      TestVectorizedRowBatch.addRandomNulls(date1);
      batch.cols[0] = castTo(date1, colType1);
      validateDateDiff(batch, date1, scalar2, colType1, scalarType2);
    }
  }
  // Unparseable column values against a long scalar: the output should be null.
  VectorExpression udf;
  byte[] bytes = "error".getBytes(utf8);
  VectorizedRowBatch batch = new VectorizedRowBatch(2, 1);
  udf = new VectorUDFDateDiffColScalar(0, 0L, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.stringTypeInfo });
  udf.transientInit();
  batch.cols[0] = new BytesColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  BytesColumnVector bcv = (BytesColumnVector) batch.cols[0];
  bcv.vector[0] = bytes;
  bcv.start[0] = 0;
  bcv.length[0] = bytes.length;
  udf.evaluate(batch);
  Assert.assertEquals(batch.cols[1].isNull[0], true);
  // Unparseable scalar bytes against a long column: the output should again be null.
  udf = new VectorUDFDateDiffColScalar(0, bytes, 1);
  udf.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.timestampTypeInfo, TypeInfoFactory.stringTypeInfo });
  udf.transientInit();
  batch.cols[0] = new LongColumnVector(1);
  batch.cols[1] = new LongColumnVector(1);
  udf.evaluate(batch);
  Assert.assertEquals(batch.cols[1].isNull[0], true);
}
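For completeness, a minimal sketch of the corresponding happy path, using a parseable scalar date string against a valid date column; the dateTypeInfo input type and the non-null expectation are assumptions of the sketch rather than assertions from the Hive test:

// Illustrative counterpart: a parseable scalar date string against a valid date column.
byte[] scalarDay = "1970-01-02".getBytes(utf8);
VectorExpression diff = new VectorUDFDateDiffColScalar(0, scalarDay, 1);
diff.setInputTypeInfos(new TypeInfo[] { TypeInfoFactory.dateTypeInfo, TypeInfoFactory.stringTypeInfo });
diff.transientInit();
VectorizedRowBatch b = new VectorizedRowBatch(2, 1);
LongColumnVector dateCol = new LongColumnVector(1);
dateCol.vector[0] = 0;  // epoch day 0 = 1970-01-01
b.cols[0] = dateCol;
b.cols[1] = new LongColumnVector(1);
diff.evaluate(b);
// Assumption: with both inputs parseable, the datediff result in column 1 is not null.
Assert.assertFalse(b.cols[1].isNull[0]);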