
Example 41 with ObjectInspectorFactory.getReflectionObjectInspector

Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getReflectionObjectInspector in project hive by apache.
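
As background for the examples below, here is a minimal sketch of what getReflectionObjectInspector produces. The SimplePair class is hypothetical (the tests use their own row classes such as InnerStruct, BigRow and MyRow); for a plain Java class the factory returns a StructObjectInspector whose fields mirror the class's fields, which is what lets the ORC writers in these tests accept arbitrary row objects.

import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructField;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;

public class ReflectionInspectorSketch {

    // Hypothetical row class; the tests below use InnerStruct, BigRow and MyRow instead.
    static class SimplePair {
        int id;
        String name;
    }

    public static void main(String[] args) {
        ObjectInspector inspector;
        // The tests synchronize on a shared class before calling the factory;
        // this sketch follows the same pattern.
        synchronized (ReflectionInspectorSketch.class) {
            inspector = ObjectInspectorFactory.getReflectionObjectInspector(
                SimplePair.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
        }
        // For a plain Java class the result is a struct inspector over its fields.
        StructObjectInspector soi = (StructObjectInspector) inspector;
        for (StructField field : soi.getAllStructFieldRefs()) {
            System.out.println(field.getFieldName() + " : "
                + field.getFieldObjectInspector().getTypeName());
        }
    }
}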

From class TestOrcFile, method testSnappy.

/**
 * Read and write a randomly generated snappy file.
 * @throws Exception
 */
@Test
public void testSnappy() throws Exception {
    ObjectInspector inspector;
    synchronized (TestOrcFile.class) {
        inspector = ObjectInspectorFactory.getReflectionObjectInspector(InnerStruct.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    Writer writer = OrcFile.createWriter(testFilePath, OrcFile.writerOptions(conf).inspector(inspector).stripeSize(1000).compress(CompressionKind.SNAPPY).bufferSize(100));
    Random rand = new Random(12);
    for (int i = 0; i < 10000; ++i) {
        writer.addRow(new InnerStruct(rand.nextInt(), Integer.toHexString(rand.nextInt())));
    }
    writer.close();
    Reader reader = OrcFile.createReader(testFilePath, OrcFile.readerOptions(conf).filesystem(fs));
    RecordReader rows = reader.rows();
    rand = new Random(12);
    OrcStruct row = null;
    for (int i = 0; i < 10000; ++i) {
        assertEquals(true, rows.hasNext());
        row = (OrcStruct) rows.next(row);
        assertEquals(rand.nextInt(), ((IntWritable) row.getFieldValue(0)).get());
        assertEquals(Integer.toHexString(rand.nextInt()), row.getFieldValue(1).toString());
    }
    assertEquals(false, rows.hasNext());
    rows.close();
}
Also used : HiveDecimalObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector) BooleanObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector) ShortObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) MapObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) FloatObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector) StringObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector) ListObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector) IntObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector) LongObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector) BinaryObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector) ByteObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector) DoubleObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector) TimestampObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector) Random(java.util.Random) Test(org.junit.Test)
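
The test regenerates its expected values by re-seeding java.util.Random with the same seed (12) on the read pass instead of buffering what was written. A standalone sketch of that idiom, independent of ORC:

import java.util.Random;

public class SeededRoundTripSketch {
    public static void main(String[] args) {
        final long seed = 12;
        // "Write" phase: draw values from a seeded PRNG.
        Random writeRand = new Random(seed);
        int[] written = new int[5];
        for (int i = 0; i < written.length; ++i) {
            written[i] = writeRand.nextInt();
        }
        // "Read" phase: re-seed with the same value to re-derive the expected sequence.
        Random readRand = new Random(seed);
        for (int i = 0; i < written.length; ++i) {
            if (written[i] != readRand.nextInt()) {
                throw new AssertionError("mismatch at index " + i);
            }
        }
        System.out.println("round trip verified");
    }
}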

Example 42 with ObjectInspectorFactory.getReflectionObjectInspector

Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getReflectionObjectInspector in project hive by apache.

From class TestOrcRawRecordMerger, method testRecordReaderNewBaseAndDelta.

/**
 * Test the RecordReader when there is a new base and a delta.
 * This test creates multiple stripes in both base and delta files which affects how many splits
 * are created on read.  With ORC-228 this could be done in E2E fashion with a query or
 * streaming ingest writing data.
 * @see #testRecordReaderOldBaseAndDelta()
 * @throws Exception
 */
@Test
public void testRecordReaderNewBaseAndDelta() throws Exception {
    final int BUCKET = 11;
    Configuration conf = new Configuration();
    OrcOutputFormat of = new OrcOutputFormat();
    FileSystem fs = FileSystem.getLocal(conf);
    Path root = new Path(tmpDir, "testRecordReaderNewBaseAndDelta").makeQualified(fs);
    fs.delete(root, true);
    ObjectInspector inspector;
    synchronized (TestOrcFile.class) {
        inspector = ObjectInspectorFactory.getReflectionObjectInspector(BigRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // write the base
    MemoryManager mgr = new MemoryManagerImpl(conf) {

        int rowsAddedSinceCheck = 0;

        @Override
        public synchronized void addedRow(int rows) throws IOException {
            rowsAddedSinceCheck += rows;
            if (rowsAddedSinceCheck >= 2) {
                notifyWriters();
                rowsAddedSinceCheck = 0;
            }
        }
    };
    // make 5 stripes with 2 rows each
    OrcRecordUpdater.OrcOptions options = (OrcRecordUpdater.OrcOptions) new OrcRecordUpdater.OrcOptions(conf).writingBase(true).minimumWriteId(0).maximumWriteId(0).bucket(BUCKET).inspector(inspector).filesystem(fs);
    final int BUCKET_PROPERTY = BucketCodec.V1.encode(options);
    options.orcOptions(OrcFile.writerOptions(conf).stripeSize(1).blockPadding(false).compress(CompressionKind.NONE).memory(mgr).batchSize(2));
    options.finalDestination(root);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    String[] values = new String[] { "ignore.1", "0.1", "ignore.2", "ignore.3", "2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6" };
    for (int i = 0; i < values.length; ++i) {
        ru.insert(0, new BigRow(i, i, values[i], i, i));
    }
    ru.close(false);
    // write a delta
    options.writingBase(false).minimumWriteId(1).maximumWriteId(1).recordIdColumn(5);
    ru = of.getRecordUpdater(root, options);
    values = new String[] { "0.0", null, null, "1.1", null, null, null, "ignore.7" };
    for (int i = 0; i < values.length; ++i) {
        if (values[i] != null) {
            ru.update(1, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY));
        }
    }
    ru.delete(1, new BigRow(9, 0, BUCKET_PROPERTY));
    ru.close(false);
    // write a delta
    options.minimumWriteId(100).maximumWriteId(100);
    ru = of.getRecordUpdater(root, options);
    values = new String[] { null, null, "1.0", null, null, null, null, "3.1" };
    for (int i = 0; i < values.length - 1; ++i) {
        if (values[i] != null) {
            ru.update(100, new BigRow(i, i, values[i], i, i, i, 0, BUCKET_PROPERTY));
        }
    }
    // do this before the next update so that delete_delta is properly sorted
    ru.delete(100, new BigRow(8, 0, BUCKET_PROPERTY));
    // because row 8 was updated and thus has a different RecordIdentifier now
    ru.update(100, new BigRow(7, 7, values[values.length - 1], 7, 7, 2, 1, BUCKET_PROPERTY));
    ru.close(false);
    MyResult[] expected = new MyResult[10];
    int k = 0;
    expected[k++] = new MyResult(0, "0.0");
    expected[k++] = new MyResult(1, "0.1");
    expected[k++] = new MyResult(2, "1.0");
    expected[k++] = new MyResult(3, "1.1");
    expected[k++] = new MyResult(4, "2.0");
    expected[k++] = new MyResult(5, "2.1");
    expected[k++] = new MyResult(6, "3.0");
    expected[k] = new MyResult(7, "3.1");
    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.min.split.size", "1");
    job.set("mapred.max.split.size", "2");
    job.set("mapred.input.dir", root.toString());
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, BigRow.getColumnNamesProperty());
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, BigRow.getColumnTypesProperty());
    AcidUtils.setAcidOperationalProperties(job, true, null);
    job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
    InputSplit[] splits = inf.getSplits(job, 5);
    // base has 10 rows, so 5 splits, 1 delta has 2 rows so 1 split, and 1 delta has 3 so 2 splits
    assertEquals(8, splits.length);
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr;
    for (InputSplit split : splits) {
        rr = inf.getRecordReader(split, job, Reporter.NULL);
        NullWritable key = rr.createKey();
        OrcStruct value = rr.createValue();
        while (rr.next(key, value)) {
            MyResult mr = new MyResult(Integer.parseInt(value.getFieldValue(0).toString()), value.getFieldValue(2).toString());
            int i = 0;
            for (; i < expected.length; i++) {
                if (mr.equals(expected[i])) {
                    expected[i] = null;
                    break;
                }
            }
            if (i >= expected.length) {
                // not found
                assertTrue("Found unexpected row: " + mr, false);
            }
        }
    }
    for (MyResult mr : expected) {
        assertTrue("Expected " + mr + " not found in any InputSplit", mr == null);
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) MemoryManagerImpl(org.apache.orc.impl.MemoryManagerImpl) FileSystem(org.apache.hadoop.fs.FileSystem) RecordUpdater(org.apache.hadoop.hive.ql.io.RecordUpdater) JobConf(org.apache.hadoop.mapred.JobConf) InputSplit(org.apache.hadoop.mapred.InputSplit) Path(org.apache.hadoop.fs.Path) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) MemoryManager(org.apache.orc.MemoryManager) NullWritable(org.apache.hadoop.io.NullWritable) InputFormat(org.apache.hadoop.mapred.InputFormat) Test(org.junit.Test)
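
For orientation, a small sketch of how the base/delta layout exercised above can be derived with the same helpers the test uses (AcidUtils.deltaSubdir and AcidUtils.createBucketFile); the root path and write ids here are placeholders, not values from the test.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;

public class AcidLayoutSketch {
    public static void main(String[] args) {
        Path root = new Path("/tmp/acid-table"); // placeholder table/partition root
        // delta subdirectory for write ids 1..1 (variant without a statement id)
        Path delta = new Path(root, AcidUtils.deltaSubdir(1, 1));
        // bucket file inside that delta for bucket 11
        Path bucketFile = AcidUtils.createBucketFile(delta, 11);
        System.out.println(bucketFile);
    }
}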

Example 43 with ObjectInspectorFactory.getReflectionObjectInspector

Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getReflectionObjectInspector in project hive by apache.

From class TestOrcRawRecordMerger, method testGetLogicalLength.

/**
 * {@link org.apache.hive.hcatalog.streaming.TestStreaming#testInterleavedTransactionBatchCommits} has more tests
 */
@Test
public void testGetLogicalLength() throws Exception {
    final int BUCKET = 0;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    OrcOutputFormat of = new OrcOutputFormat();
    Path root = new Path(tmpDir, "testEmpty").makeQualified(fs);
    fs.delete(root, true);
    ObjectInspector inspector;
    synchronized (TestOrcFile.class) {
        inspector = ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    /*create delta_1_1_0/bucket0 with 1 row and close the file*/
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).inspector(inspector).bucket(BUCKET).writingBase(false).minimumWriteId(1).maximumWriteId(1).finalDestination(root);
    Path delta1_1_0 = new Path(root, AcidUtils.deltaSubdir(options.getMinimumWriteId(), options.getMaximumWriteId(), options.getStatementId()));
    Path bucket0 = AcidUtils.createBucketFile(delta1_1_0, BUCKET);
    Path bucket0SideFile = OrcAcidUtils.getSideFile(bucket0);
    RecordUpdater ru = of.getRecordUpdater(root, options);
    ru.insert(options.getMaximumWriteId(), new MyRow("first"));
    ru.close(false);
    FileStatus bucket0File = fs.getFileStatus(bucket0);
    AcidUtils.getLogicalLength(fs, bucket0File);
    Assert.assertTrue("no " + bucket0, fs.exists(bucket0));
    Assert.assertFalse("unexpected " + bucket0SideFile, fs.exists(bucket0SideFile));
    // test getLogicalLength() w/o side file
    Assert.assertEquals("closed file size mismatch", bucket0File.getLen(), AcidUtils.getLogicalLength(fs, bucket0File));
    // create an empty (invalid) side file - make sure getLogicalLength() throws
    FSDataOutputStream flushLengths = fs.create(bucket0SideFile, true, 8);
    flushLengths.close();
    expectedException.expect(IOException.class);
    expectedException.expectMessage(bucket0SideFile.getName() + " found but is not readable");
    AcidUtils.getLogicalLength(fs, bucket0File);
}
Also used : Path(org.apache.hadoop.fs.Path) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) FileStatus(org.apache.hadoop.fs.FileStatus) Configuration(org.apache.hadoop.conf.Configuration) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) RecordUpdater(org.apache.hadoop.hive.ql.io.RecordUpdater) Test(org.junit.Test)
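
The side-file handling above reduces to a short sketch: OrcAcidUtils.getSideFile derives the companion *_flush_length path for a bucket file, and AcidUtils.getLogicalLength returns the physical file length when no side file is present (the first assertion in the test). The path below is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.orc.impl.OrcAcidUtils;

public class LogicalLengthSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        // Placeholder bucket file path
        Path bucketFile = new Path("/tmp/acid-table/delta_0000001_0000001/bucket_00000");
        Path sideFile = OrcAcidUtils.getSideFile(bucketFile); // .../bucket_00000_flush_length
        FileStatus stat = fs.getFileStatus(bucketFile);
        // With no side file present, the logical length equals the physical length.
        long logical = AcidUtils.getLogicalLength(fs, stat);
        System.out.println(sideFile + " -> logical length " + logical);
    }
}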

Example 44 with ObjectInspectorFactory.getReflectionObjectInspector

Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getReflectionObjectInspector in project hive by apache.

From class TestOrcRawRecordMerger, method testRecordReaderIncompleteDelta.

/**
 * @param use130Format true means use delta_0001_0001_0000 format, else delta_0001_0001
 */
private void testRecordReaderIncompleteDelta(boolean use130Format) throws Exception {
    final int BUCKET = 1;
    Configuration conf = new Configuration();
    OrcOutputFormat of = new OrcOutputFormat();
    FileSystem fs = FileSystem.getLocal(conf).getRaw();
    Path root = new Path(tmpDir, "testRecordReaderIncompleteDelta").makeQualified(fs);
    fs.delete(root, true);
    ObjectInspector inspector;
    synchronized (TestOrcFile.class) {
        inspector = ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    // write a base
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).writingBase(true).minimumWriteId(0).maximumWriteId(0).bucket(BUCKET).inspector(inspector).filesystem(fs).finalDestination(root);
    if (!use130Format) {
        options.statementId(-1);
    }
    RecordUpdater ru = of.getRecordUpdater(root, options);
    String[] values = new String[] { "1", "2", "3", "4", "5" };
    for (int i = 0; i < values.length; ++i) {
        ru.insert(0, new MyRow(values[i]));
    }
    ru.close(false);
    // write a delta
    options.writingBase(false).minimumWriteId(10).maximumWriteId(19);
    ru = of.getRecordUpdater(root, options);
    values = new String[] { "6", "7", "8" };
    for (int i = 0; i < values.length; ++i) {
        ru.insert(1, new MyRow(values[i]));
    }
    InputFormat inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.input.dir", root.toString());
    job.set("bucket_count", "2");
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
    AcidUtils.setAcidOperationalProperties(job, true, null);
    job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
    // read the keys before the delta is flushed
    InputSplit[] splits = inf.getSplits(job, 1);
    // 1 split since we only have 1 bucket file in base/.  delta is not flushed (committed) yet, i.e. empty
    assertEquals(1, splits.length);
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
    NullWritable key = rr.createKey();
    OrcStruct value = rr.createValue();
    System.out.println("Looking at split " + splits[0]);
    for (int i = 1; i < 6; ++i) {
        System.out.println("Checking row " + i);
        assertEquals(true, rr.next(key, value));
        assertEquals(Integer.toString(i), value.getFieldValue(0).toString());
    }
    assertEquals(false, rr.next(key, value));
    ru.flush();
    ru.flush();
    values = new String[] { "9", "10" };
    for (int i = 0; i < values.length; ++i) {
        ru.insert(3, new MyRow(values[i]));
    }
    ru.flush();
    splits = inf.getSplits(job, 1);
    assertEquals(2, splits.length);
    Path sideFile = new Path(root + "/" + (use130Format ? AcidUtils.deltaSubdir(10, 19, 0) : AcidUtils.deltaSubdir(10, 19)) + "/bucket_00001_flush_length");
    assertEquals(true, fs.exists(sideFile));
    assertEquals(32, fs.getFileStatus(sideFile).getLen());
    rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
    for (int i = 1; i <= 5; ++i) {
        assertEquals(true, rr.next(key, value));
        assertEquals(Integer.toString(i), value.getFieldValue(0).toString());
    }
    assertEquals(false, rr.next(key, value));
    rr = inf.getRecordReader(splits[1], job, Reporter.NULL);
    for (int i = 6; i < 11; ++i) {
        assertEquals("i=" + i, true, rr.next(key, value));
        assertEquals(Integer.toString(i), value.getFieldValue(0).toString());
    }
    assertEquals(false, rr.next(key, value));
}
Also used : Path(org.apache.hadoop.fs.Path) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) Configuration(org.apache.hadoop.conf.Configuration) NullWritable(org.apache.hadoop.io.NullWritable) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) InputFormat(org.apache.hadoop.mapred.InputFormat) FileSystem(org.apache.hadoop.fs.FileSystem) RecordUpdater(org.apache.hadoop.hive.ql.io.RecordUpdater) JobConf(org.apache.hadoop.mapred.JobConf) InputSplit(org.apache.hadoop.mapred.InputSplit)
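
The bucket_00001_flush_length side file asserted above is a sequence of 8-byte lengths recording how much of the still-open bucket file is safe to read; the next example reads such entries back with DataInputStream.readLong. A hedged sketch of dumping one of these side files, with a placeholder path:

import java.io.DataInputStream;
import java.io.EOFException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FlushLengthSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf).getRaw();
        // Placeholder path; the tests compute it with OrcAcidUtils.getSideFile(bucketFile).
        Path sideFile = new Path("/tmp/acid-table/delta_0000010_0000019/bucket_00001_flush_length");
        try (DataInputStream in = fs.open(sideFile)) {
            while (true) {
                long len = in.readLong(); // each entry is a readable length of the bucket file
                System.out.println("recorded length: " + len);
            }
        } catch (EOFException endOfEntries) {
            // EOF marks the end of the recorded lengths.
        }
    }
}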

Example 45 with ObjectInspectorFactory.getReflectionObjectInspector

Use of org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory.getReflectionObjectInspector in project hive by apache.

From class TestOrcRecordUpdater, method testWriter.

@Test
public void testWriter() throws Exception {
    Path root = new Path(workDir, "testWriter");
    Configuration conf = new Configuration();
    // Must use raw local because the checksummer doesn't honor flushes.
    FileSystem fs = FileSystem.getLocal(conf).getRaw();
    ObjectInspector inspector;
    synchronized (TestOrcFile.class) {
        inspector = ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
    }
    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).filesystem(fs).bucket(10).writingBase(false).minimumWriteId(10).maximumWriteId(19).inspector(inspector).reporter(Reporter.NULL).finalDestination(root);
    RecordUpdater updater = new OrcRecordUpdater(root, options);
    updater.insert(11, new MyRow("first"));
    updater.insert(11, new MyRow("second"));
    updater.insert(11, new MyRow("third"));
    updater.flush();
    updater.insert(12, new MyRow("fourth"));
    updater.insert(12, new MyRow("fifth"));
    updater.flush();
    // Check the stats
    assertEquals(5L, updater.getStats().getRowCount());
    Path bucketPath = AcidUtils.createFilename(root, options);
    Path sidePath = OrcAcidUtils.getSideFile(bucketPath);
    DataInputStream side = fs.open(sidePath);
    // read the stopping point for the first flush and make sure we only see
    // 3 rows
    long len = side.readLong();
    len = side.readLong();
    Reader reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs).maxLength(len));
    assertEquals(3, reader.getNumberOfRows());
    // read the second flush and make sure we see all 5 rows
    len = side.readLong();
    side.close();
    reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs).maxLength(len));
    assertEquals(5, reader.getNumberOfRows());
    RecordReader rows = reader.rows();
    // check the contents of the file
    assertEquals(true, rows.hasNext());
    OrcStruct row = (OrcStruct) rows.next(null);
    assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(row));
    assertEquals(11, OrcRecordUpdater.getCurrentTransaction(row));
    assertEquals(11, OrcRecordUpdater.getOriginalTransaction(row));
    assertEquals(10, getBucketId(row));
    assertEquals(0, OrcRecordUpdater.getRowId(row));
    assertEquals("first", OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(1, OrcRecordUpdater.getRowId(row));
    assertEquals(10, getBucketId(row));
    assertEquals("second", OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(2, OrcRecordUpdater.getRowId(row));
    assertEquals(10, getBucketId(row));
    assertEquals("third", OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(12, OrcRecordUpdater.getCurrentTransaction(row));
    assertEquals(12, OrcRecordUpdater.getOriginalTransaction(row));
    assertEquals(10, getBucketId(row));
    assertEquals(0, OrcRecordUpdater.getRowId(row));
    assertEquals("fourth", OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(true, rows.hasNext());
    row = (OrcStruct) rows.next(null);
    assertEquals(1, OrcRecordUpdater.getRowId(row));
    assertEquals("fifth", OrcRecordUpdater.getRow(row).getFieldValue(0).toString());
    assertEquals(false, rows.hasNext());
    // add one more record and close
    updater.insert(20, new MyRow("sixth"));
    updater.close(false);
    reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs));
    assertEquals(6, reader.getNumberOfRows());
    assertEquals(6L, updater.getStats().getRowCount());
    assertEquals(false, fs.exists(sidePath));
}
Also used : Path(org.apache.hadoop.fs.Path) ObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) Configuration(org.apache.hadoop.conf.Configuration) DataInputStream(java.io.DataInputStream) AcidOutputFormat(org.apache.hadoop.hive.ql.io.AcidOutputFormat) FileSystem(org.apache.hadoop.fs.FileSystem) RecordUpdater(org.apache.hadoop.hive.ql.io.RecordUpdater) Test(org.junit.Test)
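
The static accessors used above (getOperation, getCurrentTransaction, getRowId, getRow) unpack the wrapper struct that OrcRecordUpdater writes around each user row. A minimal sketch of scanning such a bucket file and printing that ACID metadata; the path is a placeholder and assumes a file written the way the test writes it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.io.orc.OrcFile;
import org.apache.hadoop.hive.ql.io.orc.OrcRecordUpdater;
import org.apache.hadoop.hive.ql.io.orc.OrcStruct;
import org.apache.hadoop.hive.ql.io.orc.Reader;
import org.apache.hadoop.hive.ql.io.orc.RecordReader;

public class AcidEventScanSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf).getRaw();
        // Placeholder bucket file written by an OrcRecordUpdater
        Path bucketPath = new Path("/tmp/acid-table/delta_0000010_0000019/bucket_00010");
        Reader reader = OrcFile.createReader(bucketPath,
            new OrcFile.ReaderOptions(conf).filesystem(fs));
        RecordReader rows = reader.rows();
        OrcStruct event = null;
        while (rows.hasNext()) {
            event = (OrcStruct) rows.next(event);
            // Each record is an ACID event struct wrapping the user row.
            System.out.println("op=" + OrcRecordUpdater.getOperation(event)
                + " txn=" + OrcRecordUpdater.getCurrentTransaction(event)
                + " rowId=" + OrcRecordUpdater.getRowId(event)
                + " row=" + OrcRecordUpdater.getRow(event));
        }
        rows.close();
    }
}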

Aggregations

StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) 73
Test (org.junit.Test) 64
ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) 60
Configuration (org.apache.hadoop.conf.Configuration) 25
StringObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector) 25
InputSplit (org.apache.hadoop.mapred.InputSplit) 25
BinaryObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.BinaryObjectInspector) 24
MapObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector) 23
Properties (java.util.Properties) 20
IntObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector) 20
ListObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector) 18
BooleanObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.BooleanObjectInspector) 18
ByteObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector) 18
DoubleObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector) 18
FloatObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector) 18
HiveDecimalObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveDecimalObjectInspector) 18
LongObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector) 18
ShortObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector) 18
TimestampObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.primitive.TimestampObjectInspector) 18
RecordWriter (org.apache.hadoop.mapred.RecordWriter) 18