
Example 1 with OrcInputFormat

Use of org.apache.hadoop.hive.ql.io.orc.OrcInputFormat in the Apache Hive project.

From class StreamingAssert, method readRecords:

List<Record> readRecords() throws Exception {
    if (currentDeltas.isEmpty()) {
        throw new AssertionError("No data");
    }
    InputFormat<NullWritable, OrcStruct> inputFormat = new OrcInputFormat();
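    // Configure the JobConf the way the ACID ORC reader expects: input
    // directory, bucket count, schema-evolution columns/types, and the
    // valid transaction list for this read.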
    JobConf job = new JobConf();
    job.set("mapred.input.dir", partitionLocation.toString());
    job.set("bucket_count", Integer.toString(table.getSd().getNumBuckets()));
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
    job.set(ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN.varname, "true");
    job.set(ValidTxnList.VALID_TXNS_KEY, txns.toString());
    InputSplit[] splits = inputFormat.getSplits(job, 1);
    assertEquals(1, splits.length);
    final AcidRecordReader<NullWritable, OrcStruct> recordReader = (AcidRecordReader<NullWritable, OrcStruct>) inputFormat.getRecordReader(splits[0], job, Reporter.NULL);
    NullWritable key = recordReader.createKey();
    OrcStruct value = recordReader.createValue();
    List<Record> records = new ArrayList<>();
    while (recordReader.next(key, value)) {
        RecordIdentifier recordIdentifier = recordReader.getRecordIdentifier();
        Record record = new Record(new RecordIdentifier(recordIdentifier.getTransactionId(), recordIdentifier.getBucketId(), recordIdentifier.getRowId()), value.toString());
        System.out.println(record);
        records.add(record);
    }
    recordReader.close();
    return records;
}
Also used: java.util.ArrayList, org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader, org.apache.hadoop.io.NullWritable, org.apache.hadoop.hive.ql.io.RecordIdentifier, org.apache.hadoop.hive.ql.io.orc.OrcStruct, org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.InputSplit
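
The Record wrapper used above is not part of this snippet. A minimal sketch of what it could look like, assuming it merely pairs the RecordIdentifier with the row's string form (the real class in StreamingAssert may differ):

// Hypothetical helper, inferred only from its usage in readRecords().
static class Record {

    private final RecordIdentifier recordIdentifier;

    private final String row;

    Record(RecordIdentifier recordIdentifier, String row) {
        this.recordIdentifier = recordIdentifier;
        this.row = row;
    }

    @Override
    public String toString() {
        return recordIdentifier + "\t" + row;
    }
}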

Example 2 with OrcInputFormat

Use of org.apache.hadoop.hive.ql.io.orc.OrcInputFormat in the Apache Hive project.

From class StreamingAssert, method readRecords (overload taking the expected number of splits):

/**
 * TODO: this would be more flexible as a SQL select statement rather than using InputFormat directly;
 * see {@link org.apache.hive.hcatalog.streaming.TestStreaming#checkDataWritten2(Path, long, long, int, String, String...)}
 * @param numSplitsExpected number of input splits the read is expected to produce
 * @return the records read from the current deltas
 * @throws Exception on any read failure
 */
List<Record> readRecords(int numSplitsExpected) throws Exception {
    if (currentDeltas.isEmpty()) {
        throw new AssertionError("No data");
    }
    InputFormat<NullWritable, OrcStruct> inputFormat = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.input.dir", partitionLocation.toString());
    job.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(table.getSd().getNumBuckets()));
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
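    // Mark the scan as a transactional (ACID) read and supply the set of
    // valid write ids so the delta files are interpreted as ACID data.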
    AcidUtils.setAcidOperationalProperties(job, true, null);
    job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
    job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString());
    InputSplit[] splits = inputFormat.getSplits(job, 1);
    assertEquals(numSplitsExpected, splits.length);
    List<Record> records = new ArrayList<>();
    for (InputSplit is : splits) {
        final AcidRecordReader<NullWritable, OrcStruct> recordReader = (AcidRecordReader<NullWritable, OrcStruct>) inputFormat.getRecordReader(is, job, Reporter.NULL);
        NullWritable key = recordReader.createKey();
        OrcStruct value = recordReader.createValue();
        while (recordReader.next(key, value)) {
            RecordIdentifier recordIdentifier = recordReader.getRecordIdentifier();
            Record record = new Record(new RecordIdentifier(recordIdentifier.getWriteId(), recordIdentifier.getBucketProperty(), recordIdentifier.getRowId()), value.toString());
            System.out.println(record);
            records.add(record);
        }
        recordReader.close();
    }
    return records;
}
Also used: java.util.ArrayList, org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader, org.apache.hadoop.io.NullWritable, org.apache.hadoop.hive.ql.io.RecordIdentifier, org.apache.hadoop.hive.ql.io.orc.OrcStruct, org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.InputSplit
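
Note the API shift between the two overloads: Example 1 keys the read off ValidTxnList and reads getTransactionId()/getBucketId() from the RecordIdentifier, while this one uses ValidWriteIdList with getWriteId()/getBucketProperty(), matching the later Hive split between transaction ids and per-table write ids. A hypothetical call, assuming a StreamingAssert instance named streamingAssert over a partition whose deltas hold two bucket files:

// Expect one split per bucket file (an assumption about the test layout).
List<Record> records = streamingAssert.readRecords(2);
for (Record r : records) {
    System.out.println(r);
}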

Example 3 with OrcInputFormat

Use of org.apache.hadoop.hive.ql.io.orc.OrcInputFormat in the Apache Hive project.

From class TestCompactor, method checkExpectedTxnsPresent:

private void checkExpectedTxnsPresent(Path base, Path[] deltas, String columnNamesProperty, String columnTypesProperty, int bucket, long min, long max, int numBuckets) throws IOException {
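    // Permissive stub: every write id (and every base) is reported valid, so
    // the raw reader below surfaces rows from all of the given deltas.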
    ValidWriteIdList writeIdList = new ValidWriteIdList() {

        @Override
        public String getTableName() {
            return "AcidTable";
        }

        @Override
        public boolean isWriteIdValid(long writeid) {
            return true;
        }

        @Override
        public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) {
            return RangeResponse.ALL;
        }

        @Override
        public String writeToString() {
            return "";
        }

        @Override
        public void readFromString(String src) {
        }

        @Override
        public Long getMinOpenWriteId() {
            return null;
        }

        @Override
        public long getHighWatermark() {
            return Long.MAX_VALUE;
        }

        @Override
        public long[] getInvalidWriteIds() {
            return new long[0];
        }

        @Override
        public boolean isValidBase(long writeid) {
            return true;
        }

        @Override
        public boolean isWriteIdAborted(long writeid) {
            return true;
        }

        @Override
        public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) {
            return RangeResponse.ALL;
        }
    };
    OrcInputFormat aif = new OrcInputFormat();
    Configuration conf = new Configuration();
    conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty);
    conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty);
    conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets));
    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
    AcidInputFormat.RawReader<OrcStruct> reader = aif.getRawReader(conf, true, bucket, writeIdList, base, deltas);
    RecordIdentifier identifier = reader.createKey();
    OrcStruct value = reader.createValue();
    long currentTxn = min;
    boolean seenCurrentTxn = false;
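    // Scan all rows: the first write id seen must equal min, the id may only
    // step up by exactly one, and the final id must equal max.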
    while (reader.next(identifier, value)) {
        if (!seenCurrentTxn) {
            Assert.assertEquals(currentTxn, identifier.getWriteId());
            seenCurrentTxn = true;
        }
        if (currentTxn != identifier.getWriteId()) {
            Assert.assertEquals(currentTxn + 1, identifier.getWriteId());
            currentTxn++;
        }
    }
    Assert.assertEquals(max, currentTxn);
}
Also used: org.apache.hadoop.hive.ql.io.AcidInputFormat, org.apache.hadoop.hive.ql.io.RecordIdentifier, org.apache.hadoop.hive.ql.io.orc.OrcStruct, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, org.apache.hadoop.hive.common.ValidWriteIdList
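
The anonymous ValidWriteIdList above is a hand-rolled permissive stub. If the Hive version at hand provides the no-argument constructor of org.apache.hadoop.hive.common.ValidReaderWriteIdList (an assumption; check the version in use), a more compact stand-in could be:

// Sketch only: a reader-side list that treats all write ids as valid. Note
// the stub above also reports every id as aborted, which this one does not.
ValidWriteIdList writeIdList = new org.apache.hadoop.hive.common.ValidReaderWriteIdList();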

Example 4 with OrcInputFormat

Use of org.apache.hadoop.hive.ql.io.orc.OrcInputFormat in the Apache Hive project.

From class TestStreaming, method checkDataWritten:

/**
 * @deprecated use {@link #checkDataWritten2(Path, long, long, int, String, boolean, String...)} -
 * there is little value in using InputFormat directly
 */
@Deprecated
private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int buckets, int numExpectedFiles, String... records) throws Exception {
    ValidWriteIdList writeIds = msClient.getValidWriteIds(AcidUtils.getFullTableName(dbName, tblName));
    AcidUtils.Directory dir = AcidUtils.getAcidState(partitionPath, conf, writeIds);
    Assert.assertEquals(0, dir.getObsolete().size());
    Assert.assertEquals(0, dir.getOriginalFiles().size());
    List<AcidUtils.ParsedDelta> current = dir.getCurrentDirectories();
    System.out.println("Files found: ");
    for (AcidUtils.ParsedDelta pd : current) {
        System.out.println(pd.getPath().toString());
    }
    Assert.assertEquals(numExpectedFiles, current.size());
    // find the minimum and maximum write ids across the current delta directories
    long min = Long.MAX_VALUE;
    long max = Long.MIN_VALUE;
    for (AcidUtils.ParsedDelta pd : current) {
        if (pd.getMaxWriteId() > max) {
            max = pd.getMaxWriteId();
        }
        if (pd.getMinWriteId() < min) {
            min = pd.getMinWriteId();
        }
    }
    Assert.assertEquals(minTxn, min);
    Assert.assertEquals(maxTxn, max);
    InputFormat<NullWritable, OrcStruct> inf = new OrcInputFormat();
    JobConf job = new JobConf();
    job.set("mapred.input.dir", partitionPath.toString());
    job.set(BUCKET_COUNT, Integer.toString(buckets));
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
    job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
    AcidUtils.setAcidOperationalProperties(job, true, null);
    job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
    job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.toString());
    InputSplit[] splits = inf.getSplits(job, buckets);
    Assert.assertEquals(numExpectedFiles, splits.length);
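    // Only the first split is read below; any remaining splits are not
    // validated by this helper.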
    org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
    NullWritable key = rr.createKey();
    OrcStruct value = rr.createValue();
    for (String record : records) {
        Assert.assertTrue(rr.next(key, value));
        Assert.assertEquals(record, value.toString());
    }
    Assert.assertFalse(rr.next(key, value));
}
Also used: org.apache.hadoop.io.NullWritable, org.apache.hadoop.hive.ql.io.orc.OrcStruct, org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, org.apache.hadoop.hive.common.ValidWriteIdList, org.apache.hadoop.mapred.InputFormat, org.apache.hadoop.mapred.JobConf, org.apache.hadoop.mapred.InputSplit, org.apache.orc.impl.OrcAcidUtils, org.apache.hadoop.hive.ql.io.AcidUtils
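
Per the deprecation note, new tests should go through checkDataWritten2(Path, long, long, int, String, boolean, String...). Going only by that signature, a call might look like the sketch below; every argument after the file count is a guess at the parameter's meaning, not a confirmed contract:

// Hypothetical: the String is assumed to be a validation query and the
// boolean an execution flag; consult the real helper before relying on this.
checkDataWritten2(partitionPath, 1L, 2L, 1,
        "select id, msg from " + tblName + " order by id",
        true,
        "{1, Hello streaming}");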

Example 5 with OrcInputFormat

Use of org.apache.hadoop.hive.ql.io.orc.OrcInputFormat in the Apache Hive project.

From class CompactorTestUtil, method checkExpectedTxnsPresent:

static void checkExpectedTxnsPresent(Path base, Path[] deltas, String columnNamesProperty, String columnTypesProperty, int bucket, long min, long max, List<Integer> invalidWriteIDs, int numBuckets) throws IOException {
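    // Same permissive ValidWriteIdList stub as in TestCompactor above: every
    // write id is reported valid so the raw reader sees all rows.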
    ValidWriteIdList writeIdList = new ValidWriteIdList() {

        @Override
        public String getTableName() {
            return "AcidTable";
        }

        @Override
        public boolean isWriteIdValid(long writeid) {
            return true;
        }

        @Override
        public RangeResponse isWriteIdRangeValid(long minWriteId, long maxWriteId) {
            return RangeResponse.ALL;
        }

        @Override
        public String writeToString() {
            return "";
        }

        @Override
        public void readFromString(String src) {
        }

        @Override
        public Long getMinOpenWriteId() {
            return null;
        }

        @Override
        public long getHighWatermark() {
            return Long.MAX_VALUE;
        }

        @Override
        public long[] getInvalidWriteIds() {
            return new long[0];
        }

        @Override
        public boolean isValidBase(long writeid) {
            return true;
        }

        @Override
        public boolean isWriteIdAborted(long writeid) {
            return true;
        }

        @Override
        public RangeResponse isWriteIdRangeAborted(long minWriteId, long maxWriteId) {
            return RangeResponse.ALL;
        }
    };
    OrcInputFormat aif = new OrcInputFormat();
    Configuration conf = new Configuration();
    conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, columnNamesProperty);
    conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, columnTypesProperty);
    conf.set(hive_metastoreConstants.BUCKET_COUNT, Integer.toString(numBuckets));
    conf.setBoolean("orc.schema.evolution.case.sensitive", false);
    HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
    AcidInputFormat.RawReader<OrcStruct> reader = aif.getRawReader(conf, true, bucket, writeIdList, base, deltas, new HashMap<String, Integer>());
    RecordIdentifier identifier = reader.createKey();
    OrcStruct value = reader.createValue();
    long currentTxn = min;
    boolean seenCurrentTxn = false;
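    // Scan all rows: the first write id must equal min, none of the known
    // invalid write ids may appear, and the scan must finish at max.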
    while (reader.next(identifier, value)) {
        if (!seenCurrentTxn) {
            Assert.assertEquals(currentTxn, identifier.getWriteId());
            seenCurrentTxn = true;
        }
        if (currentTxn != identifier.getWriteId()) {
            if (invalidWriteIDs != null) {
                // List<Integer>#contains(long) boxes the argument to Long and
                // never matches an Integer, so cast the write id down first.
                Assert.assertFalse(invalidWriteIDs.contains((int) identifier.getWriteId()));
            }
            currentTxn++;
        }
    }
    Assert.assertEquals(max, currentTxn);
}
Also used: org.apache.hadoop.hive.ql.io.AcidInputFormat, org.apache.hadoop.hive.ql.io.RecordIdentifier, org.apache.hadoop.hive.ql.io.orc.OrcStruct, org.apache.hadoop.conf.Configuration, org.apache.hadoop.hive.ql.io.orc.OrcInputFormat, org.apache.hadoop.hive.common.ValidWriteIdList
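
A hypothetical invocation, with the paths, column properties, and write-id range invented purely for illustration:

// Sketch only: verify write ids 1..6 survive across a base plus one delta.
Path base = new Path("/warehouse/acidtbl/base_0000005");
Path[] deltas = { new Path("/warehouse/acidtbl/delta_0000006_0000006") };
CompactorTestUtil.checkExpectedTxnsPresent(base, deltas, "a,b", "int:string",
        0, 1L, 6L, null, 2);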

Aggregations

org.apache.hadoop.hive.ql.io.orc.OrcInputFormat: 7 uses
org.apache.hadoop.hive.ql.io.orc.OrcStruct: 6 uses
org.apache.hadoop.hive.common.ValidWriteIdList: 4 uses
org.apache.hadoop.hive.ql.io.RecordIdentifier: 4 uses
org.apache.hadoop.io.NullWritable: 4 uses
org.apache.hadoop.mapred.InputSplit: 4 uses
org.apache.hadoop.mapred.JobConf: 4 uses
java.util.ArrayList: 2 uses
org.apache.hadoop.conf.Configuration: 2 uses
org.apache.hadoop.hive.ql.io.AcidInputFormat: 2 uses
org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader: 2 uses
org.apache.hadoop.hive.ql.io.AcidUtils: 2 uses
org.apache.hadoop.mapred.InputFormat: 2 uses
org.apache.orc.impl.OrcAcidUtils: 2 uses
com.alibaba.datax.common.exception.DataXException: 1 use
com.alibaba.datax.plugin.unstructuredstorage.reader.ColumnEntry: 1 use
com.alibaba.fastjson.JSONObject: 1 use
java.io.IOException: 1 use
org.apache.hadoop.fs.Path: 1 use
org.apache.hadoop.hive.ql.io.AcidDirectory: 1 use