Use of org.apache.hadoop.hive.ql.io.AcidDirectory in project hive by apache.
Class TestOrcRawRecordMerger, method testEmpty.
@Test
public void testEmpty() throws Exception {
final int BUCKET = 0;
Configuration conf = new Configuration();
OrcOutputFormat of = new OrcOutputFormat();
FileSystem fs = FileSystem.getLocal(conf);
Path root = new Path(tmpDir, "testEmpty").makeQualified(fs);
fs.delete(root, true);
ObjectInspector inspector;
synchronized (TestOrcFile.class) {
inspector = ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
}
// write the empty base
AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
    .inspector(inspector)
    .bucket(BUCKET)
    .writingBase(true)
    .maximumWriteId(100)
    .finalDestination(root);
of.getRecordUpdater(root, options).close(false);
conf.set(ValidTxnList.VALID_TXNS_KEY, new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE).writeToString());
ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testEmpty:200:" + Long.MAX_VALUE);
AcidDirectory directory = AcidUtils.getAcidState(fs, root, conf, writeIdList, null, false);
Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(), BUCKET);
Reader baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf));
conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TRANSACTIONAL_TABLE_SCAN, true);
OrcRawRecordMerger merger = new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
    createMaximalTxnList(), new Reader.Options(),
    AcidUtils.getPaths(directory.getCurrentDirectories()),
    new OrcRawRecordMerger.Options().isCompacting(false));
RecordIdentifier key = merger.createKey();
OrcStruct value = merger.createValue();
assertEquals(false, merger.next(key, value));
}
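The pattern shared by every example on this page is: build a ValidWriteIdList, call AcidUtils.getAcidState, then inspect the resulting AcidDirectory. Below is a minimal stand-alone sketch of that pattern. It is not part of the Hive sources quoted here; it uses only the AcidUtils/AcidDirectory calls that appear in these tests, and the table path and write id string are placeholders.
// Illustrative sketch (not from the Hive tests above). Assumes the same imports as
// TestOrcRawRecordMerger (AcidUtils, AcidDirectory, ValidTxnList, ValidReadTxnList,
// ValidReaderWriteIdList, ValidWriteIdList, java.util.BitSet).
static void printAcidLayout(Configuration conf, FileSystem fs, Path tableDir) throws IOException {
  // getAcidState expects the valid transaction list on the conf, as the tests above set it
  conf.set(ValidTxnList.VALID_TXNS_KEY,
      new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE).writeToString());
  // placeholder write id list: table "t", high watermark 200, nothing open
  ValidWriteIdList writeIds = new ValidReaderWriteIdList("t:200:" + Long.MAX_VALUE);
  AcidDirectory dir = AcidUtils.getAcidState(fs, tableDir, conf, writeIds, null, false);
  System.out.println("base      = " + dir.getBaseDirectory());
  for (AcidUtils.ParsedDelta delta : dir.getCurrentDirectories()) {
    System.out.println("delta     = " + delta.getPath()
        + " writeIds [" + delta.getMinWriteId() + ", " + delta.getMaxWriteId() + "]");
  }
  System.out.println("obsolete  = " + dir.getObsolete().size() + " dirs");
  System.out.println("originals = " + dir.getOriginalFiles().size() + " files");
}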
Use of org.apache.hadoop.hive.ql.io.AcidDirectory in project hive by apache.
Class TestOrcRawRecordMerger, method testNewBaseAndDelta.
private void testNewBaseAndDelta(boolean use130Format) throws Exception {
final int BUCKET = 10;
String[] values = new String[] { "first", "second", "third", "fourth", "fifth", "sixth", "seventh", "eighth", "ninth", "tenth" };
Configuration conf = new Configuration();
OrcOutputFormat of = new OrcOutputFormat();
FileSystem fs = FileSystem.getLocal(conf);
Path root = new Path(tmpDir, "testNewBaseAndDelta").makeQualified(fs);
fs.delete(root, true);
ObjectInspector inspector;
synchronized (TestOrcFile.class) {
inspector = ObjectInspectorFactory.getReflectionObjectInspector(MyRow.class, ObjectInspectorFactory.ObjectInspectorOptions.JAVA);
}
// write the base
AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf).inspector(inspector).bucket(BUCKET).finalDestination(root);
final int BUCKET_PROPERTY = BucketCodec.V1.encode(options);
if (!use130Format) {
options.statementId(-1);
}
RecordUpdater ru = of.getRecordUpdater(root, options.writingBase(true).maximumWriteId(100));
for (String v : values) {
ru.insert(0, new MyRow(v));
}
ru.close(false);
// write a delta
ru = of.getRecordUpdater(root, options.writingBase(false).minimumWriteId(200).maximumWriteId(200).recordIdColumn(1));
ru.update(200, new MyRow("update 1", 0, 0, BUCKET_PROPERTY));
ru.update(200, new MyRow("update 2", 2, 0, BUCKET_PROPERTY));
ru.update(200, new MyRow("update 3", 3, 0, BUCKET_PROPERTY));
ru.delete(200, new MyRow("", 7, 0, BUCKET_PROPERTY));
ru.delete(200, new MyRow("", 8, 0, BUCKET_PROPERTY));
ru.close(false);
conf.set(ValidTxnList.VALID_TXNS_KEY, new ValidReadTxnList(new long[0], new BitSet(), 1000, Long.MAX_VALUE).writeToString());
ValidWriteIdList writeIdList = new ValidReaderWriteIdList("testNewBaseAndDelta:200:" + Long.MAX_VALUE);
AcidDirectory directory = AcidUtils.getAcidState(fs, root, conf, writeIdList, null, use130Format);
assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory());
assertEquals(new Path(root, use130Format ? AcidUtils.deleteDeltaSubdir(200, 200, 0) : AcidUtils.deleteDeltaSubdir(200, 200)),
    directory.getCurrentDirectories().get(0).getPath());
assertEquals(new Path(root, use130Format ? AcidUtils.deltaSubdir(200, 200, 0) : AcidUtils.deltaSubdir(200, 200)),
    directory.getCurrentDirectories().get(1).getPath());
Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(), BUCKET);
Path deltaPath = AcidUtils.createBucketFile(directory.getCurrentDirectories().get(1).getPath(), BUCKET);
Path deleteDeltaDir = directory.getCurrentDirectories().get(0).getPath();
conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, MyRow.getColumnNamesProperty());
conf.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, MyRow.getColumnTypesProperty());
AcidUtils.setAcidOperationalProperties(conf, true, null);
conf.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
// the first "split" is for base/
Reader baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf));
OrcRawRecordMerger merger = new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
    createMaximalTxnList(), new Reader.Options(), new Path[] { deleteDeltaDir },
    new OrcRawRecordMerger.Options().isCompacting(false));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
RecordIdentifier id = merger.createKey();
OrcStruct event = merger.createValue();
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
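// ReaderKey arguments appear to be (originalWriteId, bucketProperty, rowId, currentWriteId):
// row 0 of the base ("first") was deleted by write id 200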
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 1, 0), id);
assertEquals("second", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 4, 0), id);
assertEquals("fifth", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 5, 0), id);
assertEquals("sixth", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 6, 0), id);
assertEquals("seventh", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 9, 0), id);
assertEquals("tenth", getValue(event));
assertEquals(false, merger.next(id, event));
merger.close();
// second "split" is delta_200_200
baseReader = OrcFile.createReader(deltaPath, OrcFile.readerOptions(conf));
merger = new OrcRawRecordMerger(conf, true, baseReader, false, BUCKET,
    createMaximalTxnList(), new Reader.Options(), new Path[] { deleteDeltaDir },
    new OrcRawRecordMerger.Options().isCompacting(false));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 0, 200), id);
assertEquals("update 1", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 1, 200), id);
assertEquals("update 2", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 2, 200), id);
assertEquals("update 3", getValue(event));
assertEquals(false, merger.next(id, event));
merger.close();
// now run as if it's a minor compaction, so we don't collapse events
// here there is only 1 "split" since we only have data for 1 bucket
merger = new OrcRawRecordMerger(conf, false, null, false, BUCKET,
    createMaximalTxnList(), new Reader.Options(),
    AcidUtils.getPaths(directory.getCurrentDirectories()),
    new OrcRawRecordMerger.Options().isCompacting(true));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
assertEquals(true, merger.next(id, event));
// minor compaction, so we ignore the 'base_0000100' files; all Deletes end up first since
// they all modify primordial rows
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
// data from delta_200_200
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 0, 200), id);
assertEquals("update 1", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 1, 200), id);
assertEquals("update 2", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 2, 200), id);
assertEquals("update 3", getValue(event));
assertEquals(false, merger.next(id, event));
merger.close();
// now run as if it's a major compaction, so we collapse events
// here there is only 1 "split" since we only have data for 1 bucket
baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf));
merger = new OrcRawRecordMerger(conf, true, null, false, BUCKET,
    createMaximalTxnList(), new Reader.Options(),
    AcidUtils.getPaths(directory.getCurrentDirectories()),
    new OrcRawRecordMerger.Options().isCompacting(true).isMajorCompaction(true)
        .baseDir(new Path(root, "base_0000100")));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 0, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 1, 0), id);
assertEquals("second", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 2, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 3, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 4, 0), id);
assertEquals("fifth", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 5, 0), id);
assertEquals("sixth", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 6, 0), id);
assertEquals("seventh", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 7, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.DELETE_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 8, 200), id);
assertNull(OrcRecordUpdater.getRow(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, 9, 0), id);
assertEquals("tenth", getValue(event));
// data from delta_200_200
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 0, 200), id);
assertEquals("update 1", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 1, 200), id);
assertEquals("update 2", getValue(event));
assertEquals(true, merger.next(id, event));
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(200, BUCKET_PROPERTY, 2, 200), id);
assertEquals("update 3", getValue(event));
assertEquals(false, merger.next(id, event));
merger.close();
// try ignoring the 200 transaction and make sure it still works
ValidWriteIdList writeIds = new ValidReaderWriteIdList("testNewBaseAndDelta:2000:200:200");
// again 1st split is for base/
baseReader = OrcFile.createReader(basePath, OrcFile.readerOptions(conf));
merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, writeIds,
    new Reader.Options(), new Path[] { deleteDeltaDir },
    new OrcRawRecordMerger.Options().isCompacting(false));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
for (int i = 0; i < values.length; ++i) {
assertEquals(true, merger.next(id, event));
LOG.info("id = " + id + ", event = " + event);
assertEquals(OrcRecordUpdater.INSERT_OPERATION, OrcRecordUpdater.getOperation(event));
assertEquals(new ReaderKey(0, BUCKET_PROPERTY, i, 0), id);
assertEquals(values[i], getValue(event));
}
assertEquals(false, merger.next(id, event));
merger.close();
// 2nd split is for delta_200_200 which is filtered out entirely by "txns"
baseReader = OrcFile.createReader(deltaPath, OrcFile.readerOptions(conf));
merger = new OrcRawRecordMerger(conf, false, baseReader, false, BUCKET, writeIds,
    new Reader.Options(), new Path[] { deleteDeltaDir },
    new OrcRawRecordMerger.Options().isCompacting(false));
assertEquals(null, merger.getMinKey());
assertEquals(null, merger.getMaxKey());
assertEquals(false, merger.next(id, event));
merger.close();
}
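A note on the BUCKET_PROPERTY constant in the ReaderKey assertions above: it is not the raw bucket number 10 but the value that BucketCodec.V1.encode(options) produces from the writer options, and that encoded value is what each row's RecordIdentifier carries in its bucket field. A minimal, hedged sketch of the derivation (not part of the test; only the bucket(10) setter is assumed to matter for the encoding):
// Illustrative only: BUCKET_PROPERTY above is derived from the writer Options via BucketCodec.
AcidOutputFormat.Options bucketOpts = new AcidOutputFormat.Options(conf).bucket(10);
int bucketProperty = BucketCodec.V1.encode(bucketOpts);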
Use of org.apache.hadoop.hive.ql.io.AcidDirectory in project hive by apache.
Class TestCrudCompactorOnTez, method testLlapCacheOffDuringCompaction.
/**
* Tests whether hive.llap.io.etl.skip.format config is handled properly whenever QueryCompactor#runCompactionQueries
* is invoked.
* @throws Exception
*/
@Test
public void testLlapCacheOffDuringCompaction() throws Exception {
// Setup
QueryCompactor qc = new QueryCompactor() {
@Override
void runCompaction(HiveConf hiveConf, Table table, Partition partition, StorageDescriptor storageDescriptor,
    ValidWriteIdList writeIds, CompactionInfo compactionInfo, AcidDirectory dir) throws IOException {
}
@Override
protected void commitCompaction(String dest, String tmpTableName, HiveConf conf,
    ValidWriteIdList actualWriteIds, long compactorTxnId) throws IOException, HiveException {
}
};
StorageDescriptor sdMock = mock(StorageDescriptor.class);
doAnswer(invocationOnMock -> null).when(sdMock).getLocation();
CompactionInfo ciMock = mock(CompactionInfo.class);
ciMock.runAs = "hive";
List<String> emptyQueries = new ArrayList<>();
HiveConf hiveConf = new HiveConf();
hiveConf.set(ValidTxnList.VALID_TXNS_KEY, "8:9223372036854775807::");
// Check for default case.
qc.runCompactionQueries(hiveConf, null, sdMock, null, ciMock, null, emptyQueries, emptyQueries, emptyQueries);
Assert.assertEquals("all", hiveConf.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT));
// Check the case where hive.llap.io.etl.skip.format is explicitly set to none, so the cache is always used.
hiveConf.setVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT, "none");
qc.runCompactionQueries(hiveConf, null, sdMock, null, ciMock, null, emptyQueries, emptyQueries, emptyQueries);
Assert.assertEquals("none", hiveConf.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT));
}
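What the two assertions pin down is the config contract: after runCompactionQueries, hive.llap.io.etl.skip.format reads "all" (skip the LLAP cache for every format) unless it was explicitly set to "none" beforehand. A tiny illustrative snippet of reading and overriding the setting through HiveConf (not part of the test; the local variable names are made up):
// Illustrative only: the same getVar/setVar calls the test uses for this config.
HiveConf hc = new HiveConf();
hc.setVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT, "none");   // explicit opt-in to the cache
String skipFormat = hc.getVar(HiveConf.ConfVars.LLAP_IO_ETL_SKIP_FORMAT);   // "none"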
Use of org.apache.hadoop.hive.ql.io.AcidDirectory in project hive by apache.
Class TestStreaming, method testInterleavedTransactionBatchCommits.
@Test
public void testInterleavedTransactionBatchCommits() throws Exception {
StrictDelimitedInputWriter writer = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();
HiveStreamingConnection connection = HiveStreamingConnection.newBuilder()
    .withDatabase(dbName)
    .withTable(tblName)
    .withStaticPartitionValues(partitionVals)
    .withAgentInfo("UT_" + Thread.currentThread().getName())
    .withRecordWriter(writer)
    .withHiveConf(conf)
    .withTransactionBatchSize(10)
    .connect();
// Acquire 1st Txn Batch
connection.beginTransaction();
// Acquire 2nd Txn Batch
StrictDelimitedInputWriter writer2 = StrictDelimitedInputWriter.newBuilder().withFieldDelimiter(',').build();
HiveStreamingConnection connection2 = HiveStreamingConnection.newBuilder()
    .withDatabase(dbName)
    .withTable(tblName)
    .withStaticPartitionValues(partitionVals)
    .withAgentInfo("UT_" + Thread.currentThread().getName())
    .withRecordWriter(writer2)
    .withHiveConf(conf)
    .withTransactionBatchSize(10)
    .connect();
connection2.beginTransaction();
// Interleaved writes to both batches
connection.write("1,Hello streaming".getBytes());
connection2.write("3,Hello streaming - once again".getBytes());
checkNothingWritten(partLoc);
connection2.commitTransaction();
String validationQuery = "select id, msg from " + dbName + "." + tblName + " order by id, msg";
checkDataWritten2(partLoc, 11, 20, 1, validationQuery, true, "3\tHello streaming - once again");
connection.commitTransaction();
/* now both batches have committed (but not closed), so for each primary file we expect a side
file to exist and indicate the true length of the primary file */
FileSystem fs = partLoc.getFileSystem(conf);
AcidDirectory dir = AcidUtils.getAcidState(fs, partLoc, conf, getTransactionContext(conf), null, false);
for (AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) {
for (FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) {
Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath());
Assert.assertTrue(lengthFile + " missing", fs.exists(lengthFile));
long lengthFileSize = fs.getFileStatus(lengthFile).getLen();
Assert.assertTrue("Expected " + lengthFile + " to be non empty. lengh=" + lengthFileSize, lengthFileSize > 0);
long logicalLength = AcidUtils.getLogicalLength(fs, stat);
long actualLength = stat.getLen();
Assert.assertTrue("", logicalLength == actualLength);
}
}
checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "3\tHello streaming - once again");
connection.beginTransaction();
connection.write("2,Welcome to streaming".getBytes());
connection2.beginTransaction();
connection2.write("4,Welcome to streaming - once again".getBytes());
// here each batch has written data and committed (to bucket0 since the table only has 1 bucket)
// so each of the 2 deltas has 1 bucket0 and 1 bucket0_flush_length. Furthermore, each bucket0
// has now received more data (logically - it's buffered) but it is not yet committed.
// let's check that the side files exist, etc.
dir = AcidUtils.getAcidState(fs, partLoc, conf, getTransactionContext(conf), null, false);
for (AcidUtils.ParsedDelta pd : dir.getCurrentDirectories()) {
for (FileStatus stat : fs.listStatus(pd.getPath(), AcidUtils.bucketFileFilter)) {
Path lengthFile = OrcAcidUtils.getSideFile(stat.getPath());
Assert.assertTrue(lengthFile + " missing", fs.exists(lengthFile));
long lengthFileSize = fs.getFileStatus(lengthFile).getLen();
Assert.assertTrue("Expected " + lengthFile + " to be non empty. lengh=" + lengthFileSize, lengthFileSize > 0);
long logicalLength = AcidUtils.getLogicalLength(fs, stat);
long actualLength = stat.getLen();
Assert.assertTrue("", logicalLength <= actualLength);
}
}
checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "3\tHello streaming - once again");
connection.commitTransaction();
checkDataWritten2(partLoc, 1, 20, 2, validationQuery, false, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again");
connection2.commitTransaction();
checkDataWritten2(partLoc, 1, 20, 2, validationQuery, true, "1\tHello streaming", "2\tWelcome to streaming", "3\tHello streaming - once again", "4\tWelcome to streaming - once again");
Assert.assertEquals(HiveStreamingConnection.TxnState.COMMITTED, connection.getCurrentTransactionState());
Assert.assertEquals(HiveStreamingConnection.TxnState.COMMITTED, connection2.getCurrentTransactionState());
connection.close();
connection2.close();
}
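The two side-file loops above perform the same verification; distilled into a stand-alone helper it looks like the sketch below. This is not part of TestStreaming, it uses only the calls that appear in those loops, and the delta directory is supplied by the caller.
// Illustrative helper (not from the Hive tests above): verify that every bucket file in a delta
// directory has a non-empty _flush_length side file and that the logical (committed) length
// never exceeds the physical file length.
static void checkSideFiles(FileSystem fs, Path deltaDir) throws IOException {
  for (FileStatus bucketFile : fs.listStatus(deltaDir, AcidUtils.bucketFileFilter)) {
    Path lengthFile = OrcAcidUtils.getSideFile(bucketFile.getPath());
    Assert.assertTrue(lengthFile + " missing", fs.exists(lengthFile));
    Assert.assertTrue(lengthFile + " is empty", fs.getFileStatus(lengthFile).getLen() > 0);
    long logicalLength = AcidUtils.getLogicalLength(fs, bucketFile);
    // readers must stop at logicalLength; anything beyond it is uncommitted (buffered) data
    Assert.assertTrue("logical length exceeds file length for " + bucketFile.getPath(),
        logicalLength <= bucketFile.getLen());
  }
}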
Use of org.apache.hadoop.hive.ql.io.AcidDirectory in project hive by apache.
Class TestStreaming, method checkDataWritten.
/**
* @deprecated use {@link #checkDataWritten2(Path, long, long, int, String, boolean, String...)} -
* there is little value in using InputFormat directly
*/
@Deprecated
private void checkDataWritten(Path partitionPath, long minTxn, long maxTxn, int buckets,
    int numExpectedFiles, String... records) throws Exception {
ValidWriteIdList writeIds = getTransactionContext(conf);
AcidDirectory dir = AcidUtils.getAcidState(null, partitionPath, conf, writeIds, null, false);
Assert.assertEquals(0, dir.getObsolete().size());
Assert.assertEquals(0, dir.getOriginalFiles().size());
List<AcidUtils.ParsedDelta> current = dir.getCurrentDirectories();
System.out.println("Files found: ");
for (AcidUtils.ParsedDelta pd : current) {
System.out.println(pd.getPath().toString());
}
Assert.assertEquals(numExpectedFiles, current.size());
// find the absolute minimum and maximum write ids across the deltas
long min = Long.MAX_VALUE;
long max = Long.MIN_VALUE;
for (AcidUtils.ParsedDelta pd : current) {
if (pd.getMaxWriteId() > max) {
max = pd.getMaxWriteId();
}
if (pd.getMinWriteId() < min) {
min = pd.getMinWriteId();
}
}
// We are doing +1, as DDL operation will also advance the write Id now.
Assert.assertEquals(minTxn + 1, min);
Assert.assertEquals(maxTxn + 1, max);
InputFormat inf = new OrcInputFormat();
JobConf job = new JobConf();
job.set("mapred.input.dir", partitionPath.toString());
job.set(BUCKET_COUNT, Integer.toString(buckets));
job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, "id,msg");
job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, "bigint:string");
AcidUtils.setAcidOperationalProperties(job, true, null);
job.setBoolean(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, true);
job.set(ValidWriteIdList.VALID_WRITEIDS_KEY, writeIds.writeToString());
job.set(ValidTxnList.VALID_TXNS_KEY, conf.get(ValidTxnList.VALID_TXNS_KEY));
InputSplit[] splits = inf.getSplits(job, buckets);
Assert.assertEquals(numExpectedFiles, splits.length);
org.apache.hadoop.mapred.RecordReader<NullWritable, OrcStruct> rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
NullWritable key = rr.createKey();
OrcStruct value = rr.createValue();
for (String record : records) {
Assert.assertEquals(true, rr.next(key, value));
Assert.assertEquals(record, value.toString());
}
Assert.assertEquals(false, rr.next(key, value));
}