use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestPartitionRace method testNoRace.
/**
* V3 computes partition depth in the group scan (which sees all files), and
* so the partition column count does not vary across scans. Also, V3 puts
* partition columns at the end of the row so that data columns don't
* "jump around" when files are shifted to a new partition depth.
*/
@Test
public void testNoRace() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();
  try {
    enableMultiScan();

    // Loop to run the query 10 times or until we see both files
    // in the first position.
    boolean sawRootFirst = false;
    boolean sawNestedFirst = false;
    for (int i = 0; i < 10; i++) {
      Iterator<DirectRowSet> iter = client.queryBuilder()
          .sql(sql, PART_DIR)
          .rowSetIterator();
      RowSet rowSet;
      if (SCHEMA_BATCH_ENABLED) {

        // First batch is empty; just carries the schema.
        assertTrue(iter.hasNext());
        rowSet = iter.next();
        assertEquals(0, rowSet.rowCount());
        rowSet.clear();
      }
      for (int j = 0; j < 2; j++) {
        assertTrue(iter.hasNext());
        rowSet = iter.next();

        // Figure out which record this is and test accordingly.
        RowSetReader reader = rowSet.reader();
        assertTrue(reader.next());
        String col1 = reader.scalar("a").getString();
        if (col1.equals("10")) {
          if (i == 0) {
            sawRootFirst = true;
          }
          RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
              .addRow("10", "foo", "bar", null)
              .build();
          RowSetUtilities.verify(expected, rowSet);
        } else {
          if (i == 0) {
            sawNestedFirst = true;
          }
          RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
              .addRow("20", "fred", "wilma", NESTED_DIR)
              .build();
          RowSetUtilities.verify(expected, rowSet);
        }
      }
      assertFalse(iter.hasNext());
      if (sawRootFirst && sawNestedFirst) {

        // The following should appear most of the time.
        System.out.println("Both variations occurred");
        return;
      }
    }

    // If you see this, maybe something got fixed. Or, maybe the
    // min parallelization hack above stopped working.
    // Or, you were just unlucky and can try the test again.
    // We print messages, rather than using assertTrue, to avoid
    // introducing a flaky test.
    System.out.println("Some variations did not occur");
    System.out.println(String.format("Outer first: %s", sawRootFirst));
    System.out.println(String.format("Nested first: %s", sawNestedFirst));
  } finally {
    resetMultiScan();
  }
}
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class RowSetComparison method unorderedVerify.
/**
 * Verify the actual rows using the rules defined in this builder,
 * ignoring row order: rows are collected into multisets and compared
 * as a whole.
 * @param actual the actual results to verify
 */
public void unorderedVerify(RowSet actual) {
  compareSchemasAndCounts(actual);
  int testLength = getTestLength();
  RowSetReader er = expected.reader();
  RowSetReader ar = actual.reader();
  for (int i = 0; i < offset; i++) {
    er.next();
    ar.next();
  }

  final Multiset<List<Object>> expectedSet = HashMultiset.create();
  final Multiset<List<Object>> actualSet = HashMultiset.create();
  for (int rowCounter = 0; rowCounter < testLength; rowCounter++) {
    er.next();
    ar.next();
    expectedSet.add(buildRow(er));
    actualSet.add(buildRow(ar));
  }
  Assert.assertEquals(expectedSet, actualSet);
}
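For context, a minimal usage sketch of the multiset comparison above. The single-argument RowSetComparison(expected) constructor and the clear() bookkeeping are assumptions based on how the class is used elsewhere in Drill's test framework; the schema and row-set builders are the same ones shown in testNoRace.

  TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .add("name", MinorType.VARCHAR)
      .buildSchema();
  RowSet expected = new RowSetBuilder(fixture.allocator(), schema)
      .addRow(1, "foo")
      .addRow(2, "bar")
      .build();
  // Same rows, different order: unorderedVerify() passes, verify() would not.
  RowSet actual = new RowSetBuilder(fixture.allocator(), schema)
      .addRow(2, "bar")
      .addRow(1, "foo")
      .build();
  new RowSetComparison(expected).unorderedVerify(actual);
  expected.clear();
  actual.clear();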
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class RowSetComparison method verify.
/**
* Verify the actual rows using the rules defined in this builder
* @param actual the actual results to verify
*/
public void verify(RowSet actual) {
  compareSchemasAndCounts(actual);
  int testLength = getTestLength();
  RowSetReader er = expected.reader();
  RowSetReader ar = actual.reader();
  for (int i = 0; i < offset; i++) {
    er.next();
    ar.next();
  }
  for (int i = 0; i < testLength; i++) {
    er.next();
    ar.next();
    String label = Integer.toString(er.logicalIndex() + 1);
    verifyRow(label, er, ar);
  }
}
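Note that the label passed to verifyRow is logicalIndex() + 1, so a mismatch is reported with a 1-based row number. A hedged sketch of the ordered path, reusing the schema and builders from the sketch above; RowSetUtilities.verify(expected, actual), as used in testNoRace, is believed to wrap this same comparison, but that is an assumption worth checking against the utility's source.

  RowSet expected = new RowSetBuilder(fixture.allocator(), schema)
      .addRow(1, "foo")
      .addRow(2, "bar")
      .build();
  // Rows are compared in order; the first mismatch fails the test,
  // labeled with its 1-based row number.
  new RowSetComparison(expected).verify(actual);
  expected.clear();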
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class JsonFileBuilder method build.
/**
 * Writes the configured data to the given file in JSON format.
 * @param tableFile the file to write the JSON data to
 * @throws IOException if the file cannot be created or written
 */
public void build(File tableFile) throws IOException {
  tableFile.getParentFile().mkdirs();
  try (BufferedOutputStream os = new BufferedOutputStream(new FileOutputStream(tableFile))) {
    JsonWriter jsonWriter = new JsonWriter(os, pretty, useExtendedOutput);
    final RowSetReader reader = rowSet.reader();
    while (reader.next()) {
      jsonWriter.writeRow(reader);
    }
  }
}
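A hedged usage sketch: the build() body reads rowSet.reader(), so the builder is assumed here to be constructed with the RowSet it serializes; the output path is arbitrary.

  TupleMetadata schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .add("name", MinorType.VARCHAR)
      .buildSchema();
  RowSet rowSet = new RowSetBuilder(fixture.allocator(), schema)
      .addRow(1, "foo")
      .addRow(2, "bar")
      .build();
  // Writes one JSON object per row; column names become the JSON keys.
  new JsonFileBuilder(rowSet).build(new File("/tmp/drill-test/table.json"));
  rowSet.clear();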
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestResultSetLoaderMapArray method testOverwriteRow.
/**
 * Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
 * that uses nested columns inside an array of maps. Here we must call
 * {@code start()} to reset the array back to the initial start position after
 * each "discard."
 */
@Test
public void testOverwriteRow() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMapArray("m")
        .add("b", MinorType.INT)
        .add("c", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Can't use the shortcut to populate rows when doing overwrites.
  ScalarWriter aWriter = rootWriter.scalar("a");
  ArrayWriter maWriter = rootWriter.array("m");
  TupleWriter mWriter = maWriter.tuple();
  ScalarWriter bWriter = mWriter.scalar("b");
  ScalarWriter cWriter = mWriter.scalar("c");

  // Write 10,000 rows, overwriting 99% of them. This will cause vector
  // overflow and data corruption if overwrite does not work; but will happily
  // produce the correct result if everything works as it should.
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  rsLoader.startBatch();
  while (count < 10_000) {
    rootWriter.start();
    count++;
    aWriter.setInt(count);
    for (int i = 0; i < 10; i++) {
      bWriter.setInt(count * 10 + i);
      cWriter.setBytes(value, value.length);
      maWriter.save();
    }
    if (count % 100 == 0) {
      rootWriter.save();
    }
  }

  // Verify using a reader.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(count / 100, result.rowCount());
  RowSetReader reader = result.reader();
  ArrayReader maReader = reader.array("m");
  TupleReader mReader = maReader.tuple();
  int rowId = 1;
  while (reader.next()) {
    assertEquals(rowId * 100, reader.scalar("a").getInt());
    assertEquals(10, maReader.size());
    for (int i = 0; i < 10; i++) {
      assertTrue(maReader.next());
      assertEquals(rowId * 1000 + i, mReader.scalar("b").getInt());
      assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
    }
    rowId++;
  }
  result.clear();
  rsLoader.close();
}
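The "discard" the javadoc mentions is simply calling start() again without an intervening save(): the row writer rewinds to the row's start position, and the map-array index resets with it. A minimal sketch of that pattern, reusing the writers set up in the test above:

  rootWriter.start();                  // begin a row
  aWriter.setInt(1);
  bWriter.setInt(10);
  cWriter.setBytes(value, value.length);
  maWriter.save();                     // close one map-array element

  rootWriter.start();                  // no save(): the row above is discarded
  aWriter.setInt(2);                   // and the array is back at its start position
  bWriter.setInt(20);
  cWriter.setBytes(value, value.length);
  maWriter.save();
  rootWriter.save();                   // keep this version of the row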