Use of org.apache.drill.common.config.DrillConfig in project drill by apache.
From class TestParquetPhysicalPlan, method testParseParquetPhysicalPlan.
@Test
@Ignore
public void testParseParquetPhysicalPlan() throws Exception {
  final StringBuilder sb = new StringBuilder();
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  DrillConfig config = DrillConfig.create();
  try (Drillbit bit1 = new Drillbit(config, serviceSet);
       DrillClient client = new DrillClient(config, serviceSet.getCoordinator())) {
    bit1.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(
        org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
        Resources.toString(Resources.getResource(fileName), Charsets.UTF_8));
    RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
    int count = 0;
    for (QueryDataBatch b : results) {
      sb.append(String.format("Got %d results\n", b.getHeader().getRowCount()));
      count += b.getHeader().getRowCount();
      loader.load(b.getHeader().getDef(), b.getData());
      for (VectorWrapper vw : loader) {
        sb.append(vw.getValueVector().getField().getName() + ": ");
        ValueVector vv = vw.getValueVector();
        for (int i = 0; i < vv.getAccessor().getValueCount(); i++) {
          Object o = vv.getAccessor().getObject(i);
          if (o instanceof byte[]) {
            sb.append(" [" + new String((byte[]) o) + "]");
          } else {
            sb.append(" [" + vv.getAccessor().getObject(i) + "]");
          }
        }
        sb.append('\n');
      }
      loader.clear();
      b.release();
    }
    client.close();
    sb.append(String.format("Got %d total results\n", count));
  }
  logger.debug(sb.toString());
}
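The test above boots an embedded Drillbit and a DrillClient from the same DrillConfig instance. As a complement, here is a minimal, hedged sketch of reading values from a DrillConfig directly; DrillConfig wraps a Typesafe Config, so the usual typed getters are available. The key drill.exec.zk.connect is used purely as an illustration.

import org.apache.drill.common.config.DrillConfig;

public class DrillConfigReadSketch {
  public static void main(String[] args) {
    // DrillConfig.create() assembles the configuration from drill-default.conf,
    // drill-module.conf and drill-override.conf on the classpath.
    DrillConfig config = DrillConfig.create();
    // DrillConfig delegates to a Typesafe Config, so typed lookups apply.
    if (config.hasPath("drill.exec.zk.connect")) {
      System.out.println("ZooKeeper connect string: " + config.getString("drill.exec.zk.connect"));
    }
  }
}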
Use of org.apache.drill.common.config.DrillConfig in project drill by apache.
From class TestPauseInjection, method timedPauseOnSpecificBit.
@Test
public void timedPauseOnSpecificBit() {
  final RemoteServiceSet remoteServiceSet = RemoteServiceSet.getLocalServiceSet();
  final ZookeeperHelper zkHelper = new ZookeeperHelper();
  zkHelper.startZookeeper(1);
  final long pauseDuration = 2000L;
  final long expectedDuration = pauseDuration;
  try {
    // Creating two drillbits
    final Drillbit drillbit1, drillbit2;
    final DrillConfig drillConfig = zkHelper.getConfig();
    try {
      drillbit1 = Drillbit.start(drillConfig, remoteServiceSet);
      drillbit2 = Drillbit.start(drillConfig, remoteServiceSet);
    } catch (final DrillbitStartupException e) {
      throw new RuntimeException("Failed to start two drillbits.", e);
    }
    final DrillbitContext drillbitContext1 = drillbit1.getContext();
    final DrillbitContext drillbitContext2 = drillbit2.getContext();
    final UserSession session = UserSession.Builder.newBuilder()
        .withCredentials(UserCredentials.newBuilder().setUserName("foo").build())
        .withUserProperties(UserProperties.getDefaultInstance())
        .withOptionManager(drillbitContext1.getOptionManager())
        .build();
    final DrillbitEndpoint drillbitEndpoint1 = drillbitContext1.getEndpoint();
    final String controls = Controls.newBuilder()
        .addTimedPauseOnBit(DummyClass.class, DummyClass.PAUSES, drillbitEndpoint1, 0, pauseDuration)
        .build();
    ControlsInjectionUtil.setControls(session, controls);
    {
      final ExtendedLatch trigger = new ExtendedLatch(1);
      final Pointer<Exception> ex = new Pointer<>();
      final QueryContext queryContext = new QueryContext(session, drillbitContext1, QueryId.getDefaultInstance());
      // test that the pause happens
      final DummyClass dummyClass = new DummyClass(queryContext, trigger);
      final long actualDuration = dummyClass.pauses();
      assertTrue(String.format("Test should stop for at least %d milliseconds.", expectedDuration),
          expectedDuration <= actualDuration);
      assertNull("No exception should be thrown.", ex.value);
      try {
        queryContext.close();
      } catch (final Exception e) {
        fail("Failed to close query context: " + e);
      }
    }
    {
      final ExtendedLatch trigger = new ExtendedLatch(1);
      final QueryContext queryContext = new QueryContext(session, drillbitContext2, QueryId.getDefaultInstance());
      // if the resume did not happen, the test would hang
      final DummyClass dummyClass = new DummyClass(queryContext, trigger);
      dummyClass.pauses();
      try {
        queryContext.close();
      } catch (final Exception e) {
        fail("Failed to close query context: " + e);
      }
    }
  } finally {
    zkHelper.stopZookeeper();
  }
}
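Here the DrillConfig comes from ZookeeperHelper.getConfig() rather than DrillConfig.create(). When a test needs to adjust a single option on top of an existing config, one pattern is to wrap the modified Typesafe Config in a new DrillConfig, as the Iceberg example further below also does. The sketch is illustrative only, and drill.exec.http.enabled is just an example of an option to override.

import org.apache.drill.common.config.DrillConfig;
import com.typesafe.config.ConfigValueFactory;

public class ConfigOverrideSketch {
  public static void main(String[] args) {
    DrillConfig base = DrillConfig.create();
    // Config.withValue returns a plain Config, so it is wrapped back into a DrillConfig.
    DrillConfig tweaked = new DrillConfig(
        base.withValue("drill.exec.http.enabled", ConfigValueFactory.fromAnyRef(false)));
    System.out.println(tweaked.getBoolean("drill.exec.http.enabled"));  // prints false
  }
}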
Use of org.apache.drill.common.config.DrillConfig in project drill by apache.
From class TestSplitAndTransfer, method test.
@Test
public void test() throws Exception {
  final DrillConfig drillConfig = DrillConfig.create();
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final MaterializedField field = MaterializedField.create("field", Types.optional(MinorType.VARCHAR));
  final NullableVarCharVector varCharVector = new NullableVarCharVector(field, allocator);
  varCharVector.allocateNew(10000, 1000);
  final int valueCount = 500;
  final String[] compareArray = new String[valueCount];
  final NullableVarCharVector.Mutator mutator = varCharVector.getMutator();
  for (int i = 0; i < valueCount; i += 3) {
    final String s = String.format("%010d", i);
    mutator.set(i, s.getBytes());
    compareArray[i] = s;
  }
  mutator.setValueCount(valueCount);
  final TransferPair tp = varCharVector.getTransferPair(allocator);
  final NullableVarCharVector newVarCharVector = (NullableVarCharVector) tp.getTo();
  final Accessor accessor = newVarCharVector.getAccessor();
  final int[][] startLengths = { { 0, 201 }, { 201, 200 }, { 401, 99 } };
  for (final int[] startLength : startLengths) {
    final int start = startLength[0];
    final int length = startLength[1];
    tp.splitAndTransfer(start, length);
    newVarCharVector.getMutator().setValueCount(length);
    for (int i = 0; i < length; i++) {
      final boolean expectedSet = ((start + i) % 3) == 0;
      if (expectedSet) {
        final byte[] expectedValue = compareArray[start + i].getBytes();
        assertFalse(accessor.isNull(i));
        assertArrayEquals(expectedValue, accessor.get(i));
      } else {
        assertTrue(accessor.isNull(i));
      }
    }
    newVarCharVector.clear();
  }
  varCharVector.close();
  allocator.close();
}
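The only role DrillConfig plays in this test is to parameterize the root allocator. A minimal sketch of that config-to-allocator step follows; the buffer size is illustrative, and the package names assume the usual Drill memory classes.

import io.netty.buffer.DrillBuf;
import org.apache.drill.common.config.DrillConfig;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.memory.RootAllocatorFactory;

public class AllocatorFromConfigSketch {
  public static void main(String[] args) throws Exception {
    DrillConfig config = DrillConfig.create();
    // The root allocator takes its memory limits from the DrillConfig.
    try (BufferAllocator allocator = RootAllocatorFactory.newRoot(config)) {
      DrillBuf buf = allocator.buffer(256);  // small direct buffer; size is illustrative
      buf.release();                         // DrillBuf is reference counted
    }
  }
}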
Use of org.apache.drill.common.config.DrillConfig in project drill by apache.
From class TestSplitAndTransfer, method testBitVectorImpl.
public void testBitVectorImpl(int valueCount, final int[][] startLengths, TestBitPattern pattern) throws Exception {
  final DrillConfig drillConfig = DrillConfig.create();
  final BufferAllocator allocator = RootAllocatorFactory.newRoot(drillConfig);
  final MaterializedField field = MaterializedField.create("field", Types.optional(MinorType.BIT));
  final BitVector bitVector = new BitVector(field, allocator);
  // extra byte at the end that gets filled with junk
  bitVector.allocateNew(valueCount + 8);
  final int[] compareArray = new int[valueCount];
  int testBitValue = 0;
  final BitVector.Mutator mutator = bitVector.getMutator();
  for (int i = 0; i < valueCount; i++) {
    testBitValue = getBit(pattern, i);
    mutator.set(i, testBitValue);
    compareArray[i] = testBitValue;
  }
  // guard against off-by-one out-of-bound reads
  for (int j = valueCount; j < valueCount + 8; j++) {
    // fill with the complement of testBitValue
    mutator.set(j, ~testBitValue);
  }
  mutator.setValueCount(valueCount);
  final TransferPair tp = bitVector.getTransferPair(allocator);
  final BitVector newBitVector = (BitVector) tp.getTo();
  final BitVector.Accessor accessor = newBitVector.getAccessor();
  for (final int[] startLength : startLengths) {
    final int start = startLength[0];
    final int length = startLength[1];
    tp.splitAndTransfer(start, length);
    assertEquals(length, newBitVector.getAccessor().getValueCount());
    for (int i = 0; i < length; i++) {
      final int expectedValue = compareArray[start + i];
      assertEquals(expectedValue, accessor.get(i));
    }
    newBitVector.clear();
  }
  bitVector.close();
  allocator.close();
}
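testBitVectorImpl relies on a TestBitPattern type and a getBit helper that are defined elsewhere in the test class and are not part of this excerpt. One plausible shape for them, stated purely as an assumption, is:

// Assumed shapes only; the real enum and helper live in TestSplitAndTransfer and are not shown here.
enum TestBitPattern { ZERO, ONE, ALTERNATING, RANDOM }

private static final java.util.Random RAND = new java.util.Random();

static int getBit(TestBitPattern pattern, int index) {
  switch (pattern) {
    case RANDOM:      return RAND.nextInt(2);  // pseudo-random 0 or 1
    case ALTERNATING: return index % 2;        // 0, 1, 0, 1, ...
    case ONE:         return 1;                // all bits set
    default:          return 0;                // ZERO: all bits clear
  }
}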
Use of org.apache.drill.common.config.DrillConfig in project drill by apache.
From class TestIcebergTablesMetastoreConfigAndVersion, method testLoadWithSameProperties.
@Test
public void testLoadWithSameProperties() {
  DrillConfig config = new DrillConfig(baseIcebergConfig(baseLocation.getRoot())
      .withValue(String.format(COMPONENTS_COMMON_PROPERTIES_PATTERN, TableProperties.SPLIT_SIZE),
          ConfigValueFactory.fromAnyRef(10)));
  Map<String, String> initialProperties = Collections.singletonMap(TableProperties.SPLIT_SIZE, "10");
  Metastore initialMetastore = new IcebergMetastore(config);
  assertEquals(initialProperties, initialMetastore.tables().metadata().properties());
  Metastore newMetastore = new IcebergMetastore(config);
  assertEquals(initialProperties, newMetastore.tables().metadata().properties());
}
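COMPONENTS_COMMON_PROPERTIES_PATTERN and baseIcebergConfig(...) belong to the test class and are not shown above. The layered-override idea behind the test can still be sketched with an explicit Typesafe Config and an illustrative key prefix; only the general pattern, not the exact key, should be taken from this sketch.

import org.apache.drill.common.config.DrillConfig;
import org.apache.iceberg.TableProperties;
import com.typesafe.config.Config;
import com.typesafe.config.ConfigFactory;
import com.typesafe.config.ConfigValueFactory;

public class IcebergPropertyOverrideSketch {
  public static void main(String[] args) {
    Config base = ConfigFactory.load();  // stand-in for baseIcebergConfig(baseLocation.getRoot())
    // Illustrative key prefix; the real test builds the key from COMPONENTS_COMMON_PROPERTIES_PATTERN.
    String key = String.format("drill.metastore.iceberg.components.common.properties.%s",
        TableProperties.SPLIT_SIZE);
    DrillConfig config = new DrillConfig(base.withValue(key, ConfigValueFactory.fromAnyRef(10)));
    System.out.println(config.getInt(key));  // prints 10
  }
}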