Use of io.prestosql.spi.HostAddress in project hetu-core by openlookeng.
In class TestQuery, method testHBaseRecordSetGetFiltersFromDomains:
/**
 * testHBaseRecordSetGetFiltersFromDomains
 */
@Test
public void testHBaseRecordSetGetFiltersFromDomains()
{
    List<HBaseColumnHandle> list = new ArrayList<>();
    list.add(TestUtils.createHBaseColumnRowId("rowkey"));
    HBaseTableHandle tableHandle = new HBaseTableHandle("hbase", "test_table", "rowkey", false,
            "io.hetu.core.plugin.hbase.utils.serializers.StringRowSerializer", Optional.of("test_table"), "",
            null, list, 0, OptionalLong.empty());
    // case ABOVE: a range with an exclusive lower bound (Marker.above) and an exclusive upper bound (Marker.below)
    Map<Integer, List<Range>> ranges = new HashMap<>();
    long startRow = 1;
    long endRow = 12345678;
    List<Range> range = new ArrayList<>();
    range.add(new Range(Marker.above(BIGINT, startRow), Marker.below(BIGINT, endRow)));
    ranges.put(0, range);
    HBaseSplit hBasesplit = new HBaseSplit("rowkey", tableHandle, new ArrayList<HostAddress>(1), "1", "12345678", ranges, -1, false, null);
    HBaseRecordSet rSet = new HBaseRecordSet(hconn, session, hBasesplit, tableHandle, list);
    rSet.getFiltersFromDomains(ranges);
    // case where the List<Range> for a column is null
    ranges.clear();
    ranges.put(0, null);
    rSet.getFiltersFromDomains(ranges);
    // case EXACTLY: a single-value (equality) range
    range.clear();
    ranges.clear();
    long exactly = 12345678;
    range.add(Range.equal(BIGINT, exactly));
    ranges.put(0, range);
    rSet.getFiltersFromDomains(ranges);
    // mixed case: an equality range plus a bounded range, spread across two columns
    range.add(Range.range(BIGINT, exactly, true, exactly * 2, true));
    ranges.put(1, range);
    list.add(TestUtils.createHBaseColumnHandle("a", "f_a", "q_a", 1));
    HBaseRecordSet rSet2Column = new HBaseRecordSet(hconn, session, hBasesplit, tableHandle, list);
    rSet2Column.getFiltersFromDomains(ranges);
}
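The HBaseSplit above is built with an empty HostAddress list, so no host affinity is exercised in this test. When addresses are needed, the SPI's HostAddress also provides static factories; the class name, host names, and ports below are illustrative only, a minimal sketch:

import io.prestosql.spi.HostAddress;

import java.util.ArrayList;
import java.util.List;

public final class HostAddressExample
{
    private HostAddressExample() {}

    public static List<HostAddress> regionServerAddresses()
    {
        List<HostAddress> hosts = new ArrayList<>();
        // build from separate host and port parts
        hosts.add(HostAddress.fromParts("region-server-1", 16020));
        // or parse a "host:port" string
        hosts.add(HostAddress.fromString("region-server-2:16020"));
        return hosts;
    }
}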
Use of io.prestosql.spi.HostAddress in project hetu-core by openlookeng.
In class TestHiveSplitSource, method testGroupSmallSplitConfigSetMaxSmallSplitsGrouped:
@Test
public void testGroupSmallSplitConfigSetMaxSmallSplitsGrouped()
{
    // exercise setMaxSplitsToGroup: 90 small splits grouped at most 30 per wrapper should yield 3 wrappers
    HiveConfig hiveConfig = new HiveConfig();
    hiveConfig.setMaxSplitsToGroup(30);
    HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(HiveTestUtils.SESSION, "database", "table", 10, 10,
            new DataSize(1, MEGABYTE), Integer.MAX_VALUE, new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5), new CounterStat(), null, null, null, hiveConfig, HiveStorageFormat.ORC);
    for (int i = 0; i < 90; i++) {
        List<HostAddress> hostAddress = new ArrayList<>();
        hostAddress.add(new HostAddress("vm1", 1));
        hiveSplitSource.addToQueue(new TestSplit(i, hostAddress));
    }
    List<ConnectorSplit> connectorSplits = getSplits(hiveSplitSource, 100);
    List<ConnectorSplit> groupedConnectorSplits = hiveSplitSource.groupSmallSplits(connectorSplits, 1);
    assertEquals(groupedConnectorSplits.size(), 3);
    List<HiveSplitWrapper> hiveSplitWrappers = new ArrayList<>();
    groupedConnectorSplits.forEach(pendingSplit -> hiveSplitWrappers.add((HiveSplitWrapper) pendingSplit));
    for (int i = 0; i < 3; i++) {
        assertEquals(hiveSplitWrappers.get(i).getSplits().size(), 30);
    }
}
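Because these snippets are collected as uses of io.prestosql.spi.HostAddress, a brief, purely illustrative sketch of reading host affinity back off the grouped splits may help. It relies only on the ConnectorSplit#getAddresses() accessor from the SPI; the helper class and method names are made up for the example:

import io.prestosql.spi.HostAddress;
import io.prestosql.spi.connector.ConnectorSplit;

import java.util.List;

public final class SplitAddressPrinter
{
    private SplitAddressPrinter() {}

    // Illustrative only: print the host affinity reported by each (possibly grouped) split.
    public static void printAddresses(List<ConnectorSplit> splits)
    {
        for (ConnectorSplit split : splits) {
            for (HostAddress address : split.getAddresses()) {
                String host = address.getHostText();
                // getPort() throws when no port is present, so guard with hasPort()
                System.out.println(address.hasPort() ? host + ":" + address.getPort() : host);
            }
        }
    }
}

Called as printAddresses(groupedConnectorSplits) at the end of the test above, it would be expected to list the vm1:1 affinity that every queued split was given.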
Use of io.prestosql.spi.HostAddress in project hetu-core by openlookeng.
In class TestHiveSplitSource, method testGroupSmallSplitReplicationFactor1:
@Test
public void testGroupSmallSplitReplicationFactor1()
{
    HiveConfig hiveConfig = new HiveConfig();
    hiveConfig.setMaxSplitsToGroup(10);
    // replication factor 1 and all splits have the same location
    HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(HiveTestUtils.SESSION, "database", "table", 10, 10,
            new DataSize(1, MEGABYTE), Integer.MAX_VALUE, new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5), new CounterStat(), null, null, null, hiveConfig, HiveStorageFormat.ORC);
    List<HostAddress> hostAddress = new ArrayList<>();
    hostAddress.add(new HostAddress("vm1", 1));
    for (int i = 0; i < 30; i++) {
        hiveSplitSource.addToQueue(new TestSplit(i, hostAddress));
        assertEquals(hiveSplitSource.getBufferedInternalSplitCount(), i + 1);
    }
    List<ConnectorSplit> connectorSplits = getSplits(hiveSplitSource, 100);
    List<ConnectorSplit> groupedConnectorSplits = hiveSplitSource.groupSmallSplits(connectorSplits, 1);
    assertEquals(groupedConnectorSplits.size(), 3);
    List<HiveSplitWrapper> hiveSplitWrappers = new ArrayList<>();
    groupedConnectorSplits.forEach(pendingSplit -> hiveSplitWrappers.add((HiveSplitWrapper) pendingSplit));
    assertEquals(hiveSplitWrappers.get(0).getSplits().size(), 10);
    assertEquals(hiveSplitWrappers.get(1).getSplits().size(), 10);
    assertEquals(hiveSplitWrappers.get(2).getSplits().size(), 10);
}
Use of io.prestosql.spi.HostAddress in project hetu-core by openlookeng.
In class TestHiveSplitSource, method testGroupSmallSplitAllBigSizeFiles:
@Test
public void testGroupSmallSplitAllBigSizeFiles()
{
    // 100 files, all of big size (67108864 bytes = 64 MB), so no grouping should occur
    HiveConfig hiveConfig = new HiveConfig();
    hiveConfig.setMaxSplitsToGroup(100);
    HiveSplitSource hiveSplitSource = HiveSplitSource.allAtOnce(HiveTestUtils.SESSION, "database", "table", 10, 10,
            new DataSize(1, MEGABYTE), Integer.MAX_VALUE, new TestingHiveSplitLoader(),
            Executors.newFixedThreadPool(5), new CounterStat(), null, null, null, hiveConfig, HiveStorageFormat.ORC);
    for (int i = 0; i < 100; i++) {
        List<HostAddress> hostAddress = new ArrayList<>();
        hostAddress.add(new HostAddress("vm1", 1));
        hiveSplitSource.addToQueue(new TestSplit(i, OptionalInt.empty(), 67108864, hostAddress));
    }
    List<ConnectorSplit> connectorSplits = getSplits(hiveSplitSource, 100);
    List<ConnectorSplit> groupedConnectorSplits = hiveSplitSource.groupSmallSplits(connectorSplits, 1);
    assertEquals(groupedConnectorSplits.size(), 100);
    List<HiveSplitWrapper> hiveSplitWrappers = new ArrayList<>();
    groupedConnectorSplits.forEach(pendingSplit -> hiveSplitWrappers.add((HiveSplitWrapper) pendingSplit));
    System.out.println("groupedConnectorSplits.size() = " + groupedConnectorSplits.size());
    for (int i = 0; i < groupedConnectorSplits.size(); i++) {
        assertEquals(hiveSplitWrappers.get(i).getSplits().size(), 1);
    }
}
Use of io.prestosql.spi.HostAddress in project hetu-core by openlookeng.
In class Failures, method toFailure:
private static ExecutionFailureInfo toFailure(Throwable throwable, Set<Throwable> seenFailures)
{
    if (throwable == null) {
        return null;
    }
    String type;
    HostAddress remoteHost = null;
    SemanticErrorCode semanticErrorCode = null;
    if (throwable instanceof Failure) {
        type = ((Failure) throwable).getType();
    }
    else {
        Class<?> clazz = throwable.getClass();
        type = firstNonNull(clazz.getCanonicalName(), clazz.getName());
    }
    if (throwable instanceof PrestoTransportException) {
        remoteHost = ((PrestoTransportException) throwable).getRemoteHost();
    }
    if (throwable instanceof SemanticException) {
        semanticErrorCode = ((SemanticException) throwable).getCode();
    }
    // break cycles in the cause chain: if this throwable was already visited, record a marker instead of recursing
    if (seenFailures.contains(throwable)) {
        return new ExecutionFailureInfo(type, "[cyclic] " + throwable.getMessage(), null, ImmutableList.of(),
                ImmutableList.of(), null, GENERIC_INTERNAL_ERROR.toErrorCode(),
                Optional.ofNullable(semanticErrorCode), remoteHost);
    }
    seenFailures.add(throwable);
    ExecutionFailureInfo cause = toFailure(throwable.getCause(), seenFailures);
    // use this throwable's error code if classified; otherwise fall back to the cause's code or GENERIC_INTERNAL_ERROR
    ErrorCode errorCode = toErrorCode(throwable);
    if (errorCode == null) {
        if (cause == null) {
            errorCode = GENERIC_INTERNAL_ERROR.toErrorCode();
        }
        else {
            errorCode = cause.getErrorCode();
        }
    }
    return new ExecutionFailureInfo(type, throwable.getMessage(), cause,
            Arrays.stream(throwable.getSuppressed()).map(failure -> toFailure(failure, seenFailures)).collect(toImmutableList()),
            Lists.transform(asList(throwable.getStackTrace()), toStringFunction()),
            getErrorLocation(throwable), errorCode, Optional.ofNullable(semanticErrorCode), remoteHost);
}
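As a rough, hedged sketch of how remoteHost gets populated: in upstream Presto the public Failures.toFailure(Throwable) overload seeds the seen-failures set and delegates to the private method above, so wrapping a PrestoTransportException carries its remote HostAddress through to the resulting ExecutionFailureInfo. The error code, host, and helper class below are illustrative assumptions, not code from hetu-core:

import io.prestosql.execution.ExecutionFailureInfo;
import io.prestosql.spi.HostAddress;
import io.prestosql.spi.PrestoTransportException;
import io.prestosql.util.Failures;

import static io.prestosql.spi.StandardErrorCode.TOO_MANY_REQUESTS_FAILED;

public final class TransportFailureSketch
{
    private TransportFailureSketch() {}

    public static ExecutionFailureInfo describeWorkerFailure()
    {
        // illustrative remote host and message; any transport-level error code would do
        HostAddress worker = HostAddress.fromParts("worker-1", 8080);
        PrestoTransportException failure =
                new PrestoTransportException(TOO_MANY_REQUESTS_FAILED, worker, "worker unreachable");
        // toFailure copies the remote host into the ExecutionFailureInfo so callers
        // can report which node the failure came from
        return Failures.toFailure(failure);
    }
}

The returned ExecutionFailureInfo then exposes the remote host (alongside the error code and stack trace) when the failure is reported at the query level.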