Use of org.apache.drill.exec.store.hive.HiveMetadataProvider.LogicalInputSplit in project drill by axbaretto.
From class HiveScan, method getSpecificScan:
@Override
public SubScan getSpecificScan(final int minorFragmentId) throws ExecutionSetupException {
  try {
    final List<LogicalInputSplit> splits = mappings.get(minorFragmentId);
    List<HivePartitionWrapper> parts = Lists.newArrayList();
    final List<List<String>> encodedInputSplits = Lists.newArrayList();
    final List<String> splitTypes = Lists.newArrayList();
    for (final LogicalInputSplit split : splits) {
      final Partition splitPartition = split.getPartition();
      if (splitPartition != null) {
        HiveTableWithColumnCache table = hiveReadEntry.getTable();
        parts.add(createPartitionWithSpecColumns(new HiveTableWithColumnCache(table, new ColumnListsCache(table)), splitPartition));
      }
      encodedInputSplits.add(split.serialize());
      splitTypes.add(split.getType());
    }
    if (parts.size() <= 0) {
      parts = null;
    }
    final HiveReadEntry subEntry = new HiveReadEntry(hiveReadEntry.getTableWrapper(), parts);
    return new HiveSubScan(getUserName(), encodedInputSplits, subEntry, splitTypes, columns, hiveStoragePlugin);
  } catch (IOException | ReflectiveOperationException e) {
    throw new ExecutionSetupException(e);
  }
}
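For orientation: the mappings list read here is populated by applyAssignments (shown at the bottom of this page), so the calls must happen in that order. Below is a minimal, hypothetical driver fragment illustrating the sequence; hiveScan and endpoints are assumed context, not real Drill variable names:

// Hypothetical driver: assignments are applied once, then each minor fragment
// requests its own SubScan. hiveScan and endpoints are assumed to exist.
hiveScan.applyAssignments(endpoints);
for (int minorFragmentId = 0; minorFragmentId < endpoints.size(); minorFragmentId++) {
  SubScan subScan = hiveScan.getSpecificScan(minorFragmentId);
  // each SubScan describes only the splits assigned to that fragment
}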
Use of org.apache.drill.exec.store.hive.HiveMetadataProvider.LogicalInputSplit in project drill by axbaretto.
From class HiveScan, method getOperatorAffinity:
@Override
public List<EndpointAffinity> getOperatorAffinity() {
  final Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
  for (final DrillbitEndpoint endpoint : hiveStoragePlugin.getContext().getBits()) {
    endpointMap.put(endpoint.getAddress(), endpoint);
    logger.debug("endpoint address: {}", endpoint.getAddress());
  }
  final Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
  try {
    long totalSize = 0;
    final List<LogicalInputSplit> inputSplits = getInputSplits();
    for (final LogicalInputSplit split : inputSplits) {
      totalSize += Math.max(1, split.getLength());
    }
    for (final LogicalInputSplit split : inputSplits) {
      final float affinity = ((float) Math.max(1, split.getLength())) / totalSize;
      for (final String loc : split.getLocations()) {
        logger.debug("split location: {}", loc);
        final DrillbitEndpoint endpoint = endpointMap.get(loc);
        if (endpoint != null) {
          if (affinityMap.containsKey(endpoint)) {
            affinityMap.get(endpoint).addAffinity(affinity);
          } else {
            affinityMap.put(endpoint, new EndpointAffinity(endpoint, affinity));
          }
        }
      }
    }
  } catch (final IOException e) {
    throw new DrillRuntimeException(e);
  }
  for (final DrillbitEndpoint ep : affinityMap.keySet()) {
    Preconditions.checkNotNull(ep);
  }
  for (final EndpointAffinity a : affinityMap.values()) {
    Preconditions.checkNotNull(a.getEndpoint());
  }
  return Lists.newArrayList(affinityMap.values());
}
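Each split contributes its byte length (clamped to at least 1 by Math.max, so zero-length splits still count) divided by the total scan size, and that share is added to every endpoint holding a replica of the split; an endpoint's final affinity is therefore the fraction of the scan it could read locally. Below is a self-contained sketch of the same accumulation using plain collections; the Split record and host names are illustrative stand-ins, not Drill classes:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class AffinityDemo {
  // Illustrative stand-in for LogicalInputSplit: a byte length plus replica hosts.
  record Split(long length, List<String> locations) {}

  public static void main(String[] args) {
    List<Split> splits = List.of(
        new Split(128, List.of("node1", "node2")),
        new Split(256, List.of("node2", "node3")));
    long totalSize = 0;
    for (Split s : splits) {
      totalSize += Math.max(1, s.length()); // guard against zero-length splits
    }
    Map<String, Float> affinity = new HashMap<>();
    for (Split s : splits) {
      float share = ((float) Math.max(1, s.length())) / totalSize;
      for (String host : s.locations()) {
        affinity.merge(host, share, Float::sum); // same accumulation as addAffinity
      }
    }
    affinity.forEach((host, a) -> System.out.printf("%s -> %.3f%n", host, a));
    // node2 hosts replicas of both splits: (128 + 256) / 384 = 1.000
  }
}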
Use of org.apache.drill.exec.store.hive.HiveMetadataProvider.LogicalInputSplit in project drill by apache.
From class HiveScan, method getOperatorAffinity:
@Override
public List<EndpointAffinity> getOperatorAffinity() {
  final Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
  for (final DrillbitEndpoint endpoint : hiveStoragePlugin.getContext().getBits()) {
    endpointMap.put(endpoint.getAddress(), endpoint);
    logger.debug("Endpoint address: {}", endpoint.getAddress());
  }
  final Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
  try {
    long totalSize = 0;
    final List<LogicalInputSplit> inputSplits = getInputSplits();
    for (final LogicalInputSplit split : inputSplits) {
      totalSize += Math.max(1, split.getLength());
    }
    for (final LogicalInputSplit split : inputSplits) {
      final float affinity = ((float) Math.max(1, split.getLength())) / totalSize;
      for (final String loc : split.getLocations()) {
        logger.debug("Split location: {}", loc);
        final DrillbitEndpoint endpoint = endpointMap.get(loc);
        if (endpoint != null) {
          if (affinityMap.containsKey(endpoint)) {
            affinityMap.get(endpoint).addAffinity(affinity);
          } else {
            affinityMap.put(endpoint, new EndpointAffinity(endpoint, affinity));
          }
        }
      }
    }
  } catch (final IOException e) {
    throw new DrillRuntimeException(e);
  }
  return new ArrayList<>(affinityMap.values());
}
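This apache variant computes the same affinities as the axbaretto version above; it only drops the Preconditions checks and returns a plain ArrayList. A consumer would typically rank endpoints by these values when deciding where to place fragments; the sketch below is illustrative and the affinity numbers are invented:

import java.util.Map;

public class AffinityRankingDemo {
  public static void main(String[] args) {
    // Invented values standing in for what getOperatorAffinity reports.
    Map<String, Float> affinities = Map.of("node1", 0.33f, "node2", 1.00f, "node3", 0.67f);
    affinities.entrySet().stream()
        .sorted(Map.Entry.<String, Float>comparingByValue().reversed())
        .forEach(e -> System.out.println(e.getKey() + " affinity=" + e.getValue()));
  }
}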
Use of org.apache.drill.exec.store.hive.HiveMetadataProvider.LogicalInputSplit in project drill by apache.
From class HiveScan, method getSpecificScan:
@Override
public SubScan getSpecificScan(final int minorFragmentId) throws ExecutionSetupException {
  try {
    final List<LogicalInputSplit> splits = mappings.get(minorFragmentId);
    List<HivePartitionWrapper> parts = new ArrayList<>();
    final List<List<String>> encodedInputSplits = new ArrayList<>();
    final List<String> splitTypes = new ArrayList<>();
    for (final LogicalInputSplit split : splits) {
      final Partition splitPartition = split.getPartition();
      if (splitPartition != null) {
        HiveTableWithColumnCache table = hiveReadEntry.getTable();
        parts.add(createPartitionWithSpecColumns(new HiveTableWithColumnCache(table, new ColumnListsCache(table)), splitPartition));
      }
      encodedInputSplits.add(split.serialize());
      splitTypes.add(split.getType());
    }
    if (parts.size() <= 0) {
      parts = null;
    }
    final HiveReadEntry subEntry = new HiveReadEntry(hiveReadEntry.getTableWrapper(), parts);
    return new HiveSubScan(getUserName(), encodedInputSplits, subEntry, splitTypes, columns, hiveStoragePlugin, confProperties);
  } catch (IOException | ReflectiveOperationException e) {
    throw new ExecutionSetupException(e);
  }
}
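The only difference from the axbaretto version is the extra confProperties argument passed to HiveSubScan, which carries session-level Hive configuration overrides. Below is a hypothetical sketch of building such a map from a newline-separated key=value string; this is illustrative parsing under that assumption, not Drill's actual implementation:

import java.util.LinkedHashMap;
import java.util.Map;

public class ConfPropertiesDemo {
  static Map<String, String> parse(String raw) {
    Map<String, String> props = new LinkedHashMap<>();
    for (String line : raw.split("\n")) {
      int eq = line.indexOf('=');
      if (eq > 0) { // skip malformed lines with no key=value separator
        props.put(line.substring(0, eq).trim(), line.substring(eq + 1).trim());
      }
    }
    return props;
  }

  public static void main(String[] args) {
    System.out.println(parse("hive.mapred.supports.subdirectories=true\nmapred.input.dir.recursive=true"));
  }
}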
Use of org.apache.drill.exec.store.hive.HiveMetadataProvider.LogicalInputSplit in project drill by axbaretto.
From class HiveScan, method applyAssignments:
@Override
public void applyAssignments(final List<CoordinationProtos.DrillbitEndpoint> endpoints) {
  mappings = new ArrayList<>();
  for (int i = 0; i < endpoints.size(); i++) {
    mappings.add(new ArrayList<LogicalInputSplit>());
  }
  final int count = endpoints.size();
  final List<LogicalInputSplit> inputSplits = getInputSplits();
  for (int i = 0; i < inputSplits.size(); i++) {
    mappings.get(i % count).add(inputSplits.get(i));
  }
}
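The assignment is a simple round-robin: split i lands on endpoint i % count, which balances split counts (though not necessarily byte sizes) across drillbits. A self-contained sketch with integers standing in for splits:

import java.util.ArrayList;
import java.util.List;

public class RoundRobinDemo {
  public static void main(String[] args) {
    int endpointCount = 3;
    List<List<Integer>> mappings = new ArrayList<>();
    for (int i = 0; i < endpointCount; i++) {
      mappings.add(new ArrayList<>());
    }
    for (int split = 0; split < 10; split++) {
      mappings.get(split % endpointCount).add(split);
    }
    System.out.println(mappings); // [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
  }
}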