Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
Class FindHardDistributionScans, method visit:
@Override
public RelNode visit(TableScan scan) {
  DrillTable unwrap = scan.getTable().unwrap(DrillTable.class);
  if (unwrap == null) {
    unwrap = scan.getTable().unwrap(DrillTranslatableTable.class).getDrillTable();
  }
  try {
    if (unwrap.getGroupScan().getDistributionAffinity() == DistributionAffinity.HARD) {
      contains = true;
    }
  } catch (final IOException e) {
    // Preserve the cause so the original I/O failure is not swallowed.
    throw new DrillRuntimeException("Failed to get GroupScan from table.", e);
  }
  return scan;
}
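For context, a visitor like this is normally run over a Calcite plan via RelNode.accept. A minimal sketch, assuming the class extends Calcite's RelShuttleImpl and exposes the contains flag through a hypothetical contains() accessor:

  // Illustrative only: check a plan for scans that demand hard distribution affinity.
  FindHardDistributionScans finder = new FindHardDistributionScans();
  relNode.accept(finder);
  if (finder.contains()) {
    // At least one scan pins execution to specific drillbits, so the plan
    // cannot be collapsed into a single fragment.
  }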
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
Class DrillParquetGroupConverter, method getConverterForType:
@SuppressWarnings("resource")
private PrimitiveConverter getConverterForType(String name, PrimitiveType type) {
  switch (type.getPrimitiveTypeName()) {
    case INT32: {
      if (type.getOriginalType() == null) {
        IntWriter writer = type.getRepetition() == Repetition.REPEATED
            ? mapWriter.list(name).integer() : mapWriter.integer(name);
        return new DrillIntConverter(writer);
      }
      switch (type.getOriginalType()) {
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          Decimal9Writer writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).decimal9() : mapWriter.decimal9(name);
          return new DrillDecimal9Converter(writer,
              type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
        }
        case DATE: {
          DateWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).date() : mapWriter.date(name);
          switch (containsCorruptedDates) {
            case META_SHOWS_CORRUPTION:
              return new DrillCorruptedDateConverter(writer);
            case META_SHOWS_NO_CORRUPTION:
              return new DrillDateConverter(writer);
            case META_UNCLEAR_TEST_VALUES:
              return new CorruptionDetectingDateConverter(writer);
            default:
              throw new DrillRuntimeException(String.format(
                  "Issue setting up parquet reader for date type, unrecognized date corruption "
                      + "status %s. See DRILL-4203 for more info.", containsCorruptedDates));
          }
        }
        case TIME_MILLIS: {
          TimeWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).time() : mapWriter.time(name);
          return new DrillTimeConverter(writer);
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case INT64: {
      if (type.getOriginalType() == null) {
        BigIntWriter writer = type.getRepetition() == Repetition.REPEATED
            ? mapWriter.list(name).bigInt() : mapWriter.bigInt(name);
        return new DrillBigIntConverter(writer);
      }
      switch (type.getOriginalType()) {
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          Decimal18Writer writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).decimal18() : mapWriter.decimal18(name);
          return new DrillDecimal18Converter(writer,
              type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
        }
        case TIMESTAMP_MILLIS: {
          TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
          return new DrillTimeStampConverter(writer);
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case INT96: {
      // TODO: replace null with TIMESTAMP_NANOS once parquet supports such a type annotation.
      if (type.getOriginalType() == null) {
        if (options.getOption(ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP).bool_val) {
          TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
          return new DrillFixedBinaryToTimeStampConverter(writer);
        } else {
          VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
          return new DrillFixedBinaryToVarbinaryConverter(writer,
              ParquetColumnMetadata.getTypeLengthInBits(type.getPrimitiveTypeName()) / 8,
              mutator.getManagedBuffer());
        }
      }
      // Note: an INT96 column with a non-null original type falls through to the
      // FLOAT case below; in practice INT96 never carries an original type.
    }
    case FLOAT: {
      Float4Writer writer = type.getRepetition() == Repetition.REPEATED
          ? mapWriter.list(name).float4() : mapWriter.float4(name);
      return new DrillFloat4Converter(writer);
    }
    case DOUBLE: {
      Float8Writer writer = type.getRepetition() == Repetition.REPEATED
          ? mapWriter.list(name).float8() : mapWriter.float8(name);
      return new DrillFloat8Converter(writer);
    }
    case BOOLEAN: {
      BitWriter writer = type.getRepetition() == Repetition.REPEATED
          ? mapWriter.list(name).bit() : mapWriter.bit(name);
      return new DrillBoolConverter(writer);
    }
    case BINARY: {
      if (type.getOriginalType() == null) {
        VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED
            ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
        return new DrillVarBinaryConverter(writer, mutator.getManagedBuffer());
      }
      switch (type.getOriginalType()) {
        case UTF8: {
          VarCharWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).varChar() : mapWriter.varChar(name);
          return new DrillVarCharConverter(writer, mutator.getManagedBuffer());
        }
        // TODO: not sure if BINARY/DECIMAL is actually supported.
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          DecimalMetadata metadata = type.getDecimalMetadata();
          if (metadata.getPrecision() <= 28) {
            Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED
                ? mapWriter.list(name).decimal28Sparse()
                : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
            return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(),
                metadata.getScale(), mutator.getManagedBuffer());
          } else {
            Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED
                ? mapWriter.list(name).decimal38Sparse()
                : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
            return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(),
                metadata.getScale(), mutator.getManagedBuffer());
          }
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case FIXED_LEN_BYTE_ARRAY:
      if (type.getOriginalType() == OriginalType.DECIMAL) {
        ParquetReaderUtility.checkDecimalTypeEnabled(options);
        DecimalMetadata metadata = type.getDecimalMetadata();
        if (metadata.getPrecision() <= 28) {
          Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).decimal28Sparse()
              : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
          return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(),
              metadata.getScale(), mutator.getManagedBuffer());
        } else {
          Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED
              ? mapWriter.list(name).decimal38Sparse()
              : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
          return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(),
              metadata.getScale(), mutator.getManagedBuffer());
        }
      } else if (type.getOriginalType() == OriginalType.INTERVAL) {
        IntervalWriter writer = type.getRepetition() == Repetition.REPEATED
            ? mapWriter.list(name).interval() : mapWriter.interval(name);
        return new DrillFixedLengthByteArrayToInterval(writer);
      } else {
        VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED
            ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
        return new DrillFixedBinaryToVarbinaryConverter(writer, type.getTypeLength(),
            mutator.getManagedBuffer());
      }
    default:
      throw new UnsupportedOperationException("Unsupported type: " + type.getPrimitiveTypeName());
  }
}
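The recurring pattern above pairs a repetition-aware writer choice (a list writer for REPEATED fields, a plain map writer otherwise) with a defensive default branch that turns an unrecognized enum constant into a DrillRuntimeException rather than silently misreading data. A minimal self-contained sketch of that defensive dispatch, using a stand-in enum rather than Drill's real DateCorruptionStatus:

  import org.apache.drill.common.exceptions.DrillRuntimeException;

  class DateDispatchSketch {
    // Stand-in for ParquetReaderUtility.DateCorruptionStatus; illustrative only.
    enum CorruptionStatus { SHOWS_CORRUPTION, SHOWS_NO_CORRUPTION, UNCLEAR }

    static String pickConverter(CorruptionStatus status) {
      switch (status) {
        case SHOWS_CORRUPTION:    return "corrected-date converter";
        case SHOWS_NO_CORRUPTION: return "plain date converter";
        case UNCLEAR:             return "corruption-detecting converter";
        default:
          // Unreachable today, but guards against future enum constants.
          throw new DrillRuntimeException(
              String.format("Unrecognized date corruption status %s. See DRILL-4203.", status));
      }
    }
  }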
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
Class AssignmentCreator, method getMappings:
/**
 * Does the work of creating the mappings for this AssignmentCreator.
 * @return the minor fragment id to work units mapping
 */
private ListMultimap<Integer, T> getMappings() {
  Stopwatch watch = Stopwatch.createStarted();
  maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
  LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
  LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
  Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators = getEndpointIterators();
  // Assign up to minCount per node based on locality.
  unassignedWorkList = assign(workList, endpointIterators, false);
  // Assign up to minCount per node in a round-robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node based on locality.
  unassignedWorkList = assign(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node in a round-robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, false);
  if (!unassignedWorkList.isEmpty()) {
    throw new DrillRuntimeException("There are still unassigned work units");
  }
  logger.debug("Took {} ms to assign {} work units to {} fragments",
      watch.elapsed(TimeUnit.MILLISECONDS), units.size(), incomingEndpoints.size());
  return mappings;
}
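To make the maxWork ceiling concrete: with 10 work units spread over 3 incoming endpoints, maxWork = ceil(10 / 3.0) = 4, so no minor fragment receives more than 4 units before leftovers are handed out. A quick illustrative check of the same formula:

  // Illustrative arithmetic only; mirrors the maxWork computation above.
  int unitCount = 10;
  int endpointCount = 3;
  int maxWork = (int) Math.ceil(unitCount / ((float) endpointCount)); // ceil(3.33...) = 4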
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
Class ZookeeperPersistentStore, method put:
@Override
public void put(final String key, final V value, final DataChangeVersion version) {
  final InstanceSerializer<V> serializer = config.getSerializer();
  try {
    final byte[] bytes = serializer.serialize(value);
    client.put(key, bytes, version);
  } catch (final IOException e) {
    throw new DrillRuntimeException(
        String.format("unable to de/serialize value of type %s", value.getClass()), e);
  }
}
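A hedged usage sketch of the optimistic-concurrency flow this method supports. It assumes a PersistentStore-style get(key, version) overload and a String-valued store, both of which may differ from the real call sites:

  // Illustrative only: read-modify-write with a version guard.
  // 'store' is assumed to be a ZookeeperPersistentStore<String>.
  DataChangeVersion version = new DataChangeVersion();
  String current = store.get("running/query-1", version);
  store.put("running/query-1", current + " updated", version);
  // If another writer bumped the znode in between, the versioned put is
  // expected to fail rather than silently overwrite.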
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
Class HiveScan, method getOperatorAffinity:
@Override
public List<EndpointAffinity> getOperatorAffinity() {
  final Map<String, DrillbitEndpoint> endpointMap = new HashMap<>();
  for (final DrillbitEndpoint endpoint : storagePlugin.getContext().getBits()) {
    endpointMap.put(endpoint.getAddress(), endpoint);
    logger.debug("endpoint address: {}", endpoint.getAddress());
  }
  final Map<DrillbitEndpoint, EndpointAffinity> affinityMap = new HashMap<>();
  try {
    long totalSize = 0;
    final List<InputSplitWrapper> inputSplits = getInputSplits();
    for (final InputSplitWrapper split : inputSplits) {
      totalSize += Math.max(1, split.getSplit().getLength());
    }
    for (final InputSplitWrapper split : inputSplits) {
      final float affinity = ((float) Math.max(1, split.getSplit().getLength())) / totalSize;
      for (final String loc : split.getSplit().getLocations()) {
        logger.debug("split location: {}", loc);
        final DrillbitEndpoint endpoint = endpointMap.get(loc);
        if (endpoint != null) {
          if (affinityMap.containsKey(endpoint)) {
            affinityMap.get(endpoint).addAffinity(affinity);
          } else {
            affinityMap.put(endpoint, new EndpointAffinity(endpoint, affinity));
          }
        }
      }
    }
  } catch (final IOException e) {
    throw new DrillRuntimeException(e);
  }
  for (final DrillbitEndpoint ep : affinityMap.keySet()) {
    Preconditions.checkNotNull(ep);
  }
  for (final EndpointAffinity a : affinityMap.values()) {
    Preconditions.checkNotNull(a.getEndpoint());
  }
  return Lists.newArrayList(affinityMap.values());
}
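To make the affinity arithmetic concrete: with two splits of 100 and 300 bytes, totalSize is 400, so an endpoint hosting only the first split gets affinity 0.25 and one hosting only the second gets 0.75; an endpoint hosting both accumulates 1.0 through addAffinity. An illustrative check with the same formula:

  // Illustrative arithmetic only; mirrors the affinity computation above.
  long[] splitLengths = {100L, 300L};
  long totalSize = 0;
  for (long length : splitLengths) {
    totalSize += Math.max(1, length);
  }
  float first = ((float) Math.max(1, splitLengths[0])) / totalSize;  // 0.25
  float second = ((float) Math.max(1, splitLengths[1])) / totalSize; // 0.75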