Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class DropFunctionHandler, method unregister.
/**
 * Gets the remote function registry together with its version.
 * The version is used to ensure that we update the same registry we removed jars from.
 * Looks for the jar to be deleted; if it finds one,
 * attempts to update the remote registry with a list of jars that excludes the jar to be deleted.
 * If a {@link VersionMismatchException} is detected during the update,
 * repeats the unregistration process until the number of retry attempts exceeds the limit.
 * If the retry count reaches 0, throws an exception indicating that the remote function registry could not be updated.
 *
 * @param jarName jar name
 * @param remoteFunctionRegistry remote function registry
 * @return the jar that was unregistered, or null if no matching jar was found
 */
private Jar unregister(String jarName, RemoteFunctionRegistry remoteFunctionRegistry) {
  int retryAttempts = remoteFunctionRegistry.getRetryAttempts();
  while (retryAttempts >= 0) {
    // Read the registry together with its version so the update below can be conditional.
    DataChangeVersion version = new DataChangeVersion();
    Registry registry = remoteFunctionRegistry.getRegistry(version);
    Jar jarToBeDeleted = null;
    List<Jar> jars = Lists.newArrayList();
    for (Jar j : registry.getJarList()) {
      if (j.getName().equals(jarName)) {
        jarToBeDeleted = j;
      } else {
        jars.add(j);
      }
    }
    if (jarToBeDeleted == null) {
      return null;
    }
    Registry updatedRegistry = Registry.newBuilder().addAllJar(jars).build();
    try {
      remoteFunctionRegistry.updateRegistry(updatedRegistry, version);
      return jarToBeDeleted;
    } catch (VersionMismatchException ex) {
      // Another writer changed the registry since we read it; retry with a fresh snapshot.
      logger.debug("Failed to update function registry during unregistration, version mismatch was detected.", ex);
      retryAttempts--;
    }
  }
  throw new DrillRuntimeException("Failed to update remote function registry. Exceeded retry attempts limit.");
}
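The loop above is optimistic concurrency control: read a snapshot together with its version, compute the new state, then write conditionally and retry on conflict. A minimal self-contained sketch of the same pattern using only JDK types; VersionedStore, Snapshot, and updateWithRetries are hypothetical names, not Drill APIs:

import java.util.function.UnaryOperator;

/** Toy versioned store modeling RemoteFunctionRegistry's read-with-version / conditional-update contract. */
class VersionedStore<T> {

  static final class Snapshot<T> {
    final T value;
    final int version;
    Snapshot(T value, int version) { this.value = value; this.version = version; }
  }

  private T value;
  private int version;

  VersionedStore(T initial) { this.value = initial; }

  /** Reads the value together with the version it was read at. */
  synchronized Snapshot<T> read() { return new Snapshot<>(value, version); }

  /** Succeeds only if no other writer updated the value since it was read. */
  synchronized boolean update(T newValue, int expectedVersion) {
    if (version != expectedVersion) {
      return false; // plays the role of VersionMismatchException
    }
    value = newValue;
    version++;
    return true;
  }

  /** Retry loop mirroring unregister(): bounded attempts, fail loudly when they run out. */
  static <T> void updateWithRetries(VersionedStore<T> store, UnaryOperator<T> transform, int retryAttempts) {
    while (retryAttempts-- >= 0) {
      Snapshot<T> snapshot = store.read();
      if (store.update(transform.apply(snapshot.value), snapshot.version)) {
        return;
      }
      // Version mismatch: another writer got in first; loop with a fresh snapshot.
    }
    throw new RuntimeException("Failed to update store. Exceeded retry attempts limit.");
  }
}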
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class FindHardDistributionScans, method visit.
@Override
public RelNode visit(TableScan scan) {
  DrillTable unwrap = scan.getTable().unwrap(DrillTable.class);
  if (unwrap == null) {
    // Some scans expose the Drill table only through a translatable wrapper.
    unwrap = scan.getTable().unwrap(DrillTranslatableTable.class).getDrillTable();
  }
  try {
    if (unwrap.getGroupScan().getDistributionAffinity() == DistributionAffinity.HARD) {
      contains = true;
    }
  } catch (final IOException e) {
    // Chain the cause so the original failure is not lost.
    throw new DrillRuntimeException("Failed to get GroupScan from table.", e);
  }
  return scan;
}
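For context, this visitor is typically driven over a query plan via Calcite's RelNode.accept. A hedged usage sketch follows; the contains() accessor is an assumption about how the flag is exposed, not a confirmed Drill signature:

import org.apache.calcite.rel.RelNode;

// Hypothetical usage sketch; assumes FindHardDistributionScans is a Calcite
// RelShuttle and exposes its 'contains' flag via a contains() accessor.
final class HardAffinityCheck {
  static boolean requiresHardAffinity(RelNode plan) {
    FindHardDistributionScans finder = new FindHardDistributionScans();
    plan.accept(finder); // invokes visit(TableScan) on every scan in the tree
    // true => at least one scan demands HARD distribution affinity,
    // so the plan cannot be collapsed into a single fragment
    return finder.contains();
  }
}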
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class DrillParquetGroupConverter, method getConverterForType.
@SuppressWarnings("resource")
private PrimitiveConverter getConverterForType(String name, PrimitiveType type) {
  switch (type.getPrimitiveTypeName()) {
    case INT32: {
      if (type.getOriginalType() == null) {
        IntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).integer() : mapWriter.integer(name);
        return new DrillIntConverter(writer);
      }
      switch (type.getOriginalType()) {
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          Decimal9Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal9() : mapWriter.decimal9(name);
          return new DrillDecimal9Converter(writer, type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
        }
        case DATE: {
          DateWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).date() : mapWriter.date(name);
          switch (containsCorruptedDates) {
            case META_SHOWS_CORRUPTION:
              return new DrillCorruptedDateConverter(writer);
            case META_SHOWS_NO_CORRUPTION:
              return new DrillDateConverter(writer);
            case META_UNCLEAR_TEST_VALUES:
              return new CorruptionDetectingDateConverter(writer);
            default:
              throw new DrillRuntimeException(String.format("Issue setting up parquet reader for date type, unrecognized date corruption status %s. See DRILL-4203 for more info.", containsCorruptedDates));
          }
        }
        case TIME_MILLIS: {
          TimeWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).time() : mapWriter.time(name);
          return new DrillTimeConverter(writer);
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case INT64: {
      if (type.getOriginalType() == null) {
        BigIntWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).bigInt() : mapWriter.bigInt(name);
        return new DrillBigIntConverter(writer);
      }
      switch (type.getOriginalType()) {
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          Decimal18Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal18() : mapWriter.decimal18(name);
          return new DrillDecimal18Converter(writer, type.getDecimalMetadata().getPrecision(), type.getDecimalMetadata().getScale());
        }
        case TIMESTAMP_MILLIS: {
          TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
          return new DrillTimeStampConverter(writer);
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case INT96: {
      // TODO: replace null with TIMESTAMP_NANOS once parquet supports such a type annotation.
      if (type.getOriginalType() == null) {
        if (options.getOption(ExecConstants.PARQUET_READER_INT96_AS_TIMESTAMP).bool_val) {
          TimeStampWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).timeStamp() : mapWriter.timeStamp(name);
          return new DrillFixedBinaryToTimeStampConverter(writer);
        } else {
          VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
          return new DrillFixedBinaryToVarbinaryConverter(writer, ParquetColumnMetadata.getTypeLengthInBits(type.getPrimitiveTypeName()) / 8, mutator.getManagedBuffer());
        }
      }
      // Note: an INT96 with a non-null original type falls through to the FLOAT case below.
    }
    case FLOAT: {
      Float4Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).float4() : mapWriter.float4(name);
      return new DrillFloat4Converter(writer);
    }
    case DOUBLE: {
      Float8Writer writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).float8() : mapWriter.float8(name);
      return new DrillFloat8Converter(writer);
    }
    case BOOLEAN: {
      BitWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).bit() : mapWriter.bit(name);
      return new DrillBoolConverter(writer);
    }
    case BINARY: {
      if (type.getOriginalType() == null) {
        VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
        return new DrillVarBinaryConverter(writer, mutator.getManagedBuffer());
      }
      switch (type.getOriginalType()) {
        case UTF8: {
          VarCharWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varChar() : mapWriter.varChar(name);
          return new DrillVarCharConverter(writer, mutator.getManagedBuffer());
        }
        // TODO: not sure whether BINARY/DECIMAL is actually supported
        case DECIMAL: {
          ParquetReaderUtility.checkDecimalTypeEnabled(options);
          DecimalMetadata metadata = type.getDecimalMetadata();
          if (metadata.getPrecision() <= 28) {
            Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal28Sparse() : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
            return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
          } else {
            Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal38Sparse() : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
            return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
          }
        }
        default:
          throw new UnsupportedOperationException("Unsupported type: " + type.getOriginalType());
      }
    }
    case FIXED_LEN_BYTE_ARRAY:
      if (type.getOriginalType() == OriginalType.DECIMAL) {
        ParquetReaderUtility.checkDecimalTypeEnabled(options);
        DecimalMetadata metadata = type.getDecimalMetadata();
        if (metadata.getPrecision() <= 28) {
          Decimal28SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal28Sparse() : mapWriter.decimal28Sparse(name, metadata.getScale(), metadata.getPrecision());
          return new DrillBinaryToDecimal28Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
        } else {
          Decimal38SparseWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).decimal38Sparse() : mapWriter.decimal38Sparse(name, metadata.getScale(), metadata.getPrecision());
          return new DrillBinaryToDecimal38Converter(writer, metadata.getPrecision(), metadata.getScale(), mutator.getManagedBuffer());
        }
      } else if (type.getOriginalType() == OriginalType.INTERVAL) {
        IntervalWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).interval() : mapWriter.interval(name);
        return new DrillFixedLengthByteArrayToInterval(writer);
      } else {
        VarBinaryWriter writer = type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).varBinary() : mapWriter.varBinary(name);
        return new DrillFixedBinaryToVarbinaryConverter(writer, type.getTypeLength(), mutator.getManagedBuffer());
      }
    default:
      throw new UnsupportedOperationException("Unsupported type: " + type.getPrimitiveTypeName());
  }
}
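Every branch above repeats the same selection between a list writer and a map writer based on the field's repetition. A sketch of how that choice could be factored into one generic helper; RepetitionDispatch is illustrative only, not Drill's API:

import java.util.function.Supplier;

final class RepetitionDispatch {
  /**
   * Models the repeated pattern
   *   type.getRepetition() == Repetition.REPEATED ? mapWriter.list(name).x() : mapWriter.x(name)
   * as a single generic helper; each case supplies only its two writer constructors.
   */
  static <W> W select(boolean repeated, Supplier<W> listWriter, Supplier<W> singleWriter) {
    return repeated ? listWriter.get() : singleWriter.get();
  }
}

A call site would then read, for example, RepetitionDispatch.select(repeated, () -> mapWriter.list(name).integer(), () -> mapWriter.integer(name)).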
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class AssignmentCreator, method getMappings.
/**
 * Does the work of creating the mappings for this AssignmentCreator.
 * @return the minor fragment id to work units mapping
 */
private ListMultimap<Integer, T> getMappings() {
  Stopwatch watch = Stopwatch.createStarted();
  maxWork = (int) Math.ceil(units.size() / ((float) incomingEndpoints.size()));
  LinkedList<WorkEndpointListPair<T>> workList = getWorkList();
  LinkedList<WorkEndpointListPair<T>> unassignedWorkList;
  Map<DrillbitEndpoint, FragIteratorWrapper> endpointIterators = getEndpointIterators();
  // Assign up to maxCount per node based on locality.
  unassignedWorkList = assign(workList, endpointIterators, false);
  // Assign up to minCount per node in a round robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node based on locality.
  unassignedWorkList = assign(unassignedWorkList, endpointIterators, true);
  // Assign up to maxCount + leftovers per node in a round robin fashion.
  assignLeftovers(unassignedWorkList, endpointIterators, false);
  if (!unassignedWorkList.isEmpty()) {
    throw new DrillRuntimeException("There are still unassigned work units");
  }
  logger.debug("Took {} ms to assign {} work units to {} fragments", watch.elapsed(TimeUnit.MILLISECONDS), units.size(), incomingEndpoints.size());
  return mappings;
}
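The strategy is two-phase: place as many work units as possible on endpoints where their data is local, then hand out the leftovers round robin. A minimal self-contained model of that strategy; LocalityAssigner, unitLocality, and cap are hypothetical names and a simplified data model, not Drill's:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Toy model of locality-first assignment with a round-robin fallback. */
final class LocalityAssigner {

  /**
   * @param unitLocality for each work unit id, the endpoints holding its data locally
   * @param endpoints    all available endpoints
   * @param cap          max units per endpoint; assumed >= ceil(units / endpoints) so total capacity suffices
   */
  static Map<String, List<Integer>> assign(Map<Integer, Set<String>> unitLocality,
                                           List<String> endpoints, int cap) {
    Map<String, List<Integer>> assignments = new HashMap<>();
    for (String e : endpoints) {
      assignments.put(e, new ArrayList<>());
    }
    Deque<Integer> leftovers = new ArrayDeque<>();

    // Phase 1: place each unit on a local endpoint that still has capacity.
    for (Map.Entry<Integer, Set<String>> unit : unitLocality.entrySet()) {
      String host = unit.getValue().stream()
          .filter(e -> assignments.containsKey(e) && assignments.get(e).size() < cap)
          .findFirst()
          .orElse(null);
      if (host != null) {
        assignments.get(host).add(unit.getKey());
      } else {
        leftovers.add(unit.getKey());
      }
    }

    // Phase 2: hand out leftovers round robin, ignoring locality.
    // Terminates because total capacity covers all units (see cap above).
    int i = 0;
    while (!leftovers.isEmpty()) {
      String e = endpoints.get(i++ % endpoints.size());
      if (assignments.get(e).size() < cap) {
        assignments.get(e).add(leftovers.poll());
      }
    }
    return assignments;
  }
}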
Use of org.apache.drill.common.exceptions.DrillRuntimeException in project drill by apache.
The class ZookeeperPersistentStore, method put.
@Override
public void put(final String key, final V value, final DataChangeVersion version) {
  final InstanceSerializer<V> serializer = config.getSerializer();
  try {
    final byte[] bytes = serializer.serialize(value);
    // The version makes the write conditional: it fails if the entry changed since it was read.
    client.put(key, bytes, version);
  } catch (final IOException e) {
    throw new DrillRuntimeException(String.format("unable to de/serialize value of type %s", value.getClass()), e);
  }
}
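For symmetry, a hedged sketch of what the matching read path could look like; the client.get signature and the IOException contract on deserialize are assumptions mirrored from put above, not the actual ZookeeperClient/InstanceSerializer API:

// Hypothetical counterpart to put(), shown only to illustrate the
// serializer/version plumbing; not the real Drill method.
public V get(final String key, final DataChangeVersion version) {
  final InstanceSerializer<V> serializer = config.getSerializer();
  try {
    final byte[] bytes = client.get(key, version); // assumed signature
    return bytes == null ? null : serializer.deserialize(bytes);
  } catch (final IOException e) {
    throw new DrillRuntimeException(String.format("unable to de/serialize value for key %s", key), e);
  }
}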