use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by axbaretto.
the class PersistentStoreRegistry method newPStoreProvider.
@SuppressWarnings("unchecked")
public PersistentStoreProvider newPStoreProvider() throws ExecutionSetupException {
  try {
    String storeProviderClassName = config.getString(ExecConstants.SYS_STORE_PROVIDER_CLASS);
    logger.info("Using the configured PStoreProvider class: '{}'.", storeProviderClassName);
    Class<? extends PersistentStoreProvider> storeProviderClass =
        (Class<? extends PersistentStoreProvider>) Class.forName(storeProviderClassName);
    Constructor<? extends PersistentStoreProvider> c = storeProviderClass.getConstructor(PersistentStoreRegistry.class);
    return new CachingPersistentStoreProvider(c.newInstance(this));
  } catch (ConfigException.Missing | ClassNotFoundException | NoSuchMethodException | SecurityException
      | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
    logger.error(e.getMessage(), e);
    throw new ExecutionSetupException("A System Table provider was either not specified or could not be found or instantiated", e);
  }
}
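The method above is a plugin-style loader: the provider class name comes from configuration (ExecConstants.SYS_STORE_PROVIDER_CLASS), is resolved reflectively, and must expose a constructor that accepts the registry itself. A minimal self-contained sketch of the same pattern, using hypothetical Provider and Registry types rather than Drill's, could look like this:

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

public class ProviderLoader {
  /** Hypothetical provider contract, standing in for PersistentStoreProvider. */
  public interface Provider {
    void start();
  }

  /** Hypothetical registry, standing in for PersistentStoreRegistry. */
  public static class Registry {
    public Provider newProvider(String className) throws Exception {
      // Resolve the configured class and verify it implements the expected contract.
      Class<? extends Provider> clazz = Class.forName(className).asSubclass(Provider.class);
      // Require a constructor that accepts the registry, mirroring the Drill snippet.
      Constructor<? extends Provider> ctor = clazz.getConstructor(Registry.class);
      try {
        return ctor.newInstance(this);
      } catch (InvocationTargetException e) {
        // Unwrap the constructor's own failure for a clearer error message.
        throw new IllegalStateException("Provider constructor failed", e.getCause());
      }
    }
  }

  /** Example provider that would be named in configuration. */
  public static class LocalProvider implements Provider {
    public LocalProvider(Registry registry) {}
    @Override public void start() {}
  }

  public static void main(String[] args) throws Exception {
    Provider p = new Registry().newProvider(ProviderLoader.LocalProvider.class.getName());
    p.start();
  }
}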
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveUtilities method getInputFormatClass.
/**
 * Utility method which gets the table or partition {@link InputFormat} class. First it
 * tries to get the class name from the given StorageDescriptor object. If that is absent, it
 * tries to get it from the StorageHandler class set in the table properties. If neither is
 * found, an exception is thrown.
 * @param job {@link JobConf} instance, needed in case the table is a StorageHandler based table.
 * @param sd {@link StorageDescriptor} instance of the partition or table (for non-partitioned tables) currently being read.
 * @param table Table object
 */
public static Class<? extends InputFormat<?, ?>> getInputFormatClass(final JobConf job, final StorageDescriptor sd, final Table table) throws Exception {
  final String inputFormatName = sd.getInputFormat();
  if (Strings.isNullOrEmpty(inputFormatName)) {
    final String storageHandlerClass = table.getParameters().get(META_TABLE_STORAGE);
    if (Strings.isNullOrEmpty(storageHandlerClass)) {
      throw new ExecutionSetupException("Unable to get Hive table InputFormat class. There is neither InputFormat class explicitly specified nor StorageHandler class");
    }
    final HiveStorageHandler storageHandler = HiveUtils.getStorageHandler(job, storageHandlerClass);
    TableDesc tableDesc = new TableDesc();
    tableDesc.setProperties(new org.apache.hadoop.hive.ql.metadata.Table(table).getMetadata());
    storageHandler.configureInputJobProperties(tableDesc, table.getParameters());
    return (Class<? extends InputFormat<?, ?>>) storageHandler.getInputFormatClass();
  } else {
    return (Class<? extends InputFormat<?, ?>>) Class.forName(inputFormatName);
  }
}
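The resolution order is: the explicit InputFormat from the StorageDescriptor first, then the StorageHandler named under META_TABLE_STORAGE in the table parameters, otherwise fail. A hedged call-site sketch for the first branch follows; it assumes Drill's HiveUtilities class plus the Hive metastore and Hadoop mapred classes on the classpath, and uses TextInputFormat purely as an illustration:

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.JobConf;

public class InputFormatExample {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf();
    Table table = new Table();
    StorageDescriptor sd = new StorageDescriptor();
    // Explicit-InputFormat branch: no StorageHandler lookup is needed.
    sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
    table.setSd(sd);
    Class<? extends InputFormat<?, ?>> clazz = HiveUtilities.getInputFormatClass(job, sd, table);
    System.out.println(clazz.getName());
  }
}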
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveScan method getSpecificScan.
@Override
public SubScan getSpecificScan(final int minorFragmentId) throws ExecutionSetupException {
  try {
    final List<LogicalInputSplit> splits = mappings.get(minorFragmentId);
    List<HivePartitionWrapper> parts = new ArrayList<>();
    final List<List<String>> encodedInputSplits = new ArrayList<>();
    final List<String> splitTypes = new ArrayList<>();
    for (final LogicalInputSplit split : splits) {
      final Partition splitPartition = split.getPartition();
      if (splitPartition != null) {
        HiveTableWithColumnCache table = hiveReadEntry.getTable();
        parts.add(createPartitionWithSpecColumns(new HiveTableWithColumnCache(table, new ColumnListsCache(table)), splitPartition));
      }
      encodedInputSplits.add(split.serialize());
      splitTypes.add(split.getType());
    }
    if (parts.size() <= 0) {
      parts = null;
    }
    final HiveReadEntry subEntry = new HiveReadEntry(hiveReadEntry.getTableWrapper(), parts);
    return new HiveSubScan(getUserName(), encodedInputSplits, subEntry, splitTypes, columns, hiveStoragePlugin, confProperties);
  } catch (IOException | ReflectiveOperationException e) {
    throw new ExecutionSetupException(e);
  }
}
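getSpecificScan simply slices out the splits previously assigned to the given minor fragment via mappings; the assignment itself happens earlier in the scan's lifecycle. As a stand-in for Drill's affinity-aware assignment logic, a hypothetical round-robin distribution of splits across minor fragments could look like this:

import java.util.ArrayList;
import java.util.List;

public class SplitAssignmentSketch {
  /** Round-robin split assignment, a simplified stand-in for Drill's affinity-aware logic. */
  static <T> List<List<T>> assign(List<T> splits, int minorFragmentCount) {
    List<List<T>> mappings = new ArrayList<>();
    for (int i = 0; i < minorFragmentCount; i++) {
      mappings.add(new ArrayList<>());
    }
    for (int i = 0; i < splits.size(); i++) {
      mappings.get(i % minorFragmentCount).add(splits.get(i));
    }
    return mappings;
  }

  public static void main(String[] args) {
    List<List<String>> mappings = assign(List.of("s0", "s1", "s2", "s3", "s4"), 2);
    // getSpecificScan(minorFragmentId) would then read mappings.get(minorFragmentId).
    System.out.println(mappings);
  }
}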
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveTextRecordReader method next.
/**
* Reads batch of records skipping footer rows when necessary.
*
* @return count of read records
*/
@Override
public int next() {
  if (skipFooterValueHolder == null) {
    return super.next();
  } else {
    try {
      // starting new batch, reset processed records count
      skipFooterValueHolder.reset();
      while (!skipFooterValueHolder.isBatchFull() && hasNextValue(skipFooterValueHolder.getValueHolder())) {
        Object value = skipFooterValueHolder.getNextValue();
        if (value != null) {
          Object deSerializedValue = partitionToTableSchemaConverter.convert(partitionDeserializer.deserialize((Writable) value));
          outputWriter.setPosition(skipFooterValueHolder.getProcessedRecordCount());
          readHiveRecordAndInsertIntoRecordBatch(deSerializedValue);
          skipFooterValueHolder.incrementProcessedRecordCount();
        }
      }
      outputWriter.setValueCount(skipFooterValueHolder.getProcessedRecordCount());
      return skipFooterValueHolder.getProcessedRecordCount();
    } catch (ExecutionSetupException | IOException | SerDeException e) {
      throw new DrillRuntimeException(e.getMessage(), e);
    }
  }
}
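The skipFooterValueHolder buffers rows so the configured number of footer rows never reaches the output batch: a row is only released once enough newer rows have been read to prove it is not part of the footer. A self-contained sketch of that buffering idea (hypothetical types, not Drill's holder) follows:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;

public class FooterSkipSketch {
  /**
   * Emits rows while withholding the last footerCount rows, the same idea the
   * skipFooterValueHolder implements: a row is only released once footerCount
   * newer rows have been read, so the file's footer never reaches the consumer.
   */
  static void readSkippingFooter(List<String> rows, int footerCount) {
    Deque<String> buffer = new ArrayDeque<>();
    for (String row : rows) {
      buffer.addLast(row);
      if (buffer.size() > footerCount) {
        process(buffer.removeFirst());
      }
    }
    // Whatever remains in the buffer is the footer; it is deliberately dropped.
  }

  static void process(String row) {
    System.out.println(row);
  }

  public static void main(String[] args) {
    readSkippingFooter(List.of("r1", "r2", "r3", "footer1", "footer2"), 2);
  }
}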
use of org.apache.drill.common.exceptions.ExecutionSetupException in project drill by apache.
the class HiveDefaultRecordReader method next.
@Override
public int next() {
  outputWriter.allocate();
  outputWriter.reset();
  if (empty) {
    outputWriter.setValueCount(0);
    populatePartitionVectors(0);
    return 0;
  }
  try {
    int recordCount;
    for (recordCount = 0; (recordCount < TARGET_RECORD_COUNT && hasNextValue(valueHolder)); recordCount++) {
      Object deserializedHiveRecord = partitionToTableSchemaConverter.convert(partitionDeserializer.deserialize((Writable) valueHolder));
      outputWriter.setPosition(recordCount);
      readHiveRecordAndInsertIntoRecordBatch(deserializedHiveRecord);
    }
    outputWriter.setValueCount(recordCount);
    populatePartitionVectors(recordCount);
    return recordCount;
  } catch (ExecutionSetupException | IOException | SerDeException e) {
    throw new DrillRuntimeException(e.getMessage(), e);
  }
}
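The loop above follows the standard Drill reader contract: fill value vectors with up to TARGET_RECORD_COUNT rows per call, return the number written, and return 0 once the source is exhausted. A hypothetical, stripped-down sketch of that contract against a plain iterator (the 4000 batch size is illustrative, not taken from the snippet) could be:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class BatchReaderSketch {
  static final int TARGET_RECORD_COUNT = 4000;  // illustrative batch size

  private final Iterator<String> source;

  BatchReaderSketch(Iterator<String> source) {
    this.source = source;
  }

  /** Mirrors the next() contract: fill up to a batch, return the row count, 0 when exhausted. */
  int next(List<String> batch) {
    batch.clear();
    int recordCount = 0;
    while (recordCount < TARGET_RECORD_COUNT && source.hasNext()) {
      batch.add(source.next());  // stand-in for deserialize + write into value vectors
      recordCount++;
    }
    return recordCount;
  }

  public static void main(String[] args) {
    BatchReaderSketch reader = new BatchReaderSketch(List.of("a", "b", "c").iterator());
    List<String> batch = new ArrayList<>();
    int n;
    while ((n = reader.next(batch)) > 0) {
      System.out.println("batch of " + n + ": " + batch);
    }
  }
}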