Use of org.apache.flink.connectors.hive.FlinkHiveException in project flink by apache.
From the class HiveParserUtils, method rexSubQueryIn:
/**
 * Proxy to {@link RexSubQuery#in(RelNode, com.google.common.collect.ImmutableList)}.
 */
public static RexSubQuery rexSubQueryIn(RelNode relNode, Collection<RexNode> rexNodes) {
    // RexSubQuery.in expects a guava ImmutableList, which may be shaded by Calcite,
    // so the exact parameter type has to be resolved at runtime
    Class[] argTypes = new Class[] {RelNode.class, null};
    argTypes[1] = useShadedImmutableList ? shadedImmutableListClz : immutableListClz;
    Method method = HiveReflectionUtils.tryGetMethod(RexSubQuery.class, "in", argTypes);
    Preconditions.checkState(method != null, "Cannot get the method to create an IN sub-query");
    try {
        return (RexSubQuery) method.invoke(null, relNode, toImmutableList(rexNodes));
    } catch (IllegalAccessException | InvocationTargetException e) {
        throw new FlinkHiveException("Failed to create RexSubQuery", e);
    }
}
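The fields useShadedImmutableList, shadedImmutableListClz, and immutableListClz referenced above are not shown in this snippet. A minimal sketch of how they could be resolved, assuming Calcite shades guava under org.apache.calcite.shaded (the class names, fallback order, and helper name are assumptions for illustration, not the verified Flink code):

    private static final Class<?> immutableListClz =
            tryLoadClass("com.google.common.collect.ImmutableList");
    private static final Class<?> shadedImmutableListClz =
            tryLoadClass("org.apache.calcite.shaded.com.google.common.collect.ImmutableList");
    // prefer the shaded variant when it is on the classpath
    private static final boolean useShadedImmutableList = shadedImmutableListClz != null;

    private static Class<?> tryLoadClass(String className) {
        try {
            return Class.forName(className);
        } catch (ClassNotFoundException e) {
            // not on the classpath; callers fall back to the other variant
            return null;
        }
    }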
Use of org.apache.flink.connectors.hive.FlinkHiveException in project flink by apache.
From the class HiveParser, method startSessionState:
private void startSessionState(HiveConf hiveConf, CatalogManager catalogManager) {
    final ClassLoader contextCL = Thread.currentThread().getContextClassLoader();
    try {
        HiveParserSessionState sessionState = new HiveParserSessionState(hiveConf, contextCL);
        sessionState.initTxnMgr(hiveConf);
        sessionState.setCurrentDatabase(catalogManager.getCurrentDatabase());
        // some Hive functions need the current timestamp
        setCurrentTimestamp(sessionState);
        SessionState.setCurrentSessionState(sessionState);
    } catch (LockException e) {
        throw new FlinkHiveException("Failed to init SessionState", e);
    } finally {
        // don't let SessionState mess with our context classloader
        Thread.currentThread().setContextClassLoader(contextCL);
    }
}
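The finally block is the usual save-and-restore idiom for the context classloader: initializing Hive session state may swap the classloader as a side effect, so the original is always put back. A generic, self-contained sketch of the same pattern (the helper name is hypothetical):

    static <T> T withRestoredContextClassLoader(java.util.concurrent.Callable<T> action)
            throws Exception {
        final ClassLoader saved = Thread.currentThread().getContextClassLoader();
        try {
            // the action may replace the context classloader as a side effect
            return action.call();
        } finally {
            // always restore the original, even if the action throws
            Thread.currentThread().setContextClassLoader(saved);
        }
    }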
Use of org.apache.flink.connectors.hive.FlinkHiveException in project flink by apache.
From the class HivePartitionUtils, method getAllPartitions:
/**
 * Returns all HiveTablePartitions of a Hive table; returns a single HiveTablePartition if
 * the table is not partitioned.
 */
public static List<HiveTablePartition> getAllPartitions(
        JobConf jobConf,
        String hiveVersion,
        ObjectPath tablePath,
        List<String> partitionColNames,
        List<Map<String, String>> remainingPartitions) {
    List<HiveTablePartition> allHivePartitions = new ArrayList<>();
    try (HiveMetastoreClientWrapper client =
            HiveMetastoreClientFactory.create(HiveConfUtils.create(jobConf), hiveVersion)) {
        String dbName = tablePath.getDatabaseName();
        String tableName = tablePath.getObjectName();
        Table hiveTable = client.getTable(dbName, tableName);
        Properties tableProps =
                HiveReflectionUtils.getTableMetadata(
                        HiveShimLoader.loadHiveShim(hiveVersion), hiveTable);
        if (partitionColNames != null && !partitionColNames.isEmpty()) {
            List<Partition> partitions = new ArrayList<>();
            if (remainingPartitions != null) {
                // only fetch the partitions left over after partition pruning
                for (Map<String, String> spec : remainingPartitions) {
                    partitions.add(
                            client.getPartition(
                                    dbName,
                                    tableName,
                                    partitionSpecToValues(spec, partitionColNames)));
                }
            } else {
                // -1 asks the metastore for all partitions of the table
                partitions.addAll(client.listPartitions(dbName, tableName, (short) -1));
            }
            for (Partition partition : partitions) {
                HiveTablePartition hiveTablePartition =
                        toHiveTablePartition(partitionColNames, tableProps, partition);
                allHivePartitions.add(hiveTablePartition);
            }
        } else {
            // an unpartitioned table maps to a single partition backed by the table's storage
            allHivePartitions.add(new HiveTablePartition(hiveTable.getSd(), tableProps));
        }
    } catch (TException e) {
        throw new FlinkHiveException("Failed to collect all partitions from Hive metastore", e);
    }
    return allHivePartitions;
}
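The partitionSpecToValues helper called above is not shown; it converts a partition spec map into the ordered value list the metastore client expects. A hedged sketch of what such a helper might look like, ordering values by the table's partition columns (an illustration, not the verified Flink implementation):

    static List<String> partitionSpecToValues(
            Map<String, String> spec, List<String> partitionColNames) {
        List<String> values = new ArrayList<>(partitionColNames.size());
        for (String col : partitionColNames) {
            String value = spec.get(col);
            // every partition column must be bound in the spec
            Preconditions.checkArgument(
                    value != null, "Partition spec is missing column " + col);
            values.add(value);
        }
        return values;
    }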
Use of org.apache.flink.connectors.hive.FlinkHiveException in project flink by apache.
From the class HiveWriterFactory, method createRecordWriter:
/**
 * Creates a {@link RecordWriter} for the given path.
 */
public RecordWriter createRecordWriter(Path path) {
    try {
        checkInitialize();
        JobConf conf = new JobConf(confWrapper.conf());
        if (isCompressed) {
            String codecStr = conf.get(HiveConf.ConfVars.COMPRESSINTERMEDIATECODEC.varname);
            if (!StringUtils.isNullOrWhitespaceOnly(codecStr)) {
                // noinspection unchecked
                Class<? extends CompressionCodec> codec =
                        (Class<? extends CompressionCodec>)
                                Class.forName(
                                        codecStr,
                                        true,
                                        Thread.currentThread().getContextClassLoader());
                FileOutputFormat.setOutputCompressorClass(conf, codec);
            }
            String typeStr = conf.get(HiveConf.ConfVars.COMPRESSINTERMEDIATETYPE.varname);
            if (!StringUtils.isNullOrWhitespaceOnly(typeStr)) {
                SequenceFile.CompressionType style = SequenceFile.CompressionType.valueOf(typeStr);
                SequenceFileOutputFormat.setOutputCompressionType(conf, style);
            }
        }
        return hiveShim.getHiveRecordWriter(
                conf,
                hiveOutputFormatClz,
                recordSerDe.getSerializedClass(),
                isCompressed,
                tableProperties,
                path);
    } catch (Exception e) {
        throw new FlinkHiveException(e);
    }
}
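The compression branch boils down to resolving a CompressionCodec class by name through the context classloader. A minimal, self-contained sketch of that lookup (the class and method names here are hypothetical):

    import org.apache.hadoop.io.compress.CompressionCodec;

    final class CodecLookup {
        @SuppressWarnings("unchecked")
        static Class<? extends CompressionCodec> resolveCodec(String codecClassName)
                throws ClassNotFoundException {
            return (Class<? extends CompressionCodec>)
                    Class.forName(
                            codecClassName, true, Thread.currentThread().getContextClassLoader());
        }

        public static void main(String[] args) throws Exception {
            // GzipCodec ships with hadoop-common, so this lookup should succeed
            // on a classpath that includes Hadoop
            System.out.println(resolveCodec("org.apache.hadoop.io.compress.GzipCodec"));
        }
    }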
Use of org.apache.flink.connectors.hive.FlinkHiveException in project flink by apache.
From the class HiveShimV230, method init:
private static void init() {
    // lazily cache the Table#isMaterializedView method via double-checked locking
    if (!inited) {
        synchronized (HiveShimV230.class) {
            if (!inited) {
                try {
                    isMaterializedView =
                            org.apache.hadoop.hive.ql.metadata.Table.class.getDeclaredMethod(
                                    "isMaterializedView");
                    inited = true;
                } catch (Exception e) {
                    throw new FlinkHiveException(e);
                }
            }
        }
    }
}
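As a design note, the same lazy, thread-safe lookup can be written with the initialization-on-demand holder idiom, avoiding explicit double-checked locking. A sketch (the holder class name is hypothetical):

    final class MaterializedViewMethodHolder {
        // the JVM initializes this field exactly once, on first access,
        // with a proper happens-before edge for all readers
        static final java.lang.reflect.Method IS_MATERIALIZED_VIEW;

        static {
            try {
                IS_MATERIALIZED_VIEW =
                        org.apache.hadoop.hive.ql.metadata.Table.class.getDeclaredMethod(
                                "isMaterializedView");
            } catch (NoSuchMethodException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        private MaterializedViewMethodHolder() {}
    }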