use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project hive by apache.
the class CompactorTest method setup.
@Before
public void setup() throws Exception {
    conf = new HiveConf();
    TxnDbUtil.setConfValues(conf);
    TxnDbUtil.cleanDb(conf);
    ms = new HiveMetaStoreClient(conf);
    txnHandler = TxnUtils.getTxnStore(conf);
    tmpdir = new File(Files.createTempDirectory("compactor_test_table_").toString());
}
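No matching teardown appears in this excerpt; a minimal sketch of one, assuming the fields above and that commons-io is on the classpath (the @After hook below is illustrative, not part of the original test):
@After
public void tearDown() throws Exception {
    if (ms != null) {
        ms.close(); // release the metastore connection opened in setup()
    }
    // remove the temp directory created in setup(); FileUtils is org.apache.commons.io.FileUtils
    FileUtils.deleteDirectory(tmpdir);
}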
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project SQLWindowing by hbutani.
the class HiveUtils method getRowResolver.
public static RowResolver getRowResolver(String db, String table, String alias, HiveConf conf) throws WindowingException {
    LOG.info("HiveUtils::getRowResolver invoked on " + table);
    try {
        HiveMetaStoreClient client = getClient(conf);
        db = validateDB(client, db);
        org.apache.hadoop.hive.ql.metadata.Table t = Hive.get(conf).getTable(db, table);
        StructObjectInspector rowObjectInspector = (StructObjectInspector) t.getDeserializer().getObjectInspector();
        RowResolver rwsch = getRowResolver(alias, rowObjectInspector);
        for (FieldSchema part_col : t.getPartCols()) {
            LOG.trace("Adding partition col: " + part_col);
            rwsch.put(alias, part_col.getName(), new ColumnInfo(part_col.getName(), TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), alias, true));
        }
        Iterator<VirtualColumn> vcs = VirtualColumn.getRegistry(conf).iterator();
        // use a list for easy customization
        List<VirtualColumn> vcList = new ArrayList<VirtualColumn>();
        while (vcs.hasNext()) {
            VirtualColumn vc = vcs.next();
            rwsch.put(alias, vc.getName(), new ColumnInfo(vc.getName(), vc.getTypeInfo(), alias, true, vc.getIsHidden()));
            vcList.add(vc);
        }
        return rwsch;
    } catch (WindowingException w) {
        throw w;
    } catch (Exception me) {
        throw new WindowingException(me);
    }
}
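For orientation, a hedged usage sketch of the method above; the database, table, and alias names ("default", "sales", "s") are made-up values, and error handling is elided:
HiveConf conf = new HiveConf();
RowResolver rr = HiveUtils.getRowResolver("default", "sales", "s", conf);
// walk the resolved columns (regular, partition, and virtual) collected above
for (ColumnInfo ci : rr.getColumnInfos()) {
    System.out.println(ci.getInternalName() + " : " + ci.getType());
}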
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project SQLWindowing by hbutani.
the class IOUtils method createTableWindowingInput.
@SuppressWarnings("unchecked")
public static WindowingInput createTableWindowingInput(String dbName, String tableName, Configuration conf) throws WindowingException {
    try {
        HiveMetaStoreClient client = HiveUtils.getClient(conf);
        String db = HiveUtils.validateDB(client, dbName);
        Table t = HiveUtils.getTable(client, db, tableName);
        StorageDescriptor sd = t.getSd();
        HiveConf hConf = new HiveConf(conf, IOUtils.class);
        JobConf job = new JobConf(hConf);
        Class<? extends InputFormat<? extends Writable, ? extends Writable>> inputFormatClass = (Class<? extends InputFormat<? extends Writable, ? extends Writable>>) Class.forName(sd.getInputFormat());
        hConf.setClass("mapred.input.format.class", inputFormatClass, InputFormat.class);
        hConf.set(INPUT_INPUTFORMAT_CLASS, inputFormatClass.getName());
        InputFormat<? extends Writable, ? extends Writable> iFmt = inputFormatClass.newInstance();
        if (iFmt instanceof TextInputFormat) {
            ((TextInputFormat) iFmt).configure(job);
        }
        Path p = new Path(sd.getLocation());
        /*
         * Convert the Path in the StorageDescriptor into a Path in the current FileSystem.
         * Used in testing: jobs run on a MiniDFSCluster, whereas the Hive metadata refers to a real cluster.
         */
        {
            p = makeQualified(p, conf);
        }
        FileInputFormat.addInputPath(job, p);
        InputSplit[] iSplits = iFmt.getSplits(job, 1);
        org.apache.hadoop.mapred.RecordReader<Writable, Writable> rdr = (org.apache.hadoop.mapred.RecordReader<Writable, Writable>) iFmt.getRecordReader(iSplits[0], job, Reporter.NULL);
        hConf.set(INPUT_PATH, sd.getLocation());
        hConf.set(INPUT_KEY_CLASS, rdr.createKey().getClass().getName());
        hConf.set(INPUT_VALUE_CLASS, rdr.createValue().getClass().getName());
        hConf.set(INPUT_SERDE_CLASS, sd.getSerdeInfo().getSerializationLib());
        TableWindowingInput tIn = new TableWindowingInput();
        tIn.initialize(null, hConf, MetaStoreUtils.getSchema(t));
        return tIn;
    } catch (WindowingException w) {
        throw w;
    } catch (Exception e) {
        throw new WindowingException(e);
    }
}
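The makeQualified helper invoked above is not shown in this excerpt; a plausible sketch, assuming it simply requalifies the path against the FileSystem of the current Configuration (an assumption, not the project's actual code):
private static Path makeQualified(Path p, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    // drop the original scheme/authority and requalify against the current FS,
    // e.g. so a MiniDFSCluster path replaces the real cluster's URI
    return fs.makeQualified(new Path(p.toUri().getPath()));
}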
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project cdap by caskdata.
the class BaseHiveExploreService method getMetaStoreClient.
private IMetaStoreClient getMetaStoreClient() throws ExploreException {
    if (metastoreClientLocal.get() == null) {
        try {
            IMetaStoreClient client = new HiveMetaStoreClient(createHiveConf());
            Supplier<IMetaStoreClient> supplier = Suppliers.ofInstance(client);
            metastoreClientLocal.set(supplier);
            // We use GC of the supplier as a signal for us to know that a thread is gone.
            // The supplier is set into the thread local, which will get GC'ed when the thread is gone.
            // Since we use a weak reference key to the supplier that points to the client
            // (in the metastoreClientReferences map), it won't block GC of the supplier instance.
            // We can use the weak reference, which is retrieved through polling the ReferenceQueue,
            // to get back the client and call close() on it.
            metastoreClientReferences.put(new WeakReference<>(supplier, metastoreClientReferenceQueue), client);
        } catch (MetaException e) {
            throw new ExploreException("Error initializing Hive Metastore client", e);
        }
    }
    return metastoreClientLocal.get().get();
}
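The comments above describe only half the pattern; a minimal sketch of the reaper side that would poll the ReferenceQueue and close clients of dead threads (the field names match the snippet, but this method is an assumption, not CDAP's actual code):
private void closeClientsOfDeadThreads() {
    Reference<? extends Supplier<IMetaStoreClient>> ref;
    // poll() is non-blocking; a reference appears here once a dead thread's
    // supplier (held only by its thread local) has been garbage-collected
    while ((ref = metastoreClientReferenceQueue.poll()) != null) {
        IMetaStoreClient client = metastoreClientReferences.remove(ref);
        if (client != null) {
            client.close(); // release the connection held for the dead thread
        }
    }
}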
use of org.apache.hadoop.hive.metastore.HiveMetaStoreClient in project streamline by hortonworks.
the class HiveMetadataService method newInstance.
/**
 * Creates a secure {@link HiveMetadataService}, which delegates to a {@link HiveMetaStoreClient}
 * instantiated with the {@link HiveConf} provided as the first parameter.
 */
public static HiveMetadataService newInstance(HiveConf hiveConf, SecurityContext securityContext, Subject subject, Component hiveMetastore, Collection<ComponentProcess> hiveMetastoreProcesses) throws MetaException, IOException, EntityNotFoundException, PrivilegedActionException {
    if (SecurityUtil.isKerberosAuthenticated(securityContext)) {
        // Sets Kerberos rules
        UserGroupInformation.setConfiguration(hiveConf);
        // Adds the user principal to this subject
        UserGroupInformation.getUGIFromSubject(subject);
        return new HiveMetadataService(SecurityUtil.execute(() -> new HiveMetaStoreClient(hiveConf), securityContext, subject), hiveConf, securityContext, subject, hiveMetastore, hiveMetastoreProcesses);
    } else {
        return new HiveMetadataService(new HiveMetaStoreClient(hiveConf), hiveConf, securityContext, subject, hiveMetastore, hiveMetastoreProcesses);
    }
}
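SecurityUtil.execute is not shown in this excerpt; under the assumption that it simply runs the action as the Kerberos-authenticated subject, its shape could look like the sketch below (the securityContext parameter is unused here and kept only for signature parity):
static <T> T execute(PrivilegedExceptionAction<T> action, SecurityContext securityContext, Subject subject) throws PrivilegedActionException {
    // run the action (e.g. new HiveMetaStoreClient(hiveConf)) with the
    // subject's Kerberos credentials on the access-control context
    return Subject.doAs(subject, action);
}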