Use of org.apache.hadoop.conf.Configurable in project Hive by Apache.
The class HadoopThriftAuthBridge23, method getHadoopSaslProperties.
/**
 * Read and return the Hadoop SASL configuration, which can be configured using
 * "hadoop.rpc.protection".
 *
 * @param conf the Hadoop configuration to read the SASL settings from
 * @return Hadoop SASL configuration
 */
@SuppressWarnings("unchecked")
@Override
public Map<String, String> getHadoopSaslProperties(Configuration conf) {
  if (SASL_PROPS_FIELD != null) {
    // Hadoop 2.4 and earlier way of finding the SASL property settings.
    // Initialize the SaslRpcServer to ensure QOP parameters are read from conf.
    SaslRpcServer.init(conf);
    try {
      return (Map<String, String>) SASL_PROPS_FIELD.get(null);
    } catch (Exception e) {
      throw new IllegalStateException("Error finding hadoop SASL properties", e);
    }
  }
  // Hadoop 2.5 and later way of finding the SASL properties.
  try {
    Configurable saslPropertiesResolver = (Configurable) RES_GET_INSTANCE_METHOD.invoke(null, conf);
    saslPropertiesResolver.setConf(conf);
    return (Map<String, String>) GET_DEFAULT_PROP_METHOD.invoke(saslPropertiesResolver);
  } catch (Exception e) {
    throw new IllegalStateException("Error finding hadoop SASL properties", e);
  }
}
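On Hadoop 2.5 and later, the two reflective calls above resolve to SaslPropertiesResolver. As a point of reference, a minimal sketch of the direct, non-reflective equivalent might look like the following; it assumes a compile-time dependency on Hadoop 2.5+, which is exactly what the bridge avoids by using reflection, and the SaslPropsSketch class name is illustrative.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslPropertiesResolver;

public class SaslPropsSketch {
  /** Direct equivalent of the Hadoop 2.5+ branch above (a sketch, not Hive code). */
  public static Map<String, String> hadoopSaslProperties(Configuration conf) {
    SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(conf);
    // Mirror the bridge, which re-applies the conf through the Configurable cast.
    resolver.setConf(conf);
    return resolver.getDefaultProperties();
  }
}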
Use of org.apache.hadoop.conf.Configurable in project Nutch by Apache.
The class GenericWritableConfigurable, method readFields.
@Override
public void readFields(DataInput in) throws IOException {
  byte type = in.readByte();
  Class<?> clazz = getTypes()[type];
  try {
    set((Writable) clazz.getConstructor().newInstance());
  } catch (Exception e) {
    e.printStackTrace();
    throw new IOException("Cannot initialize the class: " + clazz);
  }
  Writable w = get();
  if (w instanceof Configurable) {
    ((Configurable) w).setConf(conf);
  }
  w.readFields(in);
}
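The setConf() call above only matters for payload types that need a Configuration while deserializing. A hypothetical Writable of that kind is sketched below; the ConfAwareRecord class and the record.max.length key are illustrative and not part of Nutch.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;

public class ConfAwareRecord implements Writable, Configurable {
  private Configuration conf;
  private final Text payload = new Text();

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    payload.write(out);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    payload.readFields(in);
    // Hypothetical use of the injected conf during deserialization.
    int max = conf == null ? Integer.MAX_VALUE : conf.getInt("record.max.length", Integer.MAX_VALUE);
    if (payload.getLength() > max) {
      throw new IOException("record exceeds record.max.length");
    }
  }
}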
Use of org.apache.hadoop.conf.Configurable in project Flink by Apache.
The class HadoopInputSplit, method readObject.
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
  // Read the parent fields and the final fields.
  in.defaultReadObject();
  try {
    hadoopInputSplit = (org.apache.hadoop.mapred.InputSplit) WritableFactories.newInstance(splitType);
  } catch (Exception e) {
    throw new RuntimeException("Unable to instantiate Hadoop InputSplit", e);
  }
  if (needsJobConf(hadoopInputSplit)) {
    // The job conf knows how to deserialize itself.
    jobConf = new JobConf();
    jobConf.readFields(in);
    if (hadoopInputSplit instanceof Configurable) {
      ((Configurable) hadoopInputSplit).setConf(this.jobConf);
    } else if (hadoopInputSplit instanceof JobConfigurable) {
      ((JobConfigurable) hadoopInputSplit).configure(this.jobConf);
    }
  }
  hadoopInputSplit.readFields(in);
}
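The same rehydration pattern, reduced to a self-contained helper for clarity; this is a sketch rather than Flink's actual code, and the SplitRehydrator name is made up.

import java.io.DataInput;
import java.io.IOException;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.io.WritableFactories;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.JobConfigurable;

public final class SplitRehydrator {
  /** Instantiate a split by class, inject the JobConf if it wants one, then read its fields. */
  public static InputSplit rehydrate(Class<? extends InputSplit> splitType, JobConf jobConf, DataInput in)
      throws IOException {
    InputSplit split = (InputSplit) WritableFactories.newInstance(splitType);
    if (split instanceof Configurable) {
      ((Configurable) split).setConf(jobConf);
    } else if (split instanceof JobConfigurable) {
      ((JobConfigurable) split).configure(jobConf);
    }
    split.readFields(in);
    return split;
  }
}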
Use of org.apache.hadoop.conf.Configurable in project Gora by Apache.
The class DataStoreFactory, method createDataStore.
/**
 * Instantiate a new {@link DataStore}.
 *
 * @param <D> The class of the datastore.
 * @param <K> The class of keys in the datastore.
 * @param <T> The class of persistent objects in the datastore.
 * @param dataStoreClass The datastore implementation class.
 * @param keyClass The key class.
 * @param persistent The value class.
 * @param conf {@link Configuration} to be used by the store.
 * @param properties The properties to be used by the store.
 * @param schemaName A default schema name that will be put on the properties.
 * @return A new store instance.
 * @throws GoraException If any error occurs.
 */
public static <D extends DataStore<K, T>, K, T extends Persistent> D createDataStore(Class<D> dataStoreClass,
    Class<K> keyClass, Class<T> persistent, Configuration conf, Properties properties, String schemaName)
    throws GoraException {
  try {
    setDefaultSchemaName(properties, schemaName);
    D dataStore = ReflectionUtils.newInstance(dataStoreClass);
    if ((dataStore instanceof Configurable) && conf != null) {
      ((Configurable) dataStore).setConf(conf);
    }
    initializeDataStore(dataStore, keyClass, persistent, properties);
    return dataStore;
  } catch (GoraException ex) {
    throw ex;
  } catch (Exception ex) {
    throw new GoraException(ex);
  }
}
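A hedged usage sketch of the factory method follows; the StoreOpener wrapper, the "webpage" schema name, and the assumption that properties come from DataStoreFactory.createProps() (which loads gora.properties) are illustrative choices, not requirements of Gora.

import java.util.Properties;

import org.apache.gora.persistency.Persistent;
import org.apache.gora.store.DataStore;
import org.apache.gora.store.DataStoreFactory;
import org.apache.gora.util.GoraException;
import org.apache.hadoop.conf.Configuration;

public class StoreOpener {
  /** Thin wrapper around the createDataStore() call shown above. */
  public static <D extends DataStore<K, T>, K, T extends Persistent> D open(
      Class<D> storeClass, Class<K> keyClass, Class<T> valueClass, Configuration conf) throws GoraException {
    Properties props = DataStoreFactory.createProps(); // assumed helper; loads gora.properties from the classpath
    return DataStoreFactory.createDataStore(storeClass, keyClass, valueClass, conf, props, "webpage");
  }
}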
Use of org.apache.hadoop.conf.Configurable in project mongo-hadoop by MongoDB.
The class BSONSplitter, method run.
/**
 * When run as a Tool, BSONSplitter can be used to pre-split and compress
 * BSON files. This can be especially useful before uploading large BSON
 * files to HDFS to save time. The compressed splits are written to the
 * given output path or to the directory containing the input file, if
 * the output path is unspecified. A ".splits" file is not generated, since
 * each output file is expected to be its own split.
 *
 * @param args command-line arguments. Run with zero arguments to see usage.
 * @return exit status
 * @throws Exception if reading, splitting, or compressing the file fails
 */
@Override
public int run(final String[] args) throws Exception {
  if (args.length < 1) {
    printUsage();
    return 1;
  }
  // Parse command-line arguments.
  Path filePath = new Path(args[0]);
  String compressorName = null, outputDirectoryStr = null;
  Path outputDirectory;
  CompressionCodec codec;
  Compressor compressor;
  for (int i = 1; i < args.length; ++i) {
    // Guard the ++i lookahead so a trailing flag without a value is rejected.
    if ("-c".equals(args[i]) && i + 1 < args.length) {
      compressorName = args[++i];
    } else if ("-o".equals(args[i]) && i + 1 < args.length) {
      outputDirectoryStr = args[++i];
    } else {
      // CHECKSTYLE:OFF
      System.err.println("unrecognized option: " + args[i]);
      // CHECKSTYLE:ON
      printUsage();
      return 1;
    }
  }
  // Supply default values for unspecified arguments.
  if (null == outputDirectoryStr) {
    outputDirectory = filePath.getParent();
  } else {
    outputDirectory = new Path(outputDirectoryStr);
  }
  if (null == compressorName) {
    codec = new DefaultCodec();
  } else {
    Class<?> codecClass = Class.forName(compressorName);
    codec = (CompressionCodec) ReflectionUtils.newInstance(codecClass, getConf());
  }
  if (codec instanceof Configurable) {
    ((Configurable) codec).setConf(getConf());
  }
  // Do not write a .splits file so as not to confuse BSONSplitter.
  // Each compressed file will be its own split.
  MongoConfigUtil.setBSONWriteSplits(getConf(), false);
  // Open the file.
  FileSystem inputFS = FileSystem.get(filePath.toUri(), getConf());
  FileSystem outputFS = FileSystem.get(outputDirectory.toUri(), getConf());
  FSDataInputStream inputStream = inputFS.open(filePath);
  // Use BSONSplitter to split the file.
  Path splitFilePath = getSplitsFilePath(filePath, getConf());
  try {
    loadSplitsFromSplitFile(inputFS.getFileStatus(filePath), splitFilePath);
  } catch (NoSplitFileException e) {
    LOG.info("did not find .splits file in " + splitFilePath.toUri());
    setInputPath(filePath);
    readSplits();
  }
  List<BSONFileSplit> splits = getAllSplits();
  LOG.info("compressing " + splits.size() + " splits.");
  byte[] buf = new byte[1024 * 1024];
  for (int i = 0; i < splits.size(); ++i) {
    // e.g., hdfs:///user/hive/warehouse/mongo/OutputFile-42.bz2
    Path splitOutputPath = new Path(outputDirectory, filePath.getName() + "-" + i + codec.getDefaultExtension());
    // Compress the split into a new file.
    compressor = CodecPool.getCompressor(codec);
    CompressionOutputStream compressionOutputStream = null;
    try {
      compressionOutputStream = codec.createOutputStream(outputFS.create(splitOutputPath), compressor);
      int totalBytes = 0, bytesRead = 0;
      BSONFileSplit split = splits.get(i);
      inputStream.seek(split.getStart());
      LOG.info("writing " + splitOutputPath.toUri() + ".");
      while (totalBytes < split.getLength() && bytesRead >= 0) {
        bytesRead = inputStream.read(buf, 0, (int) Math.min(buf.length, split.getLength() - totalBytes));
        if (bytesRead > 0) {
          compressionOutputStream.write(buf, 0, bytesRead);
          totalBytes += bytesRead;
        }
      }
    } finally {
      if (compressionOutputStream != null) {
        compressionOutputStream.close();
      }
      CodecPool.returnCompressor(compressor);
    }
  }
  LOG.info("done.");
  return 0;
}
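Assuming BSONSplitter is wired up as a Hadoop Tool (which the run() override above suggests), a hedged sketch of driving it programmatically might look like this; the input path, output directory, and codec class are illustrative values, not defaults mandated by mongo-hadoop.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ToolRunner;

import com.mongodb.hadoop.splitter.BSONSplitter;

public class SplitAndCompress {
  public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new Configuration(), new BSONSplitter(), new String[] {
        "/data/dump/collection.bson",                     // input BSON file (illustrative path)
        "-c", "org.apache.hadoop.io.compress.BZip2Codec", // optional: compression codec class
        "-o", "/data/dump/splits"                         // optional: output directory
    });
    System.exit(exitCode);
  }
}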