use of org.apache.hadoop.io.LongWritable in project camel by apache.
the class HdfsConsumerTest method testReadLong.
@Test
public void testReadLong() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-long").getAbsolutePath());
    Configuration conf = new Configuration();
    // Write a single NullWritable/LongWritable record into a local sequence file.
    SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, LongWritable.class);
    NullWritable keyWritable = NullWritable.get();
    LongWritable valueWritable = new LongWritable();
    long value = 31415926535L;
    valueWritable.set(value);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();
    // Consume the file through the hdfs2 endpoint and expect exactly one exchange.
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs2:localhost/" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.assertIsSatisfied();
}
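The createWriter helper used above is defined elsewhere in HdfsConsumerTest; the following is a minimal sketch of such a helper, assuming Hadoop 2's SequenceFile.Writer.Option factory methods (the signature here is a reconstruction, not the project's actual code):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;

// Hypothetical reconstruction: opens a SequenceFile writer for the given key/value classes.
private static SequenceFile.Writer createWriter(Configuration conf, Path file,
        Class<? extends Writable> keyClass, Class<? extends Writable> valueClass) throws IOException {
    return SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(file),
            SequenceFile.Writer.keyClass(keyClass),
            SequenceFile.Writer.valueClass(valueClass));
}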
use of org.apache.hadoop.io.LongWritable in project camel by apache.
the class HdfsProducerTest method testWriteLong.
@Test
public void testWriteLong() throws Exception {
    if (!canTest()) {
        return;
    }
    long aLong = 1234567890;
    template.sendBody("direct:write_long", aLong);
    // Read the value back from the sequence file the producer wrote.
    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-long");
    FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs1, file1, conf);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    long rLong = ((LongWritable) value).get();
    assertEquals(aLong, rLong);
    IOHelper.close(reader);
}
use of org.apache.hadoop.io.LongWritable in project camel by apache.
the class HdfsProducerTest method testWriteLong (a later variant of the same test, reading the file back via the non-deprecated SequenceFile.Reader.Option API instead of the FileSystem-based constructor).
@Test
public void testWriteLong() throws Exception {
    if (!canTest()) {
        return;
    }
    long aLong = 1234567890;
    template.sendBody("direct:write_long", aLong);
    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-long");
    // Same read-back check, using the Reader.Option factory methods.
    SequenceFile.Reader reader = new SequenceFile.Reader(conf, SequenceFile.Reader.file(file1));
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    long rLong = ((LongWritable) value).get();
    assertEquals(aLong, rLong);
    IOHelper.close(reader);
}
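Both testWriteLong variants rely on a direct:write_long route configured elsewhere in the test class; a hedged sketch of what such a route could look like, assuming the camel-hdfs2 component's valueType URI option (the exact endpoint URI is an assumption, not taken from the project):

// Hypothetical route setup for the producer test.
from("direct:write_long")
    .to("hdfs2:localhost/" + TEMP_DIR.toUri() + "/test-camel-long?fileSystemType=LOCAL&valueType=LONG");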
use of org.apache.hadoop.io.LongWritable in project gatk by broadinstitute.
the class VariantsSparkSource method getParallelVariantContexts.
/**
 * Loads variants in parallel using Hadoop-BAM for VCFs and BCFs.
 * @param vcf file to load variants from.
 * @param intervals intervals of variants to include, or null if all should be included.
 * @return JavaRDD<VariantContext> of variants from all files.
 */
public JavaRDD<VariantContext> getParallelVariantContexts(final String vcf, final List<SimpleInterval> intervals) {
    Configuration conf = new Configuration();
    // Register codecs so block-gzipped (.gz/.bgz) VCFs can be read.
    conf.setStrings("io.compression.codecs", BGZFEnhancedGzipCodec.class.getCanonicalName(), BGZFCodec.class.getCanonicalName());
    if (intervals != null && !intervals.isEmpty()) {
        VCFInputFormat.setIntervals(conf, intervals);
    }
    final JavaPairRDD<LongWritable, VariantContextWritable> rdd2 = ctx.newAPIHadoopFile(vcf, VCFInputFormat.class, LongWritable.class, VariantContextWritable.class, conf);
    // Keys are file offsets (LongWritable); only the wrapped VariantContext values are returned.
    return rdd2.map(v1 -> v1._2().get());
}
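A possible call site, sketched under the assumption that VariantsSparkSource is constructed from a JavaSparkContext and that SimpleInterval takes (contig, start, end); the file path and interval are illustrative, not from the original:

// Hypothetical usage: load variants from a block-gzipped VCF restricted to one interval.
VariantsSparkSource source = new VariantsSparkSource(ctx);
List<SimpleInterval> intervals = Collections.singletonList(new SimpleInterval("20", 1, 1000000));
JavaRDD<VariantContext> variants = source.getParallelVariantContexts("hdfs:///data/sample.vcf.gz", intervals);
long n = variants.count();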
use of org.apache.hadoop.io.LongWritable in project incubator-systemml by apache.
the class MLContextConversionUtil method javaRDDStringCSVToMatrixObject.
/**
 * Convert a {@code JavaRDD<String>} in CSV format to a {@code MatrixObject}
 *
 * @param javaRDD
 *            the Java RDD of strings
 * @param matrixMetadata
 *            matrix metadata
 * @return the {@code JavaRDD<String>} converted to a {@code MatrixObject}
 */
public static MatrixObject javaRDDStringCSVToMatrixObject(JavaRDD<String> javaRDD, MatrixMetadata matrixMetadata) {
    JavaPairRDD<LongWritable, Text> javaPairRDD = javaRDD.mapToPair(new ConvertStringToLongTextPair());
    MatrixCharacteristics mc = (matrixMetadata != null) ? matrixMetadata.asMatrixCharacteristics() : new MatrixCharacteristics();
    MatrixObject matrixObject = new MatrixObject(ValueType.DOUBLE, OptimizerUtils.getUniqueTempFileName(), new MetaDataFormat(mc, OutputInfo.CSVOutputInfo, InputInfo.CSVInputInfo));
    JavaPairRDD<LongWritable, Text> javaPairRDD2 = javaPairRDD.mapToPair(new CopyTextInputFunction());
    matrixObject.setRDDHandle(new RDDObject(javaPairRDD2));
    return matrixObject;
}
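A short usage sketch, assuming MatrixMetadata offers a (rows, columns) constructor and that sc is an existing JavaSparkContext; the CSV lines and dimensions are illustrative:

// Hypothetical usage: wrap two CSV lines as a 2x2 MatrixObject.
JavaRDD<String> csvLines = sc.parallelize(Arrays.asList("1.0,2.0", "3.0,4.0"));
MatrixMetadata meta = new MatrixMetadata(2, 2);
MatrixObject mo = MLContextConversionUtil.javaRDDStringCSVToMatrixObject(csvLines, meta);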