Use of org.apache.hadoop.conf.Configuration in project camel by apache.
The class HdfsConsumerTest, method testReadLong.
@Test
public void testReadLong() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-long").getAbsolutePath());
    Configuration conf = new Configuration();
    SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, LongWritable.class);
    NullWritable keyWritable = NullWritable.get();
    LongWritable valueWritable = new LongWritable();
    long value = 31415926535L;
    valueWritable.set(value);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("hdfs2:localhost/" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.assertIsSatisfied();
}
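The createWriter helper called above is not part of the snippet. A minimal sketch of what it could look like, assuming the Hadoop 2.x SequenceFile.Writer.Option API; the actual helper in the Camel test class may differ:

// Hypothetical helper matching the call in testReadLong above.
private static SequenceFile.Writer createWriter(Configuration conf, Path file,
        Class<?> keyClass, Class<?> valueClass) throws IOException {
    return SequenceFile.createWriter(conf,
            SequenceFile.Writer.file(file),
            SequenceFile.Writer.keyClass(keyClass),
            SequenceFile.Writer.valueClass(valueClass));
}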
Use of org.apache.hadoop.conf.Configuration in project camel by apache.
The class HdfsProducerConsumerIntegrationTest, method testMultipleConsumers.
@Test
// see https://issues.apache.org/jira/browse/CAMEL-7318
public void testMultipleConsumers() throws Exception {
    Path p = new Path("hdfs://localhost:9000/tmp/test/multiple-consumers");
    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
    fs.mkdirs(p);
    for (int i = 1; i <= ITERATIONS; i++) {
        FSDataOutputStream os = fs.create(new Path(p, String.format("file-%03d.txt", i)));
        os.write(String.format("hello (%03d)\n", i).getBytes());
        os.close();
    }
    final Set<String> fileNames = new HashSet<String>();
    final CountDownLatch latch = new CountDownLatch(ITERATIONS);
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.whenAnyExchangeReceived(new Processor() {
        @Override
        public void process(Exchange exchange) throws Exception {
            fileNames.add(exchange.getIn().getHeader(Exchange.FILE_NAME, String.class));
            latch.countDown();
        }
    });
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            // difference in chunkSize only to allow multiple consumers
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=128").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=256").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=512").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=1024").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.expectedMessageCount(ITERATIONS);
    latch.await(30, TimeUnit.SECONDS);
    resultEndpoint.assertIsSatisfied();
    assertThat(fileNames.size(), equalTo(ITERATIONS));
}
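One detail worth flagging: the four competing consumers may invoke the Processor concurrently, while the snippet collects file names in a plain HashSet. A minimal hardening sketch, as an assumption on our part rather than part of the original test:

// Assumption: exchanges can arrive on different consumer threads, so guard
// the shared collection with a synchronized wrapper instead of a bare HashSet.
final Set<String> fileNames = Collections.synchronizedSet(new HashSet<String>());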
Use of org.apache.hadoop.conf.Configuration in project camel by apache.
The class HdfsProducerConsumerIntegrationTest, method tearDown.
@Override
@After
public void tearDown() throws Exception {
    super.tearDown();
    Thread.sleep(250);
    Configuration conf = new Configuration();
    Path dir = new Path("hdfs://localhost:9000/tmp/test");
    FileSystem fs = FileSystem.get(dir.toUri(), conf);
    fs.delete(dir, true);
    // the recursive delete above already removed this subdirectory, so this call is a harmless no-op
    fs.delete(new Path("hdfs://localhost:9000/tmp/test/multiple-consumers"), true);
}
Use of org.apache.hadoop.conf.Configuration in project camel by apache.
The class HdfsConsumerTest, method testSimpleConsumerWithEmptyFile.
@Test
public void testSimpleConsumerWithEmptyFile() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-normal-file").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    FSDataOutputStream out = fs.create(file);
    out.close();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    // TODO: See comment from Claus at ticket: https://issues.apache.org/jira/browse/CAMEL-8434
    resultEndpoint.expectedMinimumMessageCount(1);
    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("hdfs2:localhost/" + file.toUri() + "?fileSystemType=LOCAL&chunkSize=4096&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    Thread.sleep(2000);
    resultEndpoint.assertIsSatisfied();
    assertThat(resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length, equalTo(0));
}
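Each HdfsConsumerTest case opens with a canTest() guard that is not shown in these snippets. A plausible sketch, assuming the guard simply skips the suite on Windows, where Hadoop's local filesystem needs the native winutils binaries; the real base-class implementation may differ:

// Hypothetical guard; the actual HdfsTestSupport implementation may differ.
protected boolean canTest() {
    // Hadoop's local FS shells out to winutils.exe on Windows, which is often
    // missing on developer and CI machines, so skip these tests there.
    return !System.getProperty("os.name").toLowerCase().contains("windows");
}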
Use of org.apache.hadoop.conf.Configuration in project camel by apache.
The class HdfsConsumerTest, method testReadStringArrayFile.
@Test
public void testReadStringArrayFile() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-string").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs1 = FileSystem.get(file.toUri(), conf);
    ArrayFile.Writer writer = new ArrayFile.Writer(conf, fs1, "target/test/test-camel-string1", Text.class, CompressionType.NONE, new Progressable() {
        @Override
        public void progress() {
        }
    });
    Text valueWritable = new Text();
    String value = "CIAO!";
    valueWritable.set(value);
    writer.append(valueWritable);
    writer.close();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("hdfs2:localhost/" + file.getParent().toUri() + "?fileSystemType=LOCAL&fileType=ARRAY_FILE&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.assertIsSatisfied();
}
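To double-check the fixture outside Camel, the entry can be read back with Hadoop's ArrayFile.Reader. A minimal verification sketch, assuming the same path, filesystem, and configuration as in the test above:

// Read the single Text entry back from the ArrayFile written above.
ArrayFile.Reader reader = new ArrayFile.Reader(fs1, "target/test/test-camel-string1", conf);
try {
    Text read = new Text();
    reader.next(read); // reads the next (here: first and only) entry into 'read'
    assertThat(read.toString(), equalTo("CIAO!"));
} finally {
    reader.close();
}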