Use of org.apache.hadoop.fs.FSDataOutputStream in project camel by apache.
From the class HdfsConsumerTest, method testSimpleConsumerFileWithSizeEqualToNChunks.
@Test
public void testSimpleConsumerFileWithSizeEqualToNChunks() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-normal-file").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    FSDataOutputStream out = fs.create(file);
    // 42 writes of 5 bytes = 210 bytes, i.e. exactly 5 chunks at the 42-byte chunkSize used below
    for (int i = 0; i < 42; ++i) {
        out.write(new byte[] { 0x61, 0x62, 0x63, 0x64, 0x65 }); // "abcde"
        out.flush();
    }
    out.close();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(5);
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs:localhost/" + file.toUri() + "?fileSystemType=LOCAL&chunkSize=42&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.assertIsSatisfied();
    assertThat(resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length, equalTo(42));
}
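To make the arithmetic explicit: the loop writes 42 chunks of 5 bytes, producing a 210-byte file, so a chunkSize of 42 yields exactly 210 / 42 = 5 exchanges of 42 bytes each. The same write can be checked outside Camel via FileSystem.getFileStatus; a minimal standalone sketch (the class name and output path are illustrative, not part of the test):

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChunkMathCheck {
    public static void main(String[] args) throws Exception {
        // illustrative local path, resolved against the default (local) filesystem
        Path file = new Path(new File("target/chunk-math-check").getAbsolutePath());
        FileSystem fs = FileSystem.get(file.toUri(), new Configuration());
        try (FSDataOutputStream out = fs.create(file, true)) { // true = overwrite if present
            for (int i = 0; i < 42; i++) {
                out.write(new byte[] { 0x61, 0x62, 0x63, 0x64, 0x65 }); // "abcde"
            }
        }
        long len = fs.getFileStatus(file).getLen();
        System.out.println(len + " bytes / 42-byte chunks = " + (len / 42) + " messages"); // 210 / 42 = 5
    }
}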
Use of org.apache.hadoop.fs.FSDataOutputStream in project camel by apache.
From the class HdfsProducerConsumerIntegrationTest, method testMultipleConsumers.
// see https://issues.apache.org/jira/browse/CAMEL-7318
@Test
public void testMultipleConsumers() throws Exception {
    Path p = new Path("hdfs://localhost:9000/tmp/test/multiple-consumers");
    FileSystem fs = FileSystem.get(p.toUri(), new Configuration());
    fs.mkdirs(p);
    for (int i = 1; i <= ITERATIONS; i++) {
        FSDataOutputStream os = fs.create(new Path(p, String.format("file-%03d.txt", i)));
        os.write(String.format("hello (%03d)\n", i).getBytes());
        os.close();
    }
    // the processor below can be invoked from several consumer threads, so use a synchronized set
    final Set<String> fileNames = Collections.synchronizedSet(new HashSet<String>());
    final CountDownLatch latch = new CountDownLatch(ITERATIONS);
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.whenAnyExchangeReceived(new Processor() {
        @Override
        public void process(Exchange exchange) throws Exception {
            fileNames.add(exchange.getIn().getHeader(Exchange.FILE_NAME, String.class));
            latch.countDown();
        }
    });
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            // the routes differ only in chunkSize, which is enough to give each consumer a distinct endpoint URI
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=128").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=256").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=512").to("mock:result");
            from("hdfs2://localhost:9000/tmp/test/multiple-consumers?pattern=*.txt&fileSystemType=HDFS&chunkSize=1024").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.expectedMessageCount(ITERATIONS);
    // wait up to 30 seconds for every file to be picked up by one of the consumers
    latch.await(30, TimeUnit.SECONDS);
    resultEndpoint.assertIsSatisfied();
    assertThat(fileNames.size(), equalTo(ITERATIONS));
}
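This integration test assumes an HDFS NameNode listening on localhost:9000. When no real cluster is available, Hadoop's MiniDFSCluster (from the hadoop-minicluster test artifact) can stand in for one in-process. A minimal harness sketch, assuming that dependency is on the classpath (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class LocalHdfsHarness {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // bind the NameNode to the port the routes above expect
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).nameNodePort(9000).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        System.out.println("Mini HDFS cluster up at " + fs.getUri());
        // ... run the producer/consumer test against hdfs://localhost:9000/ ...
        cluster.shutdown();
    }
}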
Use of org.apache.hadoop.fs.FSDataOutputStream in project camel by apache.
From the class HdfsConsumerTest, method testSimpleConsumerWithEmptyFile.
@Test
public void testSimpleConsumerWithEmptyFile() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-normal-file").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    FSDataOutputStream out = fs.create(file);
    out.close(); // create-and-close leaves a zero-length file
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    // TODO: see comment from Claus at ticket: https://issues.apache.org/jira/browse/CAMEL-8434
    resultEndpoint.expectedMinimumMessageCount(1);
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs2:localhost/" + file.toUri() + "?fileSystemType=LOCAL&chunkSize=4096&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    // give the consumer a moment to poll the empty file
    Thread.sleep(2000);
    resultEndpoint.assertIsSatisfied();
    assertThat(resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length, equalTo(0));
}
Use of org.apache.hadoop.fs.FSDataOutputStream in project camel by apache.
From the class HdfsConsumerTest, method testSimpleConsumerFileWithSizeEqualToNChunks (the camel-hdfs2 variant of the first example; note the hdfs2: scheme in the route URI).
@Test
public void testSimpleConsumerFileWithSizeEqualToNChunks() throws Exception {
    if (!canTest()) {
        return;
    }
    final Path file = new Path(new File("target/test/test-camel-normal-file").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(file.toUri(), conf);
    FSDataOutputStream out = fs.create(file);
    // 42 writes of 5 bytes = 210 bytes, i.e. exactly 5 chunks at the 42-byte chunkSize used below
    for (int i = 0; i < 42; ++i) {
        out.write(new byte[] { 0x61, 0x62, 0x63, 0x64, 0x65 }); // "abcde"
        out.flush();
    }
    out.close();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(5);
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs2:localhost/" + file.toUri() + "?fileSystemType=LOCAL&chunkSize=42&initialDelay=0").to("mock:result");
        }
    });
    context.start();
    resultEndpoint.assertIsSatisfied();
    assertThat(resultEndpoint.getReceivedExchanges().get(0).getIn().getBody(ByteArrayOutputStream.class).toByteArray().length, equalTo(42));
}
Use of org.apache.hadoop.fs.FSDataOutputStream in project hadoop by apache.
From the class ITUseMiniCluster, method simpleReadAfterWrite.
public void simpleReadAfterWrite(final FileSystem fs) throws IOException {
    LOG.info("Testing read-after-write with FS implementation: {}", fs);
    final Path path = new Path(TEST_PATH, FILENAME);
    if (!fs.mkdirs(path.getParent())) {
        throw new IOException("Mkdirs failed to create " + TEST_PATH);
    }
    try (final FSDataOutputStream out = fs.create(path)) {
        out.writeUTF(TEXT);
    }
    try (final FSDataInputStream in = fs.open(path)) {
        final String result = in.readUTF();
        Assert.assertEquals("Didn't read back text we wrote.", TEXT, result);
    }
}
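One caveat the examples above gloss over: on HDFS, FSDataOutputStream.flush() alone does not make written data visible or durable. The stream provides hflush() (flush so that new readers can see the data) and hsync() (additionally ask the datanodes to persist it to disk). A minimal sketch, assuming a reachable NameNode; the URI and path are illustrative:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DurableWrite {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000/"), new Configuration());
        try (FSDataOutputStream out = fs.create(new Path("/tmp/example/durable.txt"))) {
            out.writeUTF("checkpoint");
            out.hflush(); // make the written bytes visible to new readers
            out.hsync();  // also ask the datanodes to sync to persistent storage
        }
    }
}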