Use of org.apache.hadoop.io.BooleanWritable in project goldenorb by jzachr.
In the class SampleBooleanMessageTest, the method startServer:
@SuppressWarnings("unchecked")
@Before
public void startServer() throws IOException {
    server = new RPCServer<BooleanMessage, BooleanWritable>(SERVER_PORT);
    server.start();
    Configuration conf = new Configuration();
    InetSocketAddress addr = new InetSocketAddress("localhost", SERVER_PORT);
    if (client == null) {
        client = (RPCProtocol<BooleanMessage, BooleanWritable>) RPC.waitForProxy(RPCProtocol.class,
            RPCProtocol.versionID, addr, conf);
    }
}
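BooleanWritable can serve as the RPC protocol's value type above because it implements Hadoop's Writable contract. As a minimal sketch, independent of the goldenorb sources, the following round-trips a value through write/readFields, the same serialization the RPC layer relies on:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.io.BooleanWritable;

public class BooleanWritableRoundTrip {
    public static void main(String[] args) throws Exception {
        BooleanWritable sent = new BooleanWritable(true);
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        sent.write(new DataOutputStream(buffer)); // serializes as a single byte

        BooleanWritable received = new BooleanWritable();
        received.readFields(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));
        System.out.println(received.get()); // prints: true
    }
}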
Use of org.apache.hadoop.io.BooleanWritable in project camel by apache.
In the class HdfsConsumerTest, the method testReadWithReadSuffix (the hdfs component, note the hdfs: URI scheme):
@Test
public void testReadWithReadSuffix() throws Exception {
    if (!canTest()) {
        return;
    }
    String[] beforeFiles = new File("target/test").list();
    int before = beforeFiles != null ? beforeFiles.length : 0;
    final Path file = new Path(new File("target/test/test-camel-boolean").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs1 = FileSystem.get(file.toUri(), conf);
    SequenceFile.Writer writer = createWriter(fs1, conf, file, NullWritable.class, BooleanWritable.class);
    NullWritable keyWritable = NullWritable.get();
    BooleanWritable valueWritable = new BooleanWritable();
    valueWritable.set(true);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs:localhost/" + file.getParent().toUri()
                + "?scheduler=#myScheduler&pattern=*&fileSystemType=LOCAL&fileType=SEQUENCE_FILE"
                + "&initialDelay=0&readSuffix=handled").to("mock:result");
        }
    });
    ScheduledExecutorService pool = context.getExecutorServiceManager().newScheduledThreadPool(null, "unitTestPool", 1);
    DefaultScheduledPollConsumerScheduler scheduler = new DefaultScheduledPollConsumerScheduler(pool);
    ((JndiRegistry) ((PropertyPlaceholderDelegateRegistry) context.getRegistry()).getRegistry()).bind("myScheduler", scheduler);
    context.start();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    resultEndpoint.assertIsSatisfied();
    // shut down the pool that ran the hdfs consumer thread before inspecting the files
    scheduler.getScheduledExecutorService().shutdown();
    scheduler.getScheduledExecutorService().awaitTermination(5000, TimeUnit.MILLISECONDS);
    Set<String> files = new HashSet<String>(Arrays.asList(new File("target/test").list()));
    // there may be leftover files from earlier runs, so only assert that exactly 2 new files were added
    assertThat(files.size() - before, equalTo(2));
    assertTrue(files.remove("test-camel-boolean.handled"));
    assertTrue(files.remove(".test-camel-boolean.handled.crc"));
}
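Once the route has consumed the file, readSuffix=handled renames it to test-camel-boolean.handled, and the local checksum filesystem writes the .crc sibling, which is what the last two assertions account for. As a minimal sketch (file name taken from the assertions above), reading the renamed sequence file back looks like this:

Configuration conf = new Configuration();
Path handled = new Path(new File("target/test/test-camel-boolean.handled").getAbsolutePath());
FileSystem fs = FileSystem.get(handled.toUri(), conf);
SequenceFile.Reader reader = new SequenceFile.Reader(fs, handled, conf);
try {
    NullWritable key = NullWritable.get();
    BooleanWritable value = new BooleanWritable();
    while (reader.next(key, value)) {
        System.out.println(value.get()); // prints: true
    }
} finally {
    reader.close();
}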
Use of org.apache.hadoop.io.BooleanWritable in project camel by apache.
In the class HdfsConsumerTest, the method testReadWithReadSuffix (the hdfs2 component, note the hdfs2: URI scheme):
@Test
public void testReadWithReadSuffix() throws Exception {
    if (!canTest()) {
        return;
    }
    String[] beforeFiles = new File("target/test").list();
    int before = beforeFiles != null ? beforeFiles.length : 0;
    final Path file = new Path(new File("target/test/test-camel-boolean").getAbsolutePath());
    Configuration conf = new Configuration();
    SequenceFile.Writer writer = createWriter(conf, file, NullWritable.class, BooleanWritable.class);
    NullWritable keyWritable = NullWritable.get();
    BooleanWritable valueWritable = new BooleanWritable();
    valueWritable.set(true);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();
    context.addRoutes(new RouteBuilder() {
        @Override
        public void configure() {
            from("hdfs2:localhost/" + file.getParent().toUri()
                + "?scheduler=#myScheduler&pattern=*&fileSystemType=LOCAL&fileType=SEQUENCE_FILE"
                + "&initialDelay=0&readSuffix=handled").to("mock:result");
        }
    });
    ScheduledExecutorService pool = context.getExecutorServiceManager().newScheduledThreadPool(null, "unitTestPool", 1);
    DefaultScheduledPollConsumerScheduler scheduler = new DefaultScheduledPollConsumerScheduler(pool);
    ((JndiRegistry) ((PropertyPlaceholderDelegateRegistry) context.getRegistry()).getRegistry()).bind("myScheduler", scheduler);
    context.start();
    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    resultEndpoint.assertIsSatisfied();
    // shut down the pool that ran the hdfs consumer thread before inspecting the files
    scheduler.getScheduledExecutorService().shutdown();
    scheduler.getScheduledExecutorService().awaitTermination(5000, TimeUnit.MILLISECONDS);
    Set<String> files = new HashSet<String>(Arrays.asList(new File("target/test").list()));
    // there may be leftover files from earlier runs, so only assert that exactly 2 new files were added
    assertThat(files.size() - before, equalTo(2));
    assertTrue(files.remove("test-camel-boolean.handled"));
    assertTrue(files.remove(".test-camel-boolean.handled.crc"));
}
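This variant differs from the previous test in two visible ways: the route uses the hdfs2: scheme, and createWriter no longer takes a FileSystem, which matches the option-based factory Hadoop 2 introduced. A hedged sketch of what such a helper could look like (the test's actual helper may differ):

private static SequenceFile.Writer createWriter(Configuration conf, Path file,
        Class<?> keyClass, Class<?> valueClass) throws IOException {
    // Hadoop 2 option-based factory; replaces the deprecated
    // createWriter(FileSystem, Configuration, Path, Class, Class) overload
    return SequenceFile.createWriter(conf,
        SequenceFile.Writer.file(file),
        SequenceFile.Writer.keyClass(keyClass),
        SequenceFile.Writer.valueClass(valueClass));
}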
Use of org.apache.hadoop.io.BooleanWritable in project camel by apache.
In the class HdfsProducerTest, the method testWriteBoolean:
@Test
public void testWriteBoolean() throws Exception {
    if (!canTest()) {
        return;
    }
    Boolean aBoolean = true;
    template.sendBody("direct:write_boolean", aBoolean);
    Configuration conf = new Configuration();
    Path file1 = new Path("file:///" + TEMP_DIR.toUri() + "/test-camel-boolean");
    FileSystem fs1 = FileSystem.get(file1.toUri(), conf);
    SequenceFile.Reader reader = new SequenceFile.Reader(fs1, file1, conf);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), conf);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), conf);
    reader.next(key, value);
    Boolean rBoolean = ((BooleanWritable) value).get();
    // expected value first, actual value second
    assertEquals(aBoolean, rBoolean);
    IOHelper.close(reader);
}
Use of org.apache.hadoop.io.BooleanWritable in project hadoop by apache.
In the class TestPipeApplication, the method testPipesReduser:
/**
 * Tests org.apache.hadoop.mapred.pipes.PipesReducer: verifies that key and
 * value data are transferred to the pipes child process.
 *
 * @throws Exception
 */
@Test
public void testPipesReduser() throws Exception {
    File[] psw = cleanTokenPasswordFile();
    JobConf conf = new JobConf();
    try {
        Token<AMRMTokenIdentifier> token = new Token<AMRMTokenIdentifier>("user".getBytes(),
            "password".getBytes(), new Text("kind"), new Text("service"));
        TokenCache.setJobToken(token, conf.getCredentials());
        File fCommand = getFileCommand("org.apache.hadoop.mapred.pipes.PipeReducerStub");
        conf.set(MRJobConfig.CACHE_LOCALFILES, fCommand.getAbsolutePath());
        PipesReducer<BooleanWritable, Text, IntWritable, Text> reducer = new PipesReducer<BooleanWritable, Text, IntWritable, Text>();
        reducer.configure(conf);
        BooleanWritable bw = new BooleanWritable(true);
        conf.set(MRJobConfig.TASK_ATTEMPT_ID, taskName);
        initStdOut(conf);
        conf.setBoolean(MRJobConfig.SKIP_RECORDS, true);
        CombineOutputCollector<IntWritable, Text> output = new CombineOutputCollector<IntWritable, Text>(new Counters.Counter(), new Progress());
        Reporter reporter = new TestTaskReporter();
        List<Text> texts = new ArrayList<Text>();
        texts.add(new Text("first"));
        texts.add(new Text("second"));
        texts.add(new Text("third"));
        reducer.reduce(bw, texts.iterator(), output, reporter);
        reducer.close();
        String stdOut = readStdOut(conf);
        // test data: key
        assertTrue(stdOut.contains("reducer key :true"));
        // and values
        assertTrue(stdOut.contains("reduce value :first"));
        assertTrue(stdOut.contains("reduce value :second"));
        assertTrue(stdOut.contains("reduce value :third"));
    } finally {
        if (psw != null) {
            // remove password files
            for (File file : psw) {
                file.deleteOnExit();
            }
        }
    }
}
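For orientation, here is a minimal hedged sketch of an old-API reducer keyed by BooleanWritable, matching the type parameters PipesReducer is instantiated with above. The class name is hypothetical; it is not the PipeReducerStub the test actually launches:

import java.io.IOException;
import java.util.Iterator;
import org.apache.hadoop.io.BooleanWritable;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

// Hypothetical reducer: joins the values grouped under each boolean key
// and emits their count, illustrating the same <BooleanWritable, Text,
// IntWritable, Text> signature used by the PipesReducer in the test.
public class BooleanKeyReducer extends MapReduceBase
        implements Reducer<BooleanWritable, Text, IntWritable, Text> {

    @Override
    public void reduce(BooleanWritable key, Iterator<Text> values,
            OutputCollector<IntWritable, Text> output, Reporter reporter) throws IOException {
        int count = 0;
        StringBuilder joined = new StringBuilder();
        while (values.hasNext()) {
            if (count++ > 0) {
                joined.append(',');
            }
            joined.append(values.next());
        }
        output.collect(new IntWritable(count), new Text(key.get() + ":" + joined));
    }
}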