Example usage of org.apache.storm.tuple.Tuple in the Apache Storm project.
From class EsPercolateBoltTest, method testEsPercolateBolt.
@Test
public void testEsPercolateBolt() throws Exception {
    // Index a percolator query matching {"user":"user1"}, then send a matching
    // document through the bolt and verify it is acked and re-emitted.
    String source = "{\"user\":\"user1\"}";
    String index = "index1";
    String type = ".percolator";
    node.client().prepareIndex("index1", ".percolator").setId("1").setSource("{\"query\":{\"match\":{\"user\":\"user1\"}}}").execute().actionGet();
    Tuple tuple = EsTestUtil.generateTestTuple(source, index, type, null);
    bolt.execute(tuple);
    verify(outputCollector).ack(tuple);
    // BUG FIX: the original verified emit(new Values(source, any(...))).
    // A Mockito matcher (any) must never be nested inside a real constructor
    // argument — that is invalid matcher usage and fails with
    // InvalidUseOfMatchersException at runtime. Capture the emitted Values
    // instead and assert on the field we can pin down (the source document).
    org.mockito.ArgumentCaptor<Values> emitted = org.mockito.ArgumentCaptor.forClass(Values.class);
    verify(outputCollector).emit(emitted.capture());
    org.junit.Assert.assertEquals(source, emitted.getValue().get(0));
}
Example usage of org.apache.storm.tuple.Tuple in the Apache Storm project.
From class TestHiveBolt, method testWithoutPartitions.
@Test
public void testWithoutPartitions() throws Exception {
    // Rebuild the unpartitioned target table from scratch so the count
    // assertions below start from a known-empty state.
    HiveSetupUtil.dropDB(conf, dbName1);
    HiveSetupUtil.createDbAndTable(conf, dbName1, tblName1, null, colNames, colTypes, null, dbLocation);
    DelimitedRecordHiveMapper recordMapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames));
    HiveOptions options = new HiveOptions(metaStoreURI, dbName1, tblName1, recordMapper)
            .withTxnsPerBatch(2)
            .withBatchSize(2)
            .withAutoCreatePartitions(false);
    bolt = new HiveBolt(options);
    bolt.prepare(config, null, collector);
    Integer recordId = 100;
    String message = "test-123";
    String city = "sunnyvale";
    String state = "ca";
    // Table must be empty before any tuples are executed.
    checkRecordCountInTable(tblName1, dbName1, 0);
    Set<Tuple> executedTuples = new HashSet<Tuple>();
    int tupleCount = 4;
    for (int i = 0; i < tupleCount; i++) {
        Tuple testTuple = generateTestTuple(recordId, message, city, state);
        bolt.execute(testTuple);
        executedTuples.add(testTuple);
    }
    // Every tuple pushed through the bolt should have been acked exactly once.
    for (Tuple acked : executedTuples) {
        verify(collector).ack(acked);
    }
    bolt.cleanup();
    // All four records should now be visible in the table.
    checkRecordCountInTable(tblName1, dbName1, 4);
}
Example usage of org.apache.storm.tuple.Tuple in the Apache Storm project.
From class TestHiveBolt, method testNoTickEmptyBatches.
@Test
public void testNoTickEmptyBatches() throws Exception {
    JsonRecordHiveMapper jsonMapper = new JsonRecordHiveMapper().withColumnFields(new Fields(colNames1)).withPartitionFields(new Fields(partNames));
    HiveOptions options = new HiveOptions(metaStoreURI, dbName, tblName, jsonMapper)
            .withTxnsPerBatch(2)
            .withBatchSize(2);
    bolt = new HiveBolt(options);
    bolt.prepare(config, null, new OutputCollector(collector));
    // A tick tuple arriving while the batch is empty must not produce any
    // acks (or any other collector interaction) beyond processing itself.
    Tuple tickTuple = MockTupleHelpers.mockTickTuple();
    bolt.execute(tickTuple);
    verifyZeroInteractions(collector);
    bolt.cleanup();
}
Example usage of org.apache.storm.tuple.Tuple in the Apache Storm project.
From class TestHiveBolt, method testNoAcksUntilFlushed.
@Test
public void testNoAcksUntilFlushed() {
    JsonRecordHiveMapper jsonMapper = new JsonRecordHiveMapper().withColumnFields(new Fields(colNames1)).withPartitionFields(new Fields(partNames));
    HiveOptions options = new HiveOptions(metaStoreURI, dbName, tblName, jsonMapper)
            .withTxnsPerBatch(2)
            .withBatchSize(2);
    bolt = new HiveBolt(options);
    bolt.prepare(config, null, new OutputCollector(collector));
    Tuple first = generateTestTuple(1, "SJC", "Sunnyvale", "CA");
    Tuple second = generateTestTuple(2, "SFO", "San Jose", "CA");
    // With batchSize = 2, a single tuple must not be acked yet —
    // acks happen only when the batch is flushed.
    bolt.execute(first);
    verifyZeroInteractions(collector);
    // The second tuple fills the batch; the flush acks both tuples.
    bolt.execute(second);
    verify(collector).ack(first);
    verify(collector).ack(second);
    bolt.cleanup();
}
Example usage of org.apache.storm.tuple.Tuple in the Apache Storm project.
From class TestHiveBolt, method testWithByteArrayIdandMessage.
@Test
public void testWithByteArrayIdandMessage() throws Exception {
    // NOTE(review): despite the method name, the visible body drives the bolt
    // with Integer/String values, not byte arrays — confirm against the
    // original test intent.
    DelimitedRecordHiveMapper recordMapper = new DelimitedRecordHiveMapper().withColumnFields(new Fields(colNames)).withPartitionFields(new Fields(partNames));
    HiveOptions options = new HiveOptions(metaStoreURI, dbName, tblName, recordMapper)
            .withTxnsPerBatch(2)
            .withBatchSize(2);
    bolt = new HiveBolt(options);
    bolt.prepare(config, null, collector);
    Integer recordId = 100;
    String message = "test-123";
    String city = "sunnyvale";
    String state = "ca";
    // Table must be empty before any tuples are executed.
    checkRecordCountInTable(tblName, dbName, 0);
    Set<Tuple> executedTuples = new HashSet<Tuple>();
    int tupleCount = 4;
    for (int i = 0; i < tupleCount; i++) {
        Tuple testTuple = generateTestTuple(recordId, message, city, state);
        bolt.execute(testTuple);
        executedTuples.add(testTuple);
    }
    // Every tuple pushed through the bolt should have been acked exactly once.
    for (Tuple acked : executedTuples) {
        verify(collector).ack(acked);
    }
    // All four records should now be visible in the table.
    checkRecordCountInTable(tblName, dbName, 4);
    bolt.cleanup();
}
Aggregations