Usage example of org.apache.phoenix.flume.sink.PhoenixSink from the Apache Phoenix project: the testBatchEvents method of the JsonEventSerializerIT class.
@Test
public void testBatchEvents() throws EventDeliveryException, SQLException {
    final String fullTableName = "FLUME_JSON_TEST";
    initSinkContextWithDefaults(fullTableName);

    // Configure the sink and verify it starts from the IDLE lifecycle state.
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());

    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();

    int numEvents = 150;
    String col1 = "val1";
    String a1 = "[aaa,bbb,ccc]";
    String a2 = "[1,2,3,4]";
    String eventBody = null;
    List<Event> eventList = Lists.newArrayListWithCapacity(numEvents);
    // BUG FIX: the loop previously used eventList.size() as its bound, which is 0
    // for a freshly created list, so no events were ever built and the final
    // assertion passed vacuously as 0 == 0. Iterate numEvents times instead.
    for (int i = 0; i < numEvents; i++) {
        eventBody = "{\"col1\" : \"" + (col1 + i) + "\", \"col2\" : " + i * 10.5 + " , \"col3\" : " + a1 + " , \"col4\" : " + a2 + "}";
        Event event = EventBuilder.withBody(Bytes.toBytes(eventBody));
        eventList.add(event);
    }

    // Put all events into the channel inside a single transaction.
    Transaction transaction = channel.getTransaction();
    transaction.begin();
    for (Event event : eventList) {
        channel.put(event);
    }
    transaction.commit();
    transaction.close();

    sink.process();

    // Every queued event should have been written as a row.
    int rowsInDb = countRows(fullTableName);
    assertEquals(eventList.size(), rowsInDb);

    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());

    dropTable(fullTableName);
}
Usage example of org.apache.phoenix.flume.sink.PhoenixSink from the Apache Phoenix project: the testWithOutColumnsMapping method of the JsonEventSerializerIT class.
@Test
public void testWithOutColumnsMapping() throws EventDeliveryException, SQLException {
    // Table and sink configuration: no explicit JSON-to-column mapping is
    // supplied, so column names are matched directly against JSON keys.
    final String fullTableName = "FLUME_JSON_TEST";
    final String ddl = "CREATE TABLE IF NOT EXISTS " + fullTableName + " (flume_time timestamp not null, col1 varchar , col2 double, col3 varchar[], col4 integer[]" + " CONSTRAINT pk PRIMARY KEY (flume_time))\n";
    final String columns = "col1,col2,col3,col4";
    final String rowkeyType = DefaultKeyGenerator.TIMESTAMP.name();
    initSinkContext(fullTableName, ddl, columns, null, rowkeyType, null);

    // Bring the sink up; it must report IDLE before start() is called.
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();

    // Deliver a single JSON event through a channel transaction.
    final String eventBody = "{\"col1\" : \"kalyan\", \"col2\" : 10.5, \"col3\" : [\"abc\",\"pqr\",\"xyz\"], \"col4\" : [1,2,3,4]}";
    final Event event = EventBuilder.withBody(Bytes.toBytes(eventBody));
    final Transaction txn = channel.getTransaction();
    txn.begin();
    channel.put(event);
    txn.commit();
    txn.close();
    sink.process();

    // Exactly one row must have landed in the table.
    assertEquals(1, countRows(fullTableName));

    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    dropTable(fullTableName);
}
Usage example of org.apache.phoenix.flume.sink.PhoenixSink from the Apache Phoenix project: the testInnerColumns method of the JsonEventSerializerIT class.
@Test
public void testInnerColumns() throws EventDeliveryException, SQLException {
    // Configure the sink with a columns mapping that uses dotted paths
    // (e.g. "x.y", "a.b1.c") to pull values out of nested JSON objects.
    final String fullTableName = "FLUME_JSON_TEST";
    final String ddl = "CREATE TABLE IF NOT EXISTS " + fullTableName + " (flume_time timestamp not null, col1 varchar , col2 double, col3 varchar[], col4 integer[]" + " CONSTRAINT pk PRIMARY KEY (flume_time))\n";
    final String columns = "col1,col2,col3,col4";
    final String rowkeyType = DefaultKeyGenerator.TIMESTAMP.name();
    final String columnsMapping = "{\"col1\":\"col1\",\"col2\":\"x.y\",\"col3\":\"a.b1.c\",\"col4\":\"col4\"}";
    initSinkContext(fullTableName, ddl, columns, columnsMapping, rowkeyType, null);

    // Bring the sink up and attach the channel.
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();

    // One nested-JSON event is pushed through the channel.
    final String eventBody = "{\"col1\" : \"kalyan\", \"x\" : {\"y\" : 10.5}, \"a\" : {\"b1\" : {\"c\" : [\"abc\",\"pqr\",\"xyz\"] }, \"b2\" : 111}, \"col4\" : [1,2,3,4]}";
    final Event event = EventBuilder.withBody(Bytes.toBytes(eventBody));
    final Transaction txn = channel.getTransaction();
    txn.begin();
    channel.put(event);
    txn.commit();
    txn.close();
    sink.process();

    // The nested fields should have been flattened into a single row.
    assertEquals(1, countRows(fullTableName));

    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    dropTable(fullTableName);
}
Usage example of org.apache.phoenix.flume.sink.PhoenixSink from the Apache Phoenix project: the testInnerColumnsWithArrayMapping method of the JsonEventSerializerIT class.
@Test
public void testInnerColumnsWithArrayMapping() throws EventDeliveryException, SQLException {
    // Configure the sink with an array-projection mapping ("a.b[*].c") that
    // collects a field from every element of a nested JSON array.
    final String fullTableName = "FLUME_JSON_TEST";
    final String ddl = "CREATE TABLE IF NOT EXISTS " + fullTableName + " (flume_time timestamp not null, col1 varchar , col2 double, col3 varchar[], col4 integer[]" + " CONSTRAINT pk PRIMARY KEY (flume_time))\n";
    final String columns = "col1,col2,col3,col4";
    final String rowkeyType = DefaultKeyGenerator.TIMESTAMP.name();
    final String columnsMapping = "{\"col1\":\"col1\",\"col2\":\"x.y\",\"col3\":\"a.b[*].c\",\"col4\":\"col4\"}";
    initSinkContext(fullTableName, ddl, columns, columnsMapping, rowkeyType, null);

    // Start the sink; lifecycle must be IDLE before start().
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();

    // Push one event whose "a.b" member is an array of objects.
    final String eventBody = "{\"col1\" : \"kalyan\", \"x\" : {\"y\" : 10.5}, \"a\" : {\"b\" : [{\"c\" : \"abc\"}, {\"c\" : \"pqr\"}, {\"c\" : \"xyz\"}] , \"b2\" : 111}, \"col4\" : [1,2,3,4]}";
    final Event event = EventBuilder.withBody(Bytes.toBytes(eventBody));
    final Transaction txn = channel.getTransaction();
    txn.begin();
    channel.put(event);
    txn.commit();
    txn.close();
    sink.process();

    // The projected array values should produce exactly one row.
    assertEquals(1, countRows(fullTableName));

    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    dropTable(fullTableName);
}
Usage example of org.apache.phoenix.flume.sink.PhoenixSink from the Apache Phoenix project: the testEventsWithHeaders method of the CsvEventSerializerIT class.
@Test
public void testEventsWithHeaders() throws Exception {
    // Fresh context: this test also maps Flume event headers ("host",
    // "source") into table columns alongside the CSV body fields.
    sinkContext = new Context();
    final String fullTableName = "FLUME_CSV_TEST";
    final String ddl = "CREATE TABLE IF NOT EXISTS " + fullTableName + " (rowkey VARCHAR not null, col1 varchar , col2 double, col3 varchar[], col4 integer[], host varchar , source varchar \n" + " CONSTRAINT pk PRIMARY KEY (rowkey))\n";
    final String columns = "col1,col2,col3,col4";
    final String rowkeyType = DefaultKeyGenerator.UUID.name();
    final String headers = "host,source";
    initSinkContext(fullTableName, ddl, columns, null, null, null, null, rowkeyType, headers);

    // Bring the sink up and attach the channel.
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();

    // Build a batch of CSV events, each carrying the same two headers.
    final int numEvents = 10;
    final String col1 = "val1";
    final String quotedStrings = "\"aaa,bbb,ccc\"";
    final String quotedInts = "\"1,2,3,4\"";
    final String hostHeader = "host1";
    final String sourceHeader = "source1";
    final List<Event> eventList = Lists.newArrayListWithCapacity(numEvents);
    for (int i = 0; i < numEvents; i++) {
        final String eventBody = (col1 + i) + "," + i * 10.5 + "," + quotedStrings + "," + quotedInts;
        final Map<String, String> headerMap = Maps.newHashMapWithExpectedSize(2);
        headerMap.put("host", hostHeader);
        headerMap.put("source", sourceHeader);
        eventList.add(EventBuilder.withBody(Bytes.toBytes(eventBody), headerMap));
    }

    // Deliver the whole batch in one channel transaction.
    final Transaction txn = channel.getTransaction();
    txn.begin();
    for (final Event event : eventList) {
        channel.put(event);
    }
    txn.commit();
    txn.close();
    sink.process();

    // Verify the header values were persisted on (at least) the first two rows.
    final String query = " SELECT * FROM \n " + fullTableName;
    final Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        final ResultSet rs = conn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals("host1", rs.getString("host"));
        assertEquals("source1", rs.getString("source"));
        assertTrue(rs.next());
        assertEquals("host1", rs.getString("host"));
        assertEquals("source1", rs.getString("source"));
    }

    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    dropTable(fullTableName);
}
Aggregations