Use of org.apache.flume.Context in project phoenix by apache.
Class PhoenixSink, method initializeSerializer:
/**
 * Initializes the serializer for Flume events.
 * @param context the sink configuration context
 * @param eventSerializerType either a built-in serializer name or a fully qualified class name
 */
private void initializeSerializer(final Context context, final String eventSerializerType) {
    String serializerClazz = null;
    EventSerializers eventSerializer = null;
    try {
        eventSerializer = EventSerializers.valueOf(eventSerializerType.toUpperCase());
    } catch (IllegalArgumentException iae) {
        // Not a built-in serializer; treat the configured value as a class name.
        serializerClazz = eventSerializerType;
    }
    final Context serializerContext = new Context();
    serializerContext.putAll(context.getSubProperties(FlumeConstants.CONFIG_SERIALIZER_PREFIX));
    copyPropertiesToSerializerContext(context, serializerContext);
    try {
        @SuppressWarnings("unchecked")
        Class<? extends EventSerializer> clazz = null;
        if (serializerClazz == null) {
            clazz = (Class<? extends EventSerializer>) Class.forName(eventSerializer.getClassName());
        } else {
            clazz = (Class<? extends EventSerializer>) Class.forName(serializerClazz);
        }
        serializer = clazz.newInstance();
        serializer.configure(serializerContext);
    } catch (Exception e) {
        logger.error("Could not instantiate event serializer.", e);
        Throwables.propagate(e);
    }
}
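For reference, a minimal sketch of how the lookup above is driven from configuration, assuming FlumeConstants.CONFIG_SERIALIZER_PREFIX resolves to "serializer." (the custom class name mentioned in the comment is hypothetical):

import org.apache.flume.Context;

public class SerializerConfigSketch {
    public static void main(String[] args) {
        Context context = new Context();
        // A known value such as "json" resolves through the EventSerializers enum ...
        context.put("serializer", "json");
        // ... while any other value would fall through to Class.forName, e.g.
        // context.put("serializer", "com.example.MyEventSerializer");  // hypothetical class

        // Keys under the "serializer." prefix are copied into the serializer's own Context.
        context.put("serializer.columns", "col1,col2");
        System.out.println(context.getSubProperties("serializer."));  // should print something like {columns=col1,col2}
    }
}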
Use of org.apache.flume.Context in project phoenix by apache.
Class JsonEventSerializerIT, method testEventsWithHeaders:
@Test
public void testEventsWithHeaders() throws Exception {
    sinkContext = new Context();
    final String fullTableName = "FLUME_JSON_TEST";
    final String ddl = "CREATE TABLE IF NOT EXISTS " + fullTableName
            + " (rowkey VARCHAR not null, col1 varchar , col2 double, col3 varchar[], col4 integer[], host varchar , source varchar \n"
            + " CONSTRAINT pk PRIMARY KEY (rowkey))\n";
    String columns = "col1,col2,col3,col4";
    String columnsMapping = "{\"col1\":\"col1\",\"col2\":\"col2\",\"col3\":\"col3\",\"col4\":\"col4\"}";
    String rowkeyType = DefaultKeyGenerator.UUID.name();
    String headers = "host,source";
    initSinkContext(fullTableName, ddl, columns, columnsMapping, rowkeyType, headers);
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();
    int numEvents = 10;
    String col1 = "val1";
    String a1 = "[aaa,bbb,ccc]";
    String a2 = "[1,2,3,4]";
    String hostHeader = "host1";
    String sourceHeader = "source1";
    String eventBody = null;
    List<Event> eventList = Lists.newArrayListWithCapacity(numEvents);
    for (int i = 0; i < numEvents; i++) {
        eventBody = "{\"col1\" : \"" + (col1 + i) + "\", \"col2\" : " + i * 10.5 + " , \"col3\" : " + a1 + " , \"col4\" : " + a2 + "}";
        Map<String, String> headerMap = Maps.newHashMapWithExpectedSize(2);
        headerMap.put("host", hostHeader);
        headerMap.put("source", sourceHeader);
        Event event = EventBuilder.withBody(Bytes.toBytes(eventBody), headerMap);
        eventList.add(event);
    }
    // put event in channel
    Transaction transaction = channel.getTransaction();
    transaction.begin();
    for (Event event : eventList) {
        channel.put(event);
    }
    transaction.commit();
    transaction.close();
    sink.process();
    final String query = " SELECT * FROM \n " + fullTableName;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    final ResultSet rs;
    final Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        rs = conn.createStatement().executeQuery(query);
        assertTrue(rs.next());
        assertEquals("host1", rs.getString("host"));
        assertEquals("source1", rs.getString("source"));
        assertTrue(rs.next());
        assertEquals("host1", rs.getString("host"));
        assertEquals("source1", rs.getString("source"));
    } finally {
        if (conn != null) {
            conn.close();
        }
    }
    sink.stop();
    assertEquals(LifecycleState.STOP, sink.getLifecycleState());
    dropTable(fullTableName);
}
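For concreteness, the loop above produces JSON event bodies of the following shape (shown for i = 0); each event also carries the headers host=host1 and source=source1, which end up in the host and source columns:

    {"col1" : "val10", "col2" : 0.0 , "col3" : [aaa,bbb,ccc] , "col4" : [1,2,3,4]}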
Use of org.apache.flume.Context in project phoenix by apache.
Class JsonEventSerializerIT, method initSinkContext:
private void initSinkContext(final String fullTableName, final String ddl, final String columns, final String columnsMapping, final String rowkeyType, final String headers) {
    Preconditions.checkNotNull(fullTableName);
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.JSON.name());
    sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, columns);
    if (null != columnsMapping) {
        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMNS_MAPPING, columnsMapping);
    }
    if (null != rowkeyType) {
        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, rowkeyType);
    }
    if (null != headers) {
        sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_HEADER_NAMES, headers);
    }
}
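As a usage sketch, the testEventsWithHeaders test above effectively invokes this helper as follows (argument values copied from that test; columnsMapping, rowkeyType, and headers may be passed as null to omit the corresponding properties):

    initSinkContext(
            "FLUME_JSON_TEST",                        // fullTableName
            ddl,                                      // CREATE TABLE IF NOT EXISTS FLUME_JSON_TEST ...
            "col1,col2,col3,col4",                    // columns
            "{\"col1\":\"col1\",\"col2\":\"col2\",\"col3\":\"col3\",\"col4\":\"col4\"}", // columnsMapping
            DefaultKeyGenerator.UUID.name(),          // rowkeyType
            "host,source");                           // headers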
Use of org.apache.flume.Context in project phoenix by apache.
Class PhoenixSinkIT, method testCreateTable:
@Test
public void testCreateTable() throws Exception {
    String tableName = generateUniqueName();
    String ddl = "CREATE TABLE " + tableName + " " + " (flume_time timestamp not null, col1 varchar , col2 varchar" + " CONSTRAINT pk PRIMARY KEY (flume_time))\n";
    final String fullTableName = tableName;
    sinkContext = new Context();
    sinkContext.put(FlumeConstants.CONFIG_TABLE, fullTableName);
    sinkContext.put(FlumeConstants.CONFIG_JDBC_URL, getUrl());
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER, EventSerializers.REGEX.name());
    sinkContext.put(FlumeConstants.CONFIG_TABLE_DDL, ddl);
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_REGULAR_EXPRESSION, "^([^\t]+)\t([^\t]+)$");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_COLUMN_NAMES, "col1,col2");
    sinkContext.put(FlumeConstants.CONFIG_SERIALIZER_PREFIX + FlumeConstants.CONFIG_ROWKEY_TYPE_GENERATOR, DefaultKeyGenerator.TIMESTAMP.name());
    sink = new PhoenixSink();
    Configurables.configure(sink, sinkContext);
    Assert.assertEquals(LifecycleState.IDLE, sink.getLifecycleState());
    final Channel channel = this.initChannel();
    sink.setChannel(channel);
    sink.start();
    HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
    try {
        boolean exists = admin.tableExists(fullTableName);
        Assert.assertTrue(exists);
    } finally {
        admin.close();
    }
}
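A minimal, standalone sketch of what the configured regular expression matches; mapping the capture groups onto col1 and col2 is handled by the REGEX serializer, and the sample tab-delimited body "value1\tvalue2" is hypothetical:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class RegexSketch {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("^([^\t]+)\t([^\t]+)$");
        Matcher matcher = pattern.matcher("value1\tvalue2");  // hypothetical event body
        if (matcher.matches()) {
            System.out.println(matcher.group(1)); // value1 -> col1
            System.out.println(matcher.group(2)); // value2 -> col2
        }
    }
}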
Use of org.apache.flume.Context in project phoenix by apache.
Class PhoenixSinkIT, method initChannel:
private Channel initChannel() {
    // Channel configuration
    Context channelContext = new Context();
    channelContext.put("capacity", "10000");
    channelContext.put("transactionCapacity", "200");
    Channel channel = new MemoryChannel();
    channel.setName("memorychannel");
    Configurables.configure(channel, channelContext);
    return channel;
}
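A hedged sketch of exercising a MemoryChannel configured the same way on its own, outside the sink, using the transaction pattern seen in the tests (the tab-delimited body is hypothetical):

import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;

public class ChannelSketch {
    public static void main(String[] args) {
        // Same configuration as initChannel() above.
        Context channelContext = new Context();
        channelContext.put("capacity", "10000");
        channelContext.put("transactionCapacity", "200");
        Channel channel = new MemoryChannel();
        channel.setName("memorychannel");
        Configurables.configure(channel, channelContext);
        channel.start();

        // Producers put events inside a channel transaction ...
        Transaction putTx = channel.getTransaction();
        putTx.begin();
        channel.put(EventBuilder.withBody("value1\tvalue2".getBytes()));  // hypothetical body
        putTx.commit();
        putTx.close();

        // ... and a sink such as PhoenixSink later drains the channel with take()
        // inside its own transaction.
        Transaction takeTx = channel.getTransaction();
        takeTx.begin();
        Event event = channel.take();
        System.out.println(new String(event.getBody()));
        takeTx.commit();
        takeTx.close();

        channel.stop();
    }
}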