Example usage of com.linkedin.databus.core.data_model.PhysicalPartition in the LinkedIn databus project.
From the class TestDbusEventStatsCollectorPartitioner, method testStatsCollectorPartitionerMultipleDbs:
/**
 * Verifies that DbusEventStatsCollectorsPartitioner merges per-partition stats
 * collectors into a single aggregate collector per database name, across three
 * databases with two partitions each, and that the registered callback sees one
 * add/remove notification per database.
 */
@Test
public void testStatsCollectorPartitionerMultipleDbs() {
  DbusEventStatsCollectorsPartitioner collector = new DbusEventStatsCollectorsPartitioner(1, ":inbound", null);
  TestStatsCollectorCallback callback = new TestStatsCollectorCallback();
  collector.registerStatsCallback(callback);
  // Add 2 collectors for DB1
  PhysicalPartition p1 = new PhysicalPartition(1, "db1");
  PhysicalPartition p2 = new PhysicalPartition(2, "db1");
  DbusEventsStatisticsCollector c1 = new DbusEventsStatisticsCollector(1, "db1:1", true, false, null);
  DbusEventsStatisticsCollector c2 = new DbusEventsStatisticsCollector(1, "db1:2", true, false, null);
  collector.addStatsCollector(p1, c1);
  collector.addStatsCollector(p2, c2);
  // Add 2 collectors for DB2
  PhysicalPartition p3 = new PhysicalPartition(1, "db2");
  PhysicalPartition p4 = new PhysicalPartition(2, "db2");
  DbusEventsStatisticsCollector c3 = new DbusEventsStatisticsCollector(1, "db2:1", true, false, null);
  DbusEventsStatisticsCollector c4 = new DbusEventsStatisticsCollector(1, "db2:2", true, false, null);
  collector.addStatsCollector(p3, c3);
  collector.addStatsCollector(p4, c4);
  // Add 2 collectors for DB3
  PhysicalPartition p5 = new PhysicalPartition(3, "db3");
  PhysicalPartition p6 = new PhysicalPartition(4, "db3");
  DbusEventsStatisticsCollector c5 = new DbusEventsStatisticsCollector(1, "db3:3", true, false, null);
  DbusEventsStatisticsCollector c6 = new DbusEventsStatisticsCollector(1, "db3:4", true, false, null);
  collector.addStatsCollector(p5, c5);
  collector.addStatsCollector(p6, c6);
  // Feed distinct event counts/SCN bases into each per-partition collector.
  // NOTE(review): assumes StatsWriter.addEvents(a, b, scnBase) produces a*b data
  // events starting near scnBase — TODO confirm against StatsWriter.
  StatsWriter w1 = new StatsWriter(c1);
  StatsWriter w2 = new StatsWriter(c2);
  StatsWriter w3 = new StatsWriter(c3);
  StatsWriter w4 = new StatsWriter(c4);
  StatsWriter w5 = new StatsWriter(c5);
  StatsWriter w6 = new StatsWriter(c6);
  w1.addEvents(2, 2, 100);
  w2.addEvents(2, 2, 200);
  w3.addEvents(3, 2, 300);
  w4.addEvents(3, 2, 400);
  w5.addEvents(4, 2, 500);
  w6.addEvents(4, 2, 600);
  // Verify DB1 collector: merged view must combine c1 and c2.
  StatsCollectors<DbusEventsStatisticsCollector> col = collector.getDBStatsCollector("db1");
  Assert.assertNotNull(col);
  col.mergeStatsCollectors();
  LOG.info("Merged Stats : " + col.getStatsCollector().getTotalStats());
  LOG.info("C1 Stats : " + c1.getTotalStats());
  LOG.info("C2 Stats : " + c2.getTotalStats());
  DbusEventsTotalStats s = col.getStatsCollector().getTotalStats();
  Assert.assertEquals("Total Events", 8, s.getNumDataEvents());
  Assert.assertEquals("Sys Events", 4, s.getNumSysEvents());
  Assert.assertEquals("Min Scn", 101, s.getMinScn());
  Assert.assertEquals("Max Scn", 205, s.getMaxScn());
  // Verify DB2 collector: merged view must combine c3 and c4.
  col = collector.getDBStatsCollector("db2");
  Assert.assertNotNull(col);
  col.mergeStatsCollectors();
  LOG.info("Merged Stats : " + col.getStatsCollector().getTotalStats());
  LOG.info("C3 Stats : " + c3.getTotalStats());
  LOG.info("C4 Stats : " + c4.getTotalStats());
  s = col.getStatsCollector().getTotalStats();
  Assert.assertEquals("Total Events", 12, s.getNumDataEvents());
  Assert.assertEquals("Sys Events", 4, s.getNumSysEvents());
  Assert.assertEquals("Min Scn", 301, s.getMinScn());
  Assert.assertEquals("Max Scn", 407, s.getMaxScn());
  // Verify DB3 collector: merged view must combine c5 and c6.
  col = collector.getDBStatsCollector("db3");
  Assert.assertNotNull(col);
  col.mergeStatsCollectors();
  LOG.info("Merged Stats : " + col.getStatsCollector().getTotalStats());
  // Fixed copy-paste defect: these log lines printed c5/c6 stats but were
  // labeled "C3"/"C4", which made failure diagnosis misleading.
  LOG.info("C5 Stats : " + c5.getTotalStats());
  LOG.info("C6 Stats : " + c6.getTotalStats());
  s = col.getStatsCollector().getTotalStats();
  Assert.assertEquals("Total Events", 16, s.getNumDataEvents());
  Assert.assertEquals("Sys Events", 4, s.getNumSysEvents());
  Assert.assertEquals("Min Scn", 501, s.getMinScn());
  Assert.assertEquals("Max Scn", 609, s.getMaxScn());
  // One callback notification per database (db1, db2, db3), not per partition.
  Assert.assertEquals("Num Stats Callback", 3, callback.getCollectorsAddedList().size());
  collector.removeAllStatsCollector();
  Assert.assertEquals("Num Stats Callback", 3, callback.getCollectorsRemovedList().size());
}
Example usage of com.linkedin.databus.core.data_model.PhysicalPartition in the LinkedIn databus project.
From the class TestPhysicalSourceConfig, method testPhysicalSourceConfigConstructor:
// test partial constructor
/**
 * Exercises the partial PhysicalSourceConfig constructor and verifies that
 * ten logical sources added one by one are all retained.
 */
@Test
public void testPhysicalSourceConfigConstructor() {
  Integer partitionId = 10;
  String dbName = "dbName";
  PhysicalPartition partition = new PhysicalPartition(partitionId, dbName);
  PhysicalSource source = new PhysicalSource("uri");
  PhysicalSourceConfig config = new PhysicalSourceConfig(partition.getName(), source.getUri(), partition.getId());
  // Ids accumulate (10, 11, 13, 16, ...) so every logical source is distinct.
  int nextSourceId = 10;
  String baseName = "lName";
  for (int i = 0; i < 10; i++) {
    nextSourceId += i;
    LogicalSourceConfig logicalSource = new LogicalSourceConfig();
    logicalSource.setId((short) nextSourceId);
    logicalSource.setName(baseName + nextSourceId);
    logicalSource.setPartition((short) 0);
    logicalSource.setUri("lUri");
    config.addSource(logicalSource);
  }
  assertEquals(10, config.getSources().size(), "number of logical source doesn't match");
}
Example usage of com.linkedin.databus.core.data_model.PhysicalPartition in the LinkedIn databus project.
From the class TestPhysicalSourceConfig, method testParsePhysicalPartitionString:
/**
 * Checks PhysicalPartition.parsePhysicalPartitionString: valid
 * "name&lt;sep&gt;id" strings parse into (name, id); strings lacking the given
 * separator (or empty) must raise IOException.
 */
@Test
public void testParsePhysicalPartitionString() throws IOException {
  // Underscore-separated form.
  PhysicalPartition parsed = PhysicalPartition.parsePhysicalPartitionString("abc_123", "_");
  assertEquals("abc", parsed.getName());
  assertEquals(123, parsed.getId().intValue());
  // Dot-separated form; the separator is a regex, hence the escape.
  parsed = PhysicalPartition.parsePhysicalPartitionString("abc.123", "\\.");
  assertEquals("abc", parsed.getName());
  assertEquals(123, parsed.getId().intValue());
  // None of these contain a "_" separator in the expected position.
  String[] invalidStrings = { "abc.123", "abc123", "123", "abc", "" };
  for (String invalid : invalidStrings) {
    try {
      PhysicalPartition.parsePhysicalPartitionString(invalid, "_");
      fail("should fail on invalid partition string");
    } catch (IOException expected) {
      // expected
    }
  }
}
Example usage of com.linkedin.databus.core.data_model.PhysicalPartition in the LinkedIn databus project.
From the class RelayCommandRequestProcessor, method process:
@Override
public DatabusRequest process(DatabusRequest request) throws IOException, RequestProcessingException {
String command = request.getParams().getProperty(DatabusRequest.PATH_PARAM_NAME);
if (null == command) {
throw new InvalidRequestParamValueException(COMMAND_NAME, "command", "null");
}
String reply = "Command " + command + " completed ";
LOG.info("got relayCommand = " + command);
if (command.equals(SAVE_META_STATE_PARAM)) {
_relay.saveBufferMetaInfo(true);
} else if (command.equals(SHUTDOWN_RELAY_PARAM)) {
String msg = "received shutdown curl request from: " + request.getRemoteAddress() + ". Shutting down\n";
LOG.warn(msg);
request.getResponseContent().write(ByteBuffer.wrap(msg.getBytes("UTF-8")));
request.getResponseContent().close();
_relay.shutdown();
} else if (command.equals(VALIDATE_RELAY_BUFFER_PARAM)) {
_relay.validateRelayBuffers();
} else if (command.equals(DISCONNECT_CLIENTS)) {
Channel rspChannel = request.getResponseContent().getRawChannel();
_relay.disconnectDBusClients(rspChannel);
} else if (command.equals(RUN_GC_PARAM)) {
Runtime rt = Runtime.getRuntime();
long mem = rt.freeMemory();
LOG.info("mem before gc = " + rt.freeMemory() + " out of " + rt.totalMemory());
long time = System.currentTimeMillis();
System.gc();
time = System.currentTimeMillis() - time;
mem = rt.freeMemory() - mem;
reply = new String("GC run. Took " + time + " millsecs. Freed " + mem + " bytes out of " + rt.totalMemory());
} else if (command.startsWith(RESET_RELAY_BUFFER_PARAM)) {
// We expect the request to be of the format:
// resetRelayBuffer/<dbName>/<partitionId>?prevScn=<long>&binlogOffset=<long>
String[] resetCommands = command.split("/");
if (resetCommands.length != 3) {
throw new InvalidRequestParamValueException(COMMAND_NAME, "command", command);
}
String dbName = resetCommands[1];
String dbPart = resetCommands[2];
long prevScn = request.getRequiredLongParam(PREV_SCN_PARAM);
long binlogOffset = request.getOptionalLongParam(BINLOG_OFFSET_PARAM, 0L);
LOG.info("reset command = " + dbName + " part =" + dbPart);
try {
_relay.resetBuffer(new PhysicalPartition(Integer.parseInt(dbPart), dbName), prevScn, binlogOffset);
} catch (BufferNotFoundException e) {
reply = new String("command " + command + ":" + e.getMessage());
}
} else if (command.startsWith(GET_BINLOG_OFFSET_PARAM)) {
String[] getOfsArgs = command.split("/");
if (getOfsArgs.length != 2) {
throw new InvalidRequestParamValueException(GET_BINLOG_OFFSET_PARAM, "Server ID", "");
}
int serverId;
try {
serverId = Integer.parseInt(getOfsArgs[1]);
int[] offset = _relay.getBinlogOffset(serverId);
if (offset.length != 2) {
reply = "Error getting binlog offset";
} else {
reply = new String("RelayLastEvent(" + offset[0] + "," + offset[1] + ")");
}
} catch (NumberFormatException e) {
throw new InvalidRequestParamValueException(GET_BINLOG_OFFSET_PARAM, "Server ID", getOfsArgs[1]);
} catch (DatabusException e) {
reply = new String("command " + command + "failed with:" + e.getMessage());
}
} else if (command.startsWith(PRINT_RELAY_INFO_PARAM)) {
try {
Map<String, String> infoMap = _relay.printInfo();
reply = makeJsonResponse(infoMap, request);
} catch (Exception e) {
reply = new String("command " + command + " failed with:" + e.getMessage());
}
} else {
// invalid command
reply = new String("command " + command + " is invalid. Valid commands are: " + SAVE_META_STATE_PARAM + "|" + SHUTDOWN_RELAY_PARAM + "|" + VALIDATE_RELAY_BUFFER_PARAM + "|" + RUN_GC_PARAM + "|" + RESET_RELAY_BUFFER_PARAM + "|" + GET_BINLOG_OFFSET_PARAM + "|" + PRINT_RELAY_INFO_PARAM + "|" + DISCONNECT_CLIENTS);
}
byte[] responseBytes = new byte[(reply.length() + 2)];
System.arraycopy(reply.getBytes("UTF-8"), 0, responseBytes, 0, reply.length());
int idx = reply.length();
responseBytes[idx] = (byte) '\r';
responseBytes[idx + 1] = (byte) '\n';
request.getResponseContent().write(ByteBuffer.wrap(responseBytes));
return request;
}
Example usage of com.linkedin.databus.core.data_model.PhysicalPartition in the LinkedIn databus project.
From the class HttpRelay, method dropDatabase:
/**
 * Drops a database from the relay: removes its schemas from the registry,
 * then closes and removes every event buffer whose physical partition belongs
 * to the named database, finally releasing the removed buffers' resources.
 *
 * @param dbName name of the database to drop
 * @throws DatabusException propagated from the schema registry
 */
@Override
public void dropDatabase(String dbName) throws DatabusException {
  _schemaRegistryService.dropDatabase(dbName);
  DbusEventBufferMult bufferMult = getEventBuffer();
  // Close and unregister every buffer belonging to this database.
  for (DbusEventBuffer buffer : bufferMult.bufIterable()) {
    PhysicalPartition partition = buffer.getPhysicalPartition();
    if (!partition.getName().equals(dbName)) {
      continue;
    }
    buffer.closeBuffer(false);
    buffer.removeMMapFiles();
    bufferMult.removeBuffer(new PhysicalPartitionKey(partition), null);
  }
  bufferMult.deallocateRemovedBuffers(true);
}
Aggregations