Use of com.linkedin.databus.client.pub.ClusterCheckpointPersistenceProvider.ClusterCheckpointException in the project databus by LinkedIn.
Example from the class DatabusV2ClusterRegistrationImpl, method activateOnePartition.
/**
 * Callback to activate one partition that was added to this cluster registration.
 * Creates the partition's consumer callbacks and optional server-side filter, builds a
 * child registration with its own checkpoint persistence provider, starts it, and merges
 * the partition's stats collectors into the cluster-level aggregates.
 * Idempotent: if the partition is already present in {@code regMap} the call is a no-op.
 *
 * @param partition the partition to activate
 * @throws DatabusClientException if the consumer factory returns no consumers, the child
 *         registration fails to start, or an underlying {@code DatabusException} /
 *         {@code ClusterCheckpointException} occurs (wrapped, cause preserved)
 */
private synchronized void activateOnePartition(DbusPartitionInfo partition) throws DatabusClientException {
    _log.info("Trying to activate partition :" + partition);
    try {
        // No-op if this partition is already active (activation callbacks can repeat).
        if (regMap.containsKey(partition)) {
            _log.info("Partition (" + partition + ") is already added and is currently in state : " + regMap.get(partition).getState() + " skipping !!");
            return;
        }

        // Call factories to get consumer callbacks and server-side filter config.
        Collection<DatabusCombinedConsumer> consumers = _consumerFactory.createPartitionedConsumers(_clusterInfo, partition);
        DbusKeyCompositeFilterConfig filterConfig = null;
        if (_serverSideFilterFactory != null) {
            filterConfig = _serverSideFilterFactory.createServerSideFilter(_clusterInfo, partition);
        }

        if ((null == consumers) || (consumers.isEmpty())) {
            _log.error("ConsumerFactory for cluster (" + _clusterInfo + ") returned null or empty consumers ");
            throw new DatabusClientException("ConsumerFactory for cluster (" + _clusterInfo + ") returned null or empty consumers");
        }

        // Create the child registration, keyed by "<clusterRegId>-<partitionId>".
        RegistrationId id = new RegistrationId(_id + "-" + partition.getPartitionId());
        CheckpointPersistenceProvider ckptProvider = createCheckpointPersistenceProvider(partition);
        DatabusV2RegistrationImpl reg = createChildRegistration(id, _client, ckptProvider);
        reg.addDatabusConsumers(consumers);

        String[] srcs = _sources.toArray(new String[_sources.size()]);
        reg.addSubscriptions(srcs);

        // Record the registration before starting it so the partition is tracked even if
        // a later step throws; onRegister runs the child registration's registration hook.
        regMap.put(partition, reg);
        reg.onRegister();

        // Add server-side filter, if the factory produced one.
        if (null != filterConfig) {
            reg.withServerSideFilter(filterConfig);
        }

        // Notify listener before start so it observes the not-yet-started registration.
        if (null != _partitionListener) {
            _partitionListener.onAddPartition(partition, reg);
        }

        // Start the registration; log with partition context before re-throwing as-is.
        try {
            reg.start();
        } catch (DatabusClientException e) {
            _log.error("Got exception while starting the registration for partition (" + partition + ")", e);
            throw e;
        }

        // Add partition-specific metrics to the cluster-level merged collectors.
        _relayEventStatsMerger.addStatsCollector(id.getId(), (DbusEventsStatisticsCollector) reg.getRelayEventStats());
        _bootstrapEventStatsMerger.addStatsCollector(id.getId(), (DbusEventsStatisticsCollector) reg.getBootstrapEventStats());
        _relayCallbackStatsMerger.addStatsCollector(id.getId(), (ConsumerCallbackStats) reg.getRelayCallbackStats());
        _bootstrapCallbackStatsMerger.addStatsCollector(id.getId(), (ConsumerCallbackStats) reg.getBootstrapCallbackStats());

        _log.info("Partition (" + partition + ") started !!");
    } catch (DatabusException e) {
        // Wrap in the exception type declared to callers; cause is preserved.
        _log.error("Got exception while activating partition(" + partition + ")", e);
        throw new DatabusClientException(e);
    } catch (ClusterCheckpointException e) {
        // Checkpoint-store failures are likewise surfaced as DatabusClientException.
        _log.error("Got exception while activating partition(" + partition + ")", e);
        throw new DatabusClientException(e);
    }
}
Use of com.linkedin.databus.client.pub.ClusterCheckpointPersistenceProvider.ClusterCheckpointException in the project databus by LinkedIn.
Example from the test class TestClusterCheckpointPersistenceProvider, method testClusterCheckpointPersistence.
/**
 * Verifies that a checkpoint stored via ClusterCheckpointPersistenceProvider can be
 * loaded back for the same source list with its SCN, offset and consumption mode intact.
 * Requires a reachable ZooKeeper at {@code zkAddr}; the helix cluster is created up
 * front and closed in the finally block.
 */
@Test
public void testClusterCheckpointPersistence() {
    // Build a checkpoint with distinctive, verifiable field values.
    Checkpoint cp = new Checkpoint();
    cp.setWindowScn(50532L);
    cp.setWindowOffset(-1);
    cp.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);

    String id = "4";
    String clusterName = "test-cluster-persistence";
    ClusterCheckpointPersistenceProvider.createCluster(zkAddr, clusterName);

    ClusterCheckpointPersistenceProvider.Config conf = new ClusterCheckpointPersistenceProvider.Config();
    conf.setClusterName(clusterName);
    conf.setZkAddr(zkAddr);

    ArrayList<String> sources = new ArrayList<String>(3);
    sources.add("source1");
    sources.add("source2");
    sources.add("source3");

    try {
        ClusterCheckpointPersistenceProvider ccp = new ClusterCheckpointPersistenceProvider(id, conf);
        ccp.storeCheckpoint(sources, cp);

        // Round-trip: the loaded checkpoint must match what was stored.
        Checkpoint newCp = ccp.loadCheckpoint(sources);
        Assert.assertNotNull("Loaded checkpoint should not be null", newCp);
        Assert.assertEquals("Window offset mismatch", cp.getWindowOffset(), newCp.getWindowOffset());
        Assert.assertEquals("Window SCN mismatch", cp.getWindowScn(), newCp.getWindowScn());
        Assert.assertEquals("Consumption mode mismatch", cp.getConsumptionMode(), newCp.getConsumptionMode());
    } catch (InvalidConfigException e) {
        // fail() keeps the exception detail in the test report instead of a bare assertion failure.
        Assert.fail("Invalid config: " + e);
    } catch (IOException e) {
        Assert.fail("Error storing checkpoint: " + e);
    } catch (ClusterCheckpointException e) {
        Assert.fail("Checkpoint persistence error: " + e);
    } finally {
        // Always release the helix cluster connection, even on failure.
        ClusterCheckpointPersistenceProvider.close(clusterName);
    }
}
Aggregations