Use of org.apache.helix.NotificationContext in project pinot by linkedin.
The class HybridClusterIntegrationTest, method setUp.
@BeforeClass
public void setUp() throws Exception {
  // Clean up
  ensureDirectoryExistsAndIsEmpty(_tmpDir);
  ensureDirectoryExistsAndIsEmpty(_segmentDir);
  ensureDirectoryExistsAndIsEmpty(_tarDir);
  tableName = TABLE_NAME;
  // Start Zk, Kafka and Pinot
  startHybridCluster(10);
  // Unpack the Avro files
  TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))), _tmpDir);
  _tmpDir.mkdirs();
  final List<File> avroFiles = getAllAvroFiles();
  File schemaFile = getSchemaFile();
  schema = Schema.fromFile(schemaFile);
  addSchema(schemaFile, schema.getSchemaName());
  final List<String> invertedIndexColumns = makeInvertedIndexColumns();
  final String sortedColumn = makeSortedColumn();
  // Create Pinot table
  addHybridTable(tableName, "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC, schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn, invertedIndexColumns, null, false);
  LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = " + invertedIndexColumns);
  // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
  final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
  final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);
  // Load data into H2
  ExecutorService executor = Executors.newCachedThreadPool();
  setupH2AndInsertAvro(avroFiles, executor);
  // Create segments from Avro data
  LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
  buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, tableName, false, null);
  // Initialize query generator
  setupQueryGenerator(avroFiles, executor);
  executor.shutdown();
  executor.awaitTermination(10, TimeUnit.MINUTES);
  // Set up a Helix spectator to count the uploaded segments and unlock the latch once all offline segments are online
  final CountDownLatch latch = new CountDownLatch(1);
  HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance", InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
  manager.connect();
  manager.addExternalViewChangeListener(new ExternalViewChangeListener() {

    @Override
    public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
      for (ExternalView externalView : externalViewList) {
        if (externalView.getId().contains(tableName)) {
          Set<String> partitionSet = externalView.getPartitionSet();
          if (partitionSet.size() == offlineSegmentCount) {
            int onlinePartitionCount = 0;
            for (String partitionId : partitionSet) {
              Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
              if (partitionStateMap.containsValue("ONLINE")) {
                onlinePartitionCount++;
              }
            }
            if (onlinePartitionCount == offlineSegmentCount) {
              // System.out.println("Got " + offlineSegmentCount + " online tables, unlatching the main thread");
              latch.countDown();
            }
          }
        }
      }
    }
  });
  // Upload the segments
  int i = 0;
  for (String segmentName : _tarDir.list()) {
    // System.out.println("Uploading segment " + (i++) + " : " + segmentName);
    File file = new File(_tarDir, segmentName);
    FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
  }
  // Wait for all offline segments to be online
  latch.await();
  // Load realtime data into Kafka
  LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
  pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);
  // Wait until the Pinot record count matches the number of records in the Avro files
  int pinotRecordCount, h2RecordCount;
  long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
  Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
  statement.execute("select count(*) from " + tableName);
  ResultSet rs = statement.getResultSet();
  rs.first();
  h2RecordCount = rs.getInt(1);
  rs.close();
  waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
Use of org.apache.helix.NotificationContext in project incubator-gobblin by apache.
The class GobblinServiceManager, method start.
@Override
public void start() throws ApplicationException {
  LOGGER.info("[Init] Starting the Gobblin Service Manager");
  if (this.helixManager.isPresent()) {
    connectHelixManager();
  }
  this.eventBus.register(this);
  this.serviceLauncher.start();
  if (this.helixManager.isPresent()) {
    // Subscribe to leadership changes
    this.helixManager.get().addControllerListener(new ControllerChangeListener() {

      @Override
      public void onControllerChange(NotificationContext changeContext) {
        handleLeadershipChange(changeContext);
      }
    });
    // Update for the first time since there might be no notification
    if (helixManager.get().isLeader()) {
      if (this.isSchedulerEnabled) {
        LOGGER.info("[Init] Gobblin Service is running in master instance mode, enabling Scheduler.");
        this.scheduler.setActive(true);
      }
      if (this.isGitConfigMonitorEnabled) {
        this.gitConfigMonitor.setActive(true);
      }
    } else {
      if (this.isSchedulerEnabled) {
        LOGGER.info("[Init] Gobblin Service is running in slave instance mode, not enabling Scheduler.");
      }
    }
  } else {
    // No Helix manager, hence standalone service instance
    // .. designate scheduler to itself
    LOGGER.info("[Init] Gobblin Service is running in single instance mode, enabling Scheduler.");
    this.scheduler.setActive(true);
    if (this.isGitConfigMonitorEnabled) {
      this.gitConfigMonitor.setActive(true);
    }
  }
  // This has to be done after the topologyCatalog service is launched
  if (this.isTopologySpecFactoryEnabled) {
    Collection<TopologySpec> topologySpecs = this.topologySpecFactory.getTopologies();
    for (TopologySpec topologySpec : topologySpecs) {
      this.topologyCatalog.put(topologySpec);
    }
  }
  // This has to be done after topologySpecFactory has updated the spec store, so that listeners will have the latest updates.
  if (isSchedulerEnabled) {
    this.topologyCatalog.addListener(this.orchestrator);
  }
  // Notify that topologyCatalog now has the right information
  this.topologyCatalog.getInitComplete().countDown();
}
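The handleLeadershipChange method invoked from the ControllerChangeListener above is not shown in this snippet. A hedged sketch of what such a handler could look like follows, assuming it only needs to flip the scheduler and git-monitor active flags based on HelixManager.isLeader(); the method name and body are an illustration, not the actual Gobblin implementation.

// Hypothetical leadership-change handler (not the Gobblin source).
// NotificationContext says why the callback fired; isLeader() says whether this
// instance currently holds the Helix controller leadership.
private void handleLeadershipChangeSketch(NotificationContext changeContext) {
  if (changeContext.getType() == NotificationContext.Type.FINALIZE) {
    return; // listener removed or manager disconnecting
  }
  boolean isLeader = this.helixManager.get().isLeader();
  if (this.isSchedulerEnabled) {
    // Only the leader runs the scheduler; followers keep it inactive.
    this.scheduler.setActive(isLeader);
  }
  if (this.isGitConfigMonitorEnabled) {
    this.gitConfigMonitor.setActive(isLeader);
  }
}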
Use of org.apache.helix.NotificationContext in project helix by apache.
The class TestDistControllerElection, method testControllerParticipant.
@Test()
public void testControllerParticipant() throws Exception {
  String className = getShortClassName();
  LOG.info("RUN " + className + " at " + new Date(System.currentTimeMillis()));
  final String clusterName = CONTROLLER_CLUSTER_PREFIX + "_" + className + "_" + "testControllerParticipant";
  String path = "/" + clusterName;
  if (_gZkClient.exists(path)) {
    _gZkClient.deleteRecursively(path);
  }
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  TestHelper.setupEmptyCluster(_gZkClient, clusterName);
  final String controllerName = "controller_0";
  HelixManager manager = new MockZKHelixManager(clusterName, controllerName, InstanceType.CONTROLLER_PARTICIPANT, _gZkClient);
  GenericHelixController controller0 = new GenericHelixController();
  List<HelixTimerTask> timerTasks = Collections.emptyList();
  DistributedLeaderElection election = new DistributedLeaderElection(manager, controller0, timerTasks);
  NotificationContext context = new NotificationContext(manager);
  context.setType(NotificationContext.Type.CALLBACK);
  election.onControllerChange(context);
  LiveInstance liveInstance = accessor.getProperty(keyBuilder.controllerLeader());
  AssertJUnit.assertEquals(controllerName, liveInstance.getInstanceName());
  // path = PropertyPathConfig.getPath(PropertyType.LEADER, clusterName);
  // ZNRecord leaderRecord = _gZkClient.<ZNRecord> readData(path);
  // AssertJUnit.assertEquals(controllerName, leaderRecord.getSimpleField("LEADER"));
  // AssertJUnit.assertNotNull(election.getController());
  // AssertJUnit.assertNotNull(election.getLeader());
  manager = new MockZKHelixManager(clusterName, "controller_1", InstanceType.CONTROLLER_PARTICIPANT, _gZkClient);
  GenericHelixController controller1 = new GenericHelixController();
  election = new DistributedLeaderElection(manager, controller1, timerTasks);
  context = new NotificationContext(manager);
  context.setType(NotificationContext.Type.CALLBACK);
  election.onControllerChange(context);
  liveInstance = accessor.getProperty(keyBuilder.controllerLeader());
  AssertJUnit.assertEquals(controllerName, liveInstance.getInstanceName());
  // leaderRecord = _gZkClient.<ZNRecord> readData(path);
  // AssertJUnit.assertEquals(controllerName, leaderRecord.getSimpleField("LEADER"));
  // AssertJUnit.assertNull(election.getController());
  // AssertJUnit.assertNull(election.getLeader());
  LOG.info("END " + getShortClassName() + " at " + new Date(System.currentTimeMillis()));
}
Use of org.apache.helix.NotificationContext in project helix by apache.
The class TestDistControllerElection, method testParticipant.
@Test()
public void testParticipant() throws Exception {
  String className = getShortClassName();
  LOG.info("RUN " + className + " at " + new Date(System.currentTimeMillis()));
  final String clusterName = CLUSTER_PREFIX + "_" + className + "_" + "testParticipant";
  String path = "/" + clusterName;
  if (_gZkClient.exists(path)) {
    _gZkClient.deleteRecursively(path);
  }
  TestHelper.setupEmptyCluster(_gZkClient, clusterName);
  final String controllerName = "participant_0";
  HelixManager manager = new MockZKHelixManager(clusterName, controllerName, InstanceType.PARTICIPANT, _gZkClient);
  GenericHelixController participant0 = new GenericHelixController();
  List<HelixTimerTask> timerTasks = Collections.emptyList();
  DistributedLeaderElection election = new DistributedLeaderElection(manager, participant0, timerTasks);
  NotificationContext context = new NotificationContext(manager);
  context.setType(NotificationContext.Type.INIT);
  election.onControllerChange(context);
  path = PropertyPathBuilder.controllerLeader(clusterName);
  ZNRecord leaderRecord = _gZkClient.<ZNRecord>readData(path, true);
  AssertJUnit.assertNull(leaderRecord);
  // AssertJUnit.assertNull(election.getController());
  // AssertJUnit.assertNull(election.getLeader());
}
Use of org.apache.helix.NotificationContext in project helix by apache.
The class TestDistControllerElection, method testController.
@Test()
public void testController() throws Exception {
  System.out.println("START TestDistControllerElection at " + new Date(System.currentTimeMillis()));
  String className = getShortClassName();
  final String clusterName = CLUSTER_PREFIX + "_" + className + "_" + "testController";
  String path = "/" + clusterName;
  if (_gZkClient.exists(path)) {
    _gZkClient.deleteRecursively(path);
  }
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  TestHelper.setupEmptyCluster(_gZkClient, clusterName);
  final String controllerName = "controller_0";
  HelixManager manager = new MockZKHelixManager(clusterName, controllerName, InstanceType.CONTROLLER, _gZkClient);
  GenericHelixController controller0 = new GenericHelixController();
  List<HelixTimerTask> timerTasks = Collections.emptyList();
  DistributedLeaderElection election = new DistributedLeaderElection(manager, controller0, timerTasks);
  NotificationContext context = new NotificationContext(manager);
  context.setType(NotificationContext.Type.INIT);
  election.onControllerChange(context);
  // path = PropertyPathConfig.getPath(PropertyType.LEADER, clusterName);
  // ZNRecord leaderRecord = _gZkClient.<ZNRecord> readData(path);
  LiveInstance liveInstance = accessor.getProperty(keyBuilder.controllerLeader());
  AssertJUnit.assertEquals(controllerName, liveInstance.getInstanceName());
  // AssertJUnit.assertNotNull(election.getController());
  // AssertJUnit.assertNull(election.getLeader());
  manager = new MockZKHelixManager(clusterName, "controller_1", InstanceType.CONTROLLER, _gZkClient);
  GenericHelixController controller1 = new GenericHelixController();
  election = new DistributedLeaderElection(manager, controller1, timerTasks);
  context = new NotificationContext(manager);
  context.setType(NotificationContext.Type.INIT);
  election.onControllerChange(context);
  // leaderRecord = _gZkClient.<ZNRecord> readData(path);
  liveInstance = accessor.getProperty(keyBuilder.controllerLeader());
  AssertJUnit.assertEquals(controllerName, liveInstance.getInstanceName());
  // AssertJUnit.assertNull(election.getController());
  // AssertJUnit.assertNull(election.getLeader());
  System.out.println("END TestDistControllerElection at " + new Date(System.currentTimeMillis()));
}
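All three Helix tests above drive DistributedLeaderElection the same way: they build a NotificationContext by hand, set its type, and call onControllerChange directly instead of waiting for a real ZooKeeper callback. A minimal helper capturing that shared pattern might look like the sketch below; the helper name is ours and is not part of the Helix test class.

// Hypothetical helper illustrating the pattern used by the tests above:
// fire a controller-change callback against the election object by hand.
private static void fireControllerChange(DistributedLeaderElection election, HelixManager manager,
    NotificationContext.Type type) {
  NotificationContext context = new NotificationContext(manager);
  context.setType(type); // INIT for the first registration, CALLBACK for subsequent changes
  election.onControllerChange(context);
}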