Example 1 with HelixManager

Use of org.apache.helix.HelixManager in project pinot by linkedin.

From the class LLCSegmentCommitTest, the method createMockHelixManager:

HelixManager createMockHelixManager() {
    HelixManager helixManager = mock(HelixManager.class);
    when(helixManager.isLeader()).thenReturn(true);
    return helixManager;
}
Also used: HelixManager (org.apache.helix.HelixManager)

Example 2 with HelixManager

Use of org.apache.helix.HelixManager in project pinot by linkedin.

From the class SegmentCompletionTest, the method createMockHelixManager:

private static HelixManager createMockHelixManager(boolean isLeader) {
    HelixManager helixManager = mock(HelixManager.class);
    when(helixManager.isLeader()).thenReturn(isLeader);
    return helixManager;
}
Also used: HelixManager (org.apache.helix.HelixManager)
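
Both mocks stub only isLeader() and leave every other HelixManager method at its Mockito default. A minimal, self-contained sketch that exercises the parameterized variant from Example 2 (the test class and method names here are illustrative, not taken from Pinot):

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.helix.HelixManager;
import org.junit.Test;

public class MockHelixManagerSketch {

    private static HelixManager createMockHelixManager(boolean isLeader) {
        // Only the leadership check is stubbed; unstubbed methods return
        // Mockito defaults (null, false, or 0).
        HelixManager helixManager = mock(HelixManager.class);
        when(helixManager.isLeader()).thenReturn(isLeader);
        return helixManager;
    }

    @Test
    public void leadershipFlagIsStubbed() {
        assertTrue(createMockHelixManager(true).isLeader());
        assertFalse(createMockHelixManager(false).isLeader());
    }
}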

Example 3 with HelixManager

Use of org.apache.helix.HelixManager in project pinot by linkedin.

From the class HybridClusterIntegrationTest, the method setUp:

@BeforeClass
public void setUp() throws Exception {
    // Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);
    tableName = TABLE_NAME;
    // Start Zk, Kafka and Pinot
    startHybridCluster(10);
    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))), _tmpDir);
    _tmpDir.mkdirs();
    final List<File> avroFiles = getAllAvroFiles();
    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();
    // Create Pinot table
    addHybridTable(tableName, "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC, schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn, invertedIndexColumns, null, false);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = " + invertedIndexColumns);
    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);
    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, tableName, false, null);
    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator that unlatches once all offline segments are online in the external view
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance", InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {

        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains(tableName)) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });
    // Upload the segments
    for (String segmentName : _tarDir.list()) {
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all offline segments to be online
    latch.await();
    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);
    // Wait until the Pinot record count matches the number of records in the Avro files
    int h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from " + tableName);
    ResultSet rs = statement.getResultSet();
    rs.next();
    h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
Also used: ExternalView (org.apache.helix.model.ExternalView), HelixManager (org.apache.helix.HelixManager), ResultSet (java.sql.ResultSet), Set (java.util.Set), Statement (java.sql.Statement), CountDownLatch (java.util.concurrent.CountDownLatch), NotificationContext (org.apache.helix.NotificationContext), ExecutorService (java.util.concurrent.ExecutorService), ExternalViewChangeListener (org.apache.helix.ExternalViewChangeListener), File (java.io.File), Map (java.util.Map), BeforeClass (org.testng.annotations.BeforeClass)
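
The spectator-plus-latch idiom buried in the middle of setUp is reusable on its own: connect a SPECTATOR-mode HelixManager, watch the external view, and release a latch once every expected partition reports ONLINE. A distilled sketch, assuming a reachable ZooKeeper and an existing cluster (the class, method, and instance names are placeholders, not Pinot code):

import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;

import org.apache.helix.ExternalViewChangeListener;
import org.apache.helix.HelixManager;
import org.apache.helix.HelixManagerFactory;
import org.apache.helix.InstanceType;
import org.apache.helix.NotificationContext;
import org.apache.helix.model.ExternalView;

public class WaitForOnlineSegments {

    /** Blocks until every expected partition of the resource reports ONLINE somewhere. */
    static void awaitAllOnline(String zkAddress, String clusterName, String resourceName,
            int expectedPartitions) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        HelixManager spectator = HelixManagerFactory.getZKHelixManager(clusterName,
                "waiter_instance", InstanceType.SPECTATOR, zkAddress);
        spectator.connect();
        try {
            spectator.addExternalViewChangeListener(new ExternalViewChangeListener() {
                @Override
                public void onExternalViewChange(List<ExternalView> externalViewList,
                        NotificationContext changeContext) {
                    for (ExternalView view : externalViewList) {
                        if (!view.getId().contains(resourceName)) {
                            continue;
                        }
                        // Count partitions with at least one ONLINE replica
                        int online = 0;
                        for (String partition : view.getPartitionSet()) {
                            Map<String, String> stateMap = view.getStateMap(partition);
                            if (stateMap.containsValue("ONLINE")) {
                                online++;
                            }
                        }
                        if (online == expectedPartitions) {
                            latch.countDown();
                        }
                    }
                }
            });
            latch.await();
        } finally {
            spectator.disconnect();
        }
    }
}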

Example 4 with HelixManager

Use of org.apache.helix.HelixManager in project ambry by linkedin.

From the class HelixParticipantTest, the method testSetReplicaSealedState:

/**
 * Tests setReplicaSealedState method for {@link HelixParticipant}
 * @throws IOException
 */
@Test
public void testSetReplicaSealedState() throws IOException {
    // setup HelixParticipant and dependencies
    String partitionIdStr = "somePartitionId";
    String partitionIdStr2 = "someOtherPartitionId";
    ReplicaId replicaId = createMockAmbryReplica(partitionIdStr);
    ReplicaId replicaId2 = createMockAmbryReplica(partitionIdStr2);
    String hostname = "localhost";
    int port = 2200;
    String instanceName = ClusterMapUtils.getInstanceName(hostname, port);
    HelixParticipant helixParticipant = new HelixParticipant(new ClusterMapConfig(new VerifiableProperties(props)), helixManagerFactory);
    helixParticipant.initialize(hostname, port, Collections.EMPTY_LIST);
    HelixManager helixManager = helixManagerFactory.getZKHelixManager(null, null, null, null);
    HelixAdmin helixAdmin = helixManager.getClusterManagmentTool();
    InstanceConfig instanceConfig = new InstanceConfig("someInstanceId");
    helixAdmin.setInstanceConfig(clusterName, instanceName, instanceConfig);
    // Make sure the current sealedReplicas list is null
    List<String> sealedReplicas = ClusterMapUtils.getSealedReplicas(instanceConfig);
    assertNull("sealedReplicas is not null", sealedReplicas);
    String listName = "sealedReplicas";
    // Check that invoking setReplicaSealedState with a non-AmbryReplica ReplicaId throws an IllegalArgumentException
    ReplicaId notAmbryReplica = createMockNotAmbryReplica(partitionIdStr);
    try {
        helixParticipant.setReplicaSealedState(notAmbryReplica, true);
        fail("Expected an IllegalArgumentException here");
    } catch (IllegalArgumentException e) {
    // Expected exception
    }
    // Check that invoking setReplicaSealedState adds the partition to the list of sealed replicas
    helixParticipant.setReplicaSealedState(replicaId, true);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 1, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Seal another replicaId
    helixParticipant.setReplicaSealedState(replicaId2, true);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 2, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Check that the sealed replica list doesn't take duplicates (and that dups are detected by partitionId
    // comparison, not replicaId object comparison)
    ReplicaId dup = createMockAmbryReplica(partitionIdStr);
    helixParticipant.setReplicaSealedState(dup, true);
    helixParticipant.setReplicaSealedState(replicaId2, true);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 2, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Check that invoking setReplicaSealedState with isSealed == false removes partition from list of sealed replicas
    helixParticipant.setReplicaSealedState(replicaId, false);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 1, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertFalse(sealedReplicas.contains(partitionIdStr));
    // Removing a replicaId that's already been removed doesn't hurt anything
    helixParticipant.setReplicaSealedState(replicaId, false);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 1, listName);
    // Removing all replicas yields expected behavior (and removal works by partitionId, not replicaId itself)
    dup = createMockAmbryReplica(partitionIdStr2);
    helixParticipant.setReplicaSealedState(dup, false);
    sealedReplicas = ClusterMapUtils.getSealedReplicas(helixAdmin.getInstanceConfig(clusterName, instanceName));
    listIsExpectedSize(sealedReplicas, 0, listName);
}
Also used: HelixManager (org.apache.helix.HelixManager), InstanceConfig (org.apache.helix.model.InstanceConfig), VerifiableProperties (com.github.ambry.config.VerifiableProperties), HelixAdmin (org.apache.helix.HelixAdmin), ClusterMapConfig (com.github.ambry.config.ClusterMapConfig), Test (org.junit.Test)
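
Under the covers, the sealed-replica list is a list field on the InstanceConfig's ZNRecord, which explains the de-duplication and idempotent-removal behavior the test asserts. A minimal sketch of that read-modify-write cycle against a HelixAdmin (the "SEALED" key and the helper name are assumptions for illustration; Ambry defines its own constant and helpers):

import java.util.ArrayList;
import java.util.List;

import org.apache.helix.HelixAdmin;
import org.apache.helix.model.InstanceConfig;

public class SealedListSketch {

    // Hypothetical key; Ambry keeps its own constant for this field name.
    private static final String SEALED_KEY = "SEALED";

    /** Adds or removes a partition id in the instance's sealed-replica list. */
    static void setSealed(HelixAdmin admin, String clusterName, String instanceName,
            String partitionId, boolean seal) {
        InstanceConfig config = admin.getInstanceConfig(clusterName, instanceName);
        List<String> sealed = config.getRecord().getListField(SEALED_KEY);
        if (sealed == null) {
            sealed = new ArrayList<>();
        }
        if (seal) {
            if (!sealed.contains(partitionId)) {
                sealed.add(partitionId); // de-duplicated by partition id, not object identity
            }
        } else {
            sealed.remove(partitionId); // removing an absent id is a harmless no-op
        }
        config.getRecord().setListField(SEALED_KEY, sealed);
        admin.setInstanceConfig(clusterName, instanceName, config);
    }
}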

Example 5 with HelixManager

Use of org.apache.helix.HelixManager in project helix by apache.

From the class MessageGenerationPhase, the method process:

@Override
public void process(ClusterEvent event) throws Exception {
    HelixManager manager = event.getAttribute(AttributeName.helixmanager.name());
    ClusterDataCache cache = event.getAttribute(AttributeName.ClusterDataCache.name());
    Map<String, Resource> resourceMap = event.getAttribute(AttributeName.RESOURCES_TO_REBALANCE.name());
    Map<String, List<Message>> pendingMessagesToCleanUp = new HashMap<>();
    CurrentStateOutput currentStateOutput = event.getAttribute(AttributeName.CURRENT_STATE.name());
    IntermediateStateOutput intermediateStateOutput = event.getAttribute(AttributeName.INTERMEDIATE_STATE.name());
    if (manager == null || cache == null || resourceMap == null || currentStateOutput == null || intermediateStateOutput == null) {
        throw new StageException("Missing attributes in event:" + event + ". Requires HelixManager|DataCache|RESOURCES|CURRENT_STATE|INTERMEDIATE_STATE");
    }
    Map<String, LiveInstance> liveInstances = cache.getLiveInstances();
    Map<String, String> sessionIdMap = new HashMap<String, String>();
    for (LiveInstance liveInstance : liveInstances.values()) {
        sessionIdMap.put(liveInstance.getInstanceName(), liveInstance.getSessionId());
    }
    MessageGenerationOutput output = new MessageGenerationOutput();
    for (String resourceName : resourceMap.keySet()) {
        Resource resource = resourceMap.get(resourceName);
        StateModelDefinition stateModelDef = cache.getStateModelDef(resource.getStateModelDefRef());
        if (stateModelDef == null) {
            logger.error("State Model Definition null, skip generating messages for resource: " + resourceName);
            continue;
        }
        for (Partition partition : resource.getPartitions()) {
            Map<String, String> instanceStateMap = new HashMap<String, String>(intermediateStateOutput.getInstanceStateMap(resourceName, partition));
            Map<String, String> pendingStateMap = currentStateOutput.getPendingStateMap(resourceName, partition);
            for (String instance : pendingStateMap.keySet()) {
                if (!instanceStateMap.containsKey(instance)) {
                    instanceStateMap.put(instance, NO_DESIRED_STATE);
                }
            }
            // we should generate message based on the desired-state priority
            // so keep generated messages in a temp map keyed by state
            // desired-state->list of generated-messages
            Map<String, List<Message>> messageMap = new HashMap<String, List<Message>>();
            for (String instanceName : instanceStateMap.keySet()) {
                String desiredState = instanceStateMap.get(instanceName);
                String currentState = currentStateOutput.getCurrentState(resourceName, partition, instanceName);
                if (currentState == null) {
                    currentState = stateModelDef.getInitialState();
                }
                Message pendingMessage = currentStateOutput.getPendingState(resourceName, partition, instanceName);
                boolean isCancellationEnabled = cache.getClusterConfig().isStateTransitionCancelEnabled();
                Message cancellationMessage = currentStateOutput.getCancellationState(resourceName, partition, instanceName);
                String nextState = stateModelDef.getNextStateForTransition(currentState, desiredState);
                Message message = null;
                if (shouldCleanUpPendingMessage(pendingMessage, currentState, currentStateOutput.getEndTime(resourceName, partition, instanceName))) {
                    logger.info("Adding pending message {} on instance {} to clean up. Msg: {}->{}, current state of resource {}:{} is {}", pendingMessage.getMsgId(), instanceName, pendingMessage.getFromState(), pendingMessage.getToState(), resourceName, partition, currentState);
                    if (!pendingMessagesToCleanUp.containsKey(instanceName)) {
                        pendingMessagesToCleanUp.put(instanceName, new ArrayList<Message>());
                    }
                    pendingMessagesToCleanUp.get(instanceName).add(pendingMessage);
                }
                if (desiredState.equals(NO_DESIRED_STATE) || desiredState.equalsIgnoreCase(currentState)) {
                    if (desiredState.equals(NO_DESIRED_STATE) || pendingMessage != null && !currentState.equalsIgnoreCase(pendingMessage.getToState())) {
                        message = createStateTransitionCancellationMessage(manager, resource, partition.getPartitionName(), instanceName, sessionIdMap.get(instanceName), stateModelDef.getId(), pendingMessage.getFromState(), pendingMessage.getToState(), null, cancellationMessage, isCancellationEnabled, currentState);
                    }
                } else {
                    if (nextState == null) {
                        logger.error("Unable to find a next state for resource: " + resource.getResourceName() + " partition: " + partition.getPartitionName() + " from stateModelDefinition" + stateModelDef.getClass() + " from:" + currentState + " to:" + desiredState);
                        continue;
                    }
                    if (pendingMessage != null) {
                        String pendingState = pendingMessage.getToState();
                        if (nextState.equalsIgnoreCase(pendingState)) {
                            logger.debug("Message already exists for " + instanceName + " to transit " + resource.getResourceName() + "." + partition.getPartitionName() + " from " + currentState + " to " + nextState);
                        } else if (currentState.equalsIgnoreCase(pendingState)) {
                            logger.info("Message hasn't been removed for " + instanceName + " to transit " + resource.getResourceName() + "." + partition.getPartitionName() + " to " + pendingState + ", desiredState: " + desiredState);
                        } else {
                            logger.info("IdealState changed before state transition completes for " + resource.getResourceName() + "." + partition.getPartitionName() + " on " + instanceName + ", pendingState: " + pendingState + ", currentState: " + currentState + ", nextState: " + nextState);
                            message = createStateTransitionCancellationMessage(manager, resource, partition.getPartitionName(), instanceName, sessionIdMap.get(instanceName), stateModelDef.getId(), pendingMessage.getFromState(), pendingState, nextState, cancellationMessage, isCancellationEnabled, currentState);
                        }
                    } else {
                        // Create new state transition message
                        message = createStateTransitionMessage(manager, resource, partition.getPartitionName(), instanceName, currentState, nextState, sessionIdMap.get(instanceName), stateModelDef.getId());
                    }
                }
                if (message != null) {
                    IdealState idealState = cache.getIdealState(resourceName);
                    if (idealState != null && idealState.getStateModelDefRef().equalsIgnoreCase(DefaultSchedulerMessageHandlerFactory.SCHEDULER_TASK_QUEUE)) {
                        if (idealState.getRecord().getMapField(partition.getPartitionName()) != null) {
                            message.getRecord().setMapField(Message.Attributes.INNER_MESSAGE.toString(), idealState.getRecord().getMapField(partition.getPartitionName()));
                        }
                    }
                    int timeout = getTimeOut(cache.getClusterConfig(), cache.getResourceConfig(resourceName), currentState, nextState, idealState, partition);
                    if (timeout > 0) {
                        message.setExecutionTimeout(timeout);
                    }
                    message.setAttribute(Message.Attributes.ClusterEventName, event.getEventType().name());
                    if (!messageMap.containsKey(desiredState)) {
                        messageMap.put(desiredState, new ArrayList<Message>());
                    }
                    messageMap.get(desiredState).add(message);
                }
            }
            // add generated messages to output according to state priority
            List<String> statesPriorityList = stateModelDef.getStatesPriorityList();
            for (String state : statesPriorityList) {
                if (messageMap.containsKey(state)) {
                    for (Message message : messageMap.get(state)) {
                        output.addMessage(resourceName, partition, message);
                    }
                }
            }
        }
    // end of for-each-partition
    }
    // Asynchronously clean up pending messages if necessary
    if (!pendingMessagesToCleanUp.isEmpty()) {
        schedulePendingMessageCleanUp(pendingMessagesToCleanUp, cache.getAsyncTasksThreadPool(), manager.getHelixDataAccessor());
    }
    event.addAttribute(AttributeName.MESSAGES_ALL.name(), output);
}
Also used: Partition (org.apache.helix.model.Partition), HelixManager (org.apache.helix.HelixManager), Message (org.apache.helix.model.Message), HashMap (java.util.HashMap), StageException (org.apache.helix.controller.pipeline.StageException), Resource (org.apache.helix.model.Resource), IdealState (org.apache.helix.model.IdealState), LiveInstance (org.apache.helix.model.LiveInstance), StateModelDefinition (org.apache.helix.model.StateModelDefinition), ArrayList (java.util.ArrayList), List (java.util.List)
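
The tail of process is what enforces ordering: messages are first grouped by desired state, then emitted following the state model's priority list, so higher-priority transitions (e.g. toward MASTER) are sent first. A distilled sketch of just that step (the class and method names are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.helix.model.Message;
import org.apache.helix.model.StateModelDefinition;

public class PriorityEmitSketch {

    /**
     * Flattens messages grouped by desired state into a single list ordered by
     * the state model's priority, mirroring the final loop of process() above.
     */
    static List<Message> orderByStatePriority(StateModelDefinition stateModelDef,
            Map<String, List<Message>> messagesByDesiredState) {
        List<Message> ordered = new ArrayList<>();
        for (String state : stateModelDef.getStatesPriorityList()) {
            List<Message> forState = messagesByDesiredState.get(state);
            if (forState != null) {
                ordered.addAll(forState);
            }
        }
        return ordered;
    }
}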

Aggregations

HelixManager (org.apache.helix.HelixManager): 105
Test (org.testng.annotations.Test): 44
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 35
ZNRecord (org.apache.helix.ZNRecord): 27
Message (org.apache.helix.model.Message): 23
PropertyKey (org.apache.helix.PropertyKey): 20
Date (java.util.Date): 18
ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor): 17
Builder (org.apache.helix.PropertyKey.Builder): 16
ArrayList (java.util.ArrayList): 14
HashMap (java.util.HashMap): 12
ExternalView (org.apache.helix.model.ExternalView): 11
NotificationContext (org.apache.helix.NotificationContext): 10
LiveInstance (org.apache.helix.model.LiveInstance): 10
IdealState (org.apache.helix.model.IdealState): 9
List (java.util.List): 8
Criteria (org.apache.helix.Criteria): 8
HelixAdmin (org.apache.helix.HelixAdmin): 8
ZKHelixManager (org.apache.helix.manager.zk.ZKHelixManager): 8
StringWriter (java.io.StringWriter): 7