Example 31 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project pinot by linkedin.

Source: class OfflineClusterIntegrationTest, method setUp.

@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);
    // Start the cluster
    startCluster();
    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);
    createTable();
    // Get the list of instances through the REST API
    URL url = new URL("http://" + ControllerTestUtils.DEFAULT_CONTROLLER_HOST + ":" + ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT + "/instances");
    InputStream inputStream = url.openConnection().getInputStream();
    String instanceApiResponseString = IOUtils.toString(inputStream);
    IOUtils.closeQuietly(inputStream);
    JSONObject instanceApiResponse = new JSONObject(instanceApiResponseString);
    JSONArray instanceArray = instanceApiResponse.getJSONArray("instances");
    HelixAdmin helixAdmin = new ZKHelixAdmin(new ZkClient(ZkStarter.DEFAULT_ZK_STR, 10000, 10000, new ZNRecordSerializer()));
    for (int i = 0; i < instanceArray.length(); i++) {
        String instance = instanceArray.getString(i);
        if (instance.startsWith("Server_")) {
            _serverServiceStatusCallback = new ServiceStatus.IdealStateAndExternalViewMatchServiceStatusCallback(helixAdmin, getHelixClusterName(), instance);
        }
        if (instance.startsWith("Broker_")) {
            _brokerServiceStatusCallback = new ServiceStatus.IdealStateAndExternalViewMatchServiceStatusCallback(helixAdmin, getHelixClusterName(), instance, Collections.singletonList("brokerResource"));
        }
    }
    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);
    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator that counts uploaded segments and unlocks the latch once SEGMENT_COUNT segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);
    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        //      System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all segments to be online
    latch.await();
    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs("mytable")) < TOTAL_DOCS) {
        //      System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) InputStream(java.io.InputStream) JSONArray(org.json.JSONArray) HelixAdmin(org.apache.helix.HelixAdmin) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) CountDownLatch(java.util.concurrent.CountDownLatch) URL(java.net.URL) JSONObject(org.json.JSONObject) ServiceStatus(com.linkedin.pinot.common.utils.ServiceStatus) ExecutorService(java.util.concurrent.ExecutorService) File(java.io.File) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer) BeforeClass(org.testng.annotations.BeforeClass)
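
The test above discovers instances through the controller REST endpoint; the same ZKHelixAdmin handle can also list them straight from Helix. The sketch below is not part of the original test: the ZooKeeper address and cluster name are placeholders, and only the ZKHelixAdmin/ZkClient construction mirrors the code above.

import java.util.List;

import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class ListInstancesSketch {
    public static void main(String[] args) {
        // Same construction as in the test: a ZkClient with the Helix ZNRecord serializer.
        ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, new ZNRecordSerializer());
        HelixAdmin helixAdmin = new ZKHelixAdmin(zkClient);
        try {
            // Instances can be read directly from Helix instead of the controller REST API.
            List<String> instances = helixAdmin.getInstancesInCluster("myCluster");
            for (String instance : instances) {
                // Pinot prefixes instance names with their role, e.g. "Server_..." or "Broker_...".
                System.out.println(instance);
            }
        } finally {
            // The ZkClient was created here, so it is closed here as well.
            zkClient.close();
        }
    }
}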

Example 32 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project pinot by linkedin.

Source: class BalanceNumSegmentAssignmentStrategyIntegrationTest, method setUp.

@BeforeClass
public void setUp() throws Exception {
    // Start zk and controller
    startZk();
    startController();
    // Start one server and one broker instance
    startServer();
    startBroker();
    // Create eight dummy server instances
    for (int i = 0; i < 8; ++i) {
        JSONObject serverInstance = new JSONObject();
        serverInstance.put("host", hostName);
        serverInstance.put("port", Integer.toString(basePort + i));
        serverInstance.put("tag", serverTenant);
        serverInstance.put("type", "server");
        sendPostRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forInstanceCreate(), serverInstance.toString());
    }
    // Create Helix connection
    _helixAdmin = new ZKHelixAdmin(ZkStarter.DEFAULT_ZK_STR);
    // BaseClass @BeforeMethod will set up the tables
}
Also used : ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) JSONObject(org.json.JSONObject) BeforeClass(org.testng.annotations.BeforeClass)
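
The _helixAdmin handle created here is what a balance check would use afterwards. Below is a minimal sketch of such a check, not taken from the test; the ZooKeeper address, cluster name, and table resource name are assumptions.

import java.util.HashMap;
import java.util.Map;

import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.IdealState;

public class SegmentSpreadSketch {
    public static void main(String[] args) {
        ZKHelixAdmin helixAdmin = new ZKHelixAdmin("localhost:2181");
        IdealState idealState = helixAdmin.getResourceIdealState("myCluster", "myTable_OFFLINE");
        // Count how many segments the ideal state assigns to each server instance.
        Map<String, Integer> segmentsPerInstance = new HashMap<>();
        for (String segment : idealState.getPartitionSet()) {
            for (String instance : idealState.getInstanceStateMap(segment).keySet()) {
                segmentsPerInstance.merge(instance, 1, Integer::sum);
            }
        }
        // With a balanced assignment strategy, these counts should be close to each other.
        segmentsPerInstance.forEach((instance, count) -> System.out.println(instance + " -> " + count));
    }
}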

Example 33 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project pinot by linkedin.

Source: class PerfBenchmarkDriver, method waitForExternalViewUpdate.

public static void waitForExternalViewUpdate(String zkAddress, final String clusterName, long timeoutInMilliseconds) {
    final ZKHelixAdmin helixAdmin = new ZKHelixAdmin(zkAddress);
    Verifier customVerifier = new Verifier() {

        @Override
        public boolean verify() {
            List<String> resourcesInCluster = helixAdmin.getResourcesInCluster(clusterName);
            LOGGER.info("Waiting for the cluster to be set up and indexes to be loaded on the servers" + new Timestamp(System.currentTimeMillis()));
            for (String resourceName : resourcesInCluster) {
                IdealState idealState = helixAdmin.getResourceIdealState(clusterName, resourceName);
                ExternalView externalView = helixAdmin.getResourceExternalView(clusterName, resourceName);
                if (idealState == null || externalView == null) {
                    return false;
                }
                Set<String> partitionSet = idealState.getPartitionSet();
                for (String partition : partitionSet) {
                    Map<String, String> instanceStateMapIS = idealState.getInstanceStateMap(partition);
                    Map<String, String> instanceStateMapEV = externalView.getStateMap(partition);
                    if (instanceStateMapIS == null || instanceStateMapEV == null) {
                        return false;
                    }
                    if (!instanceStateMapIS.equals(instanceStateMapEV)) {
                        return false;
                    }
                }
            }
            LOGGER.info("Cluster is ready to serve queries");
            return true;
        }
    };
    ClusterStateVerifier.verifyByPolling(customVerifier, timeoutInMilliseconds);
}
Also used : ExternalView(org.apache.helix.model.ExternalView) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) ClusterStateVerifier(org.apache.helix.tools.ClusterStateVerifier) Verifier(org.apache.helix.tools.ClusterStateVerifier.Verifier) Timestamp(java.sql.Timestamp) IdealState(org.apache.helix.model.IdealState)
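
A hedged usage sketch of the helper above; the ZooKeeper address, cluster name, and timeout are placeholders rather than values from the project.

// Block until every resource's external view matches its ideal state, or the timeout expires.
PerfBenchmarkDriver.waitForExternalViewUpdate("localhost:2181", "PinotPerfTestCluster", 60_000L);

Note that the method returns void and does not propagate the result of ClusterStateVerifier.verifyByPolling, so a timeout is only visible through the absence of the "Cluster is ready to serve queries" log line.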

Example 34 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project ambry by linkedin.

Source: class HelixParticipantTest, method testGetAndSetReplicaSealedState.

/**
 * Tests setReplicaSealedState method for {@link HelixParticipant}
 * @throws Exception
 */
@Test
public void testGetAndSetReplicaSealedState() throws Exception {
    // setup HelixParticipant and dependencies
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    String instanceName = ClusterMapUtils.getInstanceName("localhost", clusterMapConfig.clusterMapPort);
    HelixParticipant helixParticipant = new HelixParticipant(clusterMapConfig, new HelixFactory(), new MetricRegistry(), getDefaultZkConnectStr(clusterMapConfig), true);
    ZKHelixAdmin helixAdmin = new ZKHelixAdmin("localhost:" + zkInfo.getPort());
    DataNodeConfig dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    Set<String> localPartitionNames = new HashSet<>();
    dataNodeConfig.getDiskConfigs().values().forEach(diskConfig -> localPartitionNames.addAll(diskConfig.getReplicaConfigs().keySet()));
    String partitionIdStr = localPartitionNames.iterator().next();
    String partitionIdStr2 = localPartitionNames.stream().filter(p -> !p.equals(partitionIdStr)).findFirst().get();
    ReplicaId replicaId = createMockAmbryReplica(partitionIdStr);
    ReplicaId replicaId2 = createMockAmbryReplica(partitionIdStr2);
    // Make sure the current sealedReplicas list is empty
    List<String> sealedReplicas = helixParticipant.getSealedReplicas();
    assertEquals("sealedReplicas should be empty", Collections.emptyList(), sealedReplicas);
    String listName = "sealedReplicas";
    // Check that invoking setReplicaSealedState with a non-AmbryReplica ReplicaId throws an IllegalArgumentException
    ReplicaId notAmbryReplica = createMockNotAmbryReplica(partitionIdStr);
    try {
        helixParticipant.setReplicaSealedState(notAmbryReplica, true);
        fail("Expected an IllegalArgumentException here");
    } catch (IllegalArgumentException e) {
    // Expected exception
    }
    // Check that invoking setReplicaSealedState adds the partition to the list of sealed replicas
    helixParticipant.setReplicaSealedState(replicaId, true);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 1, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Seal another replicaId
    helixParticipant.setReplicaSealedState(replicaId2, true);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 2, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Check that sealed replica list doesn't take duplicates (and that dups are detected by partitionId comparison, not
    // replicaId object comparison)
    ReplicaId dup = createMockAmbryReplica(partitionIdStr);
    helixParticipant.setReplicaSealedState(dup, true);
    helixParticipant.setReplicaSealedState(replicaId2, true);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 2, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertTrue(sealedReplicas.contains(partitionIdStr));
    // Check that invoking setReplicaSealedState with isSealed == false removes partition from list of sealed replicas
    helixParticipant.setReplicaSealedState(replicaId, false);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 1, listName);
    assertTrue(sealedReplicas.contains(partitionIdStr2));
    assertFalse(sealedReplicas.contains(partitionIdStr));
    // Removing a replicaId that's already been removed doesn't hurt anything
    helixParticipant.setReplicaSealedState(replicaId, false);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 1, listName);
    // Removing all replicas yields expected behavior (and removal works by partitionId, not replicaId itself)
    dup = createMockAmbryReplica(partitionIdStr2);
    helixParticipant.setReplicaSealedState(dup, false);
    sealedReplicas = helixParticipant.getSealedReplicas();
    listIsExpectedSize(sealedReplicas, 0, listName);
    helixAdmin.close();
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) HashSet(java.util.HashSet) Test(org.junit.Test)
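
The test opens the ZKHelixAdmin directly from the ZooKeeper address and closes it in its last line. A small, hedged variant of the same pattern, with the close guaranteed even when an assertion fails (the address is a placeholder):

ZKHelixAdmin helixAdmin = new ZKHelixAdmin("localhost:2199");
try {
    // ... exercise the HelixParticipant and assert on the sealed-replica list ...
} finally {
    // Release the underlying ZooKeeper connection even if an assertion above throws.
    helixAdmin.close();
}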

Example 35 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project ambry by linkedin.

Source: class HelixParticipantTest, method testUpdateNodeInfoInCluster.

/**
 * Test both replica info addition and removal cases when updating node info in Helix cluster.
 * @throws Exception
 */
@Test
public void testUpdateNodeInfoInCluster() throws Exception {
    // override some props for current test
    props.setProperty("clustermap.update.datanode.info", Boolean.toString(true));
    ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
    HelixParticipant participant = new HelixParticipant(clusterMapConfig, new HelixFactory(), new MetricRegistry(), getDefaultZkConnectStr(clusterMapConfig), true);
    participant.markDisablePartitionComplete();
    // create InstanceConfig for local node. Also, put existing replica into sealed list
    String instanceName = ClusterMapUtils.getInstanceName("localhost", clusterMapConfig.clusterMapPort);
    ZKHelixAdmin helixAdmin = new ZKHelixAdmin("localhost:" + zkInfo.getPort());
    DataNodeConfig dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    DataNodeConfig.DiskConfig diskConfig = dataNodeConfig.getDiskConfigs().values().iterator().next();
    String existingReplicaName = diskConfig.getReplicaConfigs().keySet().iterator().next();
    PartitionId correspondingPartition = testPartitionLayout.getPartitionLayout().getPartitions(null).stream().filter(p -> p.toPathString().equals(existingReplicaName)).findFirst().get();
    ReplicaId existingReplica = correspondingPartition.getReplicaIds().stream().filter(r -> r.getDataNodeId().getPort() == clusterMapConfig.clusterMapPort).findFirst().get();
    // generate exactly same config for comparison
    DataNodeConfig initialDataNodeConfig = deepCopyDataNodeConfig(dataNodeConfig);
    // 1. add existing replica's info to Helix should be no-op
    assertTrue("Adding existing replica's info should succeed", participant.updateDataNodeInfoInCluster(existingReplica, true));
    assertEquals("DataNodeConfig should stay unchanged", initialDataNodeConfig, getDataNodeConfigInHelix(helixAdmin, instanceName));
    // create two new replicas on the same disk of local node
    int currentPartitionCount = testPartitionLayout.getPartitionCount();
    Partition newPartition1 = new Partition(currentPartitionCount++, DEFAULT_PARTITION_CLASS, PartitionState.READ_WRITE, testPartitionLayout.replicaCapacityInBytes);
    Partition newPartition2 = new Partition(currentPartitionCount, DEFAULT_PARTITION_CLASS, PartitionState.READ_WRITE, testPartitionLayout.replicaCapacityInBytes);
    Disk disk = (Disk) existingReplica.getDiskId();
    // 2. add new partition2 (id = 10, replicaFromPartition2) to Helix
    ReplicaId replicaFromPartition2 = new Replica(newPartition2, disk, clusterMapConfig);
    assertTrue("Adding new replica info to Helix should succeed.", participant.updateDataNodeInfoInCluster(replicaFromPartition2, true));
    // verify new added replica (replicaFromPartition2) info is present in DataNodeConfig
    Thread.sleep(50);
    dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition2, true);
    // 3. add new partition1 (replicaFromPartition1) into InstanceConfig
    ReplicaId replicaFromPartition1 = new Replica(newPartition1, disk, clusterMapConfig);
    assertTrue("Adding new replica info into InstanceConfig should succeed.", participant.updateDataNodeInfoInCluster(replicaFromPartition1, true));
    Thread.sleep(50);
    // verify new added replica (replicaFromPartition1) info is present in InstanceConfig
    dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition1, true);
    // ensure previous added replica (replicaFromPartition2) still exists
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition2, true);
    // 4. remove recently added new replica (replicaFromPartition1)
    assertTrue("Removing replica info from InstanceConfig should succeed.", participant.updateDataNodeInfoInCluster(replicaFromPartition1, false));
    Thread.sleep(50);
    dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition1, false);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition2, true);
    // 5. remove same replica again (id = 9, replicaFromPartition1) should be no-op
    assertTrue("Removing non-found replica info from InstanceConfig should succeed.", participant.updateDataNodeInfoInCluster(replicaFromPartition1, false));
    // 6. remove recently added new replica (replicaFromPartition2)
    assertTrue("Removing replica info from InstanceConfig should succeed.", participant.updateDataNodeInfoInCluster(replicaFromPartition2, false));
    Thread.sleep(50);
    dataNodeConfig = getDataNodeConfigInHelix(helixAdmin, instanceName);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, replicaFromPartition2, false);
    verifyReplicaInfoInDataNodeConfig(dataNodeConfig, existingReplica, true);
    // reset props
    props.setProperty("clustermap.update.datanode.info", Boolean.toString(false));
    helixAdmin.close();
}
Also used : VerifiableProperties(com.github.ambry.config.VerifiableProperties) MetricRegistry(com.codahale.metrics.MetricRegistry) ClusterMapConfig(com.github.ambry.config.ClusterMapConfig) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) Test(org.junit.Test)

Aggregations

ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin): 70
HelixAdmin (org.apache.helix.HelixAdmin): 31
IdealState (org.apache.helix.model.IdealState): 25
Test (org.testng.annotations.Test): 23
Date (java.util.Date): 21
InstanceConfig (org.apache.helix.model.InstanceConfig): 16
ClusterControllerManager (org.apache.helix.integration.manager.ClusterControllerManager): 14
ZNRecord (org.apache.helix.ZNRecord): 13
MockParticipantManager (org.apache.helix.integration.manager.MockParticipantManager): 13
ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer): 12
ZkClient (org.apache.helix.manager.zk.ZkClient): 12
ClusterStateVerifier (org.apache.helix.tools.ClusterStateVerifier): 12
ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor): 11
StateModelDefinition (org.apache.helix.model.StateModelDefinition): 11
HashMap (java.util.HashMap): 10
HashSet (java.util.HashSet): 10
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 8
ExternalView (org.apache.helix.model.ExternalView): 8
Test (org.junit.Test): 8
BestPossAndExtViewZkVerifier (org.apache.helix.tools.ClusterStateVerifier.BestPossAndExtViewZkVerifier): 7
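
Across the examples aggregated above, ZKHelixAdmin is constructed in two ways. A minimal side-by-side sketch of the two idioms (ZooKeeper addresses are placeholders):

import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;

public class ZkHelixAdminConstructionSketch {
    public static void main(String[] args) {
        // 1. From a ZooKeeper connect string, as in Examples 32-35.
        ZKHelixAdmin adminFromAddress = new ZKHelixAdmin("localhost:2181");

        // 2. From a caller-created ZkClient configured with the Helix ZNRecord serializer, as in Example 31.
        ZkClient zkClient = new ZkClient("localhost:2181", 10000, 10000, new ZNRecordSerializer());
        ZKHelixAdmin adminFromClient = new ZKHelixAdmin(zkClient);

        // Clean up: close the admin built from the connect string, and close the caller-owned ZkClient.
        adminFromAddress.close();
        zkClient.close();
    }
}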