Example 6 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project databus by linkedin.

In class ClusterCheckpointPersistenceProvider, method createCluster.

/**
 * Creates a cluster if it doesn't exist.
 * Note: this method is not thread-safe, as HelixAdmin.addCluster appears to
 * fail under concurrent execution from multiple threads.
 *
 * @return true if the cluster was created, false otherwise
 */
public static boolean createCluster(String zkAddr, String clusterName) {
    boolean created = false;
    ZkClient zkClient = null;
    try {
        zkClient = new ZkClient(zkAddr, ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT, new ZNRecordSerializer());
        ZKHelixAdmin admin = new ZKHelixAdmin(zkClient);
        admin.addCluster(clusterName, false);
        created = true;
    } catch (HelixException e) {
        LOG.warn("Warn! Cluster might already exist! " + clusterName);
        created = false;
    } finally {
        // close this connection
        if (zkClient != null) {
            zkClient.close();
        }
    }
    return created;
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) HelixException(org.apache.helix.HelixException) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer)
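
The Javadoc above notes that createCluster is not thread-safe because HelixAdmin.addCluster appears to fail when invoked concurrently. Below is a minimal sketch of how a caller might serialize cluster creation across threads, assuming the method signature shown in Example 6; the wrapper class and lock object are hypothetical additions for illustration.

// Hypothetical caller-side guard that serializes all createCluster calls within one JVM.
public final class ClusterBootstrap {

    // Single lock shared by every thread that may try to create the cluster.
    private static final Object CLUSTER_CREATE_LOCK = new Object();

    private ClusterBootstrap() {
    }

    public static boolean createClusterSerialized(String zkAddr, String clusterName) {
        synchronized (CLUSTER_CREATE_LOCK) {
            // Delegates to the method from Example 6; returns false if the cluster already exists.
            return ClusterCheckpointPersistenceProvider.createCluster(zkAddr, clusterName);
        }
    }
}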

Example 7 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project pinot by linkedin.

In class OfflineClusterIntegrationTest, method setUp.

@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);
    // Start the cluster
    startCluster();
    // Unpack the Avro files
    final List<File> avroFiles = unpackAvroData(_tmpDir, SEGMENT_COUNT);
    createTable();
    // Get the list of instances through the REST API
    URL url = new URL("http://" + ControllerTestUtils.DEFAULT_CONTROLLER_HOST + ":" + ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT + "/instances");
    InputStream inputStream = url.openConnection().getInputStream();
    String instanceApiResponseString = IOUtils.toString(inputStream);
    IOUtils.closeQuietly(inputStream);
    JSONObject instanceApiResponse = new JSONObject(instanceApiResponseString);
    JSONArray instanceArray = instanceApiResponse.getJSONArray("instances");
    HelixAdmin helixAdmin = new ZKHelixAdmin(new ZkClient(ZkStarter.DEFAULT_ZK_STR, 10000, 10000, new ZNRecordSerializer()));
    for (int i = 0; i < instanceArray.length(); i++) {
        String instance = instanceArray.getString(i);
        if (instance.startsWith("Server_")) {
            _serverServiceStatusCallback = new ServiceStatus.IdealStateAndExternalViewMatchServiceStatusCallback(helixAdmin, getHelixClusterName(), instance);
        }
        if (instance.startsWith("Broker_")) {
            _brokerServiceStatusCallback = new ServiceStatus.IdealStateAndExternalViewMatchServiceStatusCallback(helixAdmin, getHelixClusterName(), instance, Collections.singletonList("brokerResource"));
        }
    }
    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data
    buildSegmentsFromAvro(avroFiles, executor, 0, _segmentDir, _tarDir, "mytable", false, null);
    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);
    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        //      System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all segments to be online
    latch.await();
    TOTAL_DOCS = 115545;
    long timeInTwoMinutes = System.currentTimeMillis() + 2 * 60 * 1000L;
    long numDocs;
    while ((numDocs = getCurrentServingNumDocs("mytable")) < TOTAL_DOCS) {
        //      System.out.println("Current number of documents: " + numDocs);
        if (System.currentTimeMillis() < timeInTwoMinutes) {
            Thread.sleep(1000);
        } else {
            Assert.fail("Segments were not completely loaded within two minutes");
        }
    }
}
Also used : ZkClient(org.apache.helix.manager.zk.ZkClient) InputStream(java.io.InputStream) JSONArray(org.json.JSONArray) HelixAdmin(org.apache.helix.HelixAdmin) ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) CountDownLatch(java.util.concurrent.CountDownLatch) URL(java.net.URL) JSONObject(org.json.JSONObject) ServiceStatus(com.linkedin.pinot.common.utils.ServiceStatus) ExecutorService(java.util.concurrent.ExecutorService) File(java.io.File) ZNRecordSerializer(org.apache.helix.manager.zk.ZNRecordSerializer) BeforeClass(org.testng.annotations.BeforeClass)
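
Note that Example 7 builds the ZKHelixAdmin from an already-open ZkClient, while Example 8 below passes a bare ZooKeeper address string and lets the admin manage its own connection. Here is a small sketch contrasting the two styles, reusing the timeouts and serializer from above; the cluster name, method name, and printed output are placeholders for illustration.

// Sketch only: the two ZKHelixAdmin construction styles seen in Examples 7 and 8.
private static void listInstancesBothWays(String zkAddress, String clusterName) {
    // Style 1 (Example 7): wrap an existing ZkClient; the caller owns and must close the client.
    ZkClient zkClient = new ZkClient(zkAddress, 10000, 10000, new ZNRecordSerializer());
    try {
        HelixAdmin adminFromClient = new ZKHelixAdmin(zkClient);
        // e.g. list the instances registered in the cluster (java.util.List assumed imported)
        List<String> instances = adminFromClient.getInstancesInCluster(clusterName);
        System.out.println("Instances in " + clusterName + ": " + instances);
    } finally {
        zkClient.close();
    }

    // Style 2 (Example 8): pass the address and let ZKHelixAdmin open its own connection.
    HelixAdmin adminFromAddress = new ZKHelixAdmin(zkAddress);
    System.out.println("Clusters visible at " + zkAddress + ": " + adminFromAddress.getClusters());
}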

Example 8 with ZKHelixAdmin

Use of org.apache.helix.manager.zk.ZKHelixAdmin in project pinot by linkedin.

In class BalanceNumSegmentAssignmentStrategyIntegrationTest, method setUp.

@BeforeClass
public void setUp() throws Exception {
    // Start zk and controller
    startZk();
    startController();
    // Start one server and one broker instance
    startServer();
    startBroker();
    // Create eight dummy server instances
    for (int i = 0; i < 8; ++i) {
        JSONObject serverInstance = new JSONObject();
        serverInstance.put("host", hostName);
        serverInstance.put("port", Integer.toString(basePort + i));
        serverInstance.put("tag", serverTenant);
        serverInstance.put("type", "server");
        sendPostRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forInstanceCreate(), serverInstance.toString());
    }
    // Create Helix connection
    _helixAdmin = new ZKHelixAdmin(ZkStarter.DEFAULT_ZK_STR);
    // Base class @BeforeMethod will set up the tables
}
Also used : ZKHelixAdmin(org.apache.helix.manager.zk.ZKHelixAdmin) JSONObject(org.json.JSONObject) BeforeClass(org.testng.annotations.BeforeClass)
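
Once the eight dummy server instances have been registered through the controller REST API, the _helixAdmin handle created above could be used to confirm they appear in the cluster. This is a hedged sketch only: it assumes a getHelixClusterName() helper like the one used in Example 7 and the Server_ instance-name prefix seen there; the expected count depends on how many real servers the test starts.

// Sketch: verify the dummy servers are visible through the Helix admin handle.
List<String> instances = _helixAdmin.getInstancesInCluster(getHelixClusterName());
int serverCount = 0;
for (String instanceName : instances) {
    // "Server_" prefix follows the naming convention seen in Example 7.
    if (instanceName.startsWith("Server_")) {
        serverCount++;
    }
}
// Eight dummy servers were created above, plus the single server started by startServer().
Assert.assertTrue(serverCount >= 9, "Expected the dummy server instances to be registered");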

Aggregations

ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin) 8
ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer) 4
IdealState (org.apache.helix.model.IdealState) 4
ZkClient (org.apache.helix.manager.zk.ZkClient) 3
ExternalView (org.apache.helix.model.ExternalView) 3
Timestamp (java.sql.Timestamp) 2
HelixAdmin (org.apache.helix.HelixAdmin) 2
ClusterStateVerifier (org.apache.helix.tools.ClusterStateVerifier) 2
JSONObject (org.json.JSONObject) 2
BeforeClass (org.testng.annotations.BeforeClass) 2
Sets (com.google.common.collect.Sets) 1
ServiceStatus (com.linkedin.pinot.common.utils.ServiceStatus) 1
PinotHelixBrokerResourceOnlineOfflineStateModelGenerator (com.linkedin.pinot.controller.helix.core.PinotHelixBrokerResourceOnlineOfflineStateModelGenerator) 1
PinotHelixSegmentOnlineOfflineStateModelGenerator (com.linkedin.pinot.controller.helix.core.PinotHelixSegmentOnlineOfflineStateModelGenerator) 1
File (java.io.File) 1
InputStream (java.io.InputStream) 1
URL (java.net.URL) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1