Use of org.apache.helix.manager.zk.ZkClient in project pinot by LinkedIn: class PinotRealtimeSegmentManager, method start.
/**
 * Starts the realtime segment manager: opens a dedicated ZK connection, subscribes to
 * table-config changes in the property store, reacts to controller leadership changes,
 * and processes any tables that already exist.
 */
public void start(ControllerMetrics controllerMetrics) {
  _controllerMetrics = controllerMetrics;
  LOGGER.info("Starting realtime segments manager, adding a listener on the property store table configs path.");
  // Open a dedicated ZK connection (ZNRecord-serialized) and block until it is established.
  _zkClient = new ZkClient(_pinotHelixResourceManager.getHelixZkURL(), ZkClient.DEFAULT_SESSION_TIMEOUT, ZkClient.DEFAULT_CONNECTION_TIMEOUT);
  _zkClient.setZkSerializer(new ZNRecordSerializer());
  _zkClient.waitUntilConnected();
  // Watch both child and data events under the table-config subtree.
  _zkClient.subscribeChildChanges(_tableConfigPath, this);
  _zkClient.subscribeDataChanges(_tableConfigPath, this);
  // React to controller leadership hand-offs as well (ControllerChangeListener is a SAM interface).
  _pinotHelixResourceManager.getHelixZkManager().addControllerListener(
      changeContext -> processPropertyStoreChange(CONTROLLER_LEADER_CHANGE));
  // Pick up listeners for tables that already exist at startup.
  processPropertyStoreChange(_tableConfigPath);
}
Use of org.apache.helix.manager.zk.ZkClient in project pinot by LinkedIn: class ControllerInstanceToggleTest, method setup.
/**
 * Test fixture: brings up an embedded ZK, a controller, and 20 fake brokers plus
 * 20 fake servers that auto-join the Helix cluster.
 */
@BeforeClass
public void setup() throws Exception {
  // Bring up the embedded ZK first; everything else connects to it.
  startZk();
  String zkAddress = ZkStarter.DEFAULT_ZK_STR;
  _zkClient = new ZkClient(zkAddress);
  // The controller must be running before its resource manager is available.
  startController();
  _pinotResourceManager = _controllerStarter.getHelixResourceManager();
  // Register fake instances that auto-join the cluster (tagged=true).
  int fakeInstanceCount = 20;
  ControllerRequestBuilderUtil.addFakeBrokerInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, zkAddress, fakeInstanceCount, true);
  ControllerRequestBuilderUtil.addFakeDataInstancesToAutoJoinHelixCluster(HELIX_CLUSTER_NAME, zkAddress, fakeInstanceCount, true);
}
Use of org.apache.helix.manager.zk.ZkClient in project pinot by LinkedIn: class DeleteClusterCommand, method execute.
/**
 * Deletes the Helix cluster rooted at "/&lt;clusterName&gt;" from ZooKeeper.
 *
 * @return {@code false} if the cluster znode does not exist, {@code true} once it has
 *         been recursively deleted.
 * @throws Exception if the ZK connection cannot be established.
 */
@Override
public boolean execute() throws Exception {
  LOGGER.info("Connecting to Zookeeper at address: {}", _zkAddress);
  ZkClient zkClient = new ZkClient(_zkAddress, 5000);
  // Fix: close the client on every exit path — previously it was leaked, keeping
  // the ZK session and its event threads alive after the command finished.
  try {
    String helixClusterName = "/" + _clusterName;
    LOGGER.info("Executing command: {}", toString());
    if (!zkClient.exists(helixClusterName)) {
      LOGGER.error("Cluster {} does not exist.", _clusterName);
      return false;
    }
    zkClient.deleteRecursive(helixClusterName);
    return true;
  } finally {
    zkClient.close();
  }
}
Use of org.apache.helix.manager.zk.ZkClient in project ambry by LinkedIn: class HelixClusterWideAggregationTool, method main.
/**
 * Creates (or, with {@code --delete}, removes) a cluster-wide aggregation workflow in every
 * datacenter listed in the ZK layout file.
 *
 * @param args takes in three mandatory arguments: the ZK layout, the cluster name, the workflow name. Optional
 * argument to create the workflow as a recurrent workflow and specifies the recurrent time interval.
 * The ZK layout has to be of the following form:
 * {
 *   "zkInfo" : [
 *     {
 *       "datacenter":"dc1",
 *       "id" : "1",
 *       "zkConnectStr":"abc.example.com:2199"
 *     },
 *     {
 *       "datacenter":"dc2",
 *       "id" : "2",
 *       "zkConnectStr":"def.example.com:2300"
 *     }
 *   ]
 * }
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
  OptionParser parser = new OptionParser();
  ArgumentAcceptingOptionSpec<String> zkLayoutPathOpt = parser.accepts("zkLayoutPath", "The path to the json file containing zookeeper connect info. This should be of the following form: \n{\n" + "  \"zkInfo\" : [\n" + "     {\n" + "       \"datacenter\":\"dc1\",\n" + "       \"id\":\"1\",\n" + "       \"zkConnectStr\":\"abc.example.com:2199\",\n" + "     },\n" + "     {\n" + "       \"datacenter\":\"dc2\",\n" + "       \"id\":\"2\",\n" + "       \"zkConnectStr\":\"def.example.com:2300\",\n" + "     },\n" + "     {\n" + "       \"datacenter\":\"dc3\",\n" + "       \"id\":\"3\",\n" + "       \"zkConnectStr\":\"ghi.example.com:2400\",\n" + "     }\n" + "  ]\n" + "}").withRequiredArg().describedAs("zk_connect_info_path").ofType(String.class);
  ArgumentAcceptingOptionSpec<String> clusterNameOpt = parser.accepts("clusterName", "The cluster name in helix").withRequiredArg().describedAs("cluster_name").ofType(String.class);
  ArgumentAcceptingOptionSpec<String> workflowNameOpt = parser.accepts("workflowName", "The name of the one-time workflow").withRequiredArg().describedAs("workflow_name").ofType(String.class);
  ArgumentAcceptingOptionSpec<Long> recurrentIntervalInMinutesOpt = parser.accepts("recurrentIntervalInMinutes", "The frequency for the recurrent workflow").withOptionalArg().describedAs("recurrent_interval_in_minutes").ofType(Long.class).defaultsTo(Utils.Infinite_Time);
  parser.accepts("delete", "Flag to remove the given workflow from the cluster(s) instead of creating one");
  OptionSet options = parser.parse(args);
  Boolean isDelete = options.has("delete");
  String zkLayoutPath = options.valueOf(zkLayoutPathOpt);
  String clusterName = options.valueOf(clusterNameOpt);
  String workflowName = options.valueOf(workflowNameOpt);
  Long recurrentIntervalInMinutes = options.valueOf(recurrentIntervalInMinutesOpt);
  Map<String, ClusterMapUtils.DcZkInfo> dataCenterToZKAddress = ClusterMapUtils.parseDcJsonAndPopulateDcInfo(Utils.readStringFromFile(zkLayoutPath));
  // Apply the operation independently in each datacenter's ZK ensemble.
  for (ClusterMapUtils.DcZkInfo zkInfo : dataCenterToZKAddress.values()) {
    String zkAddress = zkInfo.getZkConnectStr();
    ZkClient zkClient = new ZkClient(zkAddress, SESSION_TIMEOUT, CONNECTION_TIMEOUT, new ZNRecordSerializer());
    // Fix: close the client at the end of each iteration — previously one ZkClient
    // (ZK session + event threads) was leaked per datacenter.
    try {
      TaskDriver taskDriver = new TaskDriver(zkClient, clusterName);
      if (isDelete) {
        try {
          // A workflow must be stopped before it can be deleted.
          taskDriver.stop(workflowName);
          taskDriver.delete(workflowName);
        } catch (Exception | Error e) {
          System.out.println(String.format("Failed to delete %s. Workflow not found in cluster %s at %s", workflowName, clusterName, zkAddress));
        }
      } else {
        try {
          Workflow.Builder workflowBuilder = new Workflow.Builder(workflowName);
          String jobId = ONE_TIME_JOB_ID;
          // A finite interval means a recurrent workflow; expire it after one interval.
          if (recurrentIntervalInMinutes != Utils.Infinite_Time) {
            jobId = RECURRENT_JOB_ID;
            workflowBuilder.setScheduleConfig(ScheduleConfig.recurringFromNow(TimeUnit.MINUTES, recurrentIntervalInMinutes));
            workflowBuilder.setExpiry(TimeUnit.MINUTES.toMillis(recurrentIntervalInMinutes));
          }
          JobConfig.Builder jobConfigBuilder = new JobConfig.Builder();
          List<TaskConfig> taskConfigs = new ArrayList<>();
          taskConfigs.add(new TaskConfig.Builder().setTaskId(TASK_ID).setCommand(String.format("%s_%s", HelixHealthReportAggregatorTask.TASK_COMMAND_PREFIX, REPORT_NAME)).build());
          jobConfigBuilder.addTaskConfigs(taskConfigs);
          workflowBuilder.addJob(jobId, jobConfigBuilder);
          Workflow workflow = workflowBuilder.build();
          taskDriver.start(workflow);
          System.out.println(String.format("%s_%s started successfully", workflowName, jobId));
        } catch (Exception | Error e) {
          System.out.println(String.format("Failed to start %s in cluster %s at %s", workflowName, clusterName, zkAddress));
        }
      }
    } finally {
      zkClient.close();
    }
  }
}
Use of org.apache.helix.manager.zk.ZkClient in project helix by Apache: class TestZnodeModify, method beforeClass.
/**
 * Test fixture: connects a ZNRecord-serialized ZK client and wipes any leftover
 * test subtree from a previous run so the test starts from a clean slate.
 */
@BeforeClass()
public void beforeClass() {
  // Announce the test start with a timestamp for log correlation.
  String banner = "START " + getShortClassName() + " at " + new Date(System.currentTimeMillis());
  System.out.println(banner);
  // Helix znodes are ZNRecords, so install the matching serializer up front.
  _zkClient = new ZkClient(ZK_ADDR);
  _zkClient.setZkSerializer(new ZNRecordSerializer());
  // Drop any residue under PREFIX left behind by a prior (possibly failed) run.
  boolean leftoverExists = _zkClient.exists(PREFIX);
  if (leftoverExists) {
    _zkClient.deleteRecursively(PREFIX);
  }
}
Aggregations