Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in project pinot by linkedin.
The class HelixSetupUtils, method createHelixClusterIfNeeded.
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
  final HelixAdmin admin = new ZKHelixAdmin(zkPath);
  final String segmentStateModelName =
      PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;
  if (admin.getClusters().contains(helixClusterName)) {
    LOGGER.info("cluster already exists ********************************************* ");
    if (isUpdateStateModel) {
      final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
      List<String> states = curStateModelDef.getStatesPriorityList();
      if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
        LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
        return;
      } else {
        LOGGER.info("Updating {} to add states for low-level Kafka consumers", segmentStateModelName);
        StateModelDefinition newStateModelDef =
            PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
        ZkClient zkClient = new ZkClient(zkPath);
        zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
        zkClient.setZkSerializer(new ZNRecordSerializer());
        HelixDataAccessor accessor =
            new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
        PropertyKey.Builder keyBuilder = accessor.keyBuilder();
        accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
        LOGGER.info("Completed updating state model {}", segmentStateModelName);
        zkClient.close();
      }
    }
    return;
  }
  LOGGER.info("Creating a new cluster, as the Helix cluster " + helixClusterName
      + " was not found ********************************************* ");
  admin.addCluster(helixClusterName, false);
  LOGGER.info("Enable auto join.");
  final HelixConfigScope scope =
      new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();
  final Map<String, String> props = new HashMap<String, String>();
  props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
  // We need only one segment to be loaded at a time.
  props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));
  admin.setConfig(scope, props);
  LOGGER.info("Adding state model {} (with CONSUMING state) generated using {} **********************************************",
      segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());
  // If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
  // state model. But then the servers will never be asked to go to that STATE (whether they have the code
  // to handle it or not) until we complete the feature using low-level Kafka consumers and turn the feature on.
  admin.addStateModelDef(helixClusterName, segmentStateModelName,
      PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding state model definition named : "
      + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL
      + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString()
      + " ********************************************** ");
  admin.addStateModelDef(helixClusterName,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL,
      PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding empty ideal state for Broker!");
  HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE,
      helixClusterName, admin);
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
  admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
  initPropertyStorePath(helixClusterName, zkPath);
  LOGGER.info("New cluster setup completed... ********************************************** ");
}
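The cluster-scoped write above (ALLOW_PARTICIPANT_AUTO_JOIN plus a transition-thread limit) is the core HelixConfigScopeBuilder pattern. Below is a minimal, self-contained sketch of just that pattern, with the ZooKeeper address and cluster name as placeholders: build a CLUSTER scope, write a property through HelixAdmin, and read it back.

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.manager.zk.ZKHelixManager;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

public class ClusterScopeExample {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address and cluster name.
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster("myCluster").build();
    Map<String, String> props = new HashMap<String, String>();
    props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
    admin.setConfig(scope, props);
    // Read the value back to confirm the write.
    Map<String, String> stored =
        admin.getConfig(scope, Arrays.asList(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN));
    System.out.println(stored);
    admin.close();
  }
}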
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in project helix by apache.
The class IntegrationTest, method addConfiguration.
private static void addConfiguration(ClusterSetup setup, String baseDir, String clusterName, String instanceName)
    throws IOException {
  Map<String, String> properties = new HashMap<String, String>();
  HelixConfigScopeBuilder builder = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT);
  HelixConfigScope instanceScope = builder.forCluster(clusterName).forParticipant(instanceName).build();
  properties.put("change_log_dir", baseDir + instanceName + "/translog");
  properties.put("file_store_dir", baseDir + instanceName + "/filestore");
  properties.put("check_point_dir", baseDir + instanceName + "/checkpoint");
  setup.getClusterManagementTool().setConfig(instanceScope, properties);
  // Recreate the instance directories so each run starts from a clean state.
  FileUtils.deleteDirectory(new File(properties.get("change_log_dir")));
  FileUtils.deleteDirectory(new File(properties.get("file_store_dir")));
  FileUtils.deleteDirectory(new File(properties.get("check_point_dir")));
  new File(properties.get("change_log_dir")).mkdirs();
  new File(properties.get("file_store_dir")).mkdirs();
  new File(properties.get("check_point_dir")).mkdirs();
}
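Because the scope targets a single participant, each instance carries its own copy of these keys. As a minimal sketch (the ZooKeeper address, cluster name, and instance name are placeholders), the values written above could be read back like this:

import java.util.Arrays;
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

public class ParticipantScopeRead {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address; cluster and instance names must match those configured above.
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
    HelixConfigScope instanceScope = new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT)
        .forCluster("myCluster").forParticipant("localhost_12918").build();
    Map<String, String> stored = admin.getConfig(instanceScope,
        Arrays.asList("change_log_dir", "file_store_dir", "check_point_dir"));
    System.out.println(stored);
    admin.close();
  }
}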
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in project helix by apache.
The class TaskCluster, method submitDag.
public void submitDag(Dag dag) throws Exception {
  ConfigAccessor clusterConfig = new ConfigAccessor(_zkclient);
  HelixConfigScope clusterScope =
      new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(_clusterName).build();
  for (String id : dag.getNodeIds()) {
    Dag.Node node = dag.getNode(id);
    // Store each node's JSON definition in the cluster config so participants can fetch it later.
    clusterConfig.set(clusterScope, node.getId(), node.toJson());
    _admin.addResource(_clusterName, node.getId(), node.getNumPartitions(), DEFAULT_STATE_MODEL,
        RebalanceMode.FULL_AUTO.toString());
  }
  for (String id : dag.getNodeIds()) {
    _admin.rebalance(_clusterName, id, 1);
  }
}
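Here the cluster config doubles as a small key-value store: each DAG node's JSON is written under its id, and participants read it back when asked to run the task (as the TaskStateModel below does). A minimal sketch of that round trip, with the ZooKeeper address, cluster name, and payload as placeholders:

import org.apache.helix.ConfigAccessor;
import org.apache.helix.manager.zk.ZNRecordSerializer;
import org.apache.helix.manager.zk.ZkClient;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

public class ClusterConfigRoundTrip {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address; the serializer matches what Helix stores internally.
    ZkClient zkClient = new ZkClient("localhost:2181");
    zkClient.setZkSerializer(new ZNRecordSerializer());
    ConfigAccessor configAccessor = new ConfigAccessor(zkClient);
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster("myCluster").build();
    configAccessor.set(scope, "node-1", "{\"id\":\"node-1\"}"); // placeholder payload
    String json = configAccessor.get(scope, "node-1");
    System.out.println(json);
    zkClient.close();
  }
}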
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in project helix by apache.
The class TaskStateModel, method onBecomeOnlineFromOffline.
@Transition(to = "ONLINE", from = "OFFLINE")
public void onBecomeOnlineFromOffline(Message message, NotificationContext context) throws Exception {
  LOG.debug(_workerId + " becomes ONLINE from OFFLINE for " + _partition);
  HelixManager manager = context.getManager();
  ConfigAccessor clusterConfig = manager.getConfigAccessor();
  HelixConfigScope clusterScope =
      new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(manager.getClusterName()).build();
  // submitDag stored the DAG node's JSON in the cluster config under the resource name.
  String json = clusterConfig.get(clusterScope, message.getResourceName());
  Dag.Node node = Dag.Node.fromJson(json);
  Set<String> parentIds = node.getParentIds();
  String resourceName = message.getResourceName();
  int numPartitions = node.getNumPartitions();
  Task task = _taskFactory.createTask(resourceName, parentIds, manager, _taskResultStore);
  manager.addExternalViewChangeListener(task);
  LOG.debug("Starting task for " + _partition + "...");
  int partitionNum = Integer.parseInt(_partition.split("_")[1]);
  task.execute(resourceName, numPartitions, partitionNum);
  LOG.debug("Task for " + _partition + " done");
}
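One detail worth calling out: _partition.split("_")[1] relies on Helix's default partition naming scheme, resourceName_partitionIndex. A tiny sketch of that parsing, with a hypothetical partition name:

public class PartitionNameExample {
  public static void main(String[] args) {
    // Hypothetical partition name; Helix names partitions "<resource>_<index>" by default.
    String partition = "myResource_3";
    int partitionNum = Integer.parseInt(partition.split("_")[1]);
    System.out.println(partitionNum); // prints 3
  }
}

Note that this parsing would break for resource names that themselves contain underscores; splitting on the last underscore would be more robust.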
Use of org.apache.helix.model.builder.HelixConfigScopeBuilder in project helix by apache.
The class ConfigResource, method getConfigs.
StringRepresentation getConfigs(ConfigScopeProperty scopeProperty, String... keys) throws Exception {
  ZkClient zkClient = ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  ClusterSetup setupTool = new ClusterSetup(zkClient);
  HelixAdmin admin = setupTool.getClusterManagementTool();
  ZNRecord record = new ZNRecord(scopeProperty + " Config");
  HelixConfigScope scope = new HelixConfigScopeBuilder(scopeProperty, keys).build();
  List<String> configKeys = admin.getConfigKeys(scope);
  Map<String, String> configs = admin.getConfig(scope, configKeys);
  record.setSimpleFields(configs);
  return new StringRepresentation(ClusterRepresentationUtil.ZNRecordToJson(record), MediaType.APPLICATION_JSON);
}
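Unlike the fluent forCluster()/forParticipant() chain in the earlier snippets, this one passes the scope keys as varargs; they fill in the scope hierarchy in order (for a PARTICIPANT scope: cluster name, then instance name). A minimal sketch with placeholder names:

import java.util.List;
import java.util.Map;
import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

public class VarargsScopeExample {
  public static void main(String[] args) {
    // Placeholder ZooKeeper address, cluster name, and instance name.
    HelixAdmin admin = new ZKHelixAdmin("localhost:2181");
    // Equivalent to forCluster("myCluster").forParticipant("localhost_12918").
    HelixConfigScope scope =
        new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT, "myCluster", "localhost_12918").build();
    List<String> configKeys = admin.getConfigKeys(scope);
    Map<String, String> configs = admin.getConfig(scope, configKeys);
    System.out.println(configs);
    admin.close();
  }
}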