use of org.apache.helix.model.HelixConfigScope in project pinot by linkedin.
the class HelixHelper method deleteResourcePropertyFromHelix.
public static void deleteResourcePropertyFromHelix(HelixAdmin admin, String clusterName, String resourceName, String configKey) {
  final List<String> keys = new ArrayList<String>();
  keys.add(configKey);
  final HelixConfigScope scope = getResourceScopeFor(clusterName, resourceName);
  admin.removeConfig(scope, keys);
}
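The snippet above calls a private helper, getResourceScopeFor, that is not shown on this page. A minimal sketch of how such a resource-level scope is typically built with Helix's HelixConfigScopeBuilder follows; the exact body in Pinot's HelixHelper may differ.

import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

// Sketch of the getResourceScopeFor helper used above: builds a RESOURCE-level
// config scope from the cluster name and the resource name.
private static HelixConfigScope getResourceScopeFor(String clusterName, String resourceName) {
  return new HelixConfigScopeBuilder(ConfigScopeProperty.RESOURCE)
      .forCluster(clusterName)
      .forResource(resourceName)
      .build();
}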
use of org.apache.helix.model.HelixConfigScope in project pinot by linkedin.
the class HelixSetupUtils method createHelixClusterIfNeeded.
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
  final HelixAdmin admin = new ZKHelixAdmin(zkPath);
  final String segmentStateModelName = PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;
  if (admin.getClusters().contains(helixClusterName)) {
    LOGGER.info("cluster already exists ********************************************* ");
    if (isUpdateStateModel) {
      final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
      List<String> states = curStateModelDef.getStatesPriorityList();
      if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
        LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
        return;
      } else {
        LOGGER.info("Updating {} to add states for low level kafka consumers", segmentStateModelName);
        StateModelDefinition newStateModelDef = PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
        ZkClient zkClient = new ZkClient(zkPath);
        zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
        zkClient.setZkSerializer(new ZNRecordSerializer());
        HelixDataAccessor accessor = new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
        PropertyKey.Builder keyBuilder = accessor.keyBuilder();
        accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
        LOGGER.info("Completed updating statemodel {}", segmentStateModelName);
        zkClient.close();
      }
    }
    return;
  }
  LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName + " was not found ********************************************* ");
  admin.addCluster(helixClusterName, false);
  LOGGER.info("Enable auto join.");
  final HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();
  final Map<String, String> props = new HashMap<String, String>();
  props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
  // we need only one segment to be loaded at a time
  props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));
  admin.setConfig(scope, props);
  LOGGER.info("Adding state model {} (with CONSUMED state) generated using {} **********************************************", segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());
  // If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
  // state model. But then the servers will never be asked to go to that STATE (whether they have the code
  // to handle it or not) until we complete the feature using low-level kafka consumers and turn the feature on.
  admin.addStateModelDef(helixClusterName, segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding state model definition named : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString() + " ********************************************** ");
  admin.addStateModelDef(helixClusterName, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding empty ideal state for Broker!");
  HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, helixClusterName, admin);
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
  admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
  initPropertyStorePath(helixClusterName, zkPath);
  LOGGER.info("New Cluster setup completed... ********************************************** ");
}
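A minimal sketch of how this setup method might be invoked at controller startup; the ZooKeeper address, cluster name, and flag below are placeholders, not values taken from the Pinot code.

// Hypothetical invocation; "localhost:2181" and "PinotCluster" are placeholder values.
public static void main(String[] args) {
  String zkPath = "localhost:2181"; // assumed ZooKeeper connect string
  String helixClusterName = "PinotCluster"; // assumed Helix cluster name
  boolean isUpdateStateModel = false; // skip the CONSUMING-state upgrade path
  HelixSetupUtils.createHelixClusterIfNeeded(helixClusterName, zkPath, isUpdateStateModel);
}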
use of org.apache.helix.model.HelixConfigScope in project pinot by linkedin.
the class HelixHelper method getResourceConfigsFor.
public static Map<String, String> getResourceConfigsFor(String clusterName, String resourceName, HelixAdmin admin) {
  final HelixConfigScope scope = getResourceScopeFor(clusterName, resourceName);
  final List<String> keys = admin.getConfigKeys(scope);
  return admin.getConfig(scope, keys);
}
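A short usage sketch, assuming an existing HelixAdmin handle named admin and a placeholder cluster name "PinotCluster", that reads every config key currently set on the broker resource and prints it:

// Sketch: dump the broker resource's current configs; cluster name and admin handle are assumed.
Map<String, String> resourceConfigs =
    HelixHelper.getResourceConfigsFor("PinotCluster", CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, admin);
for (Map.Entry<String, String> entry : resourceConfigs.entrySet()) {
  System.out.println(entry.getKey() + " = " + entry.getValue());
}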
use of org.apache.helix.model.HelixConfigScope in project pinot by linkedin.
the class HelixHelper method getInstanceConfigsMapFor.
public static Map<String, String> getInstanceConfigsMapFor(String instanceName, String clusterName, HelixAdmin admin) {
  final HelixConfigScope scope = getInstanceScopefor(clusterName, instanceName);
  final List<String> keys = admin.getConfigKeys(scope);
  return admin.getConfig(scope, keys);
}
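As with the resource variant, this relies on a private getInstanceScopefor helper that is not shown here. A sketch of how a participant-level scope is usually built with the Helix builder API; the actual helper in HelixHelper may differ.

import org.apache.helix.model.HelixConfigScope;
import org.apache.helix.model.HelixConfigScope.ConfigScopeProperty;
import org.apache.helix.model.builder.HelixConfigScopeBuilder;

// Sketch of the getInstanceScopefor helper used above: builds a PARTICIPANT-level
// config scope for a single instance in the cluster.
private static HelixConfigScope getInstanceScopefor(String clusterName, String instanceName) {
  return new HelixConfigScopeBuilder(ConfigScopeProperty.PARTICIPANT)
      .forCluster(clusterName)
      .forParticipant(instanceName)
      .build();
}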
use of org.apache.helix.model.HelixConfigScope in project pinot by linkedin.
the class HelixHelper method updateResourceConfigsFor.
public static void updateResourceConfigsFor(Map<String, String> newConfigs, String resourceName, String clusterName, HelixAdmin admin) {
  final HelixConfigScope scope = getResourceScopeFor(clusterName, resourceName);
  admin.setConfig(scope, newConfigs);
}
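A short usage sketch showing how a single key can be set on a resource; the resource name, config key, and value below are hypothetical examples, and admin is an existing HelixAdmin handle.

// Sketch: set (or overwrite) one config key on a resource; key and value are hypothetical.
Map<String, String> newConfigs = new HashMap<String, String>();
newConfigs.put("someConfigKey", "someValue");
HelixHelper.updateResourceConfigsFor(newConfigs, "myTable_OFFLINE", "PinotCluster", admin);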