Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project pinot by linkedin.
From the class HelixSetupUtils, method createHelixClusterIfNeeded.
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
  final HelixAdmin admin = new ZKHelixAdmin(zkPath);
  final String segmentStateModelName = PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;
  if (admin.getClusters().contains(helixClusterName)) {
    LOGGER.info("cluster already exists ********************************************* ");
    if (isUpdateStateModel) {
      final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
      List<String> states = curStateModelDef.getStatesPriorityList();
      if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
        LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
        return;
      } else {
        LOGGER.info("Updating {} to add states for low level kafka consumers", segmentStateModelName);
        StateModelDefinition newStateModelDef = PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
        ZkClient zkClient = new ZkClient(zkPath);
        zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
        zkClient.setZkSerializer(new ZNRecordSerializer());
        HelixDataAccessor accessor = new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
        PropertyKey.Builder keyBuilder = accessor.keyBuilder();
        accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
        LOGGER.info("Completed updating statemodel {}", segmentStateModelName);
        zkClient.close();
      }
    }
    return;
  }
  LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName + " was not found ********************************************* ");
  admin.addCluster(helixClusterName, false);
  LOGGER.info("Enable auto join.");
  final HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();
  final Map<String, String> props = new HashMap<String, String>();
  props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
  // We need only one segment to be loaded at a time.
  props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));
  admin.setConfig(scope, props);
  LOGGER.info("Adding state model {} (with CONSUMING state) generated using {} **********************************************", segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());
  // If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
  // state model. But then the servers will never be asked to go to that STATE (whether they have the code
  // to handle it or not) until we complete the feature using low-level kafka consumers and turn the feature on.
  admin.addStateModelDef(helixClusterName, segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding state model definition named : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString() + " ********************************************** ");
  admin.addStateModelDef(helixClusterName, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
  LOGGER.info("Adding empty ideal state for Broker!");
  HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, helixClusterName, admin);
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
  admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
  initPropertyStorePath(helixClusterName, zkPath);
  LOGGER.info("New Cluster setup completed... ********************************************** ");
}
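For context, a minimal sketch of how this helper might be invoked when bootstrapping a controller; the ZooKeeper address, cluster name, and wrapper class are placeholders, imports are omitted, and HelixSetupUtils is assumed to be on the classpath.
public class ClusterBootstrapSketch {
  public static void main(String[] args) {
    // Placeholder connection details; in Pinot these come from the controller configuration.
    String zkPath = "localhost:2181";
    String helixClusterName = "PinotCluster";
    // Passing true asks the helper to add the CONSUMING state to an existing segment state model if it is missing.
    HelixSetupUtils.createHelixClusterIfNeeded(helixClusterName, zkPath, true);
  }
}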
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project pinot by linkedin.
From the class DeleteOverlappingSegmentsInPinot, method deleteOverlappingSegments.
public static boolean deleteOverlappingSegments(String zkUrl, String zkCluster, String tableName) {
  boolean updateSuccessful = false;
  if (!tableName.endsWith("_OFFLINE")) {
    tableName = tableName + "_OFFLINE";
  }
  ZkClient zkClient = new ZkClient(zkUrl);
  ZNRecordSerializer zkSerializer = new ZNRecordSerializer();
  zkClient.setZkSerializer(zkSerializer);
  BaseDataAccessor<ZNRecord> baseDataAccessor = new ZkBaseDataAccessor<>(zkClient);
  HelixDataAccessor helixDataAccessor = new ZKHelixDataAccessor(zkCluster, baseDataAccessor);
  Builder keyBuilder = helixDataAccessor.keyBuilder();
  PropertyKey idealStateKey = keyBuilder.idealStates(tableName);
  PropertyKey externalViewKey = keyBuilder.externalView(tableName);
  IdealState currentIdealState = helixDataAccessor.getProperty(idealStateKey);
  byte[] serializeIS = zkSerializer.serialize(currentIdealState.getRecord());
  String name = tableName + ".idealstate." + System.currentTimeMillis();
  File outputFile = new File("/tmp", name);
  try (FileOutputStream fileOutputStream = new FileOutputStream(outputFile)) {
    IOUtils.write(serializeIS, fileOutputStream);
  } catch (IOException e) {
    LOG.error("Exception in delete overlapping segments", e);
    return updateSuccessful;
  }
  LOG.info("Saved current idealstate to {}", outputFile);
  IdealState newIdealState;
  do {
    newIdealState = computeNewIdealStateAfterDeletingOverlappingSegments(helixDataAccessor, idealStateKey);
    LOG.info("Updating IdealState");
    updateSuccessful = helixDataAccessor.getBaseDataAccessor().set(idealStateKey.getPath(), newIdealState.getRecord(), newIdealState.getRecord().getVersion(), AccessOption.PERSISTENT);
    if (updateSuccessful) {
      int numSegmentsDeleted = currentIdealState.getPartitionSet().size() - newIdealState.getPartitionSet().size();
      LOG.info("Successfully updated IdealState: Removed segments: {}", numSegmentsDeleted);
    }
  } while (!updateSuccessful);
  try {
    while (true) {
      Thread.sleep(10000);
      ExternalView externalView = helixDataAccessor.getProperty(externalViewKey);
      IdealState idealState = helixDataAccessor.getProperty(idealStateKey);
      Set<String> evPartitionSet = externalView.getPartitionSet();
      Set<String> isPartitionSet = idealState.getPartitionSet();
      if (evPartitionSet.equals(isPartitionSet)) {
        LOG.info("Table {} has reached stable state, i.e. segments in external view match the idealstate", tableName);
        break;
      }
    }
  } catch (InterruptedException e) {
    e.printStackTrace();
  }
  return updateSuccessful;
}
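A minimal sketch of driving this cleanup as a standalone tool; the connection parameters are placeholders, the wrapper class is hypothetical, and imports of the Pinot class are omitted.
public class DeleteOverlappingSegmentsSketch {
  public static void main(String[] args) {
    // Placeholder ZooKeeper URL, Helix cluster, and table name; the method appends "_OFFLINE" if the suffix is missing.
    boolean updated = DeleteOverlappingSegmentsInPinot.deleteOverlappingSegments("localhost:2181", "PinotCluster", "myTable");
    System.out.println("IdealState update successful: " + updated);
  }
}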
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
From the class TestResetPartitionState, method clearStatusUpdate.
private void clearStatusUpdate(String clusterName, String instance, String resource, String partition) {
  // Clear status updates for the error partition so verify() will not fail on old errors.
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
  Builder keyBuilder = accessor.keyBuilder();
  LiveInstance liveInstance = accessor.getProperty(keyBuilder.liveInstance(instance));
  accessor.removeProperty(keyBuilder.stateTransitionStatus(instance, liveInstance.getSessionId(), resource, partition));
}
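A related sketch, assuming _gZkClient is an already-connected ZkClient as in the test above: the same accessor can enumerate the live instances and clear a partition's status updates on each of them. clusterName, resource, and partition are placeholders, and imports match the surrounding test.
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(_gZkClient));
Builder keyBuilder = accessor.keyBuilder();
List<LiveInstance> liveInstances = accessor.getChildValues(keyBuilder.liveInstances());
for (LiveInstance liveInstance : liveInstances) {
  // Drop any stale state-transition status updates for this resource/partition on the live participant.
  accessor.removeProperty(keyBuilder.stateTransitionStatus(liveInstance.getInstanceName(), liveInstance.getSessionId(), resource, partition));
}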
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
From the class ControllerResource, method getControllerRepresentation.
StringRepresentation getControllerRepresentation(String clusterName) throws JsonGenerationException, JsonMappingException, IOException {
  Builder keyBuilder = new PropertyKey.Builder(clusterName);
  ZkClient zkClient = ResourceUtil.getAttributeFromCtx(getContext(), ResourceUtil.ContextKey.ZKCLIENT);
  ZKHelixDataAccessor accessor = new ZKHelixDataAccessor(clusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
  ZNRecord record = null;
  LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
  if (leader != null) {
    record = leader.getRecord();
  } else {
    record = new ZNRecord("");
    DateFormat formatter = new SimpleDateFormat("yyyyMMdd-HHmmss.SSSSSS");
    String time = formatter.format(new Date());
    Map<String, String> contentMap = new TreeMap<String, String>();
    contentMap.put("AdditionalInfo", "No leader exists");
    record.setMapField(Level.HELIX_INFO + "-" + time, contentMap);
  }
  boolean paused = (accessor.getProperty(keyBuilder.pause()) == null ? false : true);
  record.setSimpleField(PropertyType.PAUSE.toString(), "" + paused);
  String retVal = ClusterRepresentationUtil.ZNRecordToJson(record);
  StringRepresentation representation = new StringRepresentation(retVal, MediaType.APPLICATION_JSON);
  return representation;
}
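Outside the REST resource, the same leader and pause checks can be made directly against ZooKeeper; a minimal sketch with a placeholder ZooKeeper address and cluster name.
ZkClient zkClient = new ZkClient("localhost:2181");
zkClient.setZkSerializer(new ZNRecordSerializer());
ZKHelixDataAccessor accessor = new ZKHelixDataAccessor("MyCluster", new ZkBaseDataAccessor<ZNRecord>(zkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
LiveInstance leader = accessor.getProperty(keyBuilder.controllerLeader());
boolean paused = accessor.getProperty(keyBuilder.pause()) != null;
// The leader znode is absent when no controller currently holds leadership.
System.out.println("Controller leader: " + (leader == null ? "none" : leader.getInstanceName()) + ", paused: " + paused);
zkClient.close();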
Use of org.apache.helix.manager.zk.ZKHelixDataAccessor in project helix by apache.
From the class TestClusterAccessor, method testEnableDisableMaintenanceMode.
@Test(dependsOnMethods = "testGetClusterConfig")
public void testEnableDisableMaintenanceMode() {
  System.out.println("Start test :" + TestHelper.getTestMethodName());
  String cluster = _clusters.iterator().next();
  String reason = "Test reason";
  HelixDataAccessor accessor = new ZKHelixDataAccessor(cluster, _baseAccessor);
  post("clusters/" + cluster, ImmutableMap.of("command", "enableMaintenanceMode"), Entity.entity(reason, MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
  MaintenanceSignal signal = accessor.getProperty(accessor.keyBuilder().maintenance());
  Assert.assertNotNull(signal);
  Assert.assertEquals(reason, signal.getReason());
  post("clusters/" + cluster, ImmutableMap.of("command", "disableMaintenanceMode"), Entity.entity(new String(), MediaType.APPLICATION_JSON_TYPE), Response.Status.OK.getStatusCode());
  Assert.assertNull(accessor.getProperty(accessor.keyBuilder().maintenance()));
}
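The maintenance signal can also be toggled without going through the REST API; a hedged sketch reusing the accessor and cluster from the test above, assuming this Helix version exposes HelixAdmin#enableMaintenanceMode and that ZK_ADDR points at the same ZooKeeper ensemble.
HelixAdmin admin = new ZKHelixAdmin(ZK_ADDR); // ZK_ADDR is a placeholder for the test ZooKeeper address
admin.enableMaintenanceMode(cluster, true);
MaintenanceSignal signal = accessor.getProperty(accessor.keyBuilder().maintenance());
System.out.println("Cluster in maintenance mode: " + (signal != null));
admin.enableMaintenanceMode(cluster, false);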