Use of org.apache.helix.manager.zk.ZNRecordSerializer in project pinot by LinkedIn.
In the class HelixHelper, method updateIdealState:
/**
 * Updates the ideal state, retrying if necessary in case of concurrent updates to the ideal state.
 *
 * @param helixManager The HelixManager used to interact with the Helix cluster
 * @param resourceName The resource for which to update the ideal state
 * @param updater A function that returns an updated ideal state given an input ideal state
 * @param policy The retry policy controlling how often the update is reattempted on contention
 * @throws RuntimeException if the ideal state could not be updated within the retry policy's limits
 */
public static void updateIdealState(final HelixManager helixManager, final String resourceName, final Function<IdealState, IdealState> updater, RetryPolicy policy) {
boolean successful = policy.attempt(new Callable<Boolean>() {
@Override
public Boolean call() {
HelixDataAccessor dataAccessor = helixManager.getHelixDataAccessor();
// Build the property key once; it supplies both the ZK path and the read below
// (the original built the identical key twice).
PropertyKey propertyKey = dataAccessor.keyBuilder().idealStates(resourceName);
String path = propertyKey.getPath();
// Fetch the current ideal state
IdealState idealState = dataAccessor.getProperty(propertyKey);
// Make a deep copy (serialize/deserialize round-trip) of the ideal state fetched above to pass
// to the updater, instead of querying ZK again, as the state may change between the queries.
// Keeping the original untouched lets us detect no-op updates and do a version-checked write.
ZNRecordSerializer znRecordSerializer = new ZNRecordSerializer();
IdealState idealStateCopy = new IdealState((ZNRecord) znRecordSerializer.deserialize(znRecordSerializer.serialize(idealState.getRecord())));
IdealState updatedIdealState;
try {
updatedIdealState = updater.apply(idealStateCopy);
} catch (Exception e) {
LOGGER.error("Caught exception while updating ideal state", e);
return false;
}
// If there are changes to apply, apply them. Null-check first so a null updater result can
// never be dereferenced in the branch below.
if (updatedIdealState != null && !EqualityUtils.isEqual(idealState, updatedIdealState)) {
BaseDataAccessor<ZNRecord> baseDataAccessor = dataAccessor.getBaseDataAccessor();
boolean success;
// If the ideal state is large enough, enable compression
if (MAX_PARTITION_COUNT_IN_UNCOMPRESSED_IDEAL_STATE < updatedIdealState.getPartitionSet().size()) {
updatedIdealState.getRecord().setBooleanField("enableCompression", true);
}
try {
// Version-checked (compare-and-set) write: if another writer bumped the version since
// our read, this fails and we retry with a fresh read on the next attempt.
success = baseDataAccessor.set(path, updatedIdealState.getRecord(), idealState.getRecord().getVersion(), AccessOption.PERSISTENT);
} catch (Exception e) {
boolean idealStateIsCompressed = updatedIdealState.getRecord().getBooleanField("enableCompression", false);
LOGGER.warn("Caught exception while updating ideal state for resource {} (compressed={}), retrying.", resourceName, idealStateIsCompressed, e);
return false;
}
if (success) {
return true;
} else {
LOGGER.warn("Failed to update ideal state for resource {}, retrying.", resourceName);
return false;
}
} else {
// Updater returned null or produced an identical state; nothing to write.
LOGGER.warn("Idempotent or null ideal state update for resource {}, skipping update.", resourceName);
return true;
}
}
});
if (!successful) {
throw new RuntimeException("Failed to update ideal state for resource " + resourceName);
}
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project pinot by LinkedIn.
In the class HelixSetupUtils, method createHelixClusterIfNeeded:
/**
 * Creates the Helix cluster for Pinot if it does not already exist, registering the segment and
 * broker-resource state models and an empty broker ideal state. If the cluster already exists and
 * {@code isUpdateStateModel} is true, upgrades the segment state model to include the CONSUMING
 * state when it is missing.
 *
 * @param helixClusterName Name of the Helix cluster to create or update
 * @param zkPath ZooKeeper connect string
 * @param isUpdateStateModel Whether to upgrade an existing segment state model with CONSUMING
 */
public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
final HelixAdmin admin = new ZKHelixAdmin(zkPath);
final String segmentStateModelName = PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;
if (admin.getClusters().contains(helixClusterName)) {
LOGGER.info("cluster already exists ********************************************* ");
if (isUpdateStateModel) {
final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
List<String> states = curStateModelDef.getStatesPriorityList();
if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
return;
} else {
LOGGER.info("Updating {} to add states for low level kafka consumers", segmentStateModelName);
StateModelDefinition newStateModelDef = PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
ZkClient zkClient = new ZkClient(zkPath);
// Ensure the client is closed even if the state model update throws (resource leak in the
// original: close() was only reached on the success path).
try {
zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
zkClient.setZkSerializer(new ZNRecordSerializer());
HelixDataAccessor accessor = new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
PropertyKey.Builder keyBuilder = accessor.keyBuilder();
accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
LOGGER.info("Completed updating statemodel {}", segmentStateModelName);
} finally {
zkClient.close();
}
}
}
return;
}
LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName + " was not found ********************************************* ");
admin.addCluster(helixClusterName, false);
LOGGER.info("Enable auto join.");
final HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();
final Map<String, String> props = new HashMap<String, String>();
props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
//we need only one segment to be loaded at a time
props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));
admin.setConfig(scope, props);
LOGGER.info("Adding state model {} (with CONSUMED state) generated using {} **********************************************", segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());
// If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
// state model. But then the servers will never be asked to go to that STATE (whether they have the code
// to handle it or not) until we complete the feature using low-level kafka consumers and turn the feature on.
admin.addStateModelDef(helixClusterName, segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
LOGGER.info("Adding state model definition named : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString() + " ********************************************** ");
admin.addStateModelDef(helixClusterName, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
LOGGER.info("Adding empty ideal state for Broker!");
HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, helixClusterName, admin);
IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
initPropertyStorePath(helixClusterName, zkPath);
LOGGER.info("New Cluster setup completed... ********************************************** ");
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project pinot by LinkedIn.
In the class HelixSetupUtils, method initPropertyStorePath:
/**
 * Creates the initial property store layout (cluster/table/instance config roots, schemas and
 * segments) for a newly created Helix cluster.
 *
 * @param helixClusterName Name of the Helix cluster being initialized
 * @param zkPath ZooKeeper connect string
 */
private static void initPropertyStorePath(String helixClusterName, String zkPath) {
String propertyStorePath = PropertyPathConfig.getPath(PropertyType.PROPERTYSTORE, helixClusterName);
ZkHelixPropertyStore<ZNRecord> propertyStore = new ZkHelixPropertyStore<ZNRecord>(zkPath, new ZNRecordSerializer(), propertyStorePath);
// Each well-known root is created as a persistent node holding an empty record, in this order
String[] rootPaths = {"/CONFIGS", "/CONFIGS/CLUSTER", "/CONFIGS/TABLE", "/CONFIGS/INSTANCE", "/SCHEMAS", "/SEGMENTS"};
for (String rootPath : rootPaths) {
propertyStore.create(rootPath, new ZNRecord(""), AccessOption.PERSISTENT);
}
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project pinot by LinkedIn.
In the class BenchmarkQueryEngine, method startPinot:
/**
 * Starts a full single-machine Pinot deployment (ZooKeeper, controller, broker and server), loads
 * the benchmark table's segments, then polls the external view until every segment is ONLINE.
 *
 * @throws Exception if the benchmark driver fails to start or a segment cannot be loaded
 */
@Setup
public void startPinot() throws Exception {
System.out.println("Using table name " + TABLE_NAME);
System.out.println("Using data directory " + DATA_DIRECTORY);
System.out.println("Starting pinot");
PerfBenchmarkDriverConf conf = new PerfBenchmarkDriverConf();
conf.setStartBroker(true);
conf.setStartController(true);
conf.setStartServer(true);
conf.setStartZookeeper(true);
conf.setUploadIndexes(false);
conf.setRunQueries(false);
conf.setServerInstanceSegmentTarDir(null);
conf.setServerInstanceDataDir(DATA_DIRECTORY);
conf.setConfigureResources(false);
_perfBenchmarkDriver = new PerfBenchmarkDriver(conf);
_perfBenchmarkDriver.run();
Set<String> tables = new HashSet<String>();
File[] segments = new File(DATA_DIRECTORY, TABLE_NAME).listFiles();
// listFiles() returns null when the directory is missing or unreadable; fail fast with a clear
// message instead of an NPE in the loop below.
if (segments == null) {
throw new IllegalStateException("No segment directories found under " + new File(DATA_DIRECTORY, TABLE_NAME).getAbsolutePath());
}
for (File segmentDir : segments) {
SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir);
// Configure each table once, the first time one of its segments is seen
if (!tables.contains(segmentMetadata.getTableName())) {
_perfBenchmarkDriver.configureTable(segmentMetadata.getTableName());
tables.add(segmentMetadata.getTableName());
}
System.out.println("Adding segment " + segmentDir.getAbsolutePath());
_perfBenchmarkDriver.addSegment(segmentMetadata);
}
ZkClient client = new ZkClient("localhost:2191", 10000, 10000, new ZNRecordSerializer());
// Close the ZK client whether or not the polling loop completes (leak in the original: the
// client was never closed).
try {
ZNRecord record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
// Poll the external view until every segment has at least one ONLINE replica
while (true) {
System.out.println("record = " + record);
Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
int onlineSegmentCount = 0;
for (Map<String, String> instancesAndStates : record.getMapFields().values()) {
for (String state : instancesAndStates.values()) {
if (state.equals("ONLINE")) {
onlineSegmentCount++;
break;
}
}
}
System.out.println(onlineSegmentCount + " segments online out of " + segments.length);
if (onlineSegmentCount == segments.length) {
break;
}
record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
}
} finally {
client.close();
}
ranOnce = false;
// Warm-up query so the first measured iteration does not pay one-time setup costs
System.out.println(_perfBenchmarkDriver.postQuery(QUERY_PATTERNS[queryPattern], optimizationFlags).toString(2));
}
Use of org.apache.helix.manager.zk.ZNRecordSerializer in project pinot by LinkedIn.
In the class LLCRealtimeClusterIntegrationTest, method testSegmentFlushSize:
/**
 * Verifies that every realtime segment's metadata in the property store carries the expected
 * flush threshold size (total row threshold divided evenly across Kafka partitions).
 */
@Test
public void testSegmentFlushSize() {
ZkClient zkClient = new ZkClient(ZkStarter.DEFAULT_ZK_STR, 10000);
// Close the client even if an assertion fails (the original leaked it on test failure)
try {
zkClient.setZkSerializer(new ZNRecordSerializer());
String zkPath = "/LLCRealtimeClusterIntegrationTest/PROPERTYSTORE/SEGMENTS/mytable_REALTIME";
List<String> segmentNames = zkClient.getChildren(zkPath);
for (String segmentName : segmentNames) {
ZNRecord znRecord = zkClient.<ZNRecord>readData(zkPath + "/" + segmentName);
Assert.assertEquals(znRecord.getSimpleField(CommonConstants.Segment.FLUSH_THRESHOLD_SIZE), Integer.toString(ROW_COUNT_FOR_REALTIME_SEGMENT_FLUSH / KAFKA_PARTITION_COUNT), "Segment " + segmentName + " does not have the expected flush size");
}
} finally {
zkClient.close();
}
}
Aggregations