Example 11 with IdealState

use of org.apache.helix.model.IdealState in project pinot by linkedin.

In the class PinotHelixResourceManager, the method addTable:

/**
   * Table APIs
   */
public void addTable(AbstractTableConfig config) throws JsonGenerationException, JsonMappingException, IOException {
    TenantConfig tenantConfig = null;
    TableType type = TableType.valueOf(config.getTableType().toUpperCase());
    if (isSingleTenantCluster()) {
        tenantConfig = new TenantConfig();
        tenantConfig.setBroker(ControllerTenantNameBuilder.getBrokerTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
        switch(type) {
            case OFFLINE:
                tenantConfig.setServer(ControllerTenantNameBuilder.getOfflineTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
                break;
            case REALTIME:
                tenantConfig.setServer(ControllerTenantNameBuilder.getRealtimeTenantNameForTenant(ControllerTenantNameBuilder.DEFAULT_TENANT_NAME));
                break;
            default:
                throw new RuntimeException("Unsupported table type");
        }
        config.setTenantConfig(tenantConfig);
    } else {
        tenantConfig = config.getTenantConfig();
        if (tenantConfig.getBroker() == null || tenantConfig.getServer() == null) {
            throw new RuntimeException("missing tenant configs");
        }
    }
    SegmentsValidationAndRetentionConfig segmentsConfig = config.getValidationConfig();
    switch(type) {
        case OFFLINE:
            final String offlineTableName = config.getTableName();
            // now lets build an ideal state
            LOGGER.info("building empty ideal state for table : " + offlineTableName);
            final IdealState offlineIdealState = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(offlineTableName, Integer.parseInt(segmentsConfig.getReplication()));
            LOGGER.info("adding table via the admin");
            _helixAdmin.addResource(_helixClusterName, offlineTableName, offlineIdealState);
            LOGGER.info("successfully added the table : " + offlineTableName + " to the cluster");
            // lets add table configs
            ZKMetadataProvider.setOfflineTableConfig(_propertyStore, offlineTableName, AbstractTableConfig.toZnRecord(config));
            _propertyStore.create(ZKMetadataProvider.constructPropertyStorePathForResource(offlineTableName), new ZNRecord(offlineTableName), AccessOption.PERSISTENT);
            break;
        case REALTIME:
            final String realtimeTableName = config.getTableName();
            // lets add table configs
            ZKMetadataProvider.setRealtimeTableConfig(_propertyStore, realtimeTableName, AbstractTableConfig.toZnRecord(config));
            /*
         * PinotRealtimeSegmentManager sets up watches on table and segment path. When a table gets created,
         * it expects the INSTANCE path in propertystore to be set up so that it can get the kafka group ID and
         * create (high-level consumer) segments for that table.
         * So, we need to set up the instance first, before adding the table resource for HLC new table creation.
         *
         * For low-level consumers, the order is to create the resource first, and set up the propertystore with segments
         * and then tweak the idealstate to add those segments.
         *
         * We also need to support the case when a high-level consumer already exists for a table and we are adding
         * the low-level consumers.
         */
            IndexingConfig indexingConfig = config.getIndexingConfig();
            ensureRealtimeClusterIsSetUp(config, realtimeTableName, indexingConfig);
            LOGGER.info("Successfully added or updated the table {} ", realtimeTableName);
            break;
        default:
            throw new RuntimeException("Unsupported table type");
    }
    handleBrokerResource(config);
}
Also used : IndexingConfig(com.linkedin.pinot.common.config.IndexingConfig) TableType(com.linkedin.pinot.common.utils.CommonConstants.Helix.TableType) TenantConfig(com.linkedin.pinot.common.config.TenantConfig) SegmentsValidationAndRetentionConfig(com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig) IdealState(org.apache.helix.model.IdealState) ZNRecord(org.apache.helix.ZNRecord)
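
PinotTableIdealStateBuilder.buildEmptyIdealStateFor is not reproduced on this page. As a rough illustration of what an "empty ideal state" amounts to in plain Helix terms, the sketch below builds a CUSTOMIZED-mode IdealState with a replica count and no partitions yet; the helper name and the details are assumptions, not the actual Pinot implementation.

import org.apache.helix.model.IdealState;
import org.apache.helix.model.IdealState.RebalanceMode;
import com.linkedin.pinot.controller.helix.core.PinotHelixSegmentOnlineOfflineStateModelGenerator;

// Sketch only: an "empty" ideal state for a new offline table, expressed with the plain Helix API.
// The real PinotTableIdealStateBuilder.buildEmptyIdealStateFor may differ in its details.
public static IdealState buildEmptyIdealStateSketch(String tableName, int numReplicas) {
    IdealState idealState = new IdealState(tableName);
    // Pinot assigns segments to servers itself, so CUSTOMIZED rebalance mode is assumed here.
    idealState.setRebalanceMode(RebalanceMode.CUSTOMIZED);
    idealState.setReplicas(Integer.toString(numReplicas));
    // No segments exist yet; partitions are added later as segments are uploaded.
    idealState.setNumPartitions(0);
    idealState.setStateModelDefRef(PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL);
    return idealState;
}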

Example 12 with IdealState

use of org.apache.helix.model.IdealState in project pinot by linkedin.

In the class PinotHelixResourceManager, the method isServerTenantDeletable:

public boolean isServerTenantDeletable(String tenantName) {
    Set<String> taggedInstances = new HashSet<String>(_helixAdmin.getInstancesInClusterWithTag(_helixClusterName, ControllerTenantNameBuilder.getOfflineTenantNameForTenant(tenantName)));
    taggedInstances.addAll(_helixAdmin.getInstancesInClusterWithTag(_helixClusterName, ControllerTenantNameBuilder.getRealtimeTenantNameForTenant(tenantName)));
    for (String tableName : getAllTableNames()) {
        if (tableName.equals(CommonConstants.Helix.BROKER_RESOURCE_INSTANCE)) {
            continue;
        }
        IdealState tableIdealState = _helixAdmin.getResourceIdealState(_helixClusterName, tableName);
        for (String partition : tableIdealState.getPartitionSet()) {
            for (String instance : tableIdealState.getInstanceSet(partition)) {
                if (taggedInstances.contains(instance)) {
                    return false;
                }
            }
        }
    }
    return true;
}
Also used : IdealState(org.apache.helix.model.IdealState) HashSet(java.util.HashSet)
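
The nested loop over getPartitionSet() and getInstanceSet(partition) recurs in several of these examples. A small helper like the following sketch (name illustrative) could collect every instance an IdealState references:

import java.util.HashSet;
import java.util.Set;
import org.apache.helix.model.IdealState;

// Sketch of a shared helper: all instances that appear in any partition of an ideal state.
private static Set<String> instancesReferencedBy(IdealState idealState) {
    Set<String> instances = new HashSet<String>();
    for (String partition : idealState.getPartitionSet()) {
        instances.addAll(idealState.getInstanceSet(partition));
    }
    return instances;
}

With such a helper, the deletability check reduces to testing that the tagged-instance set and the referenced-instance set are disjoint (for example via java.util.Collections.disjoint).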

Example 13 with IdealState

use of org.apache.helix.model.IdealState in project pinot by linkedin.

In the class PinotHelixResourceManager, the method isBrokerTenantDeletable:

public boolean isBrokerTenantDeletable(String tenantName) {
    String brokerTag = ControllerTenantNameBuilder.getBrokerTenantNameForTenant(tenantName);
    Set<String> taggedInstances = new HashSet<String>(_helixAdmin.getInstancesInClusterWithTag(_helixClusterName, brokerTag));
    String brokerName = CommonConstants.Helix.BROKER_RESOURCE_INSTANCE;
    IdealState brokerIdealState = _helixAdmin.getResourceIdealState(_helixClusterName, brokerName);
    for (String partition : brokerIdealState.getPartitionSet()) {
        for (String instance : brokerIdealState.getInstanceSet(partition)) {
            if (taggedInstances.contains(instance)) {
                return false;
            }
        }
    }
    return true;
}
Also used : IdealState(org.apache.helix.model.IdealState) HashSet(java.util.HashSet)
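
A caller that wants to retire a tenant would typically combine both checks before touching any instance tags. The fragment below is a hypothetical caller-side guard (the import path is assumed from the generator classes listed elsewhere on this page, and the actual tenant-removal step is deliberately left out):

import com.linkedin.pinot.controller.helix.core.PinotHelixResourceManager;

// Hypothetical guard: only the two deletability checks are real methods of the class shown above.
public static boolean canDeleteTenant(PinotHelixResourceManager resourceManager, String tenantName) {
    return resourceManager.isBrokerTenantDeletable(tenantName)
        && resourceManager.isServerTenantDeletable(tenantName);
}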

Example 14 with IdealState

use of org.apache.helix.model.IdealState in project pinot by linkedin.

In the class PinotHelixResourceManager, the method getInstanceToSegmentsInATableMap:

public Map<String, List<String>> getInstanceToSegmentsInATableMap(String tableName) {
    Map<String, List<String>> instancesToSegmentsMap = new HashMap<String, List<String>>();
    IdealState is = _helixAdmin.getResourceIdealState(_helixClusterName, tableName);
    Set<String> segments = is.getPartitionSet();
    for (String segment : segments) {
        Set<String> instances = is.getInstanceSet(segment);
        for (String instance : instances) {
            if (instancesToSegmentsMap.containsKey(instance)) {
                instancesToSegmentsMap.get(instance).add(segment);
            } else {
                List<String> a = new ArrayList<String>();
                a.add(segment);
                instancesToSegmentsMap.put(instance, a);
            }
        }
    }
    return instancesToSegmentsMap;
}
Also used : HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) ArrayList(java.util.ArrayList) IdealState(org.apache.helix.model.IdealState)
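
The if/else accumulation above predates Map.computeIfAbsent; on Java 8 and later the same mapping can be built more compactly. A sketch with an illustrative method name, using the same IdealState calls as the example:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.helix.model.IdealState;

// Equivalent accumulation using computeIfAbsent (Java 8+); behavior matches the loop above.
public static Map<String, List<String>> instanceToSegments(IdealState idealState) {
    Map<String, List<String>> instancesToSegmentsMap = new HashMap<>();
    for (String segment : idealState.getPartitionSet()) {
        for (String instance : idealState.getInstanceSet(segment)) {
            instancesToSegmentsMap.computeIfAbsent(instance, k -> new ArrayList<>()).add(segment);
        }
    }
    return instancesToSegmentsMap;
}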

Example 15 with IdealState

use of org.apache.helix.model.IdealState in project pinot by linkedin.

In the class HelixSetupUtils, the method createHelixClusterIfNeeded:

public static void createHelixClusterIfNeeded(String helixClusterName, String zkPath, boolean isUpdateStateModel) {
    final HelixAdmin admin = new ZKHelixAdmin(zkPath);
    final String segmentStateModelName = PinotHelixSegmentOnlineOfflineStateModelGenerator.PINOT_SEGMENT_ONLINE_OFFLINE_STATE_MODEL;
    if (admin.getClusters().contains(helixClusterName)) {
        LOGGER.info("cluster already exists ********************************************* ");
        if (isUpdateStateModel) {
            final StateModelDefinition curStateModelDef = admin.getStateModelDef(helixClusterName, segmentStateModelName);
            List<String> states = curStateModelDef.getStatesPriorityList();
            if (states.contains(PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE)) {
                LOGGER.info("State model {} already updated to contain CONSUMING state", segmentStateModelName);
                return;
            } else {
                LOGGER.info("Updating {} to add states for low level kafka consumers", segmentStateModelName);
                StateModelDefinition newStateModelDef = PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition();
                ZkClient zkClient = new ZkClient(zkPath);
                zkClient.waitUntilConnected(20, TimeUnit.SECONDS);
                zkClient.setZkSerializer(new ZNRecordSerializer());
                HelixDataAccessor accessor = new ZKHelixDataAccessor(helixClusterName, new ZkBaseDataAccessor<ZNRecord>(zkClient));
                PropertyKey.Builder keyBuilder = accessor.keyBuilder();
                accessor.setProperty(keyBuilder.stateModelDef(segmentStateModelName), newStateModelDef);
                LOGGER.info("Completed updating statemodel {}", segmentStateModelName);
                zkClient.close();
            }
        }
        return;
    }
    LOGGER.info("Creating a new cluster, as the helix cluster : " + helixClusterName + " was not found ********************************************* ");
    admin.addCluster(helixClusterName, false);
    LOGGER.info("Enable auto join.");
    final HelixConfigScope scope = new HelixConfigScopeBuilder(ConfigScopeProperty.CLUSTER).forCluster(helixClusterName).build();
    final Map<String, String> props = new HashMap<String, String>();
    props.put(ZKHelixManager.ALLOW_PARTICIPANT_AUTO_JOIN, String.valueOf(true));
    // We need only one segment to be loaded at a time
    props.put(MessageType.STATE_TRANSITION + "." + HelixTaskExecutor.MAX_THREADS, String.valueOf(1));
    admin.setConfig(scope, props);
    LOGGER.info("Adding state model {} (with CONSUMED state) generated using {} **********************************************", segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.class.toString());
    // If this is a fresh cluster we are creating, then the cluster will see the CONSUMING state in the
    // state model. But then the servers will never be asked to go to that STATE (whether they have the code
    // to handle it or not) until we complete the feature using low-level kafka consumers and turn the feature on.
    admin.addStateModelDef(helixClusterName, segmentStateModelName, PinotHelixSegmentOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
    LOGGER.info("Adding state model definition named : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL + " generated using : " + PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.class.toString() + " ********************************************** ");
    admin.addStateModelDef(helixClusterName, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.PINOT_BROKER_RESOURCE_ONLINE_OFFLINE_STATE_MODEL, PinotHelixBrokerResourceOnlineOfflineStateModelGenerator.generatePinotStateModelDefinition());
    LOGGER.info("Adding empty ideal state for Broker!");
    HelixHelper.updateResourceConfigsFor(new HashMap<String, String>(), CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, helixClusterName, admin);
    IdealState idealState = PinotTableIdealStateBuilder.buildEmptyIdealStateForBrokerResource(admin, helixClusterName);
    admin.setResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE, idealState);
    initPropertyStorePath(helixClusterName, zkPath);
    LOGGER.info("New Cluster setup completed... ********************************************** ");
}
Also used : ZkClient (org.apache.helix.manager.zk.ZkClient) HashMap (java.util.HashMap) HelixConfigScopeBuilder (org.apache.helix.model.builder.HelixConfigScopeBuilder) HelixAdmin (org.apache.helix.HelixAdmin) ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin) IdealState (org.apache.helix.model.IdealState) ZKHelixDataAccessor (org.apache.helix.manager.zk.ZKHelixDataAccessor) HelixDataAccessor (org.apache.helix.HelixDataAccessor) StateModelDefinition (org.apache.helix.model.StateModelDefinition) PinotHelixSegmentOnlineOfflineStateModelGenerator (com.linkedin.pinot.controller.helix.core.PinotHelixSegmentOnlineOfflineStateModelGenerator) HelixConfigScope (org.apache.helix.model.HelixConfigScope) PinotHelixBrokerResourceOnlineOfflineStateModelGenerator (com.linkedin.pinot.controller.helix.core.PinotHelixBrokerResourceOnlineOfflineStateModelGenerator) ZNRecord (org.apache.helix.ZNRecord) PropertyKey (org.apache.helix.PropertyKey) ZNRecordSerializer (org.apache.helix.manager.zk.ZNRecordSerializer)
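
A controller would call this method during startup and can then read the broker ideal state back as a sanity check. The sketch below uses a placeholder cluster name and ZooKeeper address, passes isUpdateStateModel=false, and omits imports for the Pinot classes (HelixSetupUtils, CommonConstants) shown elsewhere on this page:

import org.apache.helix.HelixAdmin;
import org.apache.helix.manager.zk.ZKHelixAdmin;
import org.apache.helix.model.IdealState;

// Sketch only: create the cluster if needed, then verify the broker resource ideal state exists.
// "PinotCluster" and "localhost:2181" are placeholders, not values taken from the examples above.
public static void setUpControllerCluster() {
    String helixClusterName = "PinotCluster";
    String zkPath = "localhost:2181";
    HelixSetupUtils.createHelixClusterIfNeeded(helixClusterName, zkPath, false);
    HelixAdmin admin = new ZKHelixAdmin(zkPath);
    IdealState brokerIdealState =
            admin.getResourceIdealState(helixClusterName, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
    if (brokerIdealState == null) {
        throw new IllegalStateException("Broker resource ideal state was not created");
    }
}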

Aggregations (usage counts)

IdealState (org.apache.helix.model.IdealState): 65
ArrayList (java.util.ArrayList): 20
Test (org.testng.annotations.Test): 20
ZNRecord (org.apache.helix.ZNRecord): 15
ExternalView (org.apache.helix.model.ExternalView): 15
HelixAdmin (org.apache.helix.HelixAdmin): 14
HashMap (java.util.HashMap): 11
LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName): 10
AbstractTableConfig (com.linkedin.pinot.common.config.AbstractTableConfig): 9
HashSet (java.util.HashSet): 9
ControllerMetrics (com.linkedin.pinot.common.metrics.ControllerMetrics): 8
MetricsRegistry (com.yammer.metrics.core.MetricsRegistry): 8
Map (java.util.Map): 7
BeforeTest (org.testng.annotations.BeforeTest): 7
PropertyKey (org.apache.helix.PropertyKey): 6
LLCRealtimeSegmentZKMetadata (com.linkedin.pinot.common.metadata.segment.LLCRealtimeSegmentZKMetadata): 5
List (java.util.List): 5
HelixDataAccessor (org.apache.helix.HelixDataAccessor): 5
IOException (java.io.IOException): 4
ZKHelixAdmin (org.apache.helix.manager.zk.ZKHelixAdmin): 4