Example 16 with AbstractTableConfig

Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

From the class AutoAddInvertedIndex, method runQueryStrategy.

private void runQueryStrategy() throws Exception {
    // Get all resources in cluster
    List<String> resourcesInCluster = _helixAdmin.getResourcesInCluster(_clusterName);
    for (String tableName : resourcesInCluster) {
        // Skip non-table resources
        if (!tableName.endsWith("_OFFLINE") && !tableName.endsWith("_REALTIME")) {
            continue;
        }
        // Skip tables that do not match the defined name pattern
        if (_tableNamePattern != null && !tableName.matches(_tableNamePattern)) {
            continue;
        }
        LOGGER.info("Table: {} matches the table name pattern: {}", tableName, _tableNamePattern);
        // Get the inverted index config
        AbstractTableConfig tableConfig = getTableConfig(tableName);
        IndexingConfig indexingConfig = tableConfig.getIndexingConfig();
        List<String> invertedIndexColumns = indexingConfig.getInvertedIndexColumns();
        boolean autoGeneratedInvertedIndex = indexingConfig.isAutoGeneratedInvertedIndex();
        // Handle auto-generated inverted index
        if (autoGeneratedInvertedIndex) {
            Preconditions.checkState(!invertedIndexColumns.isEmpty(), "Auto-generated inverted index list is empty");
            // NEW mode, skip
            if (_mode == Mode.NEW) {
                LOGGER.info("Table: {}, skip adding inverted index because it has auto-generated inverted index and under NEW mode", tableName);
                continue;
            }
            // REMOVE mode, remove the inverted index and update
            if (_mode == Mode.REMOVE) {
                invertedIndexColumns.clear();
                indexingConfig.setAutoGeneratedInvertedIndex(false);
                if (updateIndexConfig(tableName, tableConfig)) {
                    LOGGER.info("Table: {}, removed auto-generated inverted index", tableName);
                } else {
                    LOGGER.error("Table: {}, failed to remove auto-generated inverted index", tableName);
                }
                continue;
            }
            // REFRESH mode, remove auto-generated inverted index
            if (_mode == Mode.REFRESH) {
                invertedIndexColumns.clear();
            }
        } else {
            // Handle null inverted index columns
            if (invertedIndexColumns == null) {
                invertedIndexColumns = new ArrayList<>();
                indexingConfig.setInvertedIndexColumns(invertedIndexColumns);
            }
            // Remove empty strings
            int emptyStringIndex;
            while ((emptyStringIndex = invertedIndexColumns.indexOf("")) != -1) {
                invertedIndexColumns.remove(emptyStringIndex);
            }
            // Skip non-empty non-auto-generated inverted index
            if (!invertedIndexColumns.isEmpty()) {
                LOGGER.info("Table: {}, skip adding inverted index because it has non-auto-generated inverted index", tableName);
                continue;
            }
        }
        // Skip tables without a schema
        Schema tableSchema = getTableSchema(tableName);
        if (tableSchema == null) {
            LOGGER.info("Table: {}, skip adding inverted index because it does not have a schema", tableName);
            continue;
        }
        // Skip tables without dimensions
        List<String> dimensionNames = tableSchema.getDimensionNames();
        if (dimensionNames.size() == 0) {
            LOGGER.info("Table: {}, skip adding inverted index because it does not have any dimension column", tableName);
            continue;
        }
        // Skip tables without a proper time column
        TimeFieldSpec timeFieldSpec = tableSchema.getTimeFieldSpec();
        if (timeFieldSpec == null || timeFieldSpec.getDataType() == FieldSpec.DataType.STRING) {
            LOGGER.info("Table: {}, skip adding inverted index because it does not have a numeric time column", tableName);
            continue;
        }
        String timeColumnName = timeFieldSpec.getName();
        TimeUnit timeUnit = timeFieldSpec.getOutgoingGranularitySpec().getTimeType();
        if (timeUnit != TimeUnit.DAYS) {
            LOGGER.warn("Table: {}, time column {] has non-DAYS time unit: {}", timeColumnName, timeUnit);
        }
        // Only add inverted index to table larger than a threshold
        JSONObject queryResponse = sendQuery("SELECT COUNT(*) FROM " + tableName);
        long numTotalDocs = queryResponse.getLong("totalDocs");
        LOGGER.info("Table: {}, number of total documents: {}", tableName, numTotalDocs);
        if (numTotalDocs <= _tableSizeThreshold) {
            LOGGER.info("Table: {}, skip adding inverted index because the table is too small", tableName);
            continue;
        }
        // Get each dimension's cardinality on one timestamp's data
        queryResponse = sendQuery("SELECT Max(" + timeColumnName + ") FROM " + tableName);
        int maxTimeStamp = queryResponse.getJSONArray("aggregationResults").getJSONObject(0).getInt("value");
        LOGGER.info("Table: {}, max time column {}: {}", tableName, timeColumnName, maxTimeStamp);
        // Query DISTINCTCOUNT on all dimensions in one query might cause timeout, so query them separately
        List<ResultPair> resultPairs = new ArrayList<>();
        for (String dimensionName : dimensionNames) {
            String query = "SELECT DISTINCTCOUNT(" + dimensionName + ") FROM " + tableName + " WHERE " + timeColumnName + " = " + maxTimeStamp;
            queryResponse = sendQuery(query);
            JSONObject result = queryResponse.getJSONArray("aggregationResults").getJSONObject(0);
            resultPairs.add(new ResultPair(result.getString("function").substring("distinctCount_".length()), result.getLong("value")));
        }
        // Sort the dimensions based on their cardinalities
        Collections.sort(resultPairs);
        // Add the top dimensions into inverted index columns
        int numInvertedIndex = Math.min(_maxNumInvertedIndexAdded, resultPairs.size());
        for (int i = 0; i < numInvertedIndex; i++) {
            ResultPair resultPair = resultPairs.get(i);
            String columnName = resultPair._key;
            long cardinality = resultPair._value;
            if (cardinality > _cardinalityThreshold) {
                // Do not append inverted index if already exists
                if (!invertedIndexColumns.contains(columnName)) {
                    invertedIndexColumns.add(columnName);
                }
                LOGGER.info("Table: {}, add inverted index to column {} with cardinality: {}", tableName, columnName, cardinality);
            } else {
                LOGGER.info("Table: {}, skip adding inverted index to column {} with cardinality: {}", tableName, columnName, cardinality);
                break;
            }
        }
        // Update indexing config
        if (!invertedIndexColumns.isEmpty()) {
            indexingConfig.setAutoGeneratedInvertedIndex(true);
            if (updateIndexConfig(tableName, tableConfig)) {
                LOGGER.info("Table: {}, added inverted index to columns: {}", tableName, invertedIndexColumns);
            } else {
                LOGGER.error("Table: {}, failed to add inverted index to columns: {}", tableName, invertedIndexColumns);
            }
        } else {
            if (autoGeneratedInvertedIndex) {
                Preconditions.checkState(_mode == Mode.REFRESH);
                // Remove existing auto-generated inverted index because no column matches all the conditions
                indexingConfig.setAutoGeneratedInvertedIndex(false);
                if (updateIndexConfig(tableName, tableConfig)) {
                    LOGGER.info("Table: {}, removed auto-generated inverted index", tableName);
                } else {
                    LOGGER.error("Table: {}, failed to remove auto-generated inverted index", tableName);
                }
            }
        }
    }
}
Also used : IndexingConfig(com.linkedin.pinot.common.config.IndexingConfig) Schema(com.linkedin.pinot.common.data.Schema) TimeFieldSpec(com.linkedin.pinot.common.data.TimeFieldSpec) ArrayList(java.util.ArrayList) JSONObject(org.json.JSONObject) TimeUnit(java.util.concurrent.TimeUnit) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig)
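
The loop above takes columns from the front of the sorted list and stops at the first one whose cardinality falls at or below the threshold, so Collections.sort(resultPairs) must order pairs by descending cardinality. Below is a minimal sketch of such a ResultPair helper; the field names mirror the usage in runQueryStrategy, but the project's actual implementation may differ.

private static class ResultPair implements Comparable<ResultPair> {

    final String _key;
    final long _value;

    ResultPair(String key, long value) {
        _key = key;
        _value = value;
    }

    @Override
    public int compareTo(ResultPair other) {
        // Descending order: larger cardinality sorts first
        return Long.compare(other._value, _value);
    }
}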

Example 17 with AbstractTableConfig

Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

From the class ValidationManager, method runValidation.

/**
   * Runs a validation pass over the currently loaded tables.
   */
public void runValidation() {
    if (!_pinotHelixResourceManager.isLeader()) {
        LOGGER.info("Skipping validation, not leader!");
        return;
    }
    LOGGER.info("Starting validation");
    // Fetch the list of tables
    List<String> allTableNames = _pinotHelixResourceManager.getAllPinotTableNames();
    ZkHelixPropertyStore<ZNRecord> propertyStore = _pinotHelixResourceManager.getPropertyStore();
    for (String tableName : allTableNames) {
        List<SegmentMetadata> segmentMetadataList = new ArrayList<SegmentMetadata>();
        TableType tableType = TableNameBuilder.getTableTypeFromTableName(tableName);
        AbstractTableConfig tableConfig = null;
        _pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags(tableName);
        // For each table, fetch the metadata for all its segments
        if (tableType.equals(TableType.OFFLINE)) {
            validateOfflineSegmentPush(propertyStore, tableName, segmentMetadataList);
        } else if (tableType.equals(TableType.REALTIME)) {
            LOGGER.info("Starting to validate table {}", tableName);
            List<RealtimeSegmentZKMetadata> realtimeSegmentZKMetadatas = ZKMetadataProvider.getRealtimeSegmentZKMetadataListForTable(propertyStore, tableName);
            // Set to false below if this table has ONLY LLC segments (i.e. fully migrated)
            boolean countHLCSegments = true;
            KafkaStreamMetadata streamMetadata = null;
            try {
                tableConfig = _pinotHelixResourceManager.getRealtimeTableConfig(tableName);
                streamMetadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());
                if (streamMetadata.hasSimpleKafkaConsumerType() && !streamMetadata.hasHighLevelKafkaConsumerType()) {
                    countHLCSegments = false;
                }
                for (RealtimeSegmentZKMetadata realtimeSegmentZKMetadata : realtimeSegmentZKMetadatas) {
                    SegmentMetadata segmentMetadata = new SegmentMetadataImpl(realtimeSegmentZKMetadata);
                    segmentMetadataList.add(segmentMetadata);
                }
                // Update the gauge to contain the total document count in the segments
                _validationMetrics.updateTotalDocumentsGauge(tableName, computeRealtimeTotalDocumentInSegments(segmentMetadataList, countHLCSegments));
                if (streamMetadata.hasSimpleKafkaConsumerType()) {
                    validateLLCSegments(tableName, tableConfig);
                }
            } catch (Exception e) {
                if (tableConfig == null) {
                    LOGGER.warn("Cannot get realtime tableconfig for {}", tableName);
                } else if (streamMetadata == null) {
                    LOGGER.warn("Cannot get streamconfig for {}", tableName);
                } else {
                    LOGGER.error("Exception while validating table {}", tableName, e);
                }
            }
        } else {
            LOGGER.warn("Ignoring table type {} for table {}", tableType, tableName);
        }
    }
    LOGGER.info("Validation completed");
}
Also used : KafkaStreamMetadata(com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) TableType(com.linkedin.pinot.common.utils.CommonConstants.Helix.TableType) ArrayList(java.util.ArrayList) SegmentMetadata(com.linkedin.pinot.common.segment.SegmentMetadata) RealtimeSegmentZKMetadata(com.linkedin.pinot.common.metadata.segment.RealtimeSegmentZKMetadata) ArrayList(java.util.ArrayList) List(java.util.List) SegmentMetadataImpl(com.linkedin.pinot.core.segment.index.SegmentMetadataImpl) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) ZNRecord(org.apache.helix.ZNRecord)
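
The gauge update above calls computeRealtimeTotalDocumentInSegments, which is not shown in this example. A hedged sketch of what such a helper might boil down to follows: sum getTotalDocs() over the segments, counting either the HLC or the LLC segments depending on the flag. It assumes SegmentMetadata exposes getName() and getTotalDocs(); the real helper may apply extra rules, such as counting HLC documents from only one consumer group.

static long computeRealtimeTotalDocumentInSegments(List<SegmentMetadata> segmentMetadataList,
        boolean countHLCSegments) {
    long numTotalDocs = 0;
    for (SegmentMetadata segmentMetadata : segmentMetadataList) {
        // Count HLC segments when the flag is set, LLC segments otherwise
        if (SegmentName.isHighLevelConsumerSegmentName(segmentMetadata.getName()) == countHLCSegments) {
            numTotalDocs += segmentMetadata.getTotalDocs();
        }
    }
    return numTotalDocs;
}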

Example 18 with AbstractTableConfig

Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

From the class PinotRealtimeSegmentManager, method assignRealtimeSegmentsToServerInstancesIfNecessary.

private synchronized void assignRealtimeSegmentsToServerInstancesIfNecessary() throws JSONException, IOException {
    // Fetch current ideal state snapshot
    Map<String, IdealState> idealStateMap = new HashMap<String, IdealState>();
    for (String resource : _pinotHelixResourceManager.getAllRealtimeTables()) {
        final String tableName = TableNameBuilder.extractRawTableName(resource);
        AbstractTableConfig tableConfig = _pinotHelixResourceManager.getTableConfig(tableName, TableType.REALTIME);
        KafkaStreamMetadata metadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());
        if (metadata.hasHighLevelKafkaConsumerType()) {
            idealStateMap.put(resource, _pinotHelixResourceManager.getHelixAdmin().getResourceIdealState(_pinotHelixResourceManager.getHelixClusterName(), resource));
        } else {
            LOGGER.debug("Not considering table {} for realtime segment assignment");
        }
    }
    List<Pair<String, String>> listOfSegmentsToAddToInstances = new ArrayList<Pair<String, String>>();
    for (String resource : idealStateMap.keySet()) {
        try {
            IdealState state = idealStateMap.get(resource);
            // Are there any partitions?
            if (state.getPartitionSet().size() == 0) {
                // No, this is a brand new ideal state, so we will add one new segment to every partition and replica
                List<String> instancesInResource = new ArrayList<String>();
                try {
                    instancesInResource.addAll(_pinotHelixResourceManager.getServerInstancesForTable(resource, TableType.REALTIME));
                } catch (Exception e) {
                    LOGGER.error("Caught exception while fetching instances for resource {}", resource, e);
                    _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                }
                // Assign a new segment to all server instances
                for (String instanceId : instancesInResource) {
                    InstanceZKMetadata instanceZKMetadata = _pinotHelixResourceManager.getInstanceZKMetadata(instanceId);
                    if (instanceZKMetadata == null) {
                        LOGGER.warn("Instance {} has no associated instance metadata in ZK, ignoring for segment assignment.", instanceId);
                        _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                        continue;
                    }
                    String groupId = instanceZKMetadata.getGroupId(resource);
                    String partitionId = instanceZKMetadata.getPartition(resource);
                    if (groupId != null && !groupId.isEmpty() && partitionId != null && !partitionId.isEmpty()) {
                        listOfSegmentsToAddToInstances.add(new Pair<String, String>(new HLCSegmentName(groupId, partitionId, String.valueOf(System.currentTimeMillis())).getSegmentName(), instanceId));
                    } else {
                        LOGGER.warn("Instance {} has invalid groupId ({}) and/or partitionId ({}) for resource {}, ignoring for segment assignment.", instanceId, groupId, partitionId, resource);
                        _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                    }
                }
            } else {
                // Add all server instances to the list of instances for which to assign a realtime segment
                Set<String> instancesToAssignRealtimeSegment = new HashSet<String>();
                try {
                    instancesToAssignRealtimeSegment.addAll(_pinotHelixResourceManager.getServerInstancesForTable(resource, TableType.REALTIME));
                } catch (Exception e) {
                    LOGGER.error("Caught exception while fetching instances for resource {}", resource, e);
                    _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                }
                // Remove server instances that are currently processing a segment
                for (String partition : state.getPartitionSet()) {
                    // Helix partition is the segment name
                    if (SegmentName.isHighLevelConsumerSegmentName(partition)) {
                        HLCSegmentName segName = new HLCSegmentName(partition);
                        RealtimeSegmentZKMetadata realtimeSegmentZKMetadata = ZKMetadataProvider.getRealtimeSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(), segName.getTableName(), partition);
                        if (realtimeSegmentZKMetadata == null) {
                            // Segment was deleted by retention manager.
                            continue;
                        }
                        if (realtimeSegmentZKMetadata.getStatus() == Status.IN_PROGRESS) {
                            instancesToAssignRealtimeSegment.removeAll(state.getInstanceSet(partition));
                        }
                    }
                }
                // Assign a new segment to the server instances not currently processing this segment
                for (String instanceId : instancesToAssignRealtimeSegment) {
                    InstanceZKMetadata instanceZKMetadata = _pinotHelixResourceManager.getInstanceZKMetadata(instanceId);
                    if (instanceZKMetadata == null) {
                        // Mirror the guard used in the new-ideal-state branch above: skip instances with no ZK metadata
                        LOGGER.warn("Instance {} has no associated instance metadata in ZK, ignoring for segment assignment.", instanceId);
                        _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
                        continue;
                    }
                    String groupId = instanceZKMetadata.getGroupId(resource);
                    String partitionId = instanceZKMetadata.getPartition(resource);
                    listOfSegmentsToAddToInstances.add(new Pair<String, String>(new HLCSegmentName(groupId, partitionId, String.valueOf(System.currentTimeMillis())).getSegmentName(), instanceId));
                }
            }
        } catch (Exception e) {
            LOGGER.warn("Caught exception while processing resource {}, skipping.", resource, e);
            _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
        }
    }
    LOGGER.info("Computed list of new segments to add : " + Arrays.toString(listOfSegmentsToAddToInstances.toArray()));
    // Add the new segments to the server instances
    for (final Pair<String, String> segmentIdAndInstanceId : listOfSegmentsToAddToInstances) {
        final String segmentId = segmentIdAndInstanceId.getFirst();
        final String instanceName = segmentIdAndInstanceId.getSecond();
        try {
            final HLCSegmentName segName = new HLCSegmentName(segmentId);
            String resourceName = segName.getTableName();
            // Does the ideal state already contain this segment?
            if (!idealStateMap.get(resourceName).getPartitionSet().contains(segmentId)) {
                // No, add it
                // Create the realtime segment metadata
                RealtimeSegmentZKMetadata realtimeSegmentMetadataToAdd = new RealtimeSegmentZKMetadata();
                realtimeSegmentMetadataToAdd.setTableName(TableNameBuilder.extractRawTableName(resourceName));
                realtimeSegmentMetadataToAdd.setSegmentType(SegmentType.REALTIME);
                realtimeSegmentMetadataToAdd.setStatus(Status.IN_PROGRESS);
                realtimeSegmentMetadataToAdd.setSegmentName(segmentId);
                // Add the new metadata to the property store
                ZKMetadataProvider.setRealtimeSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(), realtimeSegmentMetadataToAdd);
                // Update the ideal state to add the new realtime segment
                HelixHelper.updateIdealState(_pinotHelixResourceManager.getHelixZkManager(), resourceName, new Function<IdealState, IdealState>() {

                    @Override
                    public IdealState apply(IdealState idealState) {
                        return PinotTableIdealStateBuilder.addNewRealtimeSegmentToIdealState(segmentId, idealState, instanceName);
                    }
                }, RetryPolicies.exponentialBackoffRetryPolicy(5, 500L, 2.0f));
            }
        } catch (Exception e) {
            LOGGER.warn("Caught exception while processing segment {} for instance {}, skipping.", segmentId, instanceName, e);
            _controllerMetrics.addMeteredGlobalValue(ControllerMeter.CONTROLLER_REALTIME_TABLE_SEGMENT_ASSIGNMENT_ERROR, 1L);
        }
    }
}
Also used : HLCSegmentName(com.linkedin.pinot.common.utils.HLCSegmentName) KafkaStreamMetadata(com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) HashMap(java.util.HashMap) InstanceZKMetadata(com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata) ArrayList(java.util.ArrayList) IdealState(org.apache.helix.model.IdealState) JSONException(org.json.JSONException) IOException(java.io.IOException) RealtimeSegmentZKMetadata(com.linkedin.pinot.common.metadata.segment.RealtimeSegmentZKMetadata) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) Pair(com.linkedin.pinot.core.query.utils.Pair) HashSet(java.util.HashSet)
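
New segment names above are composed from a group id, a partition id, and the current wall-clock time, then parsed back later (segName.getTableName()) when the segment is committed to the ideal state. A small illustrative round trip, using hypothetical group and partition ids:

// Hypothetical group/partition ids for illustration; real values come from
// the instance ZK metadata, as in the assignment loop above.
String groupId = "myTable_REALTIME_1466446700000_0";
String partitionId = "0";
HLCSegmentName segName =
        new HLCSegmentName(groupId, partitionId, String.valueOf(System.currentTimeMillis()));
String segmentId = segName.getSegmentName();

// Parsing the composed name recovers its parts, e.g. the table name used to
// look up the ideal state before the segment is added.
HLCSegmentName parsed = new HLCSegmentName(segmentId);
String tableName = parsed.getTableName();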

Example 19 with AbstractTableConfig

Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

From the class PinotRealtimeSegmentManager, method refreshWatchers.

/**
   * Helper method to perform idempotent operation to refresh all watches (related to real-time segments):
   * - Data change listener for all existing real-time tables.
   * - Child creation listener for all existing real-time tables.
   * - Data change listener for all existing real-time segments
   *
   * @param path the path for which the change notification was received
   */
private void refreshWatchers(String path) {
    LOGGER.info("Received change notification for path: {}", path);
    List<Stat> stats = new ArrayList<>();
    List<ZNRecord> tableConfigs = _pinotHelixResourceManager.getPropertyStore().getChildren(TABLE_CONFIG, stats, 0);
    if (tableConfigs == null) {
        return;
    }
    for (ZNRecord tableConfigZnRecord : tableConfigs) {
        try {
            String znRecordId = tableConfigZnRecord.getId();
            if (TableNameBuilder.getTableTypeFromTableName(znRecordId) == TableType.REALTIME) {
                AbstractTableConfig abstractTableConfig = AbstractTableConfig.fromZnRecord(tableConfigZnRecord);
                KafkaStreamMetadata metadata = new KafkaStreamMetadata(abstractTableConfig.getIndexingConfig().getStreamConfigs());
                if (metadata.hasHighLevelKafkaConsumerType()) {
                    String realtimeTable = abstractTableConfig.getTableName();
                    String realtimeSegmentsPathForTable = _propertyStorePath + SEGMENTS_PATH + "/" + realtimeTable;
                    LOGGER.info("Setting data/child changes watch for real-time table '{}'", realtimeTable);
                    _zkClient.subscribeDataChanges(realtimeSegmentsPathForTable, this);
                    _zkClient.subscribeChildChanges(realtimeSegmentsPathForTable, this);
                    List<String> childNames = _pinotHelixResourceManager.getPropertyStore().getChildNames(SEGMENTS_PATH + "/" + realtimeTable, 0);
                    if (childNames != null && !childNames.isEmpty()) {
                        for (String segmentName : childNames) {
                            if (!SegmentName.isHighLevelConsumerSegmentName(segmentName)) {
                                continue;
                            }
                            String segmentPath = realtimeSegmentsPathForTable + "/" + segmentName;
                            RealtimeSegmentZKMetadata realtimeSegmentZKMetadata = ZKMetadataProvider.getRealtimeSegmentZKMetadata(_pinotHelixResourceManager.getPropertyStore(), abstractTableConfig.getTableName(), segmentName);
                            if (realtimeSegmentZKMetadata == null) {
                                // The segment got deleted by retention manager
                                continue;
                            }
                            if (realtimeSegmentZKMetadata.getStatus() == Status.IN_PROGRESS) {
                                LOGGER.info("Setting data change watch for real-time segment currently being consumed: {}", segmentPath);
                                _zkClient.subscribeDataChanges(segmentPath, this);
                            } else {
                                _zkClient.unsubscribeDataChanges(segmentPath, this);
                            }
                        }
                    }
                }
            }
        } catch (Exception e) {
            // we want to continue setting watches for other tables for any kind of exception here so that
            // errors with one table don't impact others
            LOGGER.error("Caught exception while processing ZNRecord id: {}. Skipping node to continue setting watches", tableConfigZnRecord.getId(), e);
        }
    }
}
Also used : RealtimeSegmentZKMetadata(com.linkedin.pinot.common.metadata.segment.RealtimeSegmentZKMetadata) KafkaStreamMetadata(com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) Stat(org.apache.zookeeper.data.Stat) ArrayList(java.util.ArrayList) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) ZNRecord(org.apache.helix.ZNRecord) JSONException(org.json.JSONException) IOException(java.io.IOException)
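
Subscribing with this as the listener works because the manager itself receives the ZooKeeper callbacks. A minimal sketch, assuming the class implements the zkclient listener interfaces; the actual implementation may also trigger segment assignment from these callbacks:

import java.util.List;
import org.I0Itec.zkclient.IZkChildListener;
import org.I0Itec.zkclient.IZkDataListener;

// Sketch only: every notification funnels back into refreshWatchers,
// which is safe because that method is idempotent.
public class PinotRealtimeSegmentManager implements IZkDataListener, IZkChildListener {

    @Override
    public void handleDataChange(String dataPath, Object data) throws Exception {
        refreshWatchers(dataPath);
    }

    @Override
    public void handleDataDeleted(String dataPath) throws Exception {
        refreshWatchers(dataPath);
    }

    @Override
    public void handleChildChange(String parentPath, List<String> currentChildren) throws Exception {
        refreshWatchers(parentPath);
    }

    private void refreshWatchers(String path) {
        // As shown in the example above
    }
}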

Example 20 with AbstractTableConfig

Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

From the class RetentionManager, method updateDeletionStrategyForRealtimeTable.

/**
   * Update deletion strategy for realtime table.
   * <ul>
   *   <li>Keep the current deletion strategy when we fail to get a valid retention time.</li>
   *   <li>Update the deletion strategy when a valid retention time is set.</li>
   * </ul>
   * The reason for this is that we do not allow a realtime table without a deletion strategy.
   */
private void updateDeletionStrategyForRealtimeTable(String realtimeTableName) {
    try {
        AbstractTableConfig realtimeTableConfig = ZKMetadataProvider.getRealtimeTableConfig(_pinotHelixResourceManager.getPropertyStore(), realtimeTableName);
        assert realtimeTableConfig != null;
        SegmentsValidationAndRetentionConfig validationConfig = realtimeTableConfig.getValidationConfig();
        TimeRetentionStrategy timeRetentionStrategy = new TimeRetentionStrategy(validationConfig.getRetentionTimeUnit(), validationConfig.getRetentionTimeValue());
        _tableDeletionStrategy.put(realtimeTableName, timeRetentionStrategy);
    } catch (Exception e) {
        LOGGER.error("Caught exception while updating deletion strategy, skip updating deletion strategy for table: {}.", realtimeTableName, e);
    }
}
Also used : TimeRetentionStrategy(com.linkedin.pinot.controller.helix.core.retention.strategy.TimeRetentionStrategy) SegmentsValidationAndRetentionConfig(com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig)
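
The TimeRetentionStrategy constructed above reduces to a simple age check against a retention window. A hedged sketch of the idea, using illustrative names rather than the project's actual API:

import java.util.concurrent.TimeUnit;

// Illustrative only: the real TimeRetentionStrategy parses the unit/value
// strings from the validation config and may handle errors differently.
public class TimeRetentionSketch {

    private final long _retentionMillis;

    public TimeRetentionSketch(String retentionTimeUnit, String retentionTimeValue) {
        _retentionMillis =
                TimeUnit.valueOf(retentionTimeUnit.toUpperCase()).toMillis(Long.parseLong(retentionTimeValue));
    }

    // A segment is purgeable once its end time falls outside the retention window
    public boolean isPurgeable(long segmentEndTimeMillis) {
        return System.currentTimeMillis() - segmentEndTimeMillis > _retentionMillis;
    }
}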

Aggregations

AbstractTableConfig (com.linkedin.pinot.common.config.AbstractTableConfig) 53
ZNRecord (org.apache.helix.ZNRecord) 10
Test (org.testng.annotations.Test) 10
IdealState (org.apache.helix.model.IdealState) 9
ArrayList (java.util.ArrayList) 8
JSONObject (org.json.JSONObject) 8
HttpVerb (com.linkedin.pinot.common.restlet.swagger.HttpVerb) 7
Paths (com.linkedin.pinot.common.restlet.swagger.Paths) 7
Summary (com.linkedin.pinot.common.restlet.swagger.Summary) 7
Tags (com.linkedin.pinot.common.restlet.swagger.Tags) 7
IOException (java.io.IOException) 7
HashMap (java.util.HashMap) 7
StringRepresentation (org.restlet.representation.StringRepresentation) 7
BeforeTest (org.testng.annotations.BeforeTest) 7
TableNameBuilder (com.linkedin.pinot.common.config.TableNameBuilder) 6
Schema (com.linkedin.pinot.common.data.Schema) 6
IndexingConfig (com.linkedin.pinot.common.config.IndexingConfig) 5
KafkaStreamMetadata (com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) 4
JSONException (org.json.JSONException) 4
SegmentsValidationAndRetentionConfig (com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig) 3