Example 1 with LLCSegmentName

Use of com.linkedin.pinot.common.utils.LLCSegmentName in project pinot by linkedin.

In class SegmentCompletionManager, method segmentCommitEnd.

/**
   * This method is to be called when the segment sent in by the server has been saved locally, in the correct path
   * from which it is downloadable by the servers.
   *
   * It returns a response code to be sent back to the client.
   *
   * If the response code is not COMMIT_SUCCESS, then the caller may remove the segment that has been saved.
   *
   * @return the response code to be sent back to the client
   */
public SegmentCompletionProtocol.Response segmentCommitEnd(SegmentCompletionProtocol.Request.Params reqParams, boolean success) {
    if (!_helixManager.isLeader()) {
        return SegmentCompletionProtocol.RESP_NOT_LEADER;
    }
    final String segmentNameStr = reqParams.getSegmentName();
    final String instanceId = reqParams.getInstanceId();
    final long offset = reqParams.getOffset();
    LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
    SegmentCompletionFSM fsm = lookupOrCreateFsm(segmentName, SegmentCompletionProtocol.MSG_TYPE_COMMMIT);
    SegmentCompletionProtocol.Response response = fsm.segmentCommitEnd(instanceId, offset, success);
    if (fsm.isDone()) {
        LOGGER.info("Removing FSM (if present):{}", fsm.toString());
        _fsmMap.remove(segmentNameStr);
    }
    return response;
}
Also used: SegmentCompletionProtocol (com.linkedin.pinot.common.protocols.SegmentCompletionProtocol), LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName)
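
As a usage sketch, a caller that has just saved the segment locally might drive segmentCommitEnd as below. This is a minimal sketch, not Pinot source: the segmentCompletionManager and reqParams variables come from the surrounding context, deleteLocalSegmentCopy is a hypothetical cleanup helper, and the ControllerResponseStatus accessor is assumed to match the protocol class.

// Hypothetical caller sketch: report the commit result, then honor the
// javadoc's contract by cleaning up on anything other than COMMIT_SUCCESS.
SegmentCompletionProtocol.Response response =
    segmentCompletionManager.segmentCommitEnd(reqParams, /* success */ true);
if (response.getStatus() != SegmentCompletionProtocol.ControllerResponseStatus.COMMIT_SUCCESS) {
    // The commit was not accepted; the locally saved segment may be removed.
    deleteLocalSegmentCopy(reqParams.getSegmentName());  // illustrative helper
}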

Example 2 with LLCSegmentName

Use of com.linkedin.pinot.common.utils.LLCSegmentName in project pinot by linkedin.

In class PinotLLCRealtimeSegmentManager, method completeCommittingSegments.

public void completeCommittingSegments(String realtimeTableName, List<String> segmentIds) {
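    // Order segments in reverse (descending), so the newest segment in each partition sorts first
    // and the size-capped queue below evicts the oldest.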
    Comparator<LLCSegmentName> comparator = new Comparator<LLCSegmentName>() {

        @Override
        public int compare(LLCSegmentName o1, LLCSegmentName o2) {
            return o2.compareTo(o1);
        }
    };
    Map<Integer, MinMaxPriorityQueue<LLCSegmentName>> partitionToLatestSegments = new HashMap<>();
    for (String segmentId : segmentIds) {
        LLCSegmentName segmentName = new LLCSegmentName(segmentId);
        final int partitionId = segmentName.getPartitionId();
        MinMaxPriorityQueue<LLCSegmentName> latestSegments = partitionToLatestSegments.get(partitionId);
        if (latestSegments == null) {
            latestSegments = MinMaxPriorityQueue.orderedBy(comparator).maximumSize(2).create();
            partitionToLatestSegments.put(partitionId, latestSegments);
        }
        latestSegments.offer(segmentName);
    }
    completeCommittingSegmentsInternal(realtimeTableName, partitionToLatestSegments);
}
Also used: Object2IntLinkedOpenHashMap (it.unimi.dsi.fastutil.objects.Object2IntLinkedOpenHashMap), HashMap (java.util.HashMap), MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue), LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName), Comparator (java.util.Comparator)
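
To make the eviction behavior concrete, here is a minimal, self-contained sketch of the same "two latest per partition" pattern. The table name and sequence numbers are illustrative; the four-argument LLCSegmentName constructor is the one used in Example 4, and LLCSegmentName is assumed to implement Comparable (the anonymous class above relies on its compareTo).

// Keep only the two most recent segments, using a descending comparator.
Comparator<LLCSegmentName> descending = Comparator.reverseOrder();
MinMaxPriorityQueue<LLCSegmentName> latestSegments =
    MinMaxPriorityQueue.orderedBy(descending).maximumSize(2).create();
long now = System.currentTimeMillis();
latestSegments.offer(new LLCSegmentName("myTable", 0, 5, now));
latestSegments.offer(new LLCSegmentName("myTable", 0, 7, now));
latestSegments.offer(new LLCSegmentName("myTable", 0, 6, now));
// MinMaxPriorityQueue evicts its comparator-greatest element when over
// maximumSize; under a descending comparator that is the lowest sequence
// number, so segments 7 and 6 remain and segment 5 is dropped.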

Example 3 with LLCSegmentName

Use of com.linkedin.pinot.common.utils.LLCSegmentName in project pinot by linkedin.

In class PinotLLCRealtimeSegmentManager, method createConsumingSegment.

/**
   * Create a consuming segment for the kafka partitions that are missing one.
   *
   * @param realtimeTableName is the name of the realtime table (e.g. "table_REALTIME")
   * @param nonConsumingPartitions is a set of integers (kafka partitions that do not have a consuming segment)
   * @param llcSegments is a list of segment names in the ideal state as was observed last.
   */
public void createConsumingSegment(final String realtimeTableName, final Set<Integer> nonConsumingPartitions, final List<String> llcSegments, final AbstractTableConfig tableConfig) {
    final KafkaStreamMetadata kafkaStreamMetadata = new KafkaStreamMetadata(tableConfig.getIndexingConfig().getStreamConfigs());
    final ZNRecord partitionAssignment = getKafkaPartitionAssignment(realtimeTableName);
    final HashMap<Integer, LLCSegmentName> ncPartitionToLatestSegment = new HashMap<>(nonConsumingPartitions.size());
    // Number of replicas (should be same for all partitions)
    final int nReplicas = partitionAssignment.getListField("0").size();
    // Find the latest segment in each non-consuming partition (null if there is none).
    for (String segmentId : llcSegments) {
        LLCSegmentName segmentName = new LLCSegmentName(segmentId);
        int partitionId = segmentName.getPartitionId();
        if (nonConsumingPartitions.contains(partitionId)) {
            LLCSegmentName hashedSegName = ncPartitionToLatestSegment.get(partitionId);
            if (hashedSegName == null || hashedSegName.getSequenceNumber() < segmentName.getSequenceNumber()) {
                ncPartitionToLatestSegment.put(partitionId, segmentName);
            }
        }
    }
    // Create a CONSUMING segment for each non-consuming partition, with a start offset taken from the latest
    // segment in that partition (if it exists and completed), or from the table configuration (smallest/largest).
    for (int partition : nonConsumingPartitions) {
        try {
            LLCSegmentName latestSegment = ncPartitionToLatestSegment.get(partition);
            long startOffset;
            int nextSeqNum;
            List<String> instances = partitionAssignment.getListField(Integer.toString(partition));
            if (latestSegment == null) {
                // No segment yet in this partition; create a new one with a starting offset as per the table config specification.
                nextSeqNum = STARTING_SEQUENCE_NUMBER;
                LOGGER.info("Creating CONSUMING segment for {} partition {} with seq {}", realtimeTableName, partition, nextSeqNum);
                String consumerStartOffsetSpec = kafkaStreamMetadata.getKafkaConsumerProperties().get(CommonConstants.Helix.DataSource.Realtime.Kafka.AUTO_OFFSET_RESET);
                startOffset = getKafkaPartitionOffset(kafkaStreamMetadata, consumerStartOffsetSpec, partition);
                LOGGER.info("Found kafka offset {} for table {} for partition {}", startOffset, realtimeTableName, partition);
            } else {
                nextSeqNum = latestSegment.getSequenceNumber() + 1;
                LOGGER.info("Creating CONSUMING segment for {} partition {} with seq {}", realtimeTableName, partition, nextSeqNum);
                // To begin with, set startOffset to the oldest available offset in kafka. Then adjust it to the one we
                // actually want, depending on what the previous segment had.
                startOffset = getKafkaPartitionOffset(kafkaStreamMetadata, "smallest", partition);
                LOGGER.info("Found kafka offset {} for table {} for partition {}", startOffset, realtimeTableName, partition);
                startOffset = getBetterStartOffsetIfNeeded(realtimeTableName, partition, latestSegment, startOffset, nextSeqNum);
            }
            createSegment(realtimeTableName, nReplicas, partition, nextSeqNum, instances, startOffset, partitionAssignment);
        } catch (Exception e) {
            LOGGER.error("Exception creating CONSUMING segment for {} partition {}", realtimeTableName, partition, e);
        }
    }
}
Also used: KafkaStreamMetadata (com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata), Object2IntLinkedOpenHashMap (it.unimi.dsi.fastutil.objects.Object2IntLinkedOpenHashMap), HashMap (java.util.HashMap), LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName), ZNRecord (org.apache.helix.ZNRecord), TimeoutException (java.util.concurrent.TimeoutException)
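
getBetterStartOffsetIfNeeded is invoked above but not shown. Based on what the test in Example 4 asserts (resume from the previous DONE segment's end offset when kafka still has that offset, otherwise accept the data loss and restart at the smallest available offset), a hedged reconstruction might look like the following; the getSegmentMetadata lookup helper is an assumption, not confirmed Pinot source.

// Hedged reconstruction, inferred from Example 4's assertions; not the
// actual Pinot implementation. getSegmentMetadata() is a hypothetical lookup.
private long getBetterStartOffsetIfNeeded(String realtimeTableName, int partition,
        LLCSegmentName latestSegment, long smallestOffsetInKafka, int nextSeqNum) {
    LLCRealtimeSegmentZKMetadata metadata =
        getSegmentMetadata(realtimeTableName, latestSegment.getSegmentName());
    if (metadata != null && metadata.getStatus() == CommonConstants.Segment.Realtime.Status.DONE
            && metadata.getEndOffset() >= smallestOffsetInKafka) {
        // The previous segment completed and its end offset is still in kafka:
        // continue exactly where it stopped, with no gap and no overlap.
        return metadata.getEndOffset();
    }
    // Data loss (or no completed segment): start at the earliest offset kafka has.
    return smallestOffsetInKafka;
}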

Example 4 with LLCSegmentName

Use of com.linkedin.pinot.common.utils.LLCSegmentName in project pinot by linkedin.

In class PinotLLCRealtimeSegmentManagerTest, method testAutoReplaceConsumingSegment.

public void testAutoReplaceConsumingSegment(final String tableConfigStartOffset) throws Exception {
    FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(true, null);
    final int nPartitions = 8;
    final int nInstances = 3;
    final int nReplicas = 2;
    final String topic = "someTopic";
    final String rtTableName = "table_REALTIME";
    List<String> instances = getInstanceList(nInstances);
    final String startOffset = KAFKA_OFFSET;
    IdealState idealState = PinotTableIdealStateBuilder.buildEmptyKafkaConsumerRealtimeIdealStateFor(rtTableName, nReplicas);
    segmentManager.setupHelixEntries(topic, rtTableName, nPartitions, instances, nReplicas, startOffset, DUMMY_HOST, idealState, false, 10000);
    // Add another segment for each partition
    long now = System.currentTimeMillis();
    List<String> existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
    final int partitionToBeFixed = 3;
    final int partitionWithHigherOffset = 4;
    final int emptyPartition = 5;
    final long smallestPartitionOffset = 0x259080984568L;
    final long largestPartitionOffset = smallestPartitionOffset + 100000;
    final long higherOffset = smallestPartitionOffset + 100;
    for (String segmentNameStr : existingSegments) {
        LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
        switch(segmentName.getPartitionId()) {
            case partitionToBeFixed:
                // Do nothing, we will test adding a new segment for this partition when there is only one segment in there.
                break;
            case emptyPartition:
                // Remove existing segment, so we can test adding a new segment for this partition when none exists
                segmentManager._idealStateEntries.remove(segmentNameStr);
                break;
            case partitionWithHigherOffset:
                // Set this segment's metadata such that its end offset is higher than the startOffset we get from kafka.
                // In that case, the new segment's start offset should be this end offset rather than the one kafka hands us.
                LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata();
                metadata.setSegmentName(segmentName.getSegmentName());
                metadata.setEndOffset(higherOffset);
                metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
                segmentManager._metadataMap.put(segmentName.getSegmentName(), metadata);
                break;
            default:
                // Add a second segment for this partition. It will not be repaired.
                LLCSegmentName newSegmentName = new LLCSegmentName(segmentName.getTableName(), segmentName.getPartitionId(), segmentName.getSequenceNumber() + 1, now);
                List<String> hosts = segmentManager._idealStateEntries.get(segmentNameStr);
                segmentManager._idealStateEntries.put(newSegmentName.getSegmentName(), hosts);
                break;
        }
    }
    Map<String, String> streamPropMap = new HashMap<>(1);
    streamPropMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "simple");
    streamPropMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.KAFKA_CONSUMER_PROPS_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.AUTO_OFFSET_RESET), tableConfigStartOffset);
    KafkaStreamMetadata kafkaStreamMetadata = new KafkaStreamMetadata(streamPropMap);
    AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
    IndexingConfig indexingConfig = mock(IndexingConfig.class);
    when(indexingConfig.getStreamConfigs()).thenReturn(streamPropMap);
    when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
    Set<Integer> nonConsumingPartitions = new HashSet<>(1);
    nonConsumingPartitions.add(partitionToBeFixed);
    nonConsumingPartitions.add(partitionWithHigherOffset);
    nonConsumingPartitions.add(emptyPartition);
    segmentManager._kafkaSmallestOffsetToReturn = smallestPartitionOffset;
    segmentManager._kafkaLargestOffsetToReturn = largestPartitionOffset;
    existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
    segmentManager._paths.clear();
    segmentManager._records.clear();
    segmentManager.createConsumingSegment(rtTableName, nonConsumingPartitions, existingSegments, tableConfig);
    Assert.assertEquals(segmentManager._paths.size(), 3);
    Assert.assertEquals(segmentManager._records.size(), 3);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 3);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 3);
    int found = 0;
    int index = 0;
    while (index < segmentManager._paths.size()) {
        String znodePath = segmentManager._paths.get(index);
        int slash = znodePath.lastIndexOf('/');
        String segmentNameStr = znodePath.substring(slash + 1);
        LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
        ZNRecord znRecord;
        LLCRealtimeSegmentZKMetadata metadata;
        switch(segmentName.getPartitionId()) {
            case partitionToBeFixed:
                // We had left this partition with one segment. So, a second one should be created with a sequence
                // number one higher than the starting sequence number. Its start offset should be what kafka returns.
                found++;
                Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
                znRecord = segmentManager._records.get(index);
                metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
                Assert.assertEquals(metadata.getNumReplicas(), 2);
                Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
                break;
            case emptyPartition:
                // We had removed the existing segment in this partition. A new one should be created with the offset
                // returned by kafka and with the starting sequence number.
                found++;
                Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER);
                znRecord = segmentManager._records.get(index);
                metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
                Assert.assertEquals(metadata.getNumReplicas(), 2);
                if (tableConfigStartOffset.equals("smallest")) {
                    Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
                } else {
                    Assert.assertEquals(metadata.getStartOffset(), largestPartitionOffset);
                }
                break;
            case partitionWithHigherOffset:
                // We had left this partition with one segment. In addition, we had set the end offset of the first
                // segment to a value higher than that returned by kafka. So, a second one should be created with a
                // sequence number one higher than the starting one, and a start offset equal to the end offset of
                // the first one.
                found++;
                Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
                znRecord = segmentManager._records.get(index);
                metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
                Assert.assertEquals(metadata.getNumReplicas(), 2);
                Assert.assertEquals(metadata.getStartOffset(), higherOffset);
                break;
        }
        index++;
    }
    // We should see all three cases here.
    Assert.assertEquals(3, found);
    // Now, if we make 'partitionToBeFixed' a non-consuming partition, a second segment should get added with the same
    // start offset as the first one, since the kafka offset to return has not changed.
    Set<Integer> ncPartitions = new HashSet<>(1);
    ncPartitions.add(partitionToBeFixed);
    segmentManager.createConsumingSegment(rtTableName, ncPartitions, segmentManager.getExistingSegments(rtTableName), tableConfig);
    Assert.assertEquals(segmentManager._paths.size(), 4);
    Assert.assertEquals(segmentManager._records.size(), 4);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 4);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 4);
    // The latest zn record should be that of the new one we added.
    ZNRecord znRecord = segmentManager._records.get(3);
    LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
    Assert.assertEquals(metadata.getNumReplicas(), 2);
    Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
    LLCSegmentName llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
    Assert.assertEquals(llcSegmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 2);
    Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);
    // Now pretend the previous segment completed successfully, and set the end offset.
    metadata.setEndOffset(metadata.getStartOffset() + 10);
    metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
    segmentManager._records.remove(3);
    segmentManager._records.add(metadata.toZNRecord());
    segmentManager._metadataMap.put(metadata.getSegmentName(), metadata);
    segmentManager._kafkaLargestOffsetToReturn *= 2;
    segmentManager._kafkaSmallestOffsetToReturn *= 2;
    ncPartitions.clear();
    ncPartitions.add(partitionToBeFixed);
    segmentManager.createConsumingSegment(rtTableName, ncPartitions, segmentManager.getExistingSegments(rtTableName), tableConfig);
    Assert.assertEquals(segmentManager._paths.size(), 5);
    Assert.assertEquals(segmentManager._records.size(), 5);
    Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 5);
    Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 5);
    znRecord = segmentManager._records.get(4);
    metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
    Assert.assertEquals(metadata.getNumReplicas(), 2);
    // In this case, since we have data loss, we will always use the smallest kafka offset available.
    Assert.assertEquals(metadata.getStartOffset(), segmentManager.getKafkaPartitionOffset(null, "smallest", partitionToBeFixed));
    llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
    Assert.assertEquals(llcSegmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 3);
    Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);
}
Also used: KafkaStreamMetadata (com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata), IndexingConfig (com.linkedin.pinot.common.config.IndexingConfig), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName), IdealState (org.apache.helix.model.IdealState), AbstractTableConfig (com.linkedin.pinot.common.config.AbstractTableConfig), LLCRealtimeSegmentZKMetadata (com.linkedin.pinot.common.metadata.segment.LLCRealtimeSegmentZKMetadata), ZNRecord (org.apache.helix.ZNRecord), HashSet (java.util.HashSet)
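
The fake manager used by this test stubs out the kafka offset lookup through the _kafkaSmallestOffsetToReturn and _kafkaLargestOffsetToReturn fields set above. A plausible sketch of that override, matching the getKafkaPartitionOffset call signature seen in Example 3 (the visibility modifier and the exact override are assumptions):

// Plausible FakePinotLLCRealtimeSegmentManager override: the real manager
// queries kafka; the fake just returns the canned offsets the test sets.
@Override
protected long getKafkaPartitionOffset(KafkaStreamMetadata kafkaStreamMetadata,
        String offsetCriteria, int partitionId) {
    if ("largest".equals(offsetCriteria)) {
        return _kafkaLargestOffsetToReturn;
    }
    return _kafkaSmallestOffsetToReturn; // "smallest" and any other criteria
}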

Example 5 with LLCSegmentName

Use of com.linkedin.pinot.common.utils.LLCSegmentName in project pinot by linkedin.

In class PinotLLCRealtimeSegmentManagerTest, method testInitialSegmentAssignments.

private void testInitialSegmentAssignments(final int nPartitions, final int nInstances, final int nReplicas, boolean existingIS) {
    FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(true, null);
    final String topic = "someTopic";
    final String rtTableName = "table_REALTIME";
    List<String> instances = getInstanceList(nInstances);
    final String startOffset = KAFKA_OFFSET;
    IdealState idealState = PinotTableIdealStateBuilder.buildEmptyKafkaConsumerRealtimeIdealStateFor(rtTableName, nReplicas);
    segmentManager.setupHelixEntries(topic, rtTableName, nPartitions, instances, nReplicas, startOffset, DUMMY_HOST, idealState, !existingIS, 1000000);
    final String actualRtTableName = segmentManager._realtimeTableName;
    final Map<String, List<String>> idealStateEntries = segmentManager._idealStateEntries;
    final int idealStateNReplicas = segmentManager._nReplicas;
    final List<String> propStorePaths = segmentManager._paths;
    final List<ZNRecord> propStoreEntries = segmentManager._records;
    final boolean createNew = segmentManager._createNew;
    Assert.assertEquals(propStorePaths.size(), nPartitions);
    Assert.assertEquals(propStoreEntries.size(), nPartitions);
    Assert.assertEquals(idealStateEntries.size(), nPartitions);
    Assert.assertEquals(actualRtTableName, rtTableName);
    Assert.assertEquals(createNew, !existingIS);
    Assert.assertEquals(idealStateNReplicas, nReplicas);
    Map<Integer, ZNRecord> segmentPropStoreMap = new HashMap<>(propStorePaths.size());
    Map<Integer, String> segmentPathsMap = new HashMap<>(propStorePaths.size());
    for (String path : propStorePaths) {
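        // The propstore path is "/SEGMENTS/<tableName>/<segmentName>" (asserted below), so split("/")
        // yields { "", "SEGMENTS", tableName, segmentName } and index 3 is the segment name.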
        String segNameStr = path.split("/")[3];
        int partition = new LLCSegmentName(segNameStr).getPartitionId();
        segmentPathsMap.put(partition, path);
    }
    for (ZNRecord znRecord : propStoreEntries) {
        LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
        segmentPropStoreMap.put(new LLCSegmentName(metadata.getSegmentName()).getPartitionId(), znRecord);
    }
    Assert.assertEquals(segmentPathsMap.size(), nPartitions);
    Assert.assertEquals(segmentPropStoreMap.size(), nPartitions);
    for (int partition = 0; partition < nPartitions; partition++) {
        final LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(segmentPropStoreMap.get(partition));
        // Just for coverage
        metadata.toString();
        ZNRecord znRecord = metadata.toZNRecord();
        LLCRealtimeSegmentZKMetadata metadataCopy = new LLCRealtimeSegmentZKMetadata(znRecord);
        Assert.assertEquals(metadata, metadataCopy);
        final String path = segmentPathsMap.get(partition);
        final String segmentName = metadata.getSegmentName();
        Assert.assertEquals(metadata.getStartOffset(), -1L);
        Assert.assertEquals(path, "/SEGMENTS/" + rtTableName + "/" + segmentName);
        LLCSegmentName llcSegmentName = new LLCSegmentName(segmentName);
        Assert.assertEquals(llcSegmentName.getPartitionId(), partition);
        Assert.assertEquals(llcSegmentName.getTableName(), TableNameBuilder.extractRawTableName(rtTableName));
        Assert.assertEquals(metadata.getNumReplicas(), nReplicas);
    }
}
Also used: HashMap (java.util.HashMap), LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName), IdealState (org.apache.helix.model.IdealState), ArrayList (java.util.ArrayList), List (java.util.List), LLCRealtimeSegmentZKMetadata (com.linkedin.pinot.common.metadata.segment.LLCRealtimeSegmentZKMetadata), ZNRecord (org.apache.helix.ZNRecord)

Aggregations

LLCSegmentName (com.linkedin.pinot.common.utils.LLCSegmentName): 34 uses
Test (org.testng.annotations.Test): 16 uses
ArrayList (java.util.ArrayList): 13 uses
SegmentCompletionProtocol (com.linkedin.pinot.common.protocols.SegmentCompletionProtocol): 12 uses
ZNRecord (org.apache.helix.ZNRecord): 11 uses
IdealState (org.apache.helix.model.IdealState): 10 uses
LLCRealtimeSegmentZKMetadata (com.linkedin.pinot.common.metadata.segment.LLCRealtimeSegmentZKMetadata): 9 uses
HashMap (java.util.HashMap): 8 uses
Request (com.linkedin.pinot.common.protocols.SegmentCompletionProtocol.Request): 6 uses
Object2IntLinkedOpenHashMap (it.unimi.dsi.fastutil.objects.Object2IntLinkedOpenHashMap): 5 uses
ExternalView (org.apache.helix.model.ExternalView): 5 uses
Field (java.lang.reflect.Field): 4 uses
HashSet (java.util.HashSet): 4 uses
List (java.util.List): 4 uses
BaseConfiguration (org.apache.commons.configuration.BaseConfiguration): 4 uses
HLCSegmentName (com.linkedin.pinot.common.utils.HLCSegmentName): 3 uses
InstanceConfig (org.apache.helix.model.InstanceConfig): 3 uses
BeforeTest (org.testng.annotations.BeforeTest): 3 uses
MinMaxPriorityQueue (com.google.common.collect.MinMaxPriorityQueue): 2 uses
AbstractTableConfig (com.linkedin.pinot.common.config.AbstractTableConfig): 2 uses