Example 26 with AbstractTableConfig

use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

the class PinotLLCRealtimeSegmentManagerTest method testUpdatingKafkaPartitions.

@Test
public void testUpdatingKafkaPartitions() throws Exception {
    FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(false, null);
    int nInstances = 3;
    int nKafkaPartitions = 8;
    int nReplicas = 3;
    final String topic = "someTopic";
    final String rtTableName = "table_REALTIME";
    List<String> instances = getInstanceList(nInstances);
    final String startOffset = KAFKA_OFFSET;
    // Populate 'partitionSet' with all Kafka partitions;
    // as we find partitions in the assignment, we remove them from this set.
    Set<Integer> partitionSet = new HashSet<>(nKafkaPartitions);
    for (int i = 0; i < nKafkaPartitions; i++) {
        partitionSet.add(i);
    }
    // Setup initial entries
    segmentManager.setupHelixEntries(topic, rtTableName, nKafkaPartitions, instances, nReplicas, startOffset, DUMMY_HOST, null, true, 1000);
    ZNRecord partitionAssignment = segmentManager._partitionAssignment;
    segmentManager._currentKafkaPartitionCount = nKafkaPartitions;
    segmentManager._currentInstanceList = instances;
    // Call to update the partition list should do nothing.
    AbstractTableConfig tableConfig = makeTableConfig(nReplicas, topic);
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    Assert.assertSame(segmentManager._partitionAssignment, partitionAssignment);
    // Change the number of Kafka partitions to 9; we should generate a new partition assignment.
    nKafkaPartitions = 9;
    segmentManager._currentKafkaPartitionCount = nKafkaPartitions;
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
    // Now reduce the number of instances; we should not update anything.
    segmentManager._currentInstanceList = getInstanceList(nInstances - 1);
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    Assert.assertSame(segmentManager._partitionAssignment, partitionAssignment);
    // Increase the number of servers by one; we should update the assignment again.
    nInstances++;
    segmentManager._currentInstanceList = getInstanceList(nInstances);
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    Assert.assertNotSame(segmentManager._partitionAssignment, partitionAssignment);
    partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
    // Increase the replica count by one; we should update the assignment.
    nReplicas++;
    tableConfig = makeTableConfig(nReplicas, topic);
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    Assert.assertNotSame(segmentManager._partitionAssignment, partitionAssignment);
    partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
    // Change the list of servers while keeping the number of servers the same.
    // We should see a change in the partition assignment.
    String server1 = segmentManager._currentInstanceList.get(0);
    segmentManager._currentInstanceList.set(0, server1 + "_new");
    Assert.assertEquals(nInstances, segmentManager._currentInstanceList.size());
    segmentManager.updateKafkaPartitionsIfNecessary(rtTableName, tableConfig);
    Assert.assertNotSame(segmentManager._partitionAssignment, partitionAssignment);
    partitionAssignment = validatePartitionAssignment(segmentManager, nKafkaPartitions, nReplicas);
}
Also used : AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) ZNRecord(org.apache.helix.ZNRecord) HashSet(java.util.HashSet) Test(org.testng.annotations.Test) BeforeTest(org.testng.annotations.BeforeTest)
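
For reference, validatePartitionAssignment is a private helper of this test class whose body is not shown in this excerpt. A minimal sketch of what such a check could look like, assuming the assignment ZNRecord maps each Kafka partition id to the list of instances hosting its replicas (the body below is an illustration, not the actual test code):

private ZNRecord validatePartitionAssignment(FakePinotLLCRealtimeSegmentManager segmentManager, int nKafkaPartitions, int nReplicas) {
    // Illustrative sketch; the real helper's body is not shown in this excerpt.
    ZNRecord assignment = segmentManager._partitionAssignment;
    Assert.assertNotNull(assignment);
    // The list fields map each partition id (as a string) to its replica instances.
    Map<String, List<String>> partitionToInstances = assignment.getListFields();
    Assert.assertEquals(partitionToInstances.size(), nKafkaPartitions);
    for (int i = 0; i < nKafkaPartitions; i++) {
        List<String> replicas = partitionToInstances.get(Integer.toString(i));
        Assert.assertNotNull(replicas);
        Assert.assertEquals(replicas.size(), nReplicas);
    }
    return assignment;
}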

Example 27 with AbstractTableConfig

use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

the class ValidationManagerTest method testLLCValidation.

@Test
public void testLLCValidation() throws Exception {
    final String topicName = "topic";
    final int kafkaPartitionCount = 2;
    final String realtimeTableName = "table_REALTIME";
    final String tableName = TableNameBuilder.extractRawTableName(realtimeTableName);
    // Server 1
    final String S1 = "S1";
    // Server 2
    final String S2 = "S2";
    // Server 3
    final String S3 = "S3";
    final List<String> hosts = Arrays.asList(S1, S2, S3);
    final HelixAdmin helixAdmin = _pinotHelixResourceManager.getHelixAdmin();
    ZNRecord znRecord = new ZNRecord(topicName);
    for (int i = 0; i < kafkaPartitionCount; i++) {
        znRecord.setListField(Integer.toString(i), hosts);
    }
    makeMockPinotLLCRealtimeSegmentManager(znRecord);
    long msSinceEpoch = 1540;
    LLCSegmentName p0s0 = new LLCSegmentName(tableName, 0, 0, msSinceEpoch);
    LLCSegmentName p0s1 = new LLCSegmentName(tableName, 0, 1, msSinceEpoch);
    LLCSegmentName p1s0 = new LLCSegmentName(tableName, 1, 0, msSinceEpoch);
    LLCSegmentName p1s1 = new LLCSegmentName(tableName, 1, 1, msSinceEpoch);
    IdealState idealstate = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(realtimeTableName, 3);
    idealstate.setPartitionState(p0s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    idealstate.setPartitionState(p0s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    idealstate.setPartitionState(p0s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    //    idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    //    idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    //    idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    idealstate.setPartitionState(p1s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    idealstate.setPartitionState(p1s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    idealstate.setPartitionState(p1s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
    idealstate.setPartitionState(p1s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    idealstate.setPartitionState(p1s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    idealstate.setPartitionState(p1s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
    FakeValidationMetrics validationMetrics = new FakeValidationMetrics();
    ValidationManager validationManager = new ValidationManager(validationMetrics, _pinotHelixResourceManager, new ControllerConf(), _segmentManager);
    Map<String, String> streamConfigs = new HashMap<String, String>(4);
    streamConfigs.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "highLevel,simple");
    Field autoCreateOnError = ValidationManager.class.getDeclaredField("_autoCreateOnError");
    autoCreateOnError.setAccessible(true);
    autoCreateOnError.setBoolean(validationManager, false);
    AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
    IndexingConfig indexingConfig = mock(IndexingConfig.class);
    when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
    when(indexingConfig.getStreamConfigs()).thenReturn(streamConfigs);
    validationManager.validateLLCSegments(realtimeTableName, tableConfig);
    Assert.assertEquals(validationMetrics.partitionCount, 1);
    // Set partition 0 to have one instance in CONSUMING state and the others in OFFLINE;
    // we should not flag any partitions for correction.
    helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
    idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
    idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
    idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
    helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
    validationManager.validateLLCSegments(realtimeTableName, tableConfig);
    Assert.assertEquals(validationMetrics.partitionCount, 0);
    helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
}
Also used : IndexingConfig(com.linkedin.pinot.common.config.IndexingConfig) HashMap(java.util.HashMap) Matchers.anyString(org.mockito.Matchers.anyString) HelixAdmin(org.apache.helix.HelixAdmin) LLCSegmentName(com.linkedin.pinot.common.utils.LLCSegmentName) IdealState(org.apache.helix.model.IdealState) Field(java.lang.reflect.Field) ControllerConf(com.linkedin.pinot.controller.ControllerConf) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) ZNRecord(org.apache.helix.ZNRecord) Test(org.testng.annotations.Test)
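
The two partitionCount assertions reflect how validateLLCSegments decides whether a partition needs correction: on the first run, no segment of Kafka partition 0 is in CONSUMING state anywhere (those setPartitionState calls are commented out), so one partition is flagged; after p0s1 gains a CONSUMING replica, none are. A rough sketch of that per-partition check against the Helix IdealState API (the method name and shape here are illustrative, not Pinot's actual validator code):

// Illustrative sketch: count Kafka partitions with no segment replica in CONSUMING state.
private int countNonConsumingPartitions(IdealState idealState, int kafkaPartitionCount) {
    Set<Integer> consumingPartitions = new HashSet<>();
    for (String segmentName : idealState.getPartitionSet()) {
        int partitionId = new LLCSegmentName(segmentName).getPartitionId();
        // A partition is healthy if at least one replica of one of its segments is CONSUMING.
        for (String state : idealState.getInstanceStateMap(segmentName).values()) {
            if (PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE.equals(state)) {
                consumingPartitions.add(partitionId);
            }
        }
    }
    return kafkaPartitionCount - consumingPartitions.size();
}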

Example 28 with AbstractTableConfig

use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

the class LLRealtimeSegmentDataManagerTest method testTimeString.

@Test
public void testTimeString() throws Exception {
    JSONObject tableConfigJson = new JSONObject(_tableConfigJson);
    JSONObject tableIndexConfig = (JSONObject) tableConfigJson.get("tableIndexConfig");
    JSONObject streamConfigs = (JSONObject) tableIndexConfig.get("streamConfigs");
    {
        streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, "3h");
        AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
        InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
        Schema schema = Schema.fromString(makeSchema());
        KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
        config.init(tableConfig, instanceZKMetadata, schema);
        Assert.assertEquals(3 * 3600 * 1000L, config.getTimeThresholdToFlushSegment());
    }
    {
        streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, "3h30m");
        AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
        InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
        Schema schema = Schema.fromString(makeSchema());
        KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
        config.init(tableConfig, instanceZKMetadata, schema);
        Assert.assertEquals((3 * 3600 + 30 * 60) * 1000L, config.getTimeThresholdToFlushSegment());
    }
    {
        final long segTime = 898789748357L;
        streamConfigs.put(CommonConstants.Helix.DataSource.Realtime.REALTIME_SEGMENT_FLUSH_TIME, String.valueOf(segTime));
        AbstractTableConfig tableConfig = createTableConfig(tableConfigJson.toString());
        InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
        Schema schema = Schema.fromString(makeSchema());
        KafkaLowLevelStreamProviderConfig config = new KafkaLowLevelStreamProviderConfig();
        config.init(tableConfig, instanceZKMetadata, schema);
        Assert.assertEquals(segTime, config.getTimeThresholdToFlushSegment());
    }
}
Also used : JSONObject(org.json.JSONObject) InstanceZKMetadata(com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata) Schema(com.linkedin.pinot.common.data.Schema) KafkaLowLevelStreamProviderConfig(com.linkedin.pinot.core.realtime.impl.kafka.KafkaLowLevelStreamProviderConfig) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) Test(org.testng.annotations.Test)
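
The three blocks above pin down the flush-time parsing rules: "3h" and "3h30m" style periods are converted to milliseconds, while a bare number is taken as milliseconds directly. A minimal sketch of such a conversion (Pinot's real parsing lives in its time utilities; this regex-based version only illustrates the rules the test asserts):

// Illustration only: convert strings like "3h", "3h30m", or "898789748357" to milliseconds.
static long flushTimeToMillis(String value) {
    java.util.regex.Matcher m = java.util.regex.Pattern.compile("(?:(\\d+)h)?(?:(\\d+)m)?").matcher(value);
    if (m.matches() && (m.group(1) != null || m.group(2) != null)) {
        long hours = m.group(1) == null ? 0 : Long.parseLong(m.group(1));
        long minutes = m.group(2) == null ? 0 : Long.parseLong(m.group(2));
        return (hours * 3600 + minutes * 60) * 1000L;
    }
    // No recognized suffix: the value is already in milliseconds.
    return Long.parseLong(value);
}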

Example 29 with AbstractTableConfig

use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

the class PinotSegmentRebalancer method rebalanceTable.

/**
   * Rebalances a table, deriving the tenant name from the table config.
   * @param tableName name of the table to rebalance
   * @throws Exception
   */
public void rebalanceTable(String tableName) throws Exception {
    String tableConfigPath = "/CONFIGS/TABLE/" + tableName;
    Stat stat = new Stat();
    ZNRecord znRecord = propertyStore.get(tableConfigPath, stat, 0);
    AbstractTableConfig tableConfig = AbstractTableConfig.fromZnRecord(znRecord);
    // Strip the table-type suffix (OFFLINE or REALTIME) from the server tenant name.
    String tenantName = tableConfig.getTenantConfig().getServer().replaceAll(TableType.OFFLINE.toString(), "").replaceAll(TableType.REALTIME.toString(), "");
    rebalanceTable(tableName, tenantName);
}
Also used : Stat(org.apache.zookeeper.data.Stat) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) ZNRecord(org.apache.helix.ZNRecord)
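
The replaceAll chain derives the tenant name by stripping the table-type tokens out of the configured server tenant string. A tiny illustration with a hypothetical tenant value (the real value comes from tableConfig.getTenantConfig().getServer()):

String serverTenant = "myTenantOFFLINE"; // hypothetical input
String tenantName = serverTenant
    .replaceAll(TableType.OFFLINE.toString(), "")   // drops "OFFLINE"
    .replaceAll(TableType.REALTIME.toString(), ""); // drops "REALTIME"
// tenantName is now "myTenant"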

Example 30 with AbstractTableConfig

use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.

the class LLRealtimeSegmentDataManagerTest method createFakeSegmentManager.

private FakeLLRealtimeSegmentDataManager createFakeSegmentManager() throws Exception {
    LLCRealtimeSegmentZKMetadata segmentZKMetadata = createZkMetadata();
    AbstractTableConfig tableConfig = createTableConfig();
    InstanceZKMetadata instanceZKMetadata = new InstanceZKMetadata();
    RealtimeTableDataManager tableDataManager = createTableDataManager();
    String resourceDir = _segmentDir;
    Schema schema = Schema.fromString(makeSchema());
    ServerMetrics serverMetrics = new ServerMetrics(new MetricsRegistry());
    FakeLLRealtimeSegmentDataManager segmentDataManager = new FakeLLRealtimeSegmentDataManager(segmentZKMetadata, tableConfig, instanceZKMetadata, tableDataManager, resourceDir, schema, serverMetrics);
    return segmentDataManager;
}
Also used : MetricsRegistry(com.yammer.metrics.core.MetricsRegistry) InstanceZKMetadata(com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata) Schema(com.linkedin.pinot.common.data.Schema) ServerMetrics(com.linkedin.pinot.common.metrics.ServerMetrics) AbstractTableConfig(com.linkedin.pinot.common.config.AbstractTableConfig) LLCRealtimeSegmentZKMetadata(com.linkedin.pinot.common.metadata.segment.LLCRealtimeSegmentZKMetadata)

Aggregations

AbstractTableConfig (com.linkedin.pinot.common.config.AbstractTableConfig) 53
ZNRecord (org.apache.helix.ZNRecord) 10
Test (org.testng.annotations.Test) 10
IdealState (org.apache.helix.model.IdealState) 9
ArrayList (java.util.ArrayList) 8
JSONObject (org.json.JSONObject) 8
HttpVerb (com.linkedin.pinot.common.restlet.swagger.HttpVerb) 7
Paths (com.linkedin.pinot.common.restlet.swagger.Paths) 7
Summary (com.linkedin.pinot.common.restlet.swagger.Summary) 7
Tags (com.linkedin.pinot.common.restlet.swagger.Tags) 7
IOException (java.io.IOException) 7
HashMap (java.util.HashMap) 7
StringRepresentation (org.restlet.representation.StringRepresentation) 7
BeforeTest (org.testng.annotations.BeforeTest) 7
TableNameBuilder (com.linkedin.pinot.common.config.TableNameBuilder) 6
Schema (com.linkedin.pinot.common.data.Schema) 6
IndexingConfig (com.linkedin.pinot.common.config.IndexingConfig) 5
KafkaStreamMetadata (com.linkedin.pinot.common.metadata.stream.KafkaStreamMetadata) 4
JSONException (org.json.JSONException) 4
SegmentsValidationAndRetentionConfig (com.linkedin.pinot.common.config.SegmentsValidationAndRetentionConfig) 3