Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
In the class PinotTableRestletResourceTest, the method testUpdateTableConfig:
@Test
public void testUpdateTableConfig() throws IOException, JSONException {
  String tableName = "updateTC";
  JSONObject request = ControllerRequestBuilder.buildCreateOfflineTableJSON(tableName, "default", "default", "potato", "DAYS", "DAYS", "5", 2, "BalanceNumSegmentAssignmentStrategy", Collections.<String>emptyList(), "MMAP", "v3");
  ControllerRequestURLBuilder controllerUrlBuilder = ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL);
  sendPostRequest(controllerUrlBuilder.forTableCreate(), request.toString());
  // Table creation should succeed
  AbstractTableConfig tableConfig = getTableConfig(tableName, "OFFLINE");
  Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeValue(), "5");
  Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeUnit(), "DAYS");
  tableConfig.getValidationConfig().setRetentionTimeUnit("HOURS");
  tableConfig.getValidationConfig().setRetentionTimeValue("10");
  String output = sendPutRequest(controllerUrlBuilder.forUpdateTableConfig(tableName), tableConfig.toJSON().toString());
  JSONObject jsonResponse = new JSONObject(output);
  Assert.assertTrue(jsonResponse.has("status"));
  Assert.assertEquals(jsonResponse.getString("status"), "Success");
  AbstractTableConfig modifiedConfig = getTableConfig(tableName, "OFFLINE");
  Assert.assertEquals(modifiedConfig.getValidationConfig().getRetentionTimeUnit(), "HOURS");
  Assert.assertEquals(modifiedConfig.getValidationConfig().getRetentionTimeValue(), "10");
  // Realtime
  JSONObject metadata = new JSONObject();
  metadata.put("streamType", "kafka");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.CONSUMER_TYPE, Kafka.ConsumerType.highLevel.toString());
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.TOPIC_NAME, "fakeTopic");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.DECODER_CLASS, "fakeClass");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.ZK_BROKER_URL, "fakeUrl");
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.HighLevelConsumer.ZK_CONNECTION_STRING, "potato");
  metadata.put(DataSource.Realtime.REALTIME_SEGMENT_FLUSH_SIZE, Integer.toString(1234));
  metadata.put(DataSource.STREAM_PREFIX + "." + Kafka.KAFKA_CONSUMER_PROPS_PREFIX + "." + Kafka.AUTO_OFFSET_RESET, "smallest");
  request = ControllerRequestBuilder.buildCreateRealtimeTableJSON(tableName, "default", "default", "potato", "DAYS", "DAYS", "5", 2, "BalanceNumSegmentAssignmentStrategy", metadata, "fakeSchema", "fakeColumn", Collections.<String>emptyList(), "MMAP", false);
  sendPostRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forTableCreate(), request.toString());
  tableConfig = getTableConfig(tableName, "REALTIME");
  Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeValue(), "5");
  Assert.assertEquals(tableConfig.getValidationConfig().getRetentionTimeUnit(), "DAYS");
  Assert.assertNull(tableConfig.getQuotaConfig());
  QuotaConfig quota = new QuotaConfig();
  quota.setStorage("10G");
  tableConfig.setQuotaConfig(quota);
  sendPutRequest(controllerUrlBuilder.forUpdateTableConfig(tableName), tableConfig.toJSON().toString());
  modifiedConfig = getTableConfig(tableName, "REALTIME");
  Assert.assertEquals(modifiedConfig.getQuotaConfig().getStorage(), "10G");
}
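The test depends on a getTableConfig helper that the excerpt does not show. Below is a minimal sketch of what such a helper could look like, assuming the test harness provides a sendGetRequest method and the URL builder exposes a forTableGet method; both names are assumptions, not confirmed by the excerpt. Only AbstractTableConfig.init is confirmed by the surrounding code.

// Hypothetical helper (sketch): fetch the table config JSON from the controller
// and deserialize the OFFLINE or REALTIME section via AbstractTableConfig.init.
private AbstractTableConfig getTableConfig(String tableName, String tableType) throws IOException, JSONException {
  ControllerRequestURLBuilder urlBuilder = ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL);
  String response = sendGetRequest(urlBuilder.forTableGet(tableName));  // assumed helper and URL method
  JSONObject tableConfigJson = new JSONObject(response).getJSONObject(tableType);
  return AbstractTableConfig.init(tableConfigJson.toString());
}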
Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
In the class PinotLLCRealtimeSegmentManager, the method getCommitTimeoutMS:
public long getCommitTimeoutMS(String tableName) {
  long commitTimeoutMS = SegmentCompletionProtocol.getMaxSegmentCommitTimeMs();
  if (_propertyStore == null) {
    return commitTimeoutMS;
  }
  AbstractTableConfig tableConfig = ZKMetadataProvider.getRealtimeTableConfig(_propertyStore, tableName);
  final Map<String, String> streamConfigs = tableConfig.getIndexingConfig().getStreamConfigs();
  if (streamConfigs != null && streamConfigs.containsKey(CommonConstants.Helix.DataSource.Realtime.SEGMENT_COMMIT_TIMEOUT_SECONDS)) {
    final String commitTimeoutSecondsStr = streamConfigs.get(CommonConstants.Helix.DataSource.Realtime.SEGMENT_COMMIT_TIMEOUT_SECONDS);
    try {
      return TimeUnit.MILLISECONDS.convert(Integer.parseInt(commitTimeoutSecondsStr), TimeUnit.SECONDS);
    } catch (Exception e) {
      LOGGER.warn("Failed to parse commit timeout value of {}", commitTimeoutSecondsStr, e);
      return commitTimeoutMS;
    }
  }
  return commitTimeoutMS;
}
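The parse-and-convert fallback at the heart of this method is easy to verify in isolation. A self-contained sketch of the same pattern follows; the class name, default constant, and its value are illustrative only, not taken from the Pinot codebase.

import java.util.concurrent.TimeUnit;

public class CommitTimeoutExample {
  // Illustrative default used when the configured value is absent or unparseable.
  static final long DEFAULT_COMMIT_TIMEOUT_MS = 120_000L;

  static long parseCommitTimeoutMs(String commitTimeoutSeconds) {
    try {
      // "180" -> 180000 ms; same conversion as TimeUnit.MILLISECONDS.convert(..., TimeUnit.SECONDS)
      return TimeUnit.MILLISECONDS.convert(Integer.parseInt(commitTimeoutSeconds), TimeUnit.SECONDS);
    } catch (NumberFormatException e) {
      // Fall back to the default rather than failing.
      return DEFAULT_COMMIT_TIMEOUT_MS;
    }
  }

  public static void main(String[] args) {
    System.out.println(parseCommitTimeoutMs("180"));   // 180000
    System.out.println(parseCommitTimeoutMs("oops"));  // 120000 (default)
  }
}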
Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
In the class PinotHelixResourceManager, the method updateMetadataConfigFor:
public void updateMetadataConfigFor(String tableName, TableType type, TableCustomConfig newConfigs) throws Exception {
  String actualTableName = new TableNameBuilder(type).forTable(tableName);
  AbstractTableConfig config;
  if (type == TableType.REALTIME) {
    config = ZKMetadataProvider.getRealtimeTableConfig(getPropertyStore(), actualTableName);
  } else {
    config = ZKMetadataProvider.getOfflineTableConfig(getPropertyStore(), actualTableName);
  }
  if (config == null) {
    throw new RuntimeException("Table " + tableName + " of type " + type + " not found");
  }
  config.setCustomConfigs(newConfigs);
  setTableConfig(config, actualTableName, type);
}
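A hedged usage sketch of this method follows. It assumes TableCustomConfig exposes a setCustomConfigs(Map) setter, which is not shown in this excerpt; the key/value pair and the resourceManager variable are illustrative.

// Hypothetical caller (sketch): attach a custom key/value pair to an offline table's config.
Map<String, String> customMap = new HashMap<>();
customMap.put("owner", "data-infra");  // illustrative property, not a Pinot-defined key
TableCustomConfig customConfig = new TableCustomConfig();
customConfig.setCustomConfigs(customMap);  // assumed setter on TableCustomConfig
resourceManager.updateMetadataConfigFor("myTable", TableType.OFFLINE, customConfig);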
Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
In the class PinotHelixResourceManagerTest, the method testRebuildBrokerResourceFromHelixTags:
@Test
public void testRebuildBrokerResourceFromHelixTags() throws Exception {
  AbstractTableConfig tableConfig = AbstractTableConfig.init(ControllerRequestBuilderUtil.buildCreateOfflineTableJSON("faketable", "serverTenant", "brokerTenant", 3).toString());
  Tenant tenant = new Tenant();
  tenant.setTenantName("brokerTenant");
  tenant.setTenantRole("BROKER");
  tenant.setNumberOfInstances(3);
  pinotHelixResourceManager.createBrokerTenant(tenant);
  pinotHelixResourceManager.addTable(tableConfig);
  // Check that the broker ideal state has 3 brokers assigned to it for faketable_OFFLINE
  IdealState idealState = pinotHelixResourceManager.getHelixAdmin().getResourceIdealState(HELIX_CLUSTER_NAME, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
  Assert.assertEquals(idealState.getInstanceStateMap("faketable_OFFLINE").size(), 3);
  // Retag all instances currently assigned to brokerTenant to be unassigned
  Set<String> brokerInstances = pinotHelixResourceManager.getAllInstancesForBrokerTenant("brokerTenant");
  for (String brokerInstance : brokerInstances) {
    pinotHelixResourceManager.getHelixAdmin().removeInstanceTag(HELIX_CLUSTER_NAME, brokerInstance, "brokerTenant_BROKER");
    pinotHelixResourceManager.getHelixAdmin().addInstanceTag(HELIX_CLUSTER_NAME, brokerInstance, CommonConstants.Helix.UNTAGGED_BROKER_INSTANCE);
  }
  // Rebuilding the broker tenant should update the ideal state size
  pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags("faketable_OFFLINE");
  idealState = pinotHelixResourceManager.getHelixAdmin().getResourceIdealState(HELIX_CLUSTER_NAME, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
  Assert.assertEquals(idealState.getInstanceStateMap("faketable_OFFLINE").size(), 0);
  // Tag five instances
  int instancesRemainingToTag = 5;
  List<String> instances = pinotHelixResourceManager.getAllInstanceNames();
  for (String instance : instances) {
    if (instance.startsWith(CommonConstants.Helix.PREFIX_OF_BROKER_INSTANCE)) {
      pinotHelixResourceManager.getHelixAdmin().removeInstanceTag(HELIX_CLUSTER_NAME, instance, CommonConstants.Helix.UNTAGGED_BROKER_INSTANCE);
      pinotHelixResourceManager.getHelixAdmin().addInstanceTag(HELIX_CLUSTER_NAME, instance, "brokerTenant_BROKER");
      instancesRemainingToTag--;
      if (instancesRemainingToTag == 0) {
        break;
      }
    }
  }
  // Rebuilding the broker tenant should update the ideal state size
  pinotHelixResourceManager.rebuildBrokerResourceFromHelixTags("faketable_OFFLINE");
  idealState = pinotHelixResourceManager.getHelixAdmin().getResourceIdealState(HELIX_CLUSTER_NAME, CommonConstants.Helix.BROKER_RESOURCE_INSTANCE);
  Assert.assertEquals(idealState.getInstanceStateMap("faketable_OFFLINE").size(), 5);
  // Untag all instances for other tests
  for (String instance : instances) {
    if (instance.startsWith(CommonConstants.Helix.PREFIX_OF_BROKER_INSTANCE)) {
      pinotHelixResourceManager.getHelixAdmin().removeInstanceTag(HELIX_CLUSTER_NAME, instance, "brokerTenant_BROKER");
      pinotHelixResourceManager.getHelixAdmin().addInstanceTag(HELIX_CLUSTER_NAME, instance, CommonConstants.Helix.UNTAGGED_BROKER_INSTANCE);
    }
  }
  // Delete table
  pinotHelixResourceManager.deleteOfflineTable("faketable");
}
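The remove-tag/add-tag pair recurs three times in this test. A small helper along the following lines would cut the repetition; this is a sketch that assumes access to the same pinotHelixResourceManager and HELIX_CLUSTER_NAME used above, and note that the two Helix calls are separate, so the swap is not atomic.

// Hypothetical helper (sketch): swap one Helix instance tag for another.
private void retagInstance(String instanceName, String oldTag, String newTag) {
  pinotHelixResourceManager.getHelixAdmin().removeInstanceTag(HELIX_CLUSTER_NAME, instanceName, oldTag);
  pinotHelixResourceManager.getHelixAdmin().addInstanceTag(HELIX_CLUSTER_NAME, instanceName, newTag);
}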
Use of com.linkedin.pinot.common.config.AbstractTableConfig in project pinot by linkedin.
In the class PinotLLCRealtimeSegmentManagerTest, the method testAutoReplaceConsumingSegment:
public void testAutoReplaceConsumingSegment(final String tableConfigStartOffset) throws Exception {
  FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(true, null);
  final int nPartitions = 8;
  final int nInstances = 3;
  final int nReplicas = 2;
  final String topic = "someTopic";
  final String rtTableName = "table_REALTIME";
  List<String> instances = getInstanceList(nInstances);
  final String startOffset = KAFKA_OFFSET;
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyKafkaConsumerRealtimeIdealStateFor(rtTableName, nReplicas);
  segmentManager.setupHelixEntries(topic, rtTableName, nPartitions, instances, nReplicas, startOffset, DUMMY_HOST, idealState, false, 10000);
  // Add another segment for each partition
  long now = System.currentTimeMillis();
  List<String> existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
  final int partitionToBeFixed = 3;
  final int partitionWithHigherOffset = 4;
  final int emptyPartition = 5;
  final long smallestPartitionOffset = 0x259080984568L;
  final long largestPartitionOffset = smallestPartitionOffset + 100000;
  final long higherOffset = smallestPartitionOffset + 100;
  for (String segmentNameStr : existingSegments) {
    LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
    switch (segmentName.getPartitionId()) {
      case partitionToBeFixed:
        // Do nothing; we will test adding a new segment for this partition when there is only one segment in it.
        break;
      case emptyPartition:
        // Remove the existing segment, so we can test adding a new segment for this partition when none exists.
        segmentManager._idealStateEntries.remove(segmentNameStr);
        break;
      case partitionWithHigherOffset:
        // Set segment metadata for this segment such that its offset is higher than the startOffset we get from Kafka.
        // In that case, we should choose the new segment offset as this one rather than the one Kafka hands us.
        LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata();
        metadata.setSegmentName(segmentName.getSegmentName());
        metadata.setEndOffset(higherOffset);
        metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
        segmentManager._metadataMap.put(segmentName.getSegmentName(), metadata);
        break;
      default:
        // Add a second segment for this partition. It will not be repaired.
        LLCSegmentName newSegmentName = new LLCSegmentName(segmentName.getTableName(), segmentName.getPartitionId(), segmentName.getSequenceNumber() + 1, now);
        List<String> hosts = segmentManager._idealStateEntries.get(segmentNameStr);
        segmentManager._idealStateEntries.put(newSegmentName.getSegmentName(), hosts);
        break;
    }
  }
  Map<String, String> streamPropMap = new HashMap<>(1);
  streamPropMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "simple");
  streamPropMap.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.KAFKA_CONSUMER_PROPS_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.AUTO_OFFSET_RESET), tableConfigStartOffset);
  KafkaStreamMetadata kafkaStreamMetadata = new KafkaStreamMetadata(streamPropMap);
  AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
  IndexingConfig indexingConfig = mock(IndexingConfig.class);
  when(indexingConfig.getStreamConfigs()).thenReturn(streamPropMap);
  when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
  Set<Integer> nonConsumingPartitions = new HashSet<>(1);
  nonConsumingPartitions.add(partitionToBeFixed);
  nonConsumingPartitions.add(partitionWithHigherOffset);
  nonConsumingPartitions.add(emptyPartition);
  segmentManager._kafkaSmallestOffsetToReturn = smallestPartitionOffset;
  segmentManager._kafkaLargestOffsetToReturn = largestPartitionOffset;
  existingSegments = new ArrayList<>(segmentManager._idealStateEntries.keySet());
  segmentManager._paths.clear();
  segmentManager._records.clear();
  segmentManager.createConsumingSegment(rtTableName, nonConsumingPartitions, existingSegments, tableConfig);
  Assert.assertEquals(segmentManager._paths.size(), 3);
  Assert.assertEquals(segmentManager._records.size(), 3);
  Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 3);
  Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 3);
  int found = 0;
  int index = 0;
  while (index < segmentManager._paths.size()) {
    String znodePath = segmentManager._paths.get(index);
    int slash = znodePath.lastIndexOf('/');
    String segmentNameStr = znodePath.substring(slash + 1);
    LLCSegmentName segmentName = new LLCSegmentName(segmentNameStr);
    ZNRecord znRecord;
    LLCRealtimeSegmentZKMetadata metadata;
    switch (segmentName.getPartitionId()) {
      case partitionToBeFixed:
        // We had left this partition with one segment. So, a second one should be created with a sequence number
        // one higher than the starting sequence number. Its start offset should be what Kafka returns.
        found++;
        Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
        znRecord = segmentManager._records.get(index);
        metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
        Assert.assertEquals(metadata.getNumReplicas(), 2);
        Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
        break;
      case emptyPartition:
        // We had removed all segments in this partition. A new one should be created with the offset as returned
        // by Kafka and with the starting sequence number.
        found++;
        Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER);
        znRecord = segmentManager._records.get(index);
        metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
        Assert.assertEquals(metadata.getNumReplicas(), 2);
        if (tableConfigStartOffset.equals("smallest")) {
          Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
        } else {
          Assert.assertEquals(metadata.getStartOffset(), largestPartitionOffset);
        }
        break;
      case partitionWithHigherOffset:
        // We had left this partition with one segment, and had set the end offset of that segment to a value
        // higher than the one returned by Kafka. So, a second one should be created with a sequence number one
        // higher than the starting sequence number, and a start offset equal to the end offset of the first one.
        found++;
        Assert.assertEquals(segmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 1);
        znRecord = segmentManager._records.get(index);
        metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
        Assert.assertEquals(metadata.getNumReplicas(), 2);
        Assert.assertEquals(metadata.getStartOffset(), higherOffset);
        break;
    }
    index++;
  }
  // We should see all three cases here.
  Assert.assertEquals(3, found);
  // Now, if we make 'partitionToBeFixed' a non-consuming partition, a second segment should get added with the
  // same start offset as the first one, since the Kafka offset to return has not changed.
  Set<Integer> ncPartitions = new HashSet<>(1);
  ncPartitions.add(partitionToBeFixed);
  segmentManager.createConsumingSegment(rtTableName, ncPartitions, segmentManager.getExistingSegments(rtTableName), tableConfig);
  Assert.assertEquals(segmentManager._paths.size(), 4);
  Assert.assertEquals(segmentManager._records.size(), 4);
  Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 4);
  Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 4);
  // The latest ZN record should be that of the new segment we added.
  ZNRecord znRecord = segmentManager._records.get(3);
  LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
  Assert.assertEquals(metadata.getNumReplicas(), 2);
  Assert.assertEquals(metadata.getStartOffset(), smallestPartitionOffset);
  LLCSegmentName llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
  Assert.assertEquals(llcSegmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 2);
  Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);
  // Now pretend the previous segment completed successfully, and set the end offset
  metadata.setEndOffset(metadata.getStartOffset() + 10);
  metadata.setStatus(CommonConstants.Segment.Realtime.Status.DONE);
  segmentManager._records.remove(3);
  segmentManager._records.add(metadata.toZNRecord());
  segmentManager._metadataMap.put(metadata.getSegmentName(), metadata);
  segmentManager._kafkaLargestOffsetToReturn *= 2;
  segmentManager._kafkaSmallestOffsetToReturn *= 2;
  ncPartitions.clear();
  ncPartitions.add(partitionToBeFixed);
  segmentManager.createConsumingSegment(rtTableName, ncPartitions, segmentManager.getExistingSegments(rtTableName), tableConfig);
  Assert.assertEquals(segmentManager._paths.size(), 5);
  Assert.assertEquals(segmentManager._records.size(), 5);
  Assert.assertEquals(segmentManager._oldSegmentNameStr.size(), 5);
  Assert.assertEquals(segmentManager._newSegmentNameStr.size(), 5);
  znRecord = segmentManager._records.get(4);
  metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
  Assert.assertEquals(metadata.getNumReplicas(), 2);
  // In this case, since we have data loss, we will always use the smallest Kafka offset available.
  Assert.assertEquals(metadata.getStartOffset(), segmentManager.getKafkaPartitionOffset(null, "smallest", partitionToBeFixed));
  llcSegmentName = new LLCSegmentName(metadata.getSegmentName());
  Assert.assertEquals(llcSegmentName.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER + 3);
  Assert.assertEquals(llcSegmentName.getPartitionId(), partitionToBeFixed);
}
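The test above leans heavily on LLCSegmentName round-tripping: build a segment name, parse it back from its string form, and read the partition and sequence number out of it. A minimal sketch of that pattern in isolation, using only the constructor and accessors that appear in the test (the table name is illustrative):

// Build an LLC segment name, then parse it back and read its components.
long creationTime = System.currentTimeMillis();
LLCSegmentName built = new LLCSegmentName("myTable", 3, PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER, creationTime);
LLCSegmentName parsed = new LLCSegmentName(built.getSegmentName());
Assert.assertEquals(parsed.getPartitionId(), 3);
Assert.assertEquals(parsed.getSequenceNumber(), PinotLLCRealtimeSegmentManager.STARTING_SEQUENCE_NUMBER);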