Use of org.apache.helix.ZNRecord in project pinot by linkedin.
From the class PinotLLCRealtimeSegmentManagerTest, method validatePartitionAssignment.
private ZNRecord validatePartitionAssignment(FakePinotLLCRealtimeSegmentManager segmentManager, int nKafkaPartitions, int nReplicas) {
  ZNRecord partitionAssignment = segmentManager._partitionAssignment;
  Map<String, List<String>> partitionToServerListMap = partitionAssignment.getListFields();
  Assert.assertEquals(partitionToServerListMap.size(), nKafkaPartitions);
  for (List<String> serverList : partitionToServerListMap.values()) {
    Assert.assertEquals(serverList.size(), nReplicas);
  }
  return partitionAssignment;
}
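For context, the partition assignment validated here is just a Helix ZNRecord whose list fields map each Kafka partition id to the server instances holding its replicas, mirroring what getListFields() returns above. A minimal sketch of building such a record by hand, assuming hypothetical server instance names:

// Sketch only: construct a partition-assignment ZNRecord manually.
ZNRecord assignment = new ZNRecord("someTopic");
List<String> replicaServers = Arrays.asList("Server_host1_8098", "Server_host2_8098");
for (int partition = 0; partition < 4; partition++) {
  // One list field per Kafka partition: partition id -> assigned replica servers.
  assignment.setListField(Integer.toString(partition), replicaServers);
}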
Use of org.apache.helix.ZNRecord in project pinot by linkedin.
From the class PinotLLCRealtimeSegmentManagerTest, method testInitialSegmentAssignments.
private void testInitialSegmentAssignments(final int nPartitions, final int nInstances, final int nReplicas, boolean existingIS) {
  FakePinotLLCRealtimeSegmentManager segmentManager = new FakePinotLLCRealtimeSegmentManager(true, null);
  final String topic = "someTopic";
  final String rtTableName = "table_REALTIME";
  List<String> instances = getInstanceList(nInstances);
  final String startOffset = KAFKA_OFFSET;
  IdealState idealState = PinotTableIdealStateBuilder.buildEmptyKafkaConsumerRealtimeIdealStateFor(rtTableName, nReplicas);
  segmentManager.setupHelixEntries(topic, rtTableName, nPartitions, instances, nReplicas, startOffset, DUMMY_HOST, idealState, !existingIS, 1000000);
  final String actualRtTableName = segmentManager._realtimeTableName;
  final Map<String, List<String>> idealStateEntries = segmentManager._idealStateEntries;
  final int idealStateNReplicas = segmentManager._nReplicas;
  final List<String> propStorePaths = segmentManager._paths;
  final List<ZNRecord> propStoreEntries = segmentManager._records;
  final boolean createNew = segmentManager._createNew;
  Assert.assertEquals(propStorePaths.size(), nPartitions);
  Assert.assertEquals(propStoreEntries.size(), nPartitions);
  Assert.assertEquals(idealStateEntries.size(), nPartitions);
  Assert.assertEquals(actualRtTableName, rtTableName);
  Assert.assertEquals(createNew, !existingIS);
  Assert.assertEquals(idealStateNReplicas, nReplicas);
  Map<Integer, ZNRecord> segmentPropStoreMap = new HashMap<>(propStorePaths.size());
  Map<Integer, String> segmentPathsMap = new HashMap<>(propStorePaths.size());
  for (String path : propStorePaths) {
    String segNameStr = path.split("/")[3];
    int partition = new LLCSegmentName(segNameStr).getPartitionId();
    segmentPathsMap.put(partition, path);
  }
  for (ZNRecord znRecord : propStoreEntries) {
    LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(znRecord);
    segmentPropStoreMap.put(new LLCSegmentName(metadata.getSegmentName()).getPartitionId(), znRecord);
  }
  Assert.assertEquals(segmentPathsMap.size(), nPartitions);
  Assert.assertEquals(segmentPropStoreMap.size(), nPartitions);
  for (int partition = 0; partition < nPartitions; partition++) {
    final LLCRealtimeSegmentZKMetadata metadata = new LLCRealtimeSegmentZKMetadata(segmentPropStoreMap.get(partition));
    // Just for coverage
    metadata.toString();
    ZNRecord znRecord = metadata.toZNRecord();
    LLCRealtimeSegmentZKMetadata metadataCopy = new LLCRealtimeSegmentZKMetadata(znRecord);
    Assert.assertEquals(metadata, metadataCopy);
    final String path = segmentPathsMap.get(partition);
    final String segmentName = metadata.getSegmentName();
    Assert.assertEquals(metadata.getStartOffset(), -1L);
    Assert.assertEquals(path, "/SEGMENTS/" + rtTableName + "/" + segmentName);
    LLCSegmentName llcSegmentName = new LLCSegmentName(segmentName);
    Assert.assertEquals(llcSegmentName.getPartitionId(), partition);
    Assert.assertEquals(llcSegmentName.getTableName(), TableNameBuilder.extractRawTableName(rtTableName));
    Assert.assertEquals(metadata.getNumReplicas(), nReplicas);
  }
}
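The assertions above lean on two properties worth calling out: LLCRealtimeSegmentZKMetadata round-trips losslessly through toZNRecord(), and LLCSegmentName encodes the table name, partition id, and sequence number directly in the segment name. A small sketch of the naming side, with illustrative values:

// Sketch only: LLCSegmentName packs table, partition, sequence and creation time into one name.
LLCSegmentName llcName = new LLCSegmentName("myTable", 3, 120, System.currentTimeMillis());
String name = llcName.getSegmentName(); // e.g. "myTable__3__120__<creation time>"
Assert.assertEquals(llcName.getPartitionId(), 3);
Assert.assertEquals(llcName.getTableName(), "myTable");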
Use of org.apache.helix.ZNRecord in project pinot by linkedin.
From the class SegmentDeletionManagerTest, method testBulkDeleteWithFailures.
public void testBulkDeleteWithFailures(boolean useSet) throws Exception {
  HelixAdmin helixAdmin = makeHelixAdmin();
  ZkHelixPropertyStore<ZNRecord> propertyStore = makePropertyStore();
  FakeDeletionManager deletionManager = new FakeDeletionManager(helixAdmin, propertyStore);
  // Exercise both Set and List inputs to the bulk-delete call.
  Collection<String> segments;
  if (useSet) {
    segments = new HashSet<String>();
  } else {
    segments = new ArrayList<String>();
  }
  segments.addAll(segmentsThatShouldBeDeleted());
  segments.addAll(segmentsInIdealStateOrExtView());
  segments.addAll(segmentsFailingPropStore());
  deletionManager.deleteSegmenetsFromPropertyStoreAndLocal(tableName, segments);
  // Segments whose property-store delete failed, or that are still referenced in the
  // ideal state or external view, must be queued for retry; only the rest are removed.
  Assert.assertTrue(deletionManager.segmentsToRetry.containsAll(segmentsFailingPropStore()));
  Assert.assertTrue(deletionManager.segmentsToRetry.containsAll(segmentsInIdealStateOrExtView()));
  Assert.assertTrue(deletionManager.segmentsRemovedFromStore.containsAll(segmentsThatShouldBeDeleted()));
}
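For reference, the property store injected here is a ZkHelixPropertyStore<ZNRecord>, so each segment's metadata is a ZNRecord at a ZooKeeper path. A hedged sketch of the read-then-remove pattern a deletion pass builds on, with an illustrative path:

// Sketch only: read a segment's ZNRecord and remove it from the property store.
String segmentPath = "/SEGMENTS/table_REALTIME/someSegment";
ZNRecord segmentRecord = propertyStore.get(segmentPath, null, AccessOption.PERSISTENT);
if (segmentRecord != null) {
  boolean removed = propertyStore.remove(segmentPath, AccessOption.PERSISTENT);
  // A false return here is the kind of failure that lands a segment in segmentsToRetry.
}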
Use of org.apache.helix.ZNRecord in project pinot by linkedin.
From the class ValidationManagerTest, method testLLCValidation.
@Test
public void testLLCValidation() throws Exception {
  final String topicName = "topic";
  final int kafkaPartitionCount = 2;
  final String realtimeTableName = "table_REALTIME";
  final String tableName = TableNameBuilder.extractRawTableName(realtimeTableName);
  final String S1 = "S1"; // Server 1
  final String S2 = "S2"; // Server 2
  final String S3 = "S3"; // Server 3
  final List<String> hosts = Arrays.asList(S1, S2, S3);
  final HelixAdmin helixAdmin = _pinotHelixResourceManager.getHelixAdmin();
  ZNRecord znRecord = new ZNRecord(topicName);
  for (int i = 0; i < kafkaPartitionCount; i++) {
    znRecord.setListField(Integer.toString(i), hosts);
  }
  makeMockPinotLLCRealtimeSegmentManager(znRecord);
  long msSinceEpoch = 1540;
  LLCSegmentName p0s0 = new LLCSegmentName(tableName, 0, 0, msSinceEpoch);
  LLCSegmentName p0s1 = new LLCSegmentName(tableName, 0, 1, msSinceEpoch);
  LLCSegmentName p1s0 = new LLCSegmentName(tableName, 1, 0, msSinceEpoch);
  LLCSegmentName p1s1 = new LLCSegmentName(tableName, 1, 1, msSinceEpoch);
  IdealState idealstate = PinotTableIdealStateBuilder.buildEmptyIdealStateFor(realtimeTableName, 3);
  idealstate.setPartitionState(p0s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealstate.setPartitionState(p0s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealstate.setPartitionState(p0s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  // Partition 0's latest segment (p0s1) is intentionally left out of the ideal state:
  // idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  // idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  // idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealstate.setPartitionState(p1s0.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealstate.setPartitionState(p1s0.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealstate.setPartitionState(p1s0.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.ONLINE_STATE);
  idealstate.setPartitionState(p1s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealstate.setPartitionState(p1s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealstate.setPartitionState(p1s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
  FakeValidationMetrics validationMetrics = new FakeValidationMetrics();
  ValidationManager validationManager = new ValidationManager(validationMetrics, _pinotHelixResourceManager, new ControllerConf(), _segmentManager);
  Map<String, String> streamConfigs = new HashMap<String, String>(4);
  streamConfigs.put(StringUtil.join(".", CommonConstants.Helix.DataSource.STREAM_PREFIX, CommonConstants.Helix.DataSource.Realtime.Kafka.CONSUMER_TYPE), "highLevel,simple");
  Field autoCreateOnError = ValidationManager.class.getDeclaredField("_autoCreateOnError");
  autoCreateOnError.setAccessible(true);
  autoCreateOnError.setBoolean(validationManager, false);
  AbstractTableConfig tableConfig = mock(AbstractTableConfig.class);
  IndexingConfig indexingConfig = mock(IndexingConfig.class);
  when(tableConfig.getIndexingConfig()).thenReturn(indexingConfig);
  when(indexingConfig.getStreamConfigs()).thenReturn(streamConfigs);
  validationManager.validateLLCSegments(realtimeTableName, tableConfig);
  // Partition 0 has no segment in CONSUMING state, so exactly one partition is flagged.
  Assert.assertEquals(validationMetrics.partitionCount, 1);
  // Set partition 0 to have one instance in CONSUMING state and the others in OFFLINE;
  // validation should then flag no partitions as needing correction.
  helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
  idealstate.setPartitionState(p0s1.getSegmentName(), S1, PinotHelixSegmentOnlineOfflineStateModelGenerator.CONSUMING_STATE);
  idealstate.setPartitionState(p0s1.getSegmentName(), S2, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
  idealstate.setPartitionState(p0s1.getSegmentName(), S3, PinotHelixSegmentOnlineOfflineStateModelGenerator.OFFLINE_STATE);
  helixAdmin.addResource(HELIX_CLUSTER_NAME, realtimeTableName, idealstate);
  validationManager.validateLLCSegments(realtimeTableName, tableConfig);
  Assert.assertEquals(validationMetrics.partitionCount, 0);
  helixAdmin.dropResource(HELIX_CLUSTER_NAME, realtimeTableName);
}
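Under the hood, the IdealState manipulated above wraps a ZNRecord whose map fields go from segment name to a map of instance name to state, which is the structure the validation pass walks. A sketch of reading one segment's states back, reusing the names from this test:

// Sketch only: read the per-instance states recorded for one segment.
Map<String, String> instanceStateMap = idealstate.getInstanceStateMap(p1s1.getSegmentName());
for (Map.Entry<String, String> entry : instanceStateMap.entrySet()) {
  // Expected here: S1/S2/S3 -> CONSUMING.
  System.out.println(entry.getKey() + " -> " + entry.getValue());
}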
Use of org.apache.helix.ZNRecord in project pinot by linkedin.
From the class BenchmarkQueryEngine, method startPinot.
@Setup
public void startPinot() throws Exception {
  System.out.println("Using table name " + TABLE_NAME);
  System.out.println("Using data directory " + DATA_DIRECTORY);
  System.out.println("Starting pinot");
  PerfBenchmarkDriverConf conf = new PerfBenchmarkDriverConf();
  conf.setStartBroker(true);
  conf.setStartController(true);
  conf.setStartServer(true);
  conf.setStartZookeeper(true);
  conf.setUploadIndexes(false);
  conf.setRunQueries(false);
  conf.setServerInstanceSegmentTarDir(null);
  conf.setServerInstanceDataDir(DATA_DIRECTORY);
  conf.setConfigureResources(false);
  _perfBenchmarkDriver = new PerfBenchmarkDriver(conf);
  _perfBenchmarkDriver.run();
  Set<String> tables = new HashSet<String>();
  File[] segments = new File(DATA_DIRECTORY, TABLE_NAME).listFiles();
  for (File segmentDir : segments) {
    SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(segmentDir);
    if (!tables.contains(segmentMetadata.getTableName())) {
      _perfBenchmarkDriver.configureTable(segmentMetadata.getTableName());
      tables.add(segmentMetadata.getTableName());
    }
    System.out.println("Adding segment " + segmentDir.getAbsolutePath());
    _perfBenchmarkDriver.addSegment(segmentMetadata);
  }
  // Poll the external view until every segment is ONLINE on at least one instance.
  ZkClient client = new ZkClient("localhost:2191", 10000, 10000, new ZNRecordSerializer());
  ZNRecord record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
  while (true) {
    System.out.println("record = " + record);
    Uninterruptibles.sleepUninterruptibly(10, TimeUnit.SECONDS);
    int onlineSegmentCount = 0;
    for (Map<String, String> instancesAndStates : record.getMapFields().values()) {
      for (String state : instancesAndStates.values()) {
        if (state.equals("ONLINE")) {
          onlineSegmentCount++;
          break;
        }
      }
    }
    System.out.println(onlineSegmentCount + " segments online out of " + segments.length);
    if (onlineSegmentCount == segments.length) {
      break;
    }
    record = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME);
  }
  ranOnce = false;
  System.out.println(_perfBenchmarkDriver.postQuery(QUERY_PATTERNS[queryPattern], optimizationFlags).toString(2));
}
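One note on the polling loop: readData as called above throws if the external view znode does not exist yet. A hedged variant using the overload that returns null for a missing path instead of throwing:

// Sketch only: null-tolerant read of the external view ZNRecord.
ZNRecord externalView = client.readData("/PinotPerfTestCluster/EXTERNALVIEW/" + TABLE_NAME, true);
if (externalView == null) {
  // Resource not yet created; keep waiting before counting ONLINE segments.
}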