Use of com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata in project pinot by linkedin.
In class RealtimeTableDataManagerTest, method getInstanceZKMetadata:
private static InstanceZKMetadata getInstanceZKMetadata() {
  ZNRecord record = new ZNRecord("Server_lva1-app0120.corp.linkedin.com_8001");
  Map<String, String> groupIdMap = new HashMap<String, String>();
  Map<String, String> partitionMap = new HashMap<String, String>();
  // Kafka high-level consumer group id and partition for the test table, as stored in the instance ZNRecord.
  groupIdMap.put("mirror", "groupId_testTable_" + String.valueOf(System.currentTimeMillis()));
  partitionMap.put("testTable_R", "0");
  record.setMapField("KAFKA_HLC_GROUP_MAP", groupIdMap);
  record.setMapField("KAFKA_HLC_PARTITION_MAP", partitionMap);
  return new InstanceZKMetadata(record);
}
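For context, here is a minimal standalone sketch, using only the Helix ZNRecord API, of the record layout this fixture writes. The group id and partition values are illustrative placeholders rather than values taken from a live cluster.

import java.util.Collections;
import org.apache.helix.ZNRecord;

public class InstanceZNRecordSketch {
  public static void main(String[] args) {
    // Rebuild the same record shape as the test fixture above.
    ZNRecord record = new ZNRecord("Server_lva1-app0120.corp.linkedin.com_8001");
    record.setMapField("KAFKA_HLC_GROUP_MAP",
        Collections.singletonMap("mirror", "groupId_testTable_0"));
    record.setMapField("KAFKA_HLC_PARTITION_MAP",
        Collections.singletonMap("testTable_R", "0"));

    // The high-level consumer group and partition are stored as map fields keyed by resource name.
    System.out.println(record.getMapField("KAFKA_HLC_GROUP_MAP"));      // {mirror=groupId_testTable_0}
    System.out.println(record.getMapField("KAFKA_HLC_PARTITION_MAP"));  // {testTable_R=0}
  }
}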
Use of com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata in project pinot by linkedin.
In class PinotHelixResourceManager, method deleteRealtimeTable:
public void deleteRealtimeTable(String tableName) {
  final String realtimeTableName = TableNameBuilder.REALTIME_TABLE_NAME_BUILDER.forTable(tableName);
  // Remove from brokerResource
  HelixHelper.removeResourceFromBrokerIdealState(_helixZkManager, realtimeTableName);
  // Delete data table
  if (!_helixAdmin.getResourcesInCluster(_helixClusterName).contains(realtimeTableName)) {
    return;
  }
  // remove from property store
  ZKMetadataProvider.removeResourceSegmentsFromPropertyStore(getPropertyStore(), realtimeTableName);
  ZKMetadataProvider.removeResourceConfigFromPropertyStore(getPropertyStore(), realtimeTableName);
  ZKMetadataProvider.removeKafkaPartitionAssignmentFromPropertyStore(getPropertyStore(), realtimeTableName);
  // Remove groupId/PartitionId mapping for realtime table type.
  for (String instance : getAllInstancesForTable(realtimeTableName)) {
    InstanceZKMetadata instanceZKMetadata = ZKMetadataProvider.getInstanceZKMetadata(getPropertyStore(), instance);
    if (instanceZKMetadata != null) {
      instanceZKMetadata.removeResource(realtimeTableName);
      ZKMetadataProvider.setInstanceZKMetadata(getPropertyStore(), instanceZKMetadata);
    }
  }
  // dropping table
  _helixAdmin.dropResource(_helixClusterName, realtimeTableName);
}
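As a hedged illustration, the per-instance cleanup step in deleteRealtimeTable can be read as the standalone helper below. The class name, method name, and import locations are assumptions; only the ZKMetadataProvider and InstanceZKMetadata calls already shown above are used.

import java.util.List;
import org.apache.helix.ZNRecord;
import org.apache.helix.store.zk.ZkHelixPropertyStore;
import com.linkedin.pinot.common.metadata.ZKMetadataProvider;
import com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata;

public class RealtimeCleanupSketch {
  // Drops one realtime table's groupId/partition mapping from every instance that served it.
  static void removeInstanceMappings(ZkHelixPropertyStore<ZNRecord> propertyStore,
      List<String> instances, String realtimeTableName) {
    for (String instance : instances) {
      InstanceZKMetadata metadata = ZKMetadataProvider.getInstanceZKMetadata(propertyStore, instance);
      if (metadata != null) {
        metadata.removeResource(realtimeTableName);
        // Write the trimmed metadata back to the property store.
        ZKMetadataProvider.setInstanceZKMetadata(propertyStore, metadata);
      }
    }
  }
}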
Use of com.linkedin.pinot.common.metadata.instance.InstanceZKMetadata in project pinot by linkedin.
In class PinotTableIdealStateBuilder, method setupInstanceConfigForKafkaHighLevelConsumer:
private static void setupInstanceConfigForKafkaHighLevelConsumer(String realtimeTableName, int numDataInstances,
    int numDataReplicas, Map<String, String> streamProviderConfig, ZkHelixPropertyStore<ZNRecord> zkHelixPropertyStore,
    List<String> instanceList) {
  int numInstancesPerReplica = numDataInstances / numDataReplicas;
  int partitionId = 0;
  int replicaId = 0;
  String groupId = getGroupIdFromRealtimeDataTable(realtimeTableName, streamProviderConfig);
  for (int i = 0; i < numInstancesPerReplica * numDataReplicas; ++i) {
    String instance = instanceList.get(i);
    InstanceZKMetadata instanceZKMetadata = ZKMetadataProvider.getInstanceZKMetadata(zkHelixPropertyStore, instance);
    if (instanceZKMetadata == null) {
      // The instance id is expected to look like "Server_<host>_<port>".
      instanceZKMetadata = new InstanceZKMetadata();
      String[] instanceConfigs = instance.split("_");
      assert (instanceConfigs.length == 3);
      instanceZKMetadata.setInstanceType(instanceConfigs[0]);
      instanceZKMetadata.setInstanceName(instanceConfigs[1]);
      instanceZKMetadata.setInstancePort(Integer.parseInt(instanceConfigs[2]));
    }
    // Assign this instance one partition within the current replica group, then advance round-robin.
    instanceZKMetadata.setGroupId(realtimeTableName, groupId + "_" + replicaId);
    instanceZKMetadata.setPartition(realtimeTableName, Integer.toString(partitionId));
    partitionId = (partitionId + 1) % numInstancesPerReplica;
    if (partitionId == 0) {
      replicaId++;
    }
    ZKMetadataProvider.setInstanceZKMetadata(zkHelixPropertyStore, instanceZKMetadata);
  }
}
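To make the assignment order concrete, here is a minimal plain-Java sketch that reproduces the partitionId/replicaId bookkeeping from the loop above for 6 instances and 2 replicas; the instance count and replica count are chosen purely for illustration.

public class HlcAssignmentSketch {
  public static void main(String[] args) {
    int numDataInstances = 6;
    int numDataReplicas = 2;
    int numInstancesPerReplica = numDataInstances / numDataReplicas; // 3 partitions per replica group
    int partitionId = 0;
    int replicaId = 0;
    for (int i = 0; i < numInstancesPerReplica * numDataReplicas; ++i) {
      System.out.println("instance_" + i + " -> replica " + replicaId + ", partition " + partitionId);
      partitionId = (partitionId + 1) % numInstancesPerReplica;
      if (partitionId == 0) {
        replicaId++;
      }
    }
    // Prints: instance_0..2 -> replica 0, partitions 0..2; instance_3..5 -> replica 1, partitions 0..2
  }
}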