Use of org.apache.helix.zookeeper.datamodel.ZNRecord in project ambry by linkedin.
The class HelixAccountServiceTest, method testReadBadZNRecordCase7.
/**
 * Tests reading {@link ZNRecord} from {@link HelixPropertyStore} and account metadata from ambry, where ambry has a
 * valid account record but the {@link ZNRecord} contains an invalid list. This is not a valid {@link ZNRecord}
 * format, so fetch and update operations should fail and no record should be read.
 * @throws Exception Any unexpected exception.
 */
@Test
public void testReadBadZNRecordCase7() throws Exception {
  if (!useNewZNodePath) {
    return;
  }
  ZNRecord zNRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  Map<String, String> accountMap = new HashMap<>();
  accountMap.put(String.valueOf(refAccount.getId()), objectMapper.writeValueAsString(
      new AccountBuilder(refAccount).snapshotVersion(refAccount.getSnapshotVersion() + 1).build()));
  RouterStore.writeAccountMapToRouter(accountMap, mockRouter);
  List<String> list = Collections.singletonList("bad_list_string");
  zNRecord.setListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, list);
  updateAndWriteZNRecord(zNRecord, false);
}
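For contrast, a well-formed record for the new ZNode path stores each list entry as the JSON form of a RouterStore.BlobIDAndVersion, as the other tests in this class do. A minimal sketch of the valid shape, reusing the names from the test above:

ZNRecord goodRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
String blobID = RouterStore.writeAccountMapToRouter(accountMap, mockRouter);
// Each list entry must deserialize via RouterStore.BlobIDAndVersion.fromJson(...); a bare string such as
// "bad_list_string" cannot, which is exactly what makes the record in the test invalid.
List<String> goodList = Collections.singletonList(new RouterStore.BlobIDAndVersion(blobID, 1).toJson());
goodRecord.setListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, goodList);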
Use of org.apache.helix.zookeeper.datamodel.ZNRecord in project ambry by linkedin.
The class HelixAccountServiceTest, method testReadBadZNRecordCase4.
/**
 * Tests reading {@link ZNRecord} from {@link HelixPropertyStore}, where the {@link ZNRecord} has a map field
 * ({@link LegacyMetadataStore#ACCOUNT_METADATA_MAP_KEY}: accountMap), and accountMap contains an
 * ("accountId": accountJsonStr) pair whose key does not match the id inside the account JSON. This is not a valid
 * {@link ZNRecord} format, so fetch and update operations should fail.
 * @throws Exception Any unexpected exception.
 */
@Test
public void testReadBadZNRecordCase4() throws Exception {
  Map<String, String> mapValue = new HashMap<>();
  mapValue.put("-1", objectMapper.writeValueAsString(
      new AccountBuilder(refAccount).snapshotVersion(refAccount.getSnapshotVersion() + 1).build()));
  ZNRecord zNRecord;
  if (useNewZNodePath) {
    String blobID = RouterStore.writeAccountMapToRouter(mapValue, mockRouter);
    List<String> list = Collections.singletonList(new RouterStore.BlobIDAndVersion(blobID, 1).toJson());
    zNRecord = makeZNRecordWithListField(null, RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, list);
  } else {
    zNRecord = makeZNRecordWithMapField(null, LegacyMetadataStore.ACCOUNT_METADATA_MAP_KEY, mapValue);
  }
  updateAndWriteZNRecord(zNRecord, false);
}
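The bad record above keys the account map with "-1", which cannot match the id inside the serialized account. A minimal sketch of a well-formed entry, where the map key equals the account id in the JSON (names reused from the test class):

Map<String, String> goodMapValue = new HashMap<>();
// The key must equal the id embedded in the account JSON for the record to be readable.
goodMapValue.put(String.valueOf(refAccount.getId()),
    objectMapper.writeValueAsString(new AccountBuilder(refAccount).build()));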
Use of org.apache.helix.zookeeper.datamodel.ZNRecord in project ambry by linkedin.
The class HelixAccountServiceTest, method writeAccountsToHelixPropertyStore.
/**
 * Pre-populates a collection of {@link Account}s in the underlying {@link org.apache.helix.store.HelixPropertyStore}
 * using {@link com.github.ambry.clustermap.HelixStoreOperator} (not through the {@link HelixAccountService}). This
 * method does not check for conflicts among the {@link Account}s to write.
 * @param accounts The collection of {@link Account}s to write to the store.
 * @param shouldNotify {@code true} to publish a full account metadata change message after the write.
 * @throws Exception Any unexpected exception.
 */
private void writeAccountsToHelixPropertyStore(Collection<Account> accounts, boolean shouldNotify) throws Exception {
  HelixStoreOperator storeOperator =
      new HelixStoreOperator(mockHelixAccountServiceFactory.getHelixStore(ZK_CONNECT_STRING, storeConfig));
  ZNRecord zNRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  Map<String, String> accountMap = new HashMap<>();
  for (Account account : accounts) {
    accountMap.put(String.valueOf(account.getId()), objectMapper.writeValueAsString(
        new AccountBuilder(account).snapshotVersion(refAccount.getSnapshotVersion() + 1).build()));
  }
  if (useNewZNodePath) {
    String blobID = RouterStore.writeAccountMapToRouter(accountMap, mockRouter);
    List<String> list = Collections.singletonList(new RouterStore.BlobIDAndVersion(blobID, 1).toJson());
    zNRecord.setListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, list);
    storeOperator.write(RouterStore.ACCOUNT_METADATA_BLOB_IDS_PATH, zNRecord);
  } else {
    zNRecord.setMapField(LegacyMetadataStore.ACCOUNT_METADATA_MAP_KEY, accountMap);
    // Write account metadata into HelixPropertyStore.
    storeOperator.write(LegacyMetadataStore.FULL_ACCOUNT_METADATA_PATH, zNRecord);
  }
  if (shouldNotify) {
    notifier.publish(ACCOUNT_METADATA_CHANGE_TOPIC, FULL_ACCOUNT_METADATA_CHANGE_MESSAGE);
  }
}
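A hedged sketch of how a test might use this helper: seed accounts directly into the store and publish the change message so a running HelixAccountService refreshes its cache (accountsToSeed is a hypothetical local name; the helper itself performs no conflict checking):

// Hypothetical usage: write the accounts behind the service's back, then notify listeners.
Collection<Account> accountsToSeed = Collections.singletonList(refAccount);
writeAccountsToHelixPropertyStore(accountsToSeed, true);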
Use of org.apache.helix.zookeeper.datamodel.ZNRecord in project ambry by linkedin.
The class RouterStoreTest, method getBlobIDAndVersionInHelix.
/**
* Fetch the list of {@link RouterStore.BlobIDAndVersion} from the helixStore.
* @param count The expected number of elements in the list.
* @return The list of {@link RouterStore.BlobIDAndVersion}.
*/
private List<RouterStore.BlobIDAndVersion> getBlobIDAndVersionInHelix(int count) {
  // Verify that the ZNRecord contains the right data.
  ZNRecord record = helixStore.get(RouterStore.ACCOUNT_METADATA_BLOB_IDS_PATH, null, AccessOption.PERSISTENT);
  assertNotNull("ZNRecord missing after update", record);
  List<String> accountBlobs = record.getListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY);
  assertNotNull("Blob ids are missing from ZNRecord", accountBlobs);
  // The version also equals the number of blobs.
  assertEquals("Number of blobs mismatch", count, accountBlobs.size());
  List<RouterStore.BlobIDAndVersion> blobIDAndVersions = new ArrayList<>(count);
  for (String json : accountBlobs) {
    blobIDAndVersions.add(RouterStore.BlobIDAndVersion.fromJson(json));
  }
  return blobIDAndVersions;
}
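Callers can then assert on the returned versions to confirm that each update appended one blob to the list. A sketch of hypothetical usage, assuming a getVersion() accessor on RouterStore.BlobIDAndVersion and an updateCount known to the test:

List<RouterStore.BlobIDAndVersion> blobIDAndVersions = getBlobIDAndVersionInHelix(updateCount);
for (int i = 0; i < updateCount; i++) {
  // Versions are expected to increase by one with each update (see the comment in the method above).
  assertEquals("Unexpected version for blob " + i, i + 1, blobIDAndVersions.get(i).getVersion());
}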
Use of org.apache.helix.zookeeper.datamodel.ZNRecord in project ambry by linkedin.
The class HelixBootstrapUpgradeUtil, method addUpdateResources.
/**
 * Add and/or update resources in Helix based on the information in the static cluster map. This may involve adding
 * or removing partitions from under a resource, and adding or dropping resources altogether. This may also involve
 * changing the instance set for a partition under a resource, based on the static cluster map.
 * @param dcName the name of the datacenter being processed.
 * @param partitionsToInstancesInDc a map of partitions to their instance sets in the given datacenter. Entries are
 *                                  removed as they are matched against existing resources; any leftovers are added
 *                                  under new resources at the end.
 */
private void addUpdateResources(String dcName, Map<String, Set<String>> partitionsToInstancesInDc) {
  HelixAdmin dcAdmin = adminForDc.get(dcName);
  List<String> resourcesInCluster = dcAdmin.getResourcesInCluster(clusterName);
  List<String> instancesWithDisabledPartition = new ArrayList<>();
  HelixPropertyStore<ZNRecord> helixPropertyStore =
      helixAdminOperation == HelixAdminOperation.DisablePartition ? createHelixPropertyStore(dcName) : null;
  // maxResource may vary from one dc to another (a special partition class allows partitions to exist in one dc only)
  int maxResource = -1;
  for (String resourceName : resourcesInCluster) {
    boolean resourceModified = false;
    if (!resourceName.matches("\\d+")) {
      // Resources with non-numeric names do not correspond to partitions from the static cluster map.
      // These will be ignored.
      continue;
    }
    maxResource = Math.max(maxResource, Integer.parseInt(resourceName));
    IdealState resourceIs = dcAdmin.getResourceIdealState(clusterName, resourceName);
    for (String partitionName : new HashSet<>(resourceIs.getPartitionSet())) {
      Set<String> instanceSetInHelix = resourceIs.getInstanceSet(partitionName);
      Set<String> instanceSetInStatic = partitionsToInstancesInDc.remove(partitionName);
      if (instanceSetInStatic == null || instanceSetInStatic.isEmpty()) {
        if (forceRemove) {
          info("[{}] *** Partition {} no longer present in the static clustermap, {} *** ", dcName.toUpperCase(),
              partitionName, dryRun ? "no action as dry run" : "removing from Resource");
          // Remove the partition by mutating the IdealState record directly; the Helix team is
          // planning to provide an API for this.
          if (!dryRun) {
            resourceIs.getRecord().getListFields().remove(partitionName);
          }
          resourceModified = true;
        } else {
          info("[{}] *** forceRemove option not provided, resources will not be removed (use --forceRemove to forcefully remove)",
              dcName.toUpperCase());
          expectMoreInHelixDuringValidate = true;
          partitionsNotForceRemovedByDc.computeIfAbsent(dcName, k -> ConcurrentHashMap.newKeySet()).add(partitionName);
        }
      } else if (!instanceSetInStatic.equals(instanceSetInHelix)) {
        // Change the IdealState only when the operation is meant to bootstrap the cluster or update the IdealState.
        if (EnumSet.of(HelixAdminOperation.UpdateIdealState, HelixAdminOperation.BootstrapCluster).contains(helixAdminOperation)) {
          // @formatter:off
          info("[{}] Different instance sets for partition {} under resource {}. {}. "
                  + "Previous instance set: [{}], new instance set: [{}]",
              dcName.toUpperCase(), partitionName, resourceName,
              dryRun ? "No action as dry run" : "Updating Helix using static",
              String.join(",", instanceSetInHelix), String.join(",", instanceSetInStatic));
          // @formatter:on
          if (!dryRun) {
            ArrayList<String> newInstances = new ArrayList<>(instanceSetInStatic);
            Collections.shuffle(newInstances);
            resourceIs.setPreferenceList(partitionName, newInstances);
            // Existing resources may not have ANY_LIVEINSTANCE set as the numReplicas (which allows for different
            // replication for different partitions under the same resource). So set it here (we use the name() method
            // and not the toString() method for the enum, as that is what Helix uses).
            resourceIs.setReplicas(ResourceConfig.ResourceConfigConstants.ANY_LIVEINSTANCE.name());
          }
          resourceModified = true;
        } else if (helixAdminOperation == HelixAdminOperation.DisablePartition) {
          // If this is a DisablePartition operation, we don't modify the IdealState and only update InstanceConfig
          // to disable certain partitions on specific nodes.
          // 1. Extract the difference between the Helix and static instance sets to determine which replicas were removed.
          instanceSetInHelix.removeAll(instanceSetInStatic);
          // 2. Disable each removed replica on its node.
          for (String instanceInHelixOnly : instanceSetInHelix) {
            info("Partition {} under resource {} on node {} is no longer in static clustermap. {}.", partitionName,
                resourceName, instanceInHelixOnly, dryRun ? "No action as dry run" : "Disabling it");
            if (!dryRun) {
              InstanceConfig instanceConfig = dcAdmin.getInstanceConfig(clusterName, instanceInHelixOnly);
              String instanceName = instanceConfig.getInstanceName();
              // Create a ZNode marking the disabling as in progress, so that the node blocks
              // on updating InstanceConfig until this tool has completed.
              // TODO: remove this logic once migration to PropertyStore is done.
              if (!instancesWithDisabledPartition.contains(instanceName)) {
                ZNRecord znRecord = new ZNRecord(instanceName);
                String path = PARTITION_DISABLED_ZNODE_PATH + instanceName;
                if (!helixPropertyStore.create(path, znRecord, AccessOption.PERSISTENT)) {
                  logger.error("Failed to create a ZNode for {} in datacenter {} before disabling partition.",
                      instanceName, dcName);
                  continue;
                }
              }
              instanceConfig.setInstanceEnabledForPartition(resourceName, partitionName, false);
              dcAdmin.setInstanceConfig(clusterName, instanceInHelixOnly, instanceConfig);
              instancesWithDisabledPartition.add(instanceName);
            }
            partitionsDisabled.getAndIncrement();
          }
          // Disabling a partition won't remove its replica from the IdealState, so this partition will have more
          // replicas in Helix than in the static clustermap.
          expectMoreInHelixDuringValidate = true;
        }
      }
    }
    // Update the state model def if necessary.
    if (!resourceIs.getStateModelDefRef().equals(stateModelDef)) {
      info("[{}] Resource {} has different state model {}. Updating it with {}", dcName.toUpperCase(), resourceName,
          resourceIs.getStateModelDefRef(), stateModelDef);
      resourceIs.setStateModelDefRef(stateModelDef);
      resourceModified = true;
    }
    resourceIs.setNumPartitions(resourceIs.getPartitionSet().size());
    if (resourceModified) {
      if (resourceIs.getPartitionSet().isEmpty()) {
        info("[{}] Resource {} has no partition, {}", dcName.toUpperCase(), resourceName,
            dryRun ? "no action as dry run" : "dropping");
        if (!dryRun) {
          dcAdmin.dropResource(clusterName, resourceName);
        }
        resourcesDropped.getAndIncrement();
      } else {
        if (!dryRun) {
          dcAdmin.setResourceIdealState(clusterName, resourceName, resourceIs);
        }
        resourcesUpdated.getAndIncrement();
      }
    }
  }
  // Note that disabling a partition also updates the InstanceConfig of the nodes that host it.
  instancesUpdated.getAndAdd(instancesWithDisabledPartition.size());
  // Remove the ZNodes created above; this unblocks the replica decommission thread on each datanode.
  if (helixPropertyStore != null) {
    maybeAwaitForLatch();
    for (String instanceName : instancesWithDisabledPartition) {
      String path = PARTITION_DISABLED_ZNODE_PATH + instanceName;
      if (!helixPropertyStore.remove(path, AccessOption.PERSISTENT)) {
        logger.error("Failed to remove a ZNode for {} in datacenter {} after disabling partition completed.",
            instanceName, dcName);
      }
    }
    helixPropertyStore.stop();
  }
  // Add what is not already in Helix under new resources.
  int fromIndex = 0;
  List<Map.Entry<String, Set<String>>> newPartitions = new ArrayList<>(partitionsToInstancesInDc.entrySet());
  while (fromIndex < newPartitions.size()) {
    String resourceName = Integer.toString(++maxResource);
    int toIndex = Math.min(fromIndex + maxPartitionsInOneResource, newPartitions.size());
    List<Map.Entry<String, Set<String>>> partitionsUnderNextResource = newPartitions.subList(fromIndex, toIndex);
    fromIndex = toIndex;
    IdealState idealState = new IdealState(resourceName);
    idealState.setStateModelDefRef(stateModelDef);
    info("[{}] Adding partitions for next resource {} in {}. {}.", dcName.toUpperCase(), resourceName, dcName,
        dryRun ? "Actual IdealState is not changed as dry run" : "IdealState is being updated");
    for (Map.Entry<String, Set<String>> entry : partitionsUnderNextResource) {
      String partitionName = entry.getKey();
      ArrayList<String> instances = new ArrayList<>(entry.getValue());
      Collections.shuffle(instances);
      idealState.setPreferenceList(partitionName, instances);
    }
    idealState.setNumPartitions(partitionsUnderNextResource.size());
    idealState.setReplicas(ResourceConfig.ResourceConfigConstants.ANY_LIVEINSTANCE.name());
    if (!idealState.isValid()) {
      throw new IllegalStateException("IdealState could not be validated for new resource " + resourceName);
    }
    if (!dryRun) {
      dcAdmin.addResource(clusterName, resourceName, idealState);
      info("[{}] Added {} new partitions under resource {} in datacenter {}", dcName.toUpperCase(),
          partitionsUnderNextResource.size(), resourceName, dcName);
    } else {
      info("[{}] Under DryRun mode, {} new partitions are added to resource {} in datacenter {}", dcName.toUpperCase(),
          partitionsUnderNextResource.size(), resourceName, dcName);
    }
    resourcesAdded.getAndIncrement();
  }
}
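The final while loop packs any partitions left in partitionsToInstancesInDc into consecutively numbered resources, at most maxPartitionsInOneResource per resource. A self-contained sketch of just that chunking arithmetic (class and method names here are illustrative, not part of the tool):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ChunkingSketch {
  // Split items into consecutive chunks of at most maxPerChunk elements, mirroring how
  // addUpdateResources assigns leftover partitions to new resources.
  static <T> List<List<T>> chunk(List<T> items, int maxPerChunk) {
    List<List<T>> chunks = new ArrayList<>();
    int fromIndex = 0;
    while (fromIndex < items.size()) {
      int toIndex = Math.min(fromIndex + maxPerChunk, items.size());
      chunks.add(items.subList(fromIndex, toIndex));
      fromIndex = toIndex;
    }
    return chunks;
  }

  public static void main(String[] args) {
    // 5 partitions with at most 2 per resource yields 3 resources: [p0, p1], [p2, p3], [p4].
    System.out.println(chunk(Arrays.asList("p0", "p1", "p2", "p3", "p4"), 2));
  }
}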