Usage example of org.apache.helix.zookeeper.datamodel.ZNRecord in the LinkedIn Ambry project.
From class HelixAccountServiceTest, method testReadBadZNRecordCase7.
/**
 * Tests the case where the account metadata stored through the router is valid, but the
 * {@link ZNRecord} in the {@link HelixPropertyStore} carries a malformed blob-id list.
 * Such a record is invalid, so fetch and update operations are expected to fail and no
 * account data should be read. Only applicable to the new ZNode path layout.
 * @throws Exception Any unexpected exception.
 */
@Test
public void testReadBadZNRecordCase7() throws Exception {
  // This scenario only exists for the new ZNode path (blob-id list) layout.
  if (!useNewZNodePath) {
    return;
  }
  // Store a perfectly valid account map via the router.
  Map<String, String> accounts = new HashMap<>();
  Account bumpedAccount =
      new AccountBuilder(refAccount).snapshotVersion(refAccount.getSnapshotVersion() + 1).build();
  accounts.put(String.valueOf(refAccount.getId()), objectMapper.writeValueAsString(bumpedAccount));
  RouterStore.writeAccountMapToRouter(accounts, mockRouter);
  // But point the ZNRecord's blob-id list at garbage that cannot be parsed.
  ZNRecord badRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  badRecord.setListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY,
      Collections.singletonList("bad_list_string"));
  // Expect both fetch and update to fail against this malformed record.
  updateAndWriteZNRecord(badRecord, false);
}
Usage example of org.apache.helix.zookeeper.datamodel.ZNRecord in the LinkedIn Ambry project.
From class HelixAccountServiceTest, method testReadBadZNRecordCase4.
/**
 * Tests reading a {@link ZNRecord} from the {@link HelixPropertyStore} whose map field
 * ({@link LegacyMetadataStore#ACCOUNT_METADATA_MAP_KEY}: accountMap) contains an
 * ("accountId": accountJsonStr) entry where the key does not match the id inside the JSON.
 * Such a record is invalid, so both fetch and update operations are expected to fail.
 * @throws Exception Any unexpected exception.
 */
@Test
public void testReadBadZNRecordCase4() throws Exception {
  // Key "-1" deliberately disagrees with the account id embedded in the JSON value.
  Map<String, String> accountMap = new HashMap<>();
  Account bumpedAccount =
      new AccountBuilder(refAccount).snapshotVersion(refAccount.getSnapshotVersion() + 1).build();
  accountMap.put("-1", objectMapper.writeValueAsString(bumpedAccount));
  final ZNRecord badRecord;
  if (useNewZNodePath) {
    // New layout: write the (bad) map through the router, then reference its blob id
    // from the ZNRecord's blob-id list field.
    String blobID = RouterStore.writeAccountMapToRouter(accountMap, mockRouter);
    List<String> blobIds =
        Collections.singletonList(new RouterStore.BlobIDAndVersion(blobID, 1).toJson());
    badRecord = makeZNRecordWithListField(null, RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, blobIds);
  } else {
    // Legacy layout: embed the (bad) map directly in the ZNRecord's map field.
    badRecord = makeZNRecordWithMapField(null, LegacyMetadataStore.ACCOUNT_METADATA_MAP_KEY, accountMap);
  }
  // Expect both fetch and update to fail against this mismatched record.
  updateAndWriteZNRecord(badRecord, false);
}
Usage example of org.apache.helix.zookeeper.datamodel.ZNRecord in the LinkedIn Ambry project.
From class HelixAccountServiceTest, method writeAccountsToHelixPropertyStore.
/**
 * Pre-populates a collection of {@link Account}s to the underlying
 * {@link org.apache.helix.store.HelixPropertyStore} using
 * {@link com.github.ambry.clustermap.HelixStoreOperator} (not through the
 * {@link HelixAccountService}). This method does not check any conflict among the
 * {@link Account}s to write.
 * @param accounts the accounts to serialize and store.
 * @param shouldNotify {@code true} to publish a full-metadata-change message after writing.
 * @throws Exception Any unexpected exception.
 */
private void writeAccountsToHelixPropertyStore(Collection<Account> accounts, boolean shouldNotify) throws Exception {
  HelixStoreOperator storeOperator =
      new HelixStoreOperator(mockHelixAccountServiceFactory.getHelixStore(ZK_CONNECT_STRING, storeConfig));
  ZNRecord zNRecord = new ZNRecord(String.valueOf(System.currentTimeMillis()));
  Map<String, String> accountMap = new HashMap<>();
  for (Account account : accounts) {
    // Bump each account's own snapshot version (previously this incorrectly read
    // refAccount's snapshot version for every account in the loop).
    accountMap.put(String.valueOf(account.getId()), objectMapper.writeValueAsString(
        new AccountBuilder(account).snapshotVersion(account.getSnapshotVersion() + 1).build()));
  }
  if (useNewZNodePath) {
    // New layout: persist the map via the router and record the resulting blob id
    // (with version 1) in the ZNRecord's blob-id list.
    String blobID = RouterStore.writeAccountMapToRouter(accountMap, mockRouter);
    List<String> list = Collections.singletonList(new RouterStore.BlobIDAndVersion(blobID, 1).toJson());
    zNRecord.setListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY, list);
    storeOperator.write(RouterStore.ACCOUNT_METADATA_BLOB_IDS_PATH, zNRecord);
  } else {
    // Legacy layout: embed the account map directly in the ZNRecord.
    zNRecord.setMapField(LegacyMetadataStore.ACCOUNT_METADATA_MAP_KEY, accountMap);
    // Write account metadata into HelixPropertyStore.
    storeOperator.write(LegacyMetadataStore.FULL_ACCOUNT_METADATA_PATH, zNRecord);
  }
  if (shouldNotify) {
    notifier.publish(ACCOUNT_METADATA_CHANGE_TOPIC, FULL_ACCOUNT_METADATA_CHANGE_MESSAGE);
  }
}
Usage example of org.apache.helix.zookeeper.datamodel.ZNRecord in the LinkedIn Ambry project.
From class RouterStoreTest, method getBlobIDAndVersionInHelix.
/**
 * Fetches and deserializes the list of {@link RouterStore.BlobIDAndVersion} stored in the
 * helixStore, asserting along the way that the {@link ZNRecord} and its blob-id list field
 * are present and have the expected size.
 * @param count The expected number of elements in the list (equals the version number).
 * @return The list of {@link RouterStore.BlobIDAndVersion}.
 */
private List<RouterStore.BlobIDAndVersion> getBlobIDAndVersionInHelix(int count) {
  // Verify that the ZNRecord exists and contains the right data.
  ZNRecord record = helixStore.get(RouterStore.ACCOUNT_METADATA_BLOB_IDS_PATH, null, AccessOption.PERSISTENT);
  assertNotNull("ZNRecord missing after update", record);
  List<String> serializedBlobs = record.getListField(RouterStore.ACCOUNT_METADATA_BLOB_IDS_LIST_KEY);
  assertNotNull("Blob ids are missing from ZNRecord", serializedBlobs);
  // The version also equals the number of blobs.
  assertEquals("Number of blobs mismatch", count, serializedBlobs.size());
  // Deserialize each JSON entry into a BlobIDAndVersion.
  List<RouterStore.BlobIDAndVersion> parsed = new ArrayList<>(count);
  serializedBlobs.forEach(json -> parsed.add(RouterStore.BlobIDAndVersion.fromJson(json)));
  return parsed;
}
Usage example of org.apache.helix.zookeeper.datamodel.ZNRecord in the LinkedIn Ambry project.
From class HelixClusterManager, method initializeHelixManagerAndPropertyStoreInLocalDC.
/**
 * Initializes the HelixManager in the local datacenter and subscribes the HelixPropertyStore
 * to the PartitionOverride zNode. This must happen before other datacenters are initialized
 * so that any partition overrides can be properly honored.
 * @param dataCenterToZkAddress the map mapping each datacenter to its corresponding ZkAddress.
 * @param instanceName the String representation of the instance associated with this manager.
 * @param helixFactory the factory class to construct and get a reference to a {@link HelixManager}.
 * @return the HelixManager of the local datacenter, or {@code null} if the local datacenter is
 *         {@link ReplicaType#CLOUD_BACKED}, as we currently do not support getting cluster state
 *         from Helix for cloud datacenters.
 * @throws Exception
 */
private HelixManager initializeHelixManagerAndPropertyStoreInLocalDC(Map<String, DcZkInfo> dataCenterToZkAddress, String instanceName, HelixFactory helixFactory) throws Exception {
  DcZkInfo localDcZkInfo = dataCenterToZkAddress.get(clusterMapConfig.clusterMapDatacenterName);
  // Cloud-backed datacenters do not get cluster state from Helix.
  if (localDcZkInfo.getReplicaType() == ReplicaType.CLOUD_BACKED) {
    return null;
  }
  // For now, the first ZK endpoint (if there are more than one endpoints) will be adopted by
  // default. Note that Ambry doesn't support multiple HelixClusterManagers (spectators) on
  // the same node.
  String zkEndpoint = localDcZkInfo.getZkConnectStrs().get(0);
  HelixManager localManager =
      helixFactory.getZkHelixManagerAndConnect(clusterName, instanceName, InstanceType.SPECTATOR, zkEndpoint);
  helixPropertyStoreInLocalDc = localManager.getHelixPropertyStore();
  logger.info("HelixPropertyStore from local datacenter {} is: {}", localDcZkInfo.getDcName(),
      helixPropertyStoreInLocalDc);
  // Listener only logs change/delete notifications for the override zNode.
  IZkDataListener overrideListener = new IZkDataListener() {
    @Override
    public void handleDataChange(String dataPath, Object data) {
      logger.info("Received data change notification for: {}", dataPath);
    }

    @Override
    public void handleDataDeleted(String dataPath) {
      logger.info("Received data delete notification for: {}", dataPath);
    }
  };
  logger.info("Subscribing data listener to HelixPropertyStore.");
  helixPropertyStoreInLocalDc.subscribeDataChanges(PARTITION_OVERRIDE_ZNODE_PATH, overrideListener);
  logger.info("Getting PartitionOverride ZNRecord from HelixPropertyStore");
  ZNRecord overrideRecord =
      helixPropertyStoreInLocalDc.get(PARTITION_OVERRIDE_ZNODE_PATH, null, AccessOption.PERSISTENT);
  if (clusterMapConfig.clusterMapEnablePartitionOverride) {
    if (overrideRecord == null) {
      logger.warn("ZNRecord from HelixPropertyStore is NULL, the partitionOverrideInfoMap is empty.");
    } else {
      partitionOverrideInfoMap.putAll(overrideRecord.getMapFields());
      logger.info("partitionOverrideInfoMap is initialized!");
    }
  }
  return localManager;
}
Aggregations