Use of com.github.ambry.store.StoreKeyFactory in project ambry by LinkedIn.
The class VcrServer, method startup.
/**
* Start the VCR Server.
* @throws InstantiationException if an error was encountered during startup.
*/
public void startup() throws InstantiationException {
try {
logger.info("starting");
ServerConfig serverConfig = new ServerConfig(properties);
clusterMap = clusterAgentsFactory.getClusterMap();
logger.info("Initialized clusterMap");
registry = clusterMap.getMetricRegistry();
serverMetrics = new ServerMetrics(registry, VcrServer.class, VcrServer.class);
// Create the security service only after registry and serverMetrics exist, since the factory needs both.
ServerSecurityServiceFactory serverSecurityServiceFactory = Utils.getObj(serverConfig.serverSecurityServiceFactory, properties, serverMetrics, registry);
serverSecurityService = serverSecurityServiceFactory.getServerSecurityService();
logger.info("Setting up JMX.");
long startTime = SystemTime.getInstance().milliseconds();
reporter = reporterFactory != null ? reporterFactory.apply(registry) : JmxReporter.forRegistry(registry).build();
reporter.start();
logger.info("creating configs");
NetworkConfig networkConfig = new NetworkConfig(properties);
StoreConfig storeConfig = new StoreConfig(properties);
ReplicationConfig replicationConfig = new ReplicationConfig(properties);
CloudConfig cloudConfig = new CloudConfig(properties);
ConnectionPoolConfig connectionPoolConfig = new ConnectionPoolConfig(properties);
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(properties);
SSLConfig sslConfig = new SSLConfig(properties);
// verify the configs
properties.verify();
// initialize cloud destination
if (cloudDestinationFactory == null) {
cloudDestinationFactory = Utils.getObj(cloudConfig.cloudDestinationFactoryClass, properties, registry, clusterMap);
}
cloudDestination = cloudDestinationFactory.getCloudDestination();
// TODO Make sure that config.updaterPollingIntervalMs value is large (~one day) for VCR.
AccountServiceFactory accountServiceFactory = Utils.getObj(serverConfig.serverAccountServiceFactory, properties, registry);
AccountService accountService = accountServiceFactory.getAccountService();
vcrClusterParticipant = ((VcrClusterAgentsFactory) Utils.getObj(cloudConfig.vcrClusterAgentsFactoryClass, cloudConfig, clusterMapConfig, clusterMap, accountService, storeConfig, cloudDestination, registry)).getVcrClusterParticipant();
scheduler = Utils.newScheduler(serverConfig.serverSchedulerNumOfthreads, false);
StoreKeyFactory storeKeyFactory = Utils.getObj(storeConfig.storeKeyFactory, clusterMap);
SSLFactory sslFactory = new NettySslHttp2Factory(sslConfig);
if (clusterMapConfig.clusterMapEnableHttp2Replication) {
connectionPool = new Http2BlockingChannelPool(sslFactory, new Http2ClientConfig(properties), new Http2ClientMetrics(registry));
logger.info("Using http2 for VCR replication.");
} else {
connectionPool = new BlockingChannelConnectionPool(connectionPoolConfig, sslConfig, clusterMapConfig, registry);
logger.info("Using blocking channel for VCR replication.");
}
connectionPool.start();
StoreKeyConverterFactory storeKeyConverterFactory = Utils.getObj(serverConfig.serverStoreKeyConverterFactory, properties, registry);
VcrMetrics vcrMetrics = new VcrMetrics(registry);
CloudStorageManager cloudStorageManager = new CloudStorageManager(properties, vcrMetrics, cloudDestination, clusterMap);
vcrReplicationManager = new VcrReplicationManager(cloudConfig, replicationConfig, clusterMapConfig, storeConfig, cloudStorageManager, storeKeyFactory, clusterMap, vcrClusterParticipant, cloudDestination, scheduler, connectionPool, vcrMetrics, notificationSystem, storeKeyConverterFactory, serverConfig.serverMessageTransformer);
vcrReplicationManager.start();
DataNodeId currentNode = vcrClusterParticipant.getCurrentDataNodeId();
ArrayList<Port> ports = new ArrayList<Port>();
ports.add(new Port(networkConfig.port, PortType.PLAINTEXT));
if (currentNode.hasSSLPort()) {
ports.add(new Port(cloudConfig.vcrSslPort, PortType.SSL));
}
networkServer = new SocketServer(networkConfig, sslConfig, registry, ports);
// TODO fix enableDataPrefetch
ServerMetrics serverMetrics = new ServerMetrics(registry, VcrRequests.class, VcrServer.class);
VcrRequests requests = new VcrRequests(cloudStorageManager, networkServer.getRequestResponseChannel(), clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPool = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, networkServer.getRequestResponseChannel(), requests);
networkServer.start();
// Start netty http2 server
if (currentNode.hasHttp2Port()) {
logger.info("Http2 port {} is enabled. Starting HTTP/2 service.", currentNode.getHttp2Port());
NettyConfig nettyConfig = new NettyConfig(properties);
NettyMetrics nettyMetrics = new NettyMetrics(registry);
Http2ServerMetrics http2ServerMetrics = new Http2ServerMetrics(registry);
Http2ClientConfig http2ClientConfig = new Http2ClientConfig(properties);
NettyServerRequestResponseChannel requestResponseChannel = new NettyServerRequestResponseChannel(networkConfig.queuedMaxRequests, http2ServerMetrics);
VcrRequests vcrRequestsForHttp2 = new VcrRequests(cloudStorageManager, requestResponseChannel, clusterMap, currentNode, registry, serverMetrics, new FindTokenHelper(storeKeyFactory, replicationConfig), notificationSystem, vcrReplicationManager, storeKeyFactory, storeKeyConverterFactory);
requestHandlerPoolForHttp2 = new RequestHandlerPool(serverConfig.serverRequestHandlerNumOfThreads, requestResponseChannel, vcrRequestsForHttp2);
NioServerFactory nioServerFactory = new StorageServerNettyFactory(currentNode.getHttp2Port(), requestResponseChannel, sslFactory, nettyConfig, http2ClientConfig, serverMetrics, nettyMetrics, http2ServerMetrics, serverSecurityService);
nettyHttp2Server = nioServerFactory.getNioServer();
nettyHttp2Server.start();
}
long processingTime = SystemTime.getInstance().milliseconds() - startTime;
logger.info("VCR startup time in Ms {}", processingTime);
} catch (Exception e) {
logger.error("Error during VCR startup", e);
throw new InstantiationException("failure during VCR startup " + e);
}
}
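The startup sequence above builds nearly every component through the same reflection-based factory pattern: a config property names a factory class, Utils.getObj instantiates it with the supplied constructor arguments, and the factory hands back the component (the StoreKeyFactory loaded from storeConfig.storeKeyFactory is one example). Below is a minimal sketch of that pattern; it matches constructors by arity only and is not Ambry's actual Utils implementation, which also checks parameter types.

import java.lang.reflect.Constructor;

public class FactoryUtils {
  // Simplified stand-in for Ambry's Utils.getObj: load the named class and
  // invoke the first constructor whose parameter count matches the arguments.
  @SuppressWarnings("unchecked")
  public static <T> T getObj(String className, Object... args) throws ReflectiveOperationException {
    Class<?> clazz = Class.forName(className);
    for (Constructor<?> ctor : clazz.getDeclaredConstructors()) {
      if (ctor.getParameterCount() == args.length) {
        return (T) ctor.newInstance(args);
      }
    }
    throw new NoSuchMethodException(className + " has no " + args.length + "-arg constructor");
  }
}

With this pattern, changing the storeConfig.storeKeyFactory config value swaps in a different StoreKeyFactory implementation without touching the VCR startup code.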
Use of com.github.ambry.store.StoreKeyFactory in project ambry by LinkedIn.
The class MessageFormatInputStreamTest, method messageFormatRecordsTest.
private void messageFormatRecordsTest(short blobVersion, BlobType blobType, boolean useV2Header) throws IOException, MessageFormatException {
StoreKey key = new MockId("id1");
StoreKeyFactory keyFactory = new MockIdFactory();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties prop = new BlobProperties(10, "servid", accountId, containerId, false);
byte[] encryptionKey = new byte[100];
new Random().nextBytes(encryptionKey);
byte[] usermetadata = new byte[1000];
new Random().nextBytes(usermetadata);
int blobContentSize = 2000;
byte[] data = new byte[blobContentSize];
new Random().nextBytes(data);
long blobSize = -1;
MessageFormatRecord.headerVersionToUse = useV2Header ? MessageFormatRecord.Message_Header_Version_V2 : MessageFormatRecord.Message_Header_Version_V1;
if (blobVersion == MessageFormatRecord.Blob_Version_V1) {
blobSize = MessageFormatRecord.Blob_Format_V1.getBlobRecordSize(blobContentSize);
} else if (blobVersion == MessageFormatRecord.Blob_Version_V2 && blobType == BlobType.DataBlob) {
blobSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize(blobContentSize);
} else if (blobVersion == MessageFormatRecord.Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
data = byteBufferBlob.array();
blobContentSize = data.length;
blobSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize(blobContentSize);
}
ByteBufferInputStream stream = new ByteBufferInputStream(ByteBuffer.wrap(data));
MessageFormatInputStream messageFormatStream = (blobVersion == MessageFormatRecord.Blob_Version_V2) ? new PutMessageFormatInputStream(key, ByteBuffer.wrap(encryptionKey), prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key, prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType);
int headerSize = MessageFormatRecord.getHeaderSizeForVersion(useV2Header ? MessageFormatRecord.Message_Header_Version_V2 : MessageFormatRecord.Message_Header_Version_V1);
int blobEncryptionKeySize = useV2Header ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey)) : 0;
int blobPropertiesRecordSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(prop);
int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(ByteBuffer.wrap(usermetadata));
Assert.assertEquals(messageFormatStream.getSize(), headerSize + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + blobSize + key.sizeInBytes());
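// Layout of the serialized message verified below:
// [header][store key][blob encryption key (V2 header only)][blob properties][user metadata][blob]
// Each record ends with a CRC computed over all of its bytes except the trailing CRC long itself.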
// verify header
byte[] headerOutput = new byte[headerSize];
messageFormatStream.read(headerOutput);
ByteBuffer headerBuf = ByteBuffer.wrap(headerOutput);
Assert.assertEquals(useV2Header ? MessageFormatRecord.Message_Header_Version_V2 : MessageFormatRecord.Message_Header_Version_V1, headerBuf.getShort());
Assert.assertEquals(blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + blobSize, headerBuf.getLong());
if (useV2Header) {
Assert.assertEquals(headerSize + key.sizeInBytes(), headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize, headerBuf.getInt());
Assert.assertEquals(MessageFormatRecord.Message_Header_Invalid_Relative_Offset, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize, headerBuf.getInt());
} else {
Assert.assertEquals(headerSize + key.sizeInBytes(), headerBuf.getInt());
Assert.assertEquals(MessageFormatRecord.Message_Header_Invalid_Relative_Offset, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobPropertiesRecordSize, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobPropertiesRecordSize + userMetadataSize, headerBuf.getInt());
}
Crc32 crc = new Crc32();
crc.update(headerOutput, 0, headerSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), headerBuf.getLong());
// verify handle
byte[] handleOutput = new byte[key.sizeInBytes()];
ByteBuffer handleOutputBuf = ByteBuffer.wrap(handleOutput);
messageFormatStream.read(handleOutput);
byte[] dest = new byte[key.sizeInBytes()];
handleOutputBuf.get(dest);
Assert.assertArrayEquals(dest, key.toBytes());
// verify encryption key
if (useV2Header) {
byte[] blobEncryptionKeyOutput = new byte[blobEncryptionKeySize];
ByteBuffer blobEncryptionKeyBuf = ByteBuffer.wrap(blobEncryptionKeyOutput);
messageFormatStream.read(blobEncryptionKeyOutput);
Assert.assertEquals(blobEncryptionKeyBuf.getShort(), MessageFormatRecord.Blob_Encryption_Key_V1);
Assert.assertEquals(blobEncryptionKeyBuf.getInt(), 100);
dest = new byte[100];
blobEncryptionKeyBuf.get(dest);
Assert.assertArrayEquals(dest, encryptionKey);
crc = new Crc32();
crc.update(blobEncryptionKeyOutput, 0, blobEncryptionKeySize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), blobEncryptionKeyBuf.getLong());
}
// verify blob properties
byte[] blobPropertiesOutput = new byte[blobPropertiesRecordSize];
ByteBuffer blobPropertiesBuf = ByteBuffer.wrap(blobPropertiesOutput);
messageFormatStream.read(blobPropertiesOutput);
Assert.assertEquals(blobPropertiesBuf.getShort(), 1);
BlobProperties propOutput = BlobPropertiesSerDe.getBlobPropertiesFromStream(new DataInputStream(new ByteBufferInputStream(blobPropertiesBuf)));
Assert.assertEquals(10, propOutput.getBlobSize());
Assert.assertEquals("servid", propOutput.getServiceId());
Assert.assertEquals("AccountId mismatch", accountId, propOutput.getAccountId());
Assert.assertEquals("ContainerId mismatch", containerId, propOutput.getContainerId());
crc = new Crc32();
crc.update(blobPropertiesOutput, 0, blobPropertiesRecordSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), blobPropertiesBuf.getLong());
// verify user metadata
byte[] userMetadataOutput = new byte[userMetadataSize];
ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadataOutput);
messageFormatStream.read(userMetadataOutput);
Assert.assertEquals(userMetadataBuf.getShort(), 1);
Assert.assertEquals(userMetadataBuf.getInt(), 1000);
dest = new byte[1000];
userMetadataBuf.get(dest);
Assert.assertArrayEquals(dest, usermetadata);
crc = new Crc32();
crc.update(userMetadataOutput, 0, userMetadataSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), userMetadataBuf.getLong());
// verify blob
CrcInputStream crcstream = new CrcInputStream(messageFormatStream);
DataInputStream streamData = new DataInputStream(crcstream);
Assert.assertEquals(streamData.readShort(), blobVersion);
if (blobVersion == MessageFormatRecord.Blob_Version_V2) {
Assert.assertEquals(streamData.readShort(), blobType.ordinal());
}
Assert.assertEquals(streamData.readLong(), blobContentSize);
for (int i = 0; i < blobContentSize; i++) {
Assert.assertEquals((byte) streamData.read(), data[i]);
}
long crcVal = crcstream.getValue();
Assert.assertEquals(crcVal, streamData.readLong());
// Verify Blob All
stream = new ByteBufferInputStream(ByteBuffer.wrap(data));
messageFormatStream = (blobVersion == MessageFormatRecord.Blob_Version_V2) ? new PutMessageFormatInputStream(key, ByteBuffer.wrap(encryptionKey), prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key, prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType);
int totalSize;
if (useV2Header) {
totalSize = headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + (int) blobSize;
} else {
totalSize = headerSize + key.sizeInBytes() + blobPropertiesRecordSize + userMetadataSize + (int) blobSize;
}
ByteBuffer allBuf = ByteBuffer.allocate(totalSize);
messageFormatStream.read(allBuf.array());
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(new ByteBufferInputStream(allBuf), keyFactory);
Assert.assertEquals(key, blobAll.getStoreKey());
Assert.assertArrayEquals(usermetadata, blobAll.getBlobInfo().getUserMetadata());
Assert.assertEquals(blobContentSize, blobAll.getBlobData().getSize());
Assert.assertEquals(blobType, blobAll.getBlobData().getBlobType());
if (useV2Header) {
Assert.assertEquals(ByteBuffer.wrap(encryptionKey), blobAll.getBlobEncryptionKey());
} else {
Assert.assertEquals(null, blobAll.getBlobEncryptionKey());
}
Assert.assertEquals(ByteBuffer.wrap(data), blobAll.getBlobData().getStream().getByteBuffer());
}
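The header assertions in this test are pure offset arithmetic: every relative offset is measured from the start of the message, and each record starts where the previous one ends. The helper below reproduces the V2-header arithmetic; it is illustrative only, and the method and variable names are mine rather than Ambry's.

public class MessageOffsets {
  // Relative offsets for a V2-header message, in record order:
  // header | store key | encryption key | blob properties | user metadata | blob
  public static int[] v2RelativeOffsets(int headerSize, int keySize, int encKeySize, int propsSize, int userMetadataSize) {
    int encKeyOffset = headerSize + keySize;
    int propsOffset = encKeyOffset + encKeySize;
    int userMetadataOffset = propsOffset + propsSize;
    int blobOffset = userMetadataOffset + userMetadataSize;
    return new int[]{encKeyOffset, propsOffset, userMetadataOffset, blobOffset};
  }
}

These four values are exactly what the four valid getInt() assertions in the V2 branch read back from the header; the remaining slot, the update-record offset, is Message_Header_Invalid_Relative_Offset for a PUT.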
Use of com.github.ambry.store.StoreKeyFactory in project ambry by LinkedIn.
The class CloudBlobStoreTest, method testPutWithTtl.
/**
* Tests PUT (with TTL) and TtlUpdate record replication.
* Replication may happen after both the PUT and the TtlUpdate, or after the TtlUpdate only.
* The PUT may already be expired, may have an expiration time below the upload threshold,
* or may have an expiration time at or above the upload threshold.
* @throws Exception on any unexpected test failure.
*/
@Test
public void testPutWithTtl() throws Exception {
// Set up remote host
MockClusterMap clusterMap = new MockClusterMap();
MockHost remoteHost = getLocalAndRemoteHosts(clusterMap).getSecond();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
PartitionId partitionId = partitionIds.get(0);
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
Map<DataNodeId, MockHost> hosts = new HashMap<>();
hosts.put(remoteHost.dataNodeId, remoteHost);
MockConnectionPool connectionPool = new MockConnectionPool(hosts, clusterMap, 4);
// Generate BlobIds for the PUTs that follow.
short blobIdVersion = CommonTestUtils.getCurrentBlobIdVersion();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
boolean toEncrypt = TestUtils.RANDOM.nextBoolean();
List<BlobId> blobIdList = new ArrayList<>();
for (int i = 0; i < 6; i++) {
blobIdList.add(new BlobId(blobIdVersion, BlobId.BlobIdType.NATIVE, ClusterMap.UNKNOWN_DATACENTER_ID, accountId, containerId, partitionId, toEncrypt, BlobId.BlobDataType.DATACHUNK));
}
// Set up VCR
Properties props = new Properties();
setBasicProperties(props);
props.setProperty("clustermap.port", "12300");
props.setProperty("vcr.ssl.port", "12345");
ReplicationConfig replicationConfig = new ReplicationConfig(new VerifiableProperties(props));
ClusterMapConfig clusterMapConfig = new ClusterMapConfig(new VerifiableProperties(props));
CloudConfig cloudConfig = new CloudConfig(new VerifiableProperties(props));
CloudDataNode cloudDataNode = new CloudDataNode(cloudConfig, clusterMapConfig);
LatchBasedInMemoryCloudDestination latchBasedInMemoryCloudDestination = new LatchBasedInMemoryCloudDestination(blobIdList, clusterMap);
CloudReplica cloudReplica = new CloudReplica(partitionId, cloudDataNode);
CloudBlobStore cloudBlobStore = new CloudBlobStore(new VerifiableProperties(props), partitionId, latchBasedInMemoryCloudDestination, clusterMap, new VcrMetrics(new MetricRegistry()));
cloudBlobStore.start();
// Create ReplicaThread and add RemoteReplicaInfo to it.
ReplicationMetrics replicationMetrics = new ReplicationMetrics(new MetricRegistry(), Collections.emptyList());
ReplicaThread replicaThread = new ReplicaThread("threadtest", new MockFindTokenHelper(storeKeyFactory, replicationConfig), clusterMap, new AtomicInteger(0), cloudDataNode, connectionPool, replicationConfig, replicationMetrics, null, storeKeyConverter, transformer, clusterMap.getMetricRegistry(), false, cloudDataNode.getDatacenterName(), new ResponseHandler(clusterMap), new MockTime(), null, null, null);
for (ReplicaId replica : partitionId.getReplicaIds()) {
if (replica.getDataNodeId() == remoteHost.dataNodeId) {
RemoteReplicaInfo remoteReplicaInfo = new RemoteReplicaInfo(replica, cloudReplica, cloudBlobStore, new MockFindToken(0, 0), Long.MAX_VALUE, SystemTime.getInstance(), new Port(remoteHost.dataNodeId.getPort(), PortType.PLAINTEXT));
replicaThread.addRemoteReplicaInfo(remoteReplicaInfo);
break;
}
}
long referenceTime = System.currentTimeMillis();
// Case 1: Put already expired. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
BlobId id = blobIdList.get(0);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
replicaThread.replicate();
assertFalse("Blob should not exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 2: Put already expired. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(1);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime - 2000, referenceTime - 1000);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 3: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(2);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
replicaThread.replicate();
if (isVcr) {
assertFalse("Blob should not exist (vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
} else {
assertTrue("Blob should exist (not vcr).", latchBasedInMemoryCloudDestination.doesBlobExist(id));
}
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 4: Put TTL less than cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud only after replicating ttlUpdate.
id = blobIdList.get(3);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays) - 1);
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 5: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after Put and after TtlUpdate.
// Upload to Cloud after Put and update ttl after TtlUpdate.
id = blobIdList.get(4);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
replicaThread.replicate();
assertTrue(latchBasedInMemoryCloudDestination.doesBlobExist(id));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Case 6: Put TTL greater than or equal to cloudConfig.vcrMinTtlDays. Replication happens after TtlUpdate.
// Upload to Cloud after TtlUpdate.
id = blobIdList.get(5);
addPutMessagesToReplicasOfPartition(id, accountId, containerId, partitionId, Collections.singletonList(remoteHost), referenceTime, referenceTime + TimeUnit.DAYS.toMillis(cloudConfig.vcrMinTtlDays));
addTtlUpdateMessagesToReplicasOfPartition(partitionId, id, Collections.singletonList(remoteHost), Utils.Infinite_Time);
replicaThread.replicate();
assertTrue("Blob should exist.", latchBasedInMemoryCloudDestination.doesBlobExist(id));
// Verify expiration time of all blobs.
Map<String, CloudBlobMetadata> map = latchBasedInMemoryCloudDestination.getBlobMetadata(blobIdList);
for (BlobId blobId : blobIdList) {
assertEquals("Blob ttl should be infinite now.", Utils.Infinite_Time, map.get(blobId.toString()).getExpirationTime());
}
}
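All six cases exercise a single upload policy: when a PUT is replicated, the VCR uploads it immediately only if the blob is permanent or its remaining TTL is at least cloudConfig.vcrMinTtlDays; shorter-lived blobs are deferred until a TtlUpdate arrives (case 3 shows that a non-VCR store uploads regardless). Here is a sketch of that decision as a hypothetical helper, not CloudBlobStore's actual code.

import java.util.concurrent.TimeUnit;

public class VcrUploadPolicy {
  private static final long INFINITE_TIME = -1; // matches Utils.Infinite_Time

  // Should a replicated PUT be uploaded to the cloud right away?
  // Blobs that fail this check are uploaded later, once a TtlUpdate makes them permanent.
  public static boolean uploadOnPut(long expirationTimeMs, long nowMs, int vcrMinTtlDays) {
    if (expirationTimeMs == INFINITE_TIME) {
      return true; // permanent blob, always upload
    }
    return expirationTimeMs - nowMs >= TimeUnit.DAYS.toMillis(vcrMinTtlDays);
  }
}

Cases 1 and 2 fail the check because the blobs are already expired, cases 3 and 4 fail it by one millisecond, and cases 5 and 6 pass it exactly at the threshold.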
Use of com.github.ambry.store.StoreKeyFactory in project ambry by LinkedIn.
The class MessageFormatInputStreamTest, method messageFormatPutRecordsTest.
private void messageFormatPutRecordsTest(short blobVersion, BlobType blobType, short headerVersion) throws IOException, MessageFormatException {
StoreKey key = new MockId("id1");
StoreKeyFactory keyFactory = new MockIdFactory();
short accountId = Utils.getRandomShort(TestUtils.RANDOM);
short containerId = Utils.getRandomShort(TestUtils.RANDOM);
BlobProperties prop = new BlobProperties(10, "servid", accountId, containerId, false);
byte[] encryptionKey = new byte[100];
new Random().nextBytes(encryptionKey);
byte[] usermetadata = new byte[1000];
new Random().nextBytes(usermetadata);
int blobContentSize = 2000;
byte[] data = new byte[blobContentSize];
new Random().nextBytes(data);
short lifeVersion = 1;
long blobSize = -1;
MessageFormatRecord.headerVersionToUse = headerVersion;
if (blobVersion == MessageFormatRecord.Blob_Version_V1) {
blobSize = MessageFormatRecord.Blob_Format_V1.getBlobRecordSize(blobContentSize);
} else if (blobVersion == MessageFormatRecord.Blob_Version_V2 && blobType == BlobType.DataBlob) {
blobSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize(blobContentSize);
} else if (blobVersion == MessageFormatRecord.Blob_Version_V2 && blobType == BlobType.MetadataBlob) {
ByteBuffer byteBufferBlob = MessageFormatTestUtils.getBlobContentForMetadataBlob(blobContentSize);
data = byteBufferBlob.array();
blobContentSize = data.length;
blobSize = (int) MessageFormatRecord.Blob_Format_V2.getBlobRecordSize(blobContentSize);
}
ByteBufferInputStream stream = new ByteBufferInputStream(ByteBuffer.wrap(data));
MessageFormatInputStream messageFormatStream = (blobVersion == MessageFormatRecord.Blob_Version_V2) ? new PutMessageFormatInputStream(key, ByteBuffer.wrap(encryptionKey), prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType, lifeVersion) : new PutMessageFormatBlobV1InputStream(key, prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType);
int headerSize = MessageFormatRecord.getHeaderSizeForVersion(headerVersion);
int blobEncryptionKeySize = headerVersion != MessageFormatRecord.Message_Header_Version_V1 ? MessageFormatRecord.BlobEncryptionKey_Format_V1.getBlobEncryptionKeyRecordSize(ByteBuffer.wrap(encryptionKey)) : 0;
int blobPropertiesRecordSize = MessageFormatRecord.BlobProperties_Format_V1.getBlobPropertiesRecordSize(prop);
int userMetadataSize = MessageFormatRecord.UserMetadata_Format_V1.getUserMetadataSize(ByteBuffer.wrap(usermetadata));
Assert.assertEquals(messageFormatStream.getSize(), headerSize + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + blobSize + key.sizeInBytes());
// verify header
byte[] headerOutput = new byte[headerSize];
messageFormatStream.read(headerOutput);
ByteBuffer headerBuf = ByteBuffer.wrap(headerOutput);
Assert.assertEquals(headerVersion, headerBuf.getShort());
if (headerVersion == MessageFormatRecord.Message_Header_Version_V3) {
Assert.assertEquals(lifeVersion, headerBuf.getShort());
}
Assert.assertEquals(blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + blobSize, headerBuf.getLong());
switch(headerVersion) {
case MessageFormatRecord.Message_Header_Version_V1:
Assert.assertEquals(headerSize + key.sizeInBytes(), headerBuf.getInt());
Assert.assertEquals(MessageFormatRecord.Message_Header_Invalid_Relative_Offset, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobPropertiesRecordSize, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobPropertiesRecordSize + userMetadataSize, headerBuf.getInt());
break;
default:
// case MessageFormatRecord.Message_Header_Version_V2 or V3:
Assert.assertEquals(headerSize + key.sizeInBytes(), headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize, headerBuf.getInt());
Assert.assertEquals(MessageFormatRecord.Message_Header_Invalid_Relative_Offset, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize, headerBuf.getInt());
Assert.assertEquals(headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize, headerBuf.getInt());
}
Crc32 crc = new Crc32();
crc.update(headerOutput, 0, headerSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), headerBuf.getLong());
// verify handle
byte[] handleOutput = new byte[key.sizeInBytes()];
ByteBuffer handleOutputBuf = ByteBuffer.wrap(handleOutput);
messageFormatStream.read(handleOutput);
byte[] dest = new byte[key.sizeInBytes()];
handleOutputBuf.get(dest);
Assert.assertArrayEquals(dest, key.toBytes());
// verify encryption key
if (headerVersion != MessageFormatRecord.Message_Header_Version_V1) {
byte[] blobEncryptionKeyOutput = new byte[blobEncryptionKeySize];
ByteBuffer blobEncryptionKeyBuf = ByteBuffer.wrap(blobEncryptionKeyOutput);
messageFormatStream.read(blobEncryptionKeyOutput);
Assert.assertEquals(blobEncryptionKeyBuf.getShort(), MessageFormatRecord.Blob_Encryption_Key_V1);
Assert.assertEquals(blobEncryptionKeyBuf.getInt(), 100);
dest = new byte[100];
blobEncryptionKeyBuf.get(dest);
Assert.assertArrayEquals(dest, encryptionKey);
crc = new Crc32();
crc.update(blobEncryptionKeyOutput, 0, blobEncryptionKeySize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), blobEncryptionKeyBuf.getLong());
}
// verify blob properties
byte[] blobPropertiesOutput = new byte[blobPropertiesRecordSize];
ByteBuffer blobPropertiesBuf = ByteBuffer.wrap(blobPropertiesOutput);
messageFormatStream.read(blobPropertiesOutput);
Assert.assertEquals(blobPropertiesBuf.getShort(), 1);
BlobProperties propOutput = BlobPropertiesSerDe.getBlobPropertiesFromStream(new DataInputStream(new ByteBufferInputStream(blobPropertiesBuf)));
Assert.assertEquals(10, propOutput.getBlobSize());
Assert.assertEquals("servid", propOutput.getServiceId());
Assert.assertEquals("AccountId mismatch", accountId, propOutput.getAccountId());
Assert.assertEquals("ContainerId mismatch", containerId, propOutput.getContainerId());
crc = new Crc32();
crc.update(blobPropertiesOutput, 0, blobPropertiesRecordSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), blobPropertiesBuf.getLong());
// verify user metadata
byte[] userMetadataOutput = new byte[userMetadataSize];
ByteBuffer userMetadataBuf = ByteBuffer.wrap(userMetadataOutput);
messageFormatStream.read(userMetadataOutput);
Assert.assertEquals(userMetadataBuf.getShort(), 1);
Assert.assertEquals(userMetadataBuf.getInt(), 1000);
dest = new byte[1000];
userMetadataBuf.get(dest);
Assert.assertArrayEquals(dest, usermetadata);
crc = new Crc32();
crc.update(userMetadataOutput, 0, userMetadataSize - MessageFormatRecord.Crc_Size);
Assert.assertEquals(crc.getValue(), userMetadataBuf.getLong());
// verify blob
CrcInputStream crcstream = new CrcInputStream(messageFormatStream);
DataInputStream streamData = new DataInputStream(crcstream);
Assert.assertEquals(streamData.readShort(), blobVersion);
if (blobVersion == MessageFormatRecord.Blob_Version_V2) {
Assert.assertEquals(streamData.readShort(), blobType.ordinal());
}
Assert.assertEquals(streamData.readLong(), blobContentSize);
for (int i = 0; i < blobContentSize; i++) {
Assert.assertEquals((byte) streamData.read(), data[i]);
}
long crcVal = crcstream.getValue();
Assert.assertEquals(crcVal, streamData.readLong());
// Verify Blob All
stream = new ByteBufferInputStream(ByteBuffer.wrap(data));
messageFormatStream = (blobVersion == MessageFormatRecord.Blob_Version_V2) ? new PutMessageFormatInputStream(key, ByteBuffer.wrap(encryptionKey), prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType) : new PutMessageFormatBlobV1InputStream(key, prop, ByteBuffer.wrap(usermetadata), stream, blobContentSize, blobType);
int totalSize;
switch(headerVersion) {
case MessageFormatRecord.Message_Header_Version_V1:
totalSize = headerSize + key.sizeInBytes() + blobPropertiesRecordSize + userMetadataSize + (int) blobSize;
break;
default:
// case MessageFormatRecord.Message_Header_Version_V2 or V3:
totalSize = headerSize + key.sizeInBytes() + blobEncryptionKeySize + blobPropertiesRecordSize + userMetadataSize + (int) blobSize;
}
ByteBuffer allBuf = ByteBuffer.allocate(totalSize);
messageFormatStream.read(allBuf.array());
BlobAll blobAll = MessageFormatRecord.deserializeBlobAll(new ByteBufferInputStream(allBuf), keyFactory);
Assert.assertEquals(key, blobAll.getStoreKey());
Assert.assertArrayEquals(usermetadata, blobAll.getBlobInfo().getUserMetadata());
Assert.assertEquals(blobContentSize, blobAll.getBlobData().getSize());
Assert.assertEquals(blobType, blobAll.getBlobData().getBlobType());
if (headerVersion != MessageFormatRecord.Message_Header_Version_V1) {
Assert.assertEquals(ByteBuffer.wrap(encryptionKey), blobAll.getBlobEncryptionKey());
} else {
Assert.assertEquals(null, blobAll.getBlobEncryptionKey());
}
ByteBuf byteBuf = blobAll.getBlobData().content();
try {
Assert.assertEquals(Unpooled.wrappedBuffer(data), byteBuf);
} finally {
byteBuf.release();
}
}
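Both message-format tests validate every record the same way: compute a CRC over all bytes except the trailing 8-byte CRC field and compare it against the stored long. A generic version of that check follows, using the JDK's CRC32 in place of Ambry's Crc32 wrapper; the 8-byte constant stands in for MessageFormatRecord.Crc_Size.

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

public class RecordCrcCheck {
  private static final int CRC_SIZE = 8; // trailing CRC long, standing in for MessageFormatRecord.Crc_Size

  // True if the CRC of record[0 .. length - 8) equals the long stored in the last 8 bytes.
  public static boolean isCrcValid(byte[] record) {
    CRC32 crc = new CRC32();
    crc.update(record, 0, record.length - CRC_SIZE);
    long stored = ByteBuffer.wrap(record, record.length - CRC_SIZE, CRC_SIZE).getLong();
    return crc.getValue() == stored;
  }
}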
Use of com.github.ambry.store.StoreKeyFactory in project ambry by LinkedIn.
The class ReplicationTest, method replicaThreadTestConverter.
/**
* Tests replication between a local and a remote server that have different
* blob IDs for the same blobs (via StoreKeyConverter).
* @throws Exception on any unexpected test failure.
*/
@Test
public void replicaThreadTestConverter() throws Exception {
MockClusterMap clusterMap = new MockClusterMap();
Pair<MockHost, MockHost> localAndRemoteHosts = getLocalAndRemoteHosts(clusterMap);
MockHost localHost = localAndRemoteHosts.getFirst();
MockHost remoteHost = localAndRemoteHosts.getSecond();
MockHost expectedLocalHost = new MockHost(localHost.dataNodeId, clusterMap);
MockStoreKeyConverterFactory storeKeyConverterFactory = new MockStoreKeyConverterFactory(null, null);
storeKeyConverterFactory.setConversionMap(new HashMap<>());
storeKeyConverterFactory.setReturnInputIfAbsent(true);
MockStoreKeyConverterFactory.MockStoreKeyConverter storeKeyConverter = storeKeyConverterFactory.getStoreKeyConverter();
List<PartitionId> partitionIds = clusterMap.getWritablePartitionIds(null);
Map<PartitionId, List<StoreKey>> idsToBeIgnoredByPartition = new HashMap<>();
StoreKeyFactory storeKeyFactory = new BlobIdFactory(clusterMap);
Transformer transformer = new BlobIdTransformer(storeKeyFactory, storeKeyConverter);
int batchSize = 4;
Pair<Map<DataNodeId, List<RemoteReplicaInfo>>, ReplicaThread> replicasAndThread = getRemoteReplicasAndReplicaThread(batchSize, clusterMap, localHost, remoteHost, storeKeyConverter, transformer, null, null);
Map<DataNodeId, List<RemoteReplicaInfo>> replicasToReplicate = replicasAndThread.getFirst();
ReplicaThread replicaThread = replicasAndThread.getSecond();
/*
STORE KEY CONVERTER MAPPING
Key Value
B0 B0'
B1 B1'
B2 null
BEFORE
Local Remote
B0' B0
B1
B2
AFTER
Local Remote
B0' B0
B1' B1
B2
B0 converts to B0' for local,
B1 converts to B1' for local,
B2 converts to null for local,
so local already has B0/B0',
B1 is transferred as B1',
and B2 is invalid for local,
so it does not count as missing.
Missing Keys: 1
*/
Map<PartitionId, List<BlobId>> partitionIdToDeleteBlobId = new HashMap<>();
Map<StoreKey, StoreKey> conversionMap = new HashMap<>();
Map<PartitionId, BlobId> expectedPartitionIdToDeleteBlobId = new HashMap<>();
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<BlobId> deleteBlobIds = new ArrayList<>();
partitionIdToDeleteBlobId.put(partitionId, deleteBlobIds);
BlobId b0 = generateRandomBlobId(partitionId);
deleteBlobIds.add(b0);
BlobId b0p = generateRandomBlobId(partitionId);
expectedPartitionIdToDeleteBlobId.put(partitionId, b0p);
BlobId b1 = generateRandomBlobId(partitionId);
BlobId b1p = generateRandomBlobId(partitionId);
BlobId b2 = generateRandomBlobId(partitionId);
deleteBlobIds.add(b2);
conversionMap.put(b0, b0p);
conversionMap.put(b1, b1p);
conversionMap.put(b2, null);
// Convert current conversion map so that BlobIdTransformer can
// create b1p in expectedLocalHost
storeKeyConverter.setConversionMap(conversionMap);
storeKeyConverter.convert(conversionMap.keySet());
addPutMessagesToReplicasOfPartition(Arrays.asList(b0p), Arrays.asList(localHost));
addPutMessagesToReplicasOfPartition(Arrays.asList(b0, b1, b2), Arrays.asList(remoteHost));
addPutMessagesToReplicasOfPartition(Arrays.asList(b0p, b1), Arrays.asList(null, transformer), Arrays.asList(expectedLocalHost));
// Check that expected local host contains the correct blob ids
Set<BlobId> expectedLocalHostBlobIds = new HashSet<>();
expectedLocalHostBlobIds.add(b0p);
expectedLocalHostBlobIds.add(b1p);
for (MessageInfo messageInfo : expectedLocalHost.infosByPartition.get(partitionId)) {
assertTrue("Remove should never fail", expectedLocalHostBlobIds.remove(messageInfo.getStoreKey()));
}
assertTrue("expectedLocalHostBlobIds should now be empty", expectedLocalHostBlobIds.isEmpty());
}
storeKeyConverter.setConversionMap(conversionMap);
int expectedIndex = assertMissingKeysAndFixMissingStoreKeys(0, 2, batchSize, 1, replicaThread, remoteHost, replicasToReplicate);
// Check that there are no missing buffers between expectedLocalHost and LocalHost
Map<PartitionId, List<ByteBuffer>> missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
// delete blob
for (int i = 0; i < partitionIds.size(); i++) {
PartitionId partitionId = partitionIds.get(i);
List<BlobId> deleteBlobIds = partitionIdToDeleteBlobId.get(partitionId);
for (BlobId deleteBlobId : deleteBlobIds) {
addDeleteMessagesToReplicasOfPartition(partitionId, deleteBlobId, Arrays.asList(remoteHost));
}
addDeleteMessagesToReplicasOfPartition(partitionId, expectedPartitionIdToDeleteBlobId.get(partitionId), Arrays.asList(expectedLocalHost));
}
expectedIndex = assertMissingKeysAndFixMissingStoreKeys(expectedIndex, 2, batchSize, 0, replicaThread, remoteHost, replicasToReplicate);
// Check that there are no missing buffers between expectedLocalHost and LocalHost
missingBuffers = expectedLocalHost.getMissingBuffers(localHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
missingBuffers = localHost.getMissingBuffers(expectedLocalHost.buffersByPartition);
assertTrue(missingBuffers.isEmpty());
// Expect 5 missing buffers: 3 unconverted puts + 2 unconverted deletes.
verifyNoMoreMissingKeysAndExpectedMissingBufferCount(remoteHost, localHost, replicaThread, replicasToReplicate, idsToBeIgnoredByPartition, storeKeyConverter, expectedIndex, expectedIndex, 5);
}
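The store-key-converter mapping at the top of this test is what drives the missing-key count of 1: a remote key whose converted form already exists locally is not missing, a key that converts to a new ID is missing exactly once, and a key that converts to null is invalid for the local store and skipped. A toy map-backed version of that filtering (illustrative names, not the real StoreKeyConverter API):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class MissingKeyFilter {
  // Return the converted keys that the local store still needs.
  // A null mapping means the key is invalid locally and is skipped.
  public static List<String> missingKeys(List<String> remoteKeys, Map<String, String> conversionMap, Set<String> localKeys) {
    List<String> missing = new ArrayList<>();
    for (String remoteKey : remoteKeys) {
      // getOrDefault returns the mapped value (possibly null) when the key is present.
      String converted = conversionMap.getOrDefault(remoteKey, remoteKey);
      if (converted != null && !localKeys.contains(converted)) {
        missing.add(converted);
      }
    }
    return missing;
  }
  // Example: remote [B0, B1, B2], map {B0->B0', B1->B1', B2->null}, local {B0'}
  // yields [B1'] -- one missing key, matching the comment in the test above.
}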