Example 21 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class BlobIdTransformerTest method testBasicOperation.

/**
 * Tests basic use of the transformer with blobs that can be converted and those that cannot be
 * @throws Exception
 */
@Test
public void testBasicOperation() throws Exception {
    for (Pair pair : pairList) {
        for (Class clazz : VALID_MESSAGE_FORMAT_INPUT_STREAM_IMPLS) {
            for (boolean divergeInfoFromData : new boolean[] { false, true }) {
                InputAndExpected inputAndExpected = new InputAndExpected(pair, clazz, divergeInfoFromData);
                TransformationOutput output = transformer.transform(inputAndExpected.getInput());
                assertNull("output exception should be null", output.getException());
                verifyOutput(output.getMsg(), inputAndExpected.getExpected());
            }
        }
    }
}
Also used : TransformationOutput(com.github.ambry.store.TransformationOutput) Pair(com.github.ambry.utils.Pair) Test(org.junit.Test)
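
For reference, com.github.ambry.utils.Pair is a small immutable two-element holder, and pairList here supplies the input/expected blob ID pairs driving each conversion case. A minimal standalone sketch of the accessor pattern these examples rely on (the ID strings are hypothetical placeholders):

import com.github.ambry.utils.Pair;

public class PairAccessorSketch {
    public static void main(String[] args) {
        // A Pair of (hypothetical) old and new blob ID strings.
        Pair<String, String> idPair = new Pair<>("oldBlobId", "newBlobId");
        // getFirst()/getSecond() are the accessors used throughout these examples.
        System.out.println(idPair.getFirst() + " -> " + idPair.getSecond());
    }
}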

Example 22 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class BlobIdTransformer method newMessage.

/**
 * Creates a new Message from the old Message's input stream, replacing the old store key
 * and account/container IDs with the new store key and account/container IDs
 * @param inputStream the input stream of the Message
 * @param newKey the new StoreKey
 * @param oldMessageInfo the {@link MessageInfo} of the message being transformed
 * @return the new Message
 * @throws Exception
 */
private Message newMessage(InputStream inputStream, StoreKey newKey, MessageInfo oldMessageInfo) throws Exception {
    MessageHeader_Format headerFormat = getMessageHeader(inputStream);
    // Read (and discard) the old store key to advance the stream past it.
    storeKeyFactory.getStoreKey(new DataInputStream(inputStream));
    BlobId newBlobId = (BlobId) newKey;
    if (headerFormat.isPutRecord()) {
        if (headerFormat.hasLifeVersion() && headerFormat.getLifeVersion() != oldMessageInfo.getLifeVersion()) {
            // The original Put buffer might have lifeVersion as 0, but the message info might have a higher lifeVersion.
            logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}", headerFormat.getLifeVersion(), oldMessageInfo.getLifeVersion(), oldMessageInfo.getStoreKey());
        }
        ByteBuffer blobEncryptionKey = null;
        if (headerFormat.hasEncryptionKeyRecord()) {
            blobEncryptionKey = deserializeBlobEncryptionKey(inputStream);
        }
        BlobProperties oldProperties = deserializeBlobProperties(inputStream);
        ByteBuffer userMetaData = deserializeUserMetadata(inputStream);
        BlobData blobData = deserializeBlob(inputStream);
        ByteBuf blobDataBytes = blobData.content();
        long blobPropertiesSize = oldProperties.getBlobSize();
        // will be rewritten with transformed IDs
        if (blobData.getBlobType().equals(BlobType.MetadataBlob)) {
            ByteBuffer serializedMetadataContent = blobDataBytes.nioBuffer();
            CompositeBlobInfo compositeBlobInfo = MetadataContentSerDe.deserializeMetadataContentRecord(serializedMetadataContent, storeKeyFactory);
            Map<StoreKey, StoreKey> convertedKeys = storeKeyConverter.convert(compositeBlobInfo.getKeys());
            List<StoreKey> newKeys = new ArrayList<>();
            boolean isOldMetadataKeyDifferentFromNew = !oldMessageInfo.getStoreKey().getID().equals(newKey.getID());
            short metadataAccountId = newBlobId.getAccountId();
            short metadataContainerId = newBlobId.getContainerId();
            for (StoreKey oldDataChunkKey : compositeBlobInfo.getKeys()) {
                StoreKey newDataChunkKey = convertedKeys.get(oldDataChunkKey);
                if (newDataChunkKey == null) {
                    throw new IllegalStateException("Found metadata chunk with a deprecated data chunk. " + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID() + " Old Datachunk ID: " + oldDataChunkKey.getID());
                }
                if (isOldMetadataKeyDifferentFromNew && newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
                    throw new IllegalStateException("Found changed metadata chunk with an unchanged data chunk" + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID() + " Old Datachunk ID: " + oldDataChunkKey.getID());
                }
                if (!isOldMetadataKeyDifferentFromNew && !newDataChunkKey.getID().equals(oldDataChunkKey.getID())) {
                    throw new IllegalStateException("Found unchanged metadata chunk with a changed data chunk" + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID() + " Old Datachunk ID: " + oldDataChunkKey.getID() + " New Datachunk ID: " + newDataChunkKey.getID());
                }
                BlobId newDataChunkBlobId = (BlobId) newDataChunkKey;
                if (newDataChunkBlobId.getAccountId() != metadataAccountId || newDataChunkBlobId.getContainerId() != metadataContainerId) {
                    throw new IllegalStateException("Found changed metadata chunk with a datachunk with a different account/container" + " Old MetadataID: " + oldMessageInfo.getStoreKey().getID() + " New MetadataID: " + newKey.getID() + " Old Datachunk ID: " + oldDataChunkKey.getID() + " New Datachunk ID: " + newDataChunkBlobId.getID() + " Metadata AccountId: " + metadataAccountId + " Metadata ContainerId: " + metadataContainerId + " Datachunk AccountId: " + newDataChunkBlobId.getAccountId() + " Datachunk ContainerId: " + newDataChunkBlobId.getContainerId());
                }
                newKeys.add(newDataChunkKey);
            }
            ByteBuffer metadataContent;
            if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V2) {
                metadataContent = MetadataContentSerDe.serializeMetadataContentV2(compositeBlobInfo.getChunkSize(), compositeBlobInfo.getTotalSize(), newKeys);
            } else if (compositeBlobInfo.getMetadataContentVersion() == Metadata_Content_Version_V3) {
                List<Pair<StoreKey, Long>> keyAndSizeList = new ArrayList<>();
                List<CompositeBlobInfo.ChunkMetadata> chunkMetadataList = compositeBlobInfo.getChunkMetadataList();
                for (int i = 0; i < newKeys.size(); i++) {
                    keyAndSizeList.add(new Pair<>(newKeys.get(i), chunkMetadataList.get(i).getSize()));
                }
                metadataContent = MetadataContentSerDe.serializeMetadataContentV3(compositeBlobInfo.getTotalSize(), keyAndSizeList);
            } else {
                throw new IllegalStateException("Unexpected metadata content version from composite blob: " + compositeBlobInfo.getMetadataContentVersion());
            }
            blobPropertiesSize = compositeBlobInfo.getTotalSize();
            metadataContent.flip();
            blobDataBytes.release();
            blobDataBytes = Unpooled.wrappedBuffer(metadataContent);
            blobData = new BlobData(blobData.getBlobType(), metadataContent.remaining(), blobDataBytes);
        }
        BlobProperties newProperties = new BlobProperties(blobPropertiesSize, oldProperties.getServiceId(), oldProperties.getOwnerId(), oldProperties.getContentType(), oldProperties.isPrivate(), oldProperties.getTimeToLiveInSeconds(), oldProperties.getCreationTimeInMs(), newBlobId.getAccountId(), newBlobId.getContainerId(), oldProperties.isEncrypted(), oldProperties.getExternalAssetTag(), oldProperties.getContentEncoding(), oldProperties.getFilename());
        // BlobIDTransformer only exists on ambry-server and replication between servers is relying on blocking channel
        // which is still using java ByteBuffer. So, no need to consider releasing stuff.
        // @todo, when netty Bytebuf is adopted for blocking channel on ambry-server, remember to release this ByteBuf.
        PutMessageFormatInputStream putMessageFormatInputStream = new PutMessageFormatInputStream(newKey, blobEncryptionKey, newProperties, userMetaData, new ByteBufInputStream(blobDataBytes, true), blobData.getSize(), blobData.getBlobType(), oldMessageInfo.getLifeVersion());
        // Reuse the original CRC if present in the oldMessageInfo. This is important to ensure that messages that are
        // received via replication are sent to the store with proper CRCs (which the store needs to detect duplicate
        // messages). As an additional guard, here the original CRC is only reused if the key's ID in string form is the
        // same after conversion.
        Long originalCrc = oldMessageInfo.getStoreKey().getID().equals(newKey.getID()) ? oldMessageInfo.getCrc() : null;
        MessageInfo info = new MessageInfo.Builder(newKey, putMessageFormatInputStream.getSize(), newProperties.getAccountId(), newProperties.getContainerId(), oldMessageInfo.getOperationTimeMs()).isTtlUpdated(oldMessageInfo.isTtlUpdated()).expirationTimeInMs(oldMessageInfo.getExpirationTimeInMs()).crc(originalCrc).lifeVersion(oldMessageInfo.getLifeVersion()).build();
        return new Message(info, putMessageFormatInputStream);
    } else {
        throw new IllegalArgumentException("Only 'put' records are valid");
    }
}
Also used : Message(com.github.ambry.store.Message) CompositeBlobInfo(com.github.ambry.messageformat.CompositeBlobInfo) ArrayList(java.util.ArrayList) PutMessageFormatInputStream(com.github.ambry.messageformat.PutMessageFormatInputStream) ByteBuf(io.netty.buffer.ByteBuf) BlobData(com.github.ambry.messageformat.BlobData) ArrayList(java.util.ArrayList) List(java.util.List) Pair(com.github.ambry.utils.Pair) ByteBufInputStream(io.netty.buffer.ByteBufInputStream) DataInputStream(java.io.DataInputStream) ByteBuffer(java.nio.ByteBuffer) StoreKey(com.github.ambry.store.StoreKey) MessageInfo(com.github.ambry.store.MessageInfo) BlobProperties(com.github.ambry.messageformat.BlobProperties) BlobId(com.github.ambry.commons.BlobId)
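
The Metadata_Content_Version_V3 branch above is the core Pair usage in this method: each converted data chunk key is paired with that chunk's size before re-serialization. A standalone sketch of just that pairing step, with plain strings and a long array standing in for StoreKey and ChunkMetadata.getSize():

import com.github.ambry.utils.Pair;

import java.util.ArrayList;
import java.util.List;

public class KeyAndSizeSketch {
    public static void main(String[] args) {
        // Hypothetical chunk keys and sizes, stand-ins for the converted StoreKeys
        // and the sizes from the composite blob's chunk metadata list.
        List<String> newKeys = List.of("chunkKey0", "chunkKey1");
        long[] chunkSizes = { 4096L, 1024L };
        // Mirror the V3 loop: pair key i with size i, preserving chunk order.
        List<Pair<String, Long>> keyAndSizeList = new ArrayList<>();
        for (int i = 0; i < newKeys.size(); i++) {
            keyAndSizeList.add(new Pair<>(newKeys.get(i), chunkSizes[i]));
        }
        keyAndSizeList.forEach(p -> System.out.println(p.getFirst() + " : " + p.getSecond()));
    }
}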

Example 23 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class AmbryQuotaManager method parseQuotaEnforcerAndSourceInfo.

/**
 * Parse the json config for {@link QuotaEnforcer} and {@link QuotaSource} factory pairs and return them in a {@link List}.
 * @param quotaEnforcerSourceJson json config string.
 * @return List of {@link QuotaEnforcer} and {@link QuotaSource} factory {@link Pair}s.
 */
private List<Pair<String, String>> parseQuotaEnforcerAndSourceInfo(String quotaEnforcerSourceJson) {
    if (quotaEnforcerSourceJson.isEmpty()) {
        return Collections.emptyList();
    }
    List<Pair<String, String>> quotaEnforcerSourcePairs = new ArrayList<>();
    JSONObject root = new JSONObject(quotaEnforcerSourceJson);
    JSONArray all = root.getJSONArray(QuotaConfig.QUOTA_ENFORCER_SOURCE_PAIR_INFO_STR);
    for (int i = 0; i < all.length(); i++) {
        JSONObject entry = all.getJSONObject(i);
        String enforcer = entry.getString(QuotaConfig.ENFORCER_STR);
        String source = entry.getString(QuotaConfig.SOURCE_STR);
        quotaEnforcerSourcePairs.add(new Pair<>(enforcer, source));
    }
    return quotaEnforcerSourcePairs;
}
Also used : JSONObject(org.json.JSONObject) ArrayList(java.util.ArrayList) JSONArray(org.json.JSONArray) Pair(com.github.ambry.utils.Pair)
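
The JSON shape this parser expects can be inferred from the constants it reads. A self-contained sketch of the same parse logic; the literal key strings and factory class names below are assumptions based on the constant names, since the authoritative values live in QuotaConfig:

import com.github.ambry.utils.Pair;
import org.json.JSONArray;
import org.json.JSONObject;

import java.util.ArrayList;
import java.util.List;

public class QuotaConfigParseSketch {
    public static void main(String[] args) {
        // Assumed key names; see QuotaConfig.QUOTA_ENFORCER_SOURCE_PAIR_INFO_STR,
        // QuotaConfig.ENFORCER_STR and QuotaConfig.SOURCE_STR for the real values.
        String json = "{\"quotaEnforcerSourcePairInfo\":["
            + "{\"enforcer\":\"com.example.MyEnforcerFactory\","
            + "\"source\":\"com.example.MySourceFactory\"}]}";
        List<Pair<String, String>> pairs = new ArrayList<>();
        JSONArray all = new JSONObject(json).getJSONArray("quotaEnforcerSourcePairInfo");
        for (int i = 0; i < all.length(); i++) {
            JSONObject entry = all.getJSONObject(i);
            // One (enforcer factory, source factory) Pair per config entry.
            pairs.add(new Pair<>(entry.getString("enforcer"), entry.getString("source")));
        }
        pairs.forEach(p -> System.out.println(p.getFirst() + " / " + p.getSecond()));
    }
}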

Example 24 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class StorageQuotaEnforcer method charge.

/**
 * Adds the given {@code usage} to the current storage usage of the account/container carried in
 * {@code restRequest}, even if the result exceeds the quota for the target account/container. If no account and
 * container are found in {@code restRequest}, or if no quota is found for the account/container, this is a no-op.
 * @param restRequest the {@link RestRequest} that carries account and container in the header.
 * @param usage the usage to charge
 * @return A {@link Pair} whose first element is quota and second element is the storage usage after charge.
 */
Pair<Long, Long> charge(RestRequest restRequest, long usage) {
    long quotaValue = -1L;
    long usageAfterCharge = 0L;
    try {
        Account account = RestUtils.getAccountFromArgs(restRequest.getArgs());
        Container container = RestUtils.getContainerFromArgs(restRequest.getArgs());
        QuotaResource quotaResource = account.getQuotaResourceType() == QuotaResourceType.ACCOUNT ? QuotaResource.fromAccount(account) : QuotaResource.fromContainer(container);
        quotaValue = getQuotaValueForResource(quotaResource);
        if (quotaValue != -1L) {
            AtomicLong existingUsage = new AtomicLong();
            storageUsages.compute(quotaResource, (k, v) -> {
                existingUsage.set(v == null ? 0 : v);
                if (v == null) {
                    return usage;
                }
                return v + usage;
            });
            usageAfterCharge = existingUsage.addAndGet(usage);
        }
    } catch (Exception e) {
        logger.error("Failed to charge for RestRequest {}", restRequest, e);
    }
    return new Pair<>(quotaValue, usageAfterCharge);
}
Also used : Account(com.github.ambry.account.Account) Container(com.github.ambry.account.Container) AtomicLong(java.util.concurrent.atomic.AtomicLong) QuotaResource(com.github.ambry.quota.QuotaResource) QuotaException(com.github.ambry.quota.QuotaException) Pair(com.github.ambry.utils.Pair)
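
The charge path above combines ConcurrentHashMap.compute with an AtomicLong so that the prior usage and the post-charge total come from a single atomic per-key update. A minimal sketch of that pattern in plain JDK code, with String keys standing in for QuotaResource:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class ChargeSketch {
    private static final ConcurrentMap<String, Long> usages = new ConcurrentHashMap<>();

    static long charge(String resource, long usage) {
        AtomicLong existing = new AtomicLong();
        // compute runs atomically for the key: capture the old value, store old + usage.
        usages.compute(resource, (k, v) -> {
            existing.set(v == null ? 0 : v);
            return v == null ? usage : v + usage;
        });
        // Usage after the charge = old value + the amount just added.
        return existing.addAndGet(usage);
    }

    public static void main(String[] args) {
        System.out.println(charge("accountA", 100)); // 100
        System.out.println(charge("accountA", 50));  // 150
    }
}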

Example 25 with Pair

use of com.github.ambry.utils.Pair in project ambry by linkedin.

the class ReplicationTestHelper method getLocalAndRemoteHosts.

// static methods to get local and remote hosts, add put/ttlupdate/delete/undelete messages to partitions
/**
 * Selects a local and remote host for replication tests that need it.
 * @param clusterMap the {@link MockClusterMap} to use.
 * @return a {@link Pair} with the first entry being the "local" host and the second, the "remote" host.
 */
public static Pair<MockHost, MockHost> getLocalAndRemoteHosts(MockClusterMap clusterMap) {
    // to make sure we select hosts with the SPECIAL_PARTITION_CLASS, pick hosts from the replicas of that partition
    PartitionId specialPartitionId = clusterMap.getWritablePartitionIds(MockClusterMap.SPECIAL_PARTITION_CLASS).get(0);
    // these hosts have replicas of the "special" partition and all the other partitions.
    MockHost localHost = new MockHost(specialPartitionId.getReplicaIds().get(0).getDataNodeId(), clusterMap);
    MockHost remoteHost = new MockHost(specialPartitionId.getReplicaIds().get(1).getDataNodeId(), clusterMap);
    return new Pair<>(localHost, remoteHost);
}
Also used : MockPartitionId(com.github.ambry.clustermap.MockPartitionId) PartitionId(com.github.ambry.clustermap.PartitionId) Pair(com.github.ambry.utils.Pair)
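
Callers unpack the returned Pair with getFirst()/getSecond(). A sketch of typical use in a test; the no-arg MockClusterMap constructor is an assumption, and imports for MockHost and ReplicationTestHelper are omitted because their packages are not shown in the listing above:

import com.github.ambry.clustermap.MockClusterMap;
import com.github.ambry.utils.Pair;

public class HostSelectionSketch {
    public static void main(String[] args) throws Exception {
        MockClusterMap clusterMap = new MockClusterMap(); // assumed no-arg test constructor
        Pair<MockHost, MockHost> hosts = ReplicationTestHelper.getLocalAndRemoteHosts(clusterMap);
        MockHost localHost = hosts.getFirst();   // first element: the "local" host
        MockHost remoteHost = hosts.getSecond(); // second element: the "remote" host
    }
}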

Aggregations

Pair (com.github.ambry.utils.Pair) 64
ArrayList (java.util.ArrayList) 29
HashMap (java.util.HashMap) 28
Map (java.util.Map) 28
Test (org.junit.Test) 20
IOException (java.io.IOException) 15
MetricRegistry (com.codahale.metrics.MetricRegistry) 14
List (java.util.List) 14
ByteBuffer (java.nio.ByteBuffer) 13
Collections (java.util.Collections) 13
File (java.io.File) 12
Assert (org.junit.Assert) 12
VerifiableProperties (com.github.ambry.config.VerifiableProperties) 11
Utils (com.github.ambry.utils.Utils) 10
HashSet (java.util.HashSet) 10
Properties (java.util.Properties) 10
Container (com.github.ambry.account.Container) 9
TestUtils (com.github.ambry.utils.TestUtils) 9
Arrays (java.util.Arrays) 9
Set (java.util.Set) 9