Use of io.netty.buffer.ByteBufInputStream in project SpongeCommon by SpongePowered.
The class SpongeFavicon, method decode.
private static BufferedImage decode(String encoded) throws IOException {
    checkArgument(encoded.startsWith(SpongeFavicon.FAVICON_PREFIX), "Unknown favicon format");
    ByteBuf base64 = Unpooled.copiedBuffer(encoded.substring(SpongeFavicon.FAVICON_PREFIX.length()), Charsets.UTF_8);
    try {
        ByteBuf buf = Base64.decode(base64);
        try {
            BufferedImage result = ImageIO.read(new ByteBufInputStream(buf));
            checkState(result.getWidth() == 64, "favicon must be 64 pixels wide");
            checkState(result.getHeight() == 64, "favicon must be 64 pixels high");
            return result;
        } finally {
            buf.release();
        }
    } finally {
        base64.release();
    }
}
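The wrap-read-release pattern above is the core use of ByteBufInputStream: it lets a Netty ByteBuf feed a blocking InputStream API (here ImageIO) without first copying the bytes into a byte[]. A minimal, self-contained sketch of the same pattern, using an illustrative buffer rather than SpongeCommon's favicon data:

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;

public final class ByteBufStreamExample {

    public static void main(String[] args) throws IOException {
        // Any ByteBuf works; here a heap buffer built from a UTF-8 string.
        ByteBuf buf = Unpooled.copiedBuffer("hello, netty", StandardCharsets.UTF_8);
        try {
            // The stream reads from the buffer's current readerIndex without
            // copying the payload into a byte[] first.
            try (InputStream in = new ByteBufInputStream(buf)) {
                int b;
                while ((b = in.read()) != -1) {
                    System.out.print((char) b);
                }
            }
            System.out.println();
        } finally {
            // The single-argument constructor does NOT release the buffer on
            // close, so the caller stays responsible for the reference count.
            buf.release();
        }
    }
}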
Use of io.netty.buffer.ByteBufInputStream in project ambry by linkedin.
The class ValidatingKeyConvertingTransformer, method transform.
@Override
public TransformationOutput transform(Message message) {
    ByteBuffer encryptionKey;
    BlobProperties props;
    ByteBuffer metadata;
    BlobData blobData;
    MessageInfo msgInfo = message.getMessageInfo();
    InputStream msgStream = message.getStream();
    TransformationOutput transformationOutput;
    try {
        // Read header
        ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        msgStream.read(headerVersion.array());
        short version = headerVersion.getShort();
        if (!isValidHeaderVersion(version)) {
            throw new MessageFormatException("Header version not supported " + version, MessageFormatErrorCodes.Data_Corrupt);
        }
        int headerSize = getHeaderSizeForVersion(version);
        ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize);
        headerBuffer.put(headerVersion.array());
        msgStream.read(headerBuffer.array(), Version_Field_Size_In_Bytes, headerSize - Version_Field_Size_In_Bytes);
        headerBuffer.rewind();
        MessageHeader_Format header = getMessageHeader(version, headerBuffer);
        header.verifyHeader();
        StoreKey originalKey = storeKeyFactory.getStoreKey(new DataInputStream(msgStream));
        if (header.isPutRecord()) {
            encryptionKey = header.hasEncryptionKeyRecord() ? deserializeBlobEncryptionKey(msgStream) : null;
            props = deserializeBlobProperties(msgStream);
            metadata = deserializeUserMetadata(msgStream);
            blobData = deserializeBlob(msgStream);
        } else {
            throw new IllegalArgumentException("Message cannot be a deleted record ");
        }
        if (msgInfo.getStoreKey().equals(originalKey)) {
            StoreKey newKey = storeKeyConverter.convert(Collections.singletonList(originalKey)).get(originalKey);
            if (newKey == null) {
                System.out.println("No mapping for the given key, transformed message will be null");
                transformationOutput = new TransformationOutput((Message) null);
            } else {
                MessageInfo transformedMsgInfo;
                PutMessageFormatInputStream transformedStream = new PutMessageFormatInputStream(newKey, encryptionKey, props, metadata, new ByteBufInputStream(blobData.content(), true), blobData.getSize(), blobData.getBlobType(), msgInfo.getLifeVersion());
                transformedMsgInfo = new MessageInfo.Builder(msgInfo).storeKey(newKey).size(transformedStream.getSize()).isUndeleted(false).build();
                transformationOutput = new TransformationOutput(new Message(transformedMsgInfo, transformedStream));
            }
        } else {
            throw new IllegalStateException("StoreKey in log " + originalKey + " failed to match store key from Index " + msgInfo.getStoreKey());
        }
    } catch (Exception e) {
        transformationOutput = new TransformationOutput(e);
    }
    return transformationOutput;
}
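Both ambry transformers hand the blob payload to PutMessageFormatInputStream via new ByteBufInputStream(blobData.content(), true). The boolean is Netty's releaseOnClose flag: closing the stream releases the wrapped ByteBuf, so whoever closes the transformed stream also ends the buffer's lifecycle and no separate release call is needed. A small sketch of that ownership transfer (the buffer and printouts are illustrative):

import java.io.IOException;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;

public final class ReleaseOnCloseExample {

    public static void main(String[] args) throws IOException {
        ByteBuf payload = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 });
        System.out.println("refCnt before: " + payload.refCnt()); // 1

        // releaseOnClose = true: closing the stream releases the wrapped buffer,
        // so the stream's consumer ends the buffer's lifecycle.
        ByteBufInputStream in = new ByteBufInputStream(payload, true);
        in.skip(2);
        in.close();

        System.out.println("refCnt after close: " + payload.refCnt()); // 0
    }
}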
Use of io.netty.buffer.ByteBufInputStream in project ambry by linkedin.
The class ValidatingTransformer, method transform.
@Override
public TransformationOutput transform(Message message) {
    ByteBuffer encryptionKey;
    BlobProperties props;
    ByteBuffer metadata;
    BlobData blobData;
    MessageInfo msgInfo = message.getMessageInfo();
    InputStream msgStream = message.getStream();
    TransformationOutput transformationOutput = null;
    try {
        // Read header
        ByteBuffer headerVersion = ByteBuffer.allocate(Version_Field_Size_In_Bytes);
        msgStream.read(headerVersion.array());
        short version = headerVersion.getShort();
        if (!isValidHeaderVersion(version)) {
            throw new MessageFormatException("Header version not supported " + version, MessageFormatErrorCodes.Data_Corrupt);
        }
        int headerSize = getHeaderSizeForVersion(version);
        ByteBuffer headerBuffer = ByteBuffer.allocate(headerSize);
        headerBuffer.put(headerVersion.array());
        msgStream.read(headerBuffer.array(), Version_Field_Size_In_Bytes, headerSize - Version_Field_Size_In_Bytes);
        headerBuffer.rewind();
        MessageHeader_Format header = getMessageHeader(version, headerBuffer);
        header.verifyHeader();
        StoreKey keyInStream = storeKeyFactory.getStoreKey(new DataInputStream(msgStream));
        if (header.isPutRecord()) {
            if (header.hasLifeVersion() && header.getLifeVersion() != msgInfo.getLifeVersion()) {
                logger.trace("LifeVersion in stream: {} failed to match lifeVersion from Index: {} for key {}", header.getLifeVersion(), msgInfo.getLifeVersion(), keyInStream);
            }
            encryptionKey = header.hasEncryptionKeyRecord() ? deserializeBlobEncryptionKey(msgStream) : null;
            props = deserializeBlobProperties(msgStream);
            metadata = deserializeUserMetadata(msgStream);
            blobData = deserializeBlob(msgStream);
        } else {
            throw new IllegalStateException("Message cannot be anything rather than put record ");
        }
        if (msgInfo.getStoreKey().equals(keyInStream)) {
            // BlobIDTransformer only exists on ambry-server and replication between servers is relying on blocking channel
            // which is still using java ByteBuffer. So, no need to consider releasing stuff.
            // @todo, when netty Bytebuf is adopted for blocking channel on ambry-server, remember to release this ByteBuf.
            PutMessageFormatInputStream transformedStream = new PutMessageFormatInputStream(keyInStream, encryptionKey, props, metadata, new ByteBufInputStream(blobData.content(), true), blobData.getSize(), blobData.getBlobType(), msgInfo.getLifeVersion());
            MessageInfo transformedMsgInfo = new MessageInfo.Builder(msgInfo).size(transformedStream.getSize()).isDeleted(false).isUndeleted(false).build();
            transformationOutput = new TransformationOutput(new Message(transformedMsgInfo, transformedStream));
        } else {
            throw new IllegalStateException("StoreKey in stream: " + keyInStream + " failed to match store key from Index: " + msgInfo.getStoreKey());
        }
    } catch (Exception e) {
        transformationOutput = new TransformationOutput(e);
    }
    return transformationOutput;
}
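Apart from the ByteBufInputStream at the end, the transformer's header handling follows a read-ahead pattern: read the two-byte version, look up the header size for that version, copy the version bytes into a header buffer, read the remainder at an offset, and rewind before parsing. A hedged sketch of that pattern with generic names (the sizes and the version-to-size lookup are placeholders, not the ambry wire format); it also uses readFully so short reads fail loudly, whereas the snippets above ignore the return value of read:

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

public final class HeaderReadSketch {

    private static final int VERSION_FIELD_SIZE = Short.BYTES;

    static ByteBuffer readHeader(InputStream msgStream) throws IOException {
        DataInputStream in = new DataInputStream(msgStream);

        // Read just the 2-byte version field first.
        ByteBuffer versionField = ByteBuffer.allocate(VERSION_FIELD_SIZE);
        in.readFully(versionField.array());
        short version = versionField.getShort();

        // The full header size depends on the version that was just read.
        int headerSize = headerSizeFor(version);
        ByteBuffer header = ByteBuffer.allocate(headerSize);
        header.put(versionField.array()); // the version bytes are part of the header
        in.readFully(header.array(), VERSION_FIELD_SIZE, headerSize - VERSION_FIELD_SIZE);
        header.rewind();
        return header;
    }

    private static int headerSizeFor(short version) throws IOException {
        if (version == 1) {
            return 26; // placeholder size, for illustration only
        }
        throw new IOException("Header version not supported: " + version);
    }
}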
Use of io.netty.buffer.ByteBufInputStream in project ambry by linkedin.
The class AmbryRequests, method handlePutRequest.
@Override
public void handlePutRequest(NetworkRequest request) throws IOException, InterruptedException {
    PutRequest receivedRequest;
    if (request instanceof LocalChannelRequest) {
        // This is a case where handlePutRequest is called when frontends are writing to Azure. In this case, this method
        // is called by request handler threads running within the frontend router itself. So, the request can be directly
        // referenced as java objects without any need for deserialization.
        PutRequest sentRequest = (PutRequest) ((LocalChannelRequest) request).getRequestInfo().getRequest();
        // However, we will create a new PutRequest object to represent the received Put request since the blob content
        // 'buffer' in PutRequest is accessed as 'stream' while writing to Store. Also, crc value for this request
        // would be null since it is only calculated (on the fly) when sending the request to network. It might be okay to
        // use null crc here since the scenario for which we are using crc (i.e. possibility of collisions due to fast
        // replication) as described in this PR https://github.com/linkedin/ambry/pull/549 might not be applicable when
        // frontends are talking to Azure.
        receivedRequest = new PutRequest(sentRequest.getCorrelationId(), sentRequest.getClientId(), sentRequest.getBlobId(), sentRequest.getBlobProperties(), sentRequest.getUsermetadata(), sentRequest.getBlobSize(), sentRequest.getBlobType(), sentRequest.getBlobEncryptionKey(), new ByteBufInputStream(sentRequest.getBlob()), null);
    } else {
        InputStream is = request.getInputStream();
        DataInputStream dis = is instanceof DataInputStream ? (DataInputStream) is : new DataInputStream(is);
        receivedRequest = PutRequest.readFrom(dis, clusterMap);
    }
    long requestQueueTime = SystemTime.getInstance().milliseconds() - request.getStartTimeInMs();
    long totalTimeSpent = requestQueueTime;
    metrics.putBlobRequestQueueTimeInMs.update(requestQueueTime);
    metrics.putBlobRequestRate.mark();
    long startTime = SystemTime.getInstance().milliseconds();
    PutResponse response = null;
    try {
        ServerErrorCode error = validateRequest(receivedRequest.getBlobId().getPartition(), RequestOrResponseType.PutRequest, false);
        if (error != ServerErrorCode.No_Error) {
            logger.error("Validating put request failed with error {} for request {}", error, receivedRequest);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), error);
        } else {
            MessageFormatInputStream stream = new PutMessageFormatInputStream(receivedRequest.getBlobId(), receivedRequest.getBlobEncryptionKey(), receivedRequest.getBlobProperties(), receivedRequest.getUsermetadata(), receivedRequest.getBlobStream(), receivedRequest.getBlobSize(), receivedRequest.getBlobType());
            BlobProperties properties = receivedRequest.getBlobProperties();
            long expirationTime = Utils.addSecondsToEpochTime(receivedRequest.getBlobProperties().getCreationTimeInMs(), properties.getTimeToLiveInSeconds());
            MessageInfo info = new MessageInfo.Builder(receivedRequest.getBlobId(), stream.getSize(), properties.getAccountId(), properties.getContainerId(), properties.getCreationTimeInMs()).expirationTimeInMs(expirationTime).crc(receivedRequest.getCrc()).lifeVersion(MessageInfo.LIFE_VERSION_FROM_FRONTEND).build();
            ArrayList<MessageInfo> infoList = new ArrayList<>();
            infoList.add(info);
            MessageFormatWriteSet writeset = new MessageFormatWriteSet(stream, infoList, false);
            Store storeToPut = storeManager.getStore(receivedRequest.getBlobId().getPartition());
            storeToPut.put(writeset);
            response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.No_Error);
            metrics.blobSizeInBytes.update(receivedRequest.getBlobSize());
            metrics.blobUserMetadataSizeInBytes.update(receivedRequest.getUsermetadata().limit());
            if (notification != null) {
                notification.onBlobReplicaCreated(currentNode.getHostname(), currentNode.getPort(), receivedRequest.getBlobId().getID(), BlobReplicaSourceType.PRIMARY);
            }
        }
    } catch (StoreException e) {
        logger.error("Store exception on a put with error code {} for request {}", e.getErrorCode(), receivedRequest, e);
        if (e.getErrorCode() == StoreErrorCodes.Already_Exist) {
            metrics.idAlreadyExistError.inc();
        } else if (e.getErrorCode() == StoreErrorCodes.IOError) {
            metrics.storeIOError.inc();
        } else {
            metrics.unExpectedStorePutError.inc();
        }
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ErrorMapping.getStoreErrorMapping(e.getErrorCode()));
    } catch (Exception e) {
        logger.error("Unknown exception on a put for request {}", receivedRequest, e);
        response = new PutResponse(receivedRequest.getCorrelationId(), receivedRequest.getClientId(), ServerErrorCode.Unknown_Error);
    } finally {
        long processingTime = SystemTime.getInstance().milliseconds() - startTime;
        totalTimeSpent += processingTime;
        publicAccessLogger.info("{} {} processingTime {}", receivedRequest, response, processingTime);
        metrics.putBlobProcessingTimeInMs.update(processingTime);
        metrics.updatePutBlobProcessingTimeBySize(receivedRequest.getBlobSize(), processingTime);
    }
    sendPutResponse(requestResponseChannel, response, request, metrics.putBlobResponseQueueTimeInMs, metrics.putBlobSendTimeInMs, metrics.putBlobTotalTimeInMs, totalTimeSpent, receivedRequest.getBlobSize(), metrics);
}
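In the LocalChannelRequest branch the blob buffer of the already-constructed PutRequest is wrapped directly, so draining the resulting stream advances that buffer's readerIndex. When the original buffer must stay readable for another consumer, a retained duplicate gives the stream its own reader index and its own reference; this is a general Netty technique, shown below as an assumption-labeled sketch rather than something the ambry code above does:

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.Unpooled;

public final class RetainedDuplicateSketch {

    public static void main(String[] args) throws IOException {
        ByteBuf blob = Unpooled.copiedBuffer("blob-bytes", StandardCharsets.UTF_8);

        // retainedDuplicate() shares the content but has its own reader index and
        // its own retained reference, so draining the stream leaves 'blob' readable.
        try (InputStream in = new ByteBufInputStream(blob.retainedDuplicate(), true)) {
            byte[] drained = new byte[blob.readableBytes()];
            int read = in.read(drained);
            System.out.println("drained " + read + " bytes");
        }

        System.out.println("original still readable: " + blob.readableBytes() + " bytes");
        blob.release();
    }
}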
Use of io.netty.buffer.ByteBufInputStream in project ambry by linkedin.
The class RouterUtils, method mapToReceivedResponse.
/**
* This method is applicable when we are processing responses received from Azure APIs in Frontend via {@link LocalNetworkClient}.
* The responses received from Azure are constructed as java objects such as {@link GetResponse}, {@link PutResponse} in
* {@link com.github.ambry.protocol.AmbryRequests} class methods from {@link com.github.ambry.protocol.RequestHandler}
* threads running within the Frontend itself. The content in these responses is available as buffer but we access it
* as stream in the Frontend router. Hence, we create new Response objects by having a stream enclose the buffer.
*
* At the moment, only {@link GetResponse} carries content and needs to be constructed again as below. Other responses
* like {@link PutResponse}, {@link DeleteResponse}, etc which don't carry any content don't need to be reconstructed
* and can be referenced as they are.
* @param sentResponse {@link Response} object constructed at sender side.
* @return {@link Response} object constructed at receiver side.
*/
public static Response mapToReceivedResponse(Response sentResponse) {
    Response receivedResponse;
    if (sentResponse instanceof GetResponse) {
        GetResponse getResponse = (GetResponse) sentResponse;
        receivedResponse = new GetResponse(getResponse.getCorrelationId(), getResponse.getClientId(), getResponse.getPartitionResponseInfoList(), new ByteBufInputStream(getResponse.getDataToSend().content()), getResponse.getError());
    } else {
        receivedResponse = sentResponse;
    }
    return receivedResponse;
}