Use of com.github.ambry.utils.ByteBufferChannel in project ambry by LinkedIn.
The class StoreCopier, method copy.
/**
 * Copies data starting from {@code startToken} until all the data is copied.
 * @param startToken the {@link FindToken} to start copying from. It is expected that the start token does not cause
 *                   the copier to attempt to copy blobs that have already been copied. If that happens, the boolean
 *                   in the return value will be {@code true}.
 * @return a {@link Pair} of the {@link FindToken} until which data has been copied and a {@link Boolean} indicating
 *         whether the source had problems that were skipped over, like duplicates ({@code true} indicates that
 *         there were).
 * @throws Exception if there is any exception during processing
 */
public Pair<FindToken, Boolean> copy(FindToken startToken) throws Exception {
  boolean sourceHasProblems = false;
  FindToken lastToken;
  FindToken token = startToken;
  do {
    lastToken = token;
    FindInfo findInfo = src.findEntriesSince(lastToken, fetchSizeInBytes, null, null);
    List<MessageInfo> messageInfos = findInfo.getMessageEntries();
    for (Transformer transformer : transformers) {
      transformer.warmup(messageInfos);
    }
    for (MessageInfo messageInfo : messageInfos) {
      logger.trace("Processing {} - isDeleted: {}, isExpired {}", messageInfo.getStoreKey(),
          messageInfo.isDeleted(), messageInfo.isExpired());
      if (!messageInfo.isExpired() && !messageInfo.isDeleted()) {
        if (tgt.findMissingKeys(Collections.singletonList(messageInfo.getStoreKey())).size() == 1) {
          StoreInfo storeInfo =
              src.get(Collections.singletonList(messageInfo.getStoreKey()), EnumSet.allOf(StoreGetOptions.class));
          MessageReadSet readSet = storeInfo.getMessageReadSet();
          if (readSet.sizeInBytes(0) > Integer.MAX_VALUE) {
            throw new IllegalStateException("Cannot copy blobs whose size > Integer.MAX_VALUE");
          }
          int size = (int) readSet.sizeInBytes(0);
          byte[] buf = new byte[size];
          readSet.writeTo(0, new ByteBufferChannel(ByteBuffer.wrap(buf)), 0, size);
          Message message = new Message(storeInfo.getMessageReadSetInfo().get(0), new ByteArrayInputStream(buf));
          for (Transformer transformer : transformers) {
            TransformationOutput tfmOutput = transformer.transform(message);
            if (tfmOutput.getException() != null) {
              throw tfmOutput.getException();
            } else {
              message = tfmOutput.getMsg();
            }
            if (message == null) {
              break;
            }
          }
          if (message == null) {
            logger.trace("Dropping {} because the transformers did not return a message", messageInfo.getStoreKey());
            continue;
          }
          MessageFormatWriteSet writeSet = new MessageFormatWriteSet(message.getStream(),
              Collections.singletonList(message.getMessageInfo()), false);
          tgt.put(writeSet);
          MessageInfo tgtMsgInfo = message.getMessageInfo();
          if (tgtMsgInfo.isTtlUpdated()) {
            MessageInfo updateMsgInfo = new MessageInfo(tgtMsgInfo.getStoreKey(), 0, false, true,
                tgtMsgInfo.getExpirationTimeInMs(), tgtMsgInfo.getAccountId(), tgtMsgInfo.getContainerId(),
                tgtMsgInfo.getOperationTimeMs());
            tgt.updateTtl(Collections.singletonList(updateMsgInfo));
          }
          logger.trace("Copied {} as {}", messageInfo.getStoreKey(), tgtMsgInfo.getStoreKey());
        } else if (!messageInfo.isTtlUpdated()) {
          logger.warn("Found a duplicate entry for {} while copying data", messageInfo.getStoreKey());
          sourceHasProblems = true;
        }
      }
    }
    token = findInfo.getFindToken();
    double percentBytesRead = src.isEmpty() ? 100.0 : token.getBytesRead() * 100.0 / src.getSizeInBytes();
    logger.info("[{}] [{}] {}% copied", Thread.currentThread().getName(), storeId, df.format(percentBytesRead));
  } while (!token.equals(lastToken));
  return new Pair<>(token, sourceHasProblems);
}
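The core ByteBufferChannel pattern in copy() is: size a byte array up front, wrap it in a ByteBuffer-backed channel, and hand the channel to a producer that writes to a WritableByteChannel. A minimal, self-contained sketch of that capture pattern (the class name and payload are illustrative, not from ambry; only the ByteBufferChannel calls mirror the method above):

import com.github.ambry.utils.ByteBufferChannel;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CaptureSketch {
  public static void main(String[] args) throws Exception {
    byte[] payload = "blob-bytes".getBytes(StandardCharsets.UTF_8);
    // Size the destination up front, as copy() does with readSet.sizeInBytes(0).
    byte[] buf = new byte[payload.length];
    ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.wrap(buf));
    // Any producer that accepts a WritableByteChannel can fill buf through the channel.
    channel.write(ByteBuffer.wrap(payload));
    // buf now holds the captured bytes; copy() wraps them in a ByteArrayInputStream at this point.
    System.out.println(new String(buf, StandardCharsets.UTF_8));
  }
}

Because writes through the channel are bounded by the wrapped buffer's capacity, copy() can perform its size guard (the Integer.MAX_VALUE check) once, before allocation.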
Use of com.github.ambry.utils.ByteBufferChannel in project ambry by LinkedIn.
The class PutOperationTest, method testSendIncomplete.
/**
* Ensure that if any of the requests associated with the buffer of a PutChunk is not completely read out even
* after the associated chunk is complete, the buffer is not reused even though the PutChunk is reused.
*/
@Test
public void testSendIncomplete() throws Exception {
  int numChunks = routerConfig.routerMaxInMemPutChunks + 1;
  BlobProperties blobProperties =
      new BlobProperties(-1, "serviceId", "memberId", "contentType", false, Utils.Infinite_Time,
          Utils.getRandomShort(TestUtils.RANDOM), Utils.getRandomShort(TestUtils.RANDOM), false, null, null, null);
  byte[] userMetadata = new byte[10];
  byte[] content = new byte[chunkSize * numChunks];
  random.nextBytes(content);
  ReadableStreamChannel channel = new ByteBufferReadableStreamChannel(ByteBuffer.wrap(content));
  FutureResult<String> future = new FutureResult<>();
  MockNetworkClient mockNetworkClient = new MockNetworkClient();
  PutOperation op =
      PutOperation.forUpload(routerConfig, routerMetrics, mockClusterMap, new LoggingNotificationSystem(),
          new InMemAccountService(true, false), userMetadata, channel, PutBlobOptions.DEFAULT, future, null,
          new RouterCallback(mockNetworkClient, new ArrayList<>()), null, null, null, null, time, blobProperties,
          MockClusterMap.DEFAULT_PARTITION_CLASS, quotaChargeCallback);
  op.startOperation();
  List<RequestInfo> requestInfos = new ArrayList<>();
  requestRegistrationCallback.setRequestsToSend(requestInfos);
  // Since this channel is in memory, one call to fill chunks will fill the maximum number of PutChunks.
  op.fillChunks();
  Assert.assertTrue("ReadyForPollCallback should have been invoked as chunks were fully filled",
      mockNetworkClient.getAndClearWokenUpStatus());
  // A poll should therefore return requestParallelism number of requests from each chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(routerConfig.routerMaxInMemPutChunks * requestParallelism, requestInfos.size());
  // There are routerMaxInMemPutChunks + 1 data chunks for this blob (and a metadata chunk).
  // Once the first chunk is completely sent out, the first PutChunk will be reused. What the test verifies is that
  // the buffer of the first PutChunk does not get reused. It does this as follows:
  // For the first chunk:
  // 1. use the first request to succeed the chunk (the successTarget is set to 1).
  // 2. read out and store the serialized bytes of the second request for comparison later.
  // 3. read out the third request after the first PutChunk gets reused and ensure that its data is the same as
  //    what was saved off from the second. This means that the buffer was not reused by the first PutChunk.
  // 1.
  ResponseInfo responseInfo = getResponseInfo(requestInfos.get(0));
  PutResponse putResponse = responseInfo.getError() == null
      ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  requestInfos.get(0).getRequest().release();
  responseInfo.release();
  // 2.
  PutRequest putRequest = (PutRequest) requestInfos.get(1).getRequest();
  ByteBuffer buf = ByteBuffer.allocate((int) putRequest.sizeInBytes());
  ByteBufferChannel bufChannel = new ByteBufferChannel(buf);
  // read it out (which also marks this request as complete).
  putRequest.writeTo(bufChannel);
  putRequest.release();
  byte[] expectedRequestContent = buf.array();
  // 3.
  // first save the third request.
  PutRequest savedRequest = (PutRequest) requestInfos.get(2).getRequest();
  // succeed all the other requests.
  for (int i = 3; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null
        ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  // fill the first PutChunk with the last chunk.
  op.fillChunks();
  // Verify that the last chunk was filled.
  requestInfos.clear();
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  // Verify that the buffer of the third request is not affected.
  buf = ByteBuffer.allocate((int) savedRequest.sizeInBytes());
  bufChannel = new ByteBufferChannel(buf);
  savedRequest.writeTo(bufChannel);
  savedRequest.release();
  byte[] savedRequestContent = buf.array();
  // reset the correlation ids as they will be different between the two requests.
  resetCorrelationId(expectedRequestContent);
  resetCorrelationId(savedRequestContent);
  Assert.assertArrayEquals("Underlying buffer should not have been reused", expectedRequestContent,
      savedRequestContent);
  // Now that all the requests associated with the original buffer have been read out, the next poll will free that
  // buffer. The test cannot verify the freeing directly, as it is internal to the chunk (though this can be
  // verified via coverage).
  for (int i = 0; i < requestInfos.size(); i++) {
    responseInfo = getResponseInfo(requestInfos.get(i));
    putResponse = responseInfo.getError() == null
        ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
    op.handleResponse(responseInfo, putResponse);
    requestInfos.get(i).getRequest().release();
    responseInfo.release();
  }
  requestInfos.clear();
  // this should return requests for the metadata chunk.
  op.poll(requestRegistrationCallback);
  Assert.assertEquals(1 * requestParallelism, requestInfos.size());
  Assert.assertFalse("Operation should not be complete yet", op.isOperationComplete());
  // once the metadata request succeeds, it should complete the operation.
  responseInfo = getResponseInfo(requestInfos.get(0));
  putResponse = responseInfo.getError() == null
      ? PutResponse.readFrom(new NettyByteBufDataInputStream(responseInfo.content())) : null;
  op.handleResponse(responseInfo, putResponse);
  responseInfo.release();
  requestInfos.forEach(info -> info.getRequest().release());
  Assert.assertTrue("Operation should be complete at this time", op.isOperationComplete());
}
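Steps 2 and 3 above use the same capture recipe: allocate a ByteBuffer of sizeInBytes(), wrap it in a ByteBufferChannel, and call writeTo() until the request is fully read out. A sketch of that recipe as a standalone helper (the class and method names are hypothetical, not part of the test; Send and ByteBufferChannel are the real ambry types):

import com.github.ambry.network.Send;
import com.github.ambry.utils.ByteBufferChannel;
import java.io.IOException;
import java.nio.ByteBuffer;

final class SendDrainSketch {
  // Drain a Send's serialized form into a byte array for later comparison.
  static byte[] drainToArray(Send send) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate((int) send.sizeInBytes());
    ByteBufferChannel channel = new ByteBufferChannel(buf);
    while (!send.isSendComplete()) {
      // Each writeTo() copies as much as the channel's backing buffer can still hold.
      send.writeTo(channel);
    }
    return buf.array();
  }
}

In the test, arrays captured this way are only comparable after resetCorrelationId() clears the one field that legitimately differs between the two requests.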
Use of com.github.ambry.utils.ByteBufferChannel in project ambry by LinkedIn.
The class StoredBlob, method send.
/**
* Take in a request in the form of {@link Send} and return a response in the form of a
* {@link BoundedNettyByteBufReceive}.
* @param send the request.
* @return the response.
* @throws IOException if there was an error in interpreting the request.
*/
public BoundedNettyByteBufReceive send(Send send) throws IOException {
  if (!shouldRespond) {
    return null;
  }
  ServerErrorCode serverError =
      hardError != null ? hardError : serverErrors.size() > 0 ? serverErrors.poll() : ServerErrorCode.No_Error;
  RequestOrResponseType type = ((RequestOrResponse) send).getRequestType();
  RequestOrResponse response;
  requestCounts.computeIfAbsent(type, k -> new LongAdder()).increment();
  switch (type) {
    case PutRequest:
      response = makePutResponse((PutRequest) send, serverError);
      break;
    case GetRequest:
      response = makeGetResponse((GetRequest) send, serverError);
      break;
    case DeleteRequest:
      response = makeDeleteResponse((DeleteRequest) send, serverError);
      break;
    case TtlUpdateRequest:
      response = makeTtlUpdateResponse((TtlUpdateRequest) send, serverError);
      break;
    case UndeleteRequest:
      response = makeUndeleteResponse((UndeleteRequest) send, serverError);
      break;
    default:
      throw new IOException("Unknown request type received");
  }
  ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate((int) response.sizeInBytes()));
  response.writeTo(channel);
  response.release();
  ByteBuffer payload = channel.getBuffer();
  payload.flip();
  BoundedNettyByteBufReceive receive = new BoundedNettyByteBufReceive(100 * 1024 * 1024);
  receive.readFrom(Channels.newChannel(new ByteBufferInputStream(payload)));
  return receive;
}
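One detail of the tail of send() is easy to miss: getBuffer() hands back the channel's backing buffer still positioned for writing, so it must be flipped before anything reads from it. A sketch of that serialize-then-flip step in isolation (the class and method names are illustrative, not from ambry):

import com.github.ambry.network.Send;
import com.github.ambry.utils.ByteBufferChannel;
import java.io.IOException;
import java.nio.ByteBuffer;

final class ResponseSerializerSketch {
  // Serialize a Send into a read-ready ByteBuffer, mirroring the tail of send() above.
  static ByteBuffer toReadableBuffer(Send response) throws IOException {
    ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate((int) response.sizeInBytes()));
    while (!response.isSendComplete()) {
      response.writeTo(channel);
    }
    ByteBuffer payload = channel.getBuffer(); // backing buffer; position == bytes written
    payload.flip(); // switch from write mode to read mode before handing off
    return payload;
  }
}

The loop is defensive: with a channel sized exactly at sizeInBytes(), a single writeTo() call is normally enough, which is why send() itself calls it once.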
Use of com.github.ambry.utils.ByteBufferChannel in project ambry by LinkedIn.
The class MockSelector, method poll.
/**
* Mocks sending and polling. Creates a response for every send to be returned after the next poll,
* with the correlation id in the Send, unless beBad state is on. If beBad is on,
* all sends will result in disconnections.
* @param timeoutMs Ignored.
* @param sends The list of new sends.
*/
@Override
public void poll(long timeoutMs, List<NetworkSend> sends) throws IOException {
  if (state == MockSelectorState.ThrowExceptionOnPoll) {
    throw new IOException("Mock exception on poll");
  }
  disconnected = new ArrayList<>();
  if (state == MockSelectorState.FailConnectionInitiationOnPoll) {
    disconnected.addAll(nextConnected);
    connected = new ArrayList<>();
    nextConnected = new ArrayList<>();
  } else if (state != MockSelectorState.IdlePoll) {
    connected = nextConnected;
    nextConnected = new ArrayList<>();
  }
  disconnected.addAll(delayedFailPassedList);
  delayedFailPassedList.clear();
  delayedFailPassedList.addAll(delayedFailFreshList);
  delayedFailFreshList.clear();
  disconnected.addAll(closedConnections);
  this.sends = sends;
  if (sends != null) {
    for (NetworkSend send : sends) {
      MockSend mockSend = (MockSend) send.getPayload();
      if (state == MockSelectorState.DisconnectOnSend) {
        disconnected.add(send.getConnectionId());
      } else if (!closedConnections.contains(send.getConnectionId())) {
        receives.add(new NetworkReceive(send.getConnectionId(),
            new MockBoundedNettyByteBufReceive(mockSend.getCorrelationId()), new MockTime()));
        mockSend.writeTo(new ByteBufferChannel(ByteBuffer.allocate(MockSend.SEND_SIZE)));
        if (mockSend.isSendComplete()) {
          mockSend.release();
        }
      }
    }
  }
  closedConnections.clear();
}
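poll() drains each payload into a throwaway channel allocated at MockSend.SEND_SIZE, relying on ByteBufferChannel to accept at most its backing buffer's remaining capacity per write and to leave any excess in the source buffer. A sketch of that assumed semantics (the expected output is my reading of the class, not something the ambry docs state):

import com.github.ambry.utils.ByteBufferChannel;
import java.nio.ByteBuffer;

public class PartialWriteSketch {
  public static void main(String[] args) throws Exception {
    ByteBuffer src = ByteBuffer.wrap(new byte[16]);
    // The channel's capacity (4 bytes) is deliberately smaller than the source (16 bytes).
    ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate(4));
    int written = channel.write(src);
    // Assumed result: 4 written, 12 remaining in src; a fresh channel would be needed for the rest.
    System.out.println(written + " written, " + src.remaining() + " remaining in source");
  }
}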
Use of com.github.ambry.utils.ByteBufferChannel in project ambry by LinkedIn.
The class RequestResponseTest, method serAndPrepForRead.
/**
 * Serializes a {@link RequestOrResponse} and prepares it for reading.
 * @param requestOrResponse the {@link RequestOrResponse} to serialize.
 * @param channelSize the number of bytes that the output channel can accept in one writeTo() call. Setting this
 *                    to -1 sets the channel buffer size to a third of the size of {@code requestOrResponse}.
 * @param isRequest {@code true} if {@code requestOrResponse} is a request, {@code false} otherwise.
 * @return the serialized form of {@code requestOrResponse} as a {@link DataInputStream}.
 * @throws IOException
 */
private DataInputStream serAndPrepForRead(RequestOrResponse requestOrResponse, int channelSize, boolean isRequest)
    throws IOException {
  DataInputStream stream;
  if (useByteBufContent && requestOrResponse.content() != null) {
    stream = new NettyByteBufDataInputStream(requestOrResponse.content());
  } else {
    if (channelSize == -1) {
      channelSize = (int) (requestOrResponse.sizeInBytes() / 3);
    }
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    int expectedWriteToCount = (int) ((requestOrResponse.sizeInBytes() + channelSize - 1) / channelSize);
    int actualWriteToCount = 0;
    do {
      ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate(channelSize));
      requestOrResponse.writeTo(channel);
      ByteBuffer underlyingBuf = channel.getBuffer();
      underlyingBuf.flip();
      outputStream.write(underlyingBuf.array(), underlyingBuf.arrayOffset(), underlyingBuf.remaining());
      actualWriteToCount++;
    } while (!requestOrResponse.isSendComplete());
    Assert.assertEquals("Should not have written anything", 0,
        requestOrResponse.writeTo(new ByteBufferChannel(ByteBuffer.allocate(1))));
    Assert.assertEquals("writeTo() should have written out as much as the channel could take in every call",
        expectedWriteToCount, actualWriteToCount);
    stream = new DataInputStream(new ByteArrayInputStream(outputStream.toByteArray()));
  }
  // read the length
  stream.readLong();
  if (isRequest) {
    // read the request type and verify it
    Assert.assertEquals(RequestOrResponseType.values()[stream.readShort()], requestOrResponse.getRequestType());
  }
  return stream;
}
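The do/while above generalizes into a reusable chunked serializer: a deliberately small channel forces multiple partial writeTo() calls, and each iteration flips the backing buffer and appends only its readable region to the output. A sketch of that generalization (the class and method names are hypothetical; the loop terminates because writeTo() stops writing once the Send is complete, which the first assertion above checks):

import com.github.ambry.network.Send;
import com.github.ambry.utils.ByteBufferChannel;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;

final class ChunkedSerializerSketch {
  // Serialize a Send through a small fixed-size channel, mirroring the loop in serAndPrepForRead().
  static byte[] serializeInChunks(Send send, int chunkSize) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    while (!send.isSendComplete()) {
      // A fresh channel per iteration resets the write position and exercises partial writes.
      ByteBufferChannel channel = new ByteBufferChannel(ByteBuffer.allocate(chunkSize));
      send.writeTo(channel);
      ByteBuffer buf = channel.getBuffer();
      buf.flip(); // expose only the bytes written in this iteration
      out.write(buf.array(), buf.arrayOffset(), buf.remaining());
    }
    return out.toByteArray();
  }
}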