Use of org.elasticsearch.common.bytes.BytesArray in the crate project.
From the class DigestBlobTests, the method testResumeDigestBlobAddHeadAfterContent:
@Test
public void testResumeDigestBlobAddHeadAfterContent() throws IOException {
    UUID transferId = UUID.randomUUID();
    BlobContainer container = new BlobContainer(tmpFolder.newFolder().toPath());
    DigestBlob digestBlob = DigestBlob.resumeTransfer(
        container, "417de3231e23dcd6d224ff60918024bc6c59aa58", transferId, 2);
    BytesArray contentTail = new BytesArray("CDEFGHIJKLMN".getBytes(StandardCharsets.UTF_8));
    digestBlob.addContent(contentTail, false);
    BytesArray contentHead = new BytesArray("AB".getBytes(StandardCharsets.UTF_8));
    digestBlob.addToHead(contentHead);
    contentTail = new BytesArray("O".getBytes(StandardCharsets.UTF_8));
    digestBlob.addContent(contentTail, true);
    // check if tmp file's content is correct
    byte[] buffer = new byte[15];
    try (FileInputStream stream = new FileInputStream(digestBlob.file())) {
        assertThat(stream.read(buffer, 0, 15), is(15));
        assertThat(new BytesArray(buffer).toUtf8().trim(), is("ABCDEFGHIJKLMNO"));
    }
    File file = digestBlob.commit();
    // check if final file's content is correct
    buffer = new byte[15];
    try (FileInputStream stream = new FileInputStream(file)) {
        assertThat(stream.read(buffer, 0, 15), is(15));
        assertThat(new BytesArray(buffer).toUtf8().trim(), is("ABCDEFGHIJKLMNO"));
    }
    // assert file created
    assertThat(file.exists(), is(true));
    // just in case any references to file are left
    assertThat(file.delete(), is(true));
}
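The test exercises out-of-order writes: the tail ("CDEFGHIJKLMN" and "O") is appended starting at offset 2 before the two head bytes ("AB") are backfilled. As a rough mental model of how such writes can work (an assumption for illustration, not DigestBlob's actual implementation), a RandomAccessFile can skip a reserved head region and fill it in later:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch: write a file's tail first, then backfill the head,
// the way a resumed blob transfer has to. File name and sizes are made up.
public final class HeadTailWriteSketch {
    public static void main(String[] args) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile("blob.tmp", "rw")) {
            int headSize = 2;                  // bytes reserved for the head
            file.seek(headSize);               // skip the reserved head region
            file.write("CDEFGHIJKLMNO".getBytes(StandardCharsets.UTF_8));
            file.seek(0);                      // backfill the head afterwards
            file.write("AB".getBytes(StandardCharsets.UTF_8));
        }
        // blob.tmp now contains "ABCDEFGHIJKLMNO"
    }
}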
From the class SerializationTests, the method testPutChunkReplicaRequestSerialization:
@Test
public void testPutChunkReplicaRequestSerialization() throws Exception {
    BytesStreamOutput outputStream = new BytesStreamOutput();
    UUID transferId = UUID.randomUUID();
    PutChunkReplicaRequest requestOut = new PutChunkReplicaRequest();
    requestOut.index("foo");
    requestOut.transferId = transferId;
    requestOut.currentPos = 10;
    requestOut.isLast = false;
    requestOut.content = new BytesArray(new byte[]{0x65, 0x66});
    requestOut.sourceNodeId = "nodeId";
    requestOut.writeTo(outputStream);

    StreamInput inputStream = StreamInput.wrap(outputStream.bytes());
    PutChunkReplicaRequest requestIn = new PutChunkReplicaRequest();
    requestIn.readFrom(inputStream);

    assertEquals(requestOut.currentPos, requestIn.currentPos);
    assertEquals(requestOut.isLast, requestIn.isLast);
    assertEquals(requestOut.content, requestIn.content);
    assertEquals(requestOut.transferId, requestIn.transferId);
    assertEquals(requestOut.index(), requestIn.index());
}
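The round trip works because readFrom reads back every field in exactly the order writeTo wrote it. A minimal, self-contained sketch of that contract (the class ChunkStub and its fields are illustrative, not from crate):

import java.io.IOException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

// Illustrative request with two fields; reads must mirror writes, in order.
class ChunkStub {
    long currentPos;
    boolean isLast;

    void writeTo(BytesStreamOutput out) throws IOException {
        out.writeVLong(currentPos);
        out.writeBoolean(isLast);
    }

    void readFrom(StreamInput in) throws IOException {
        currentPos = in.readVLong();   // must match writeVLong above
        isLast = in.readBoolean();     // must match writeBoolean above
    }

    public static void main(String[] args) throws IOException {
        ChunkStub out = new ChunkStub();
        out.currentPos = 10;
        BytesStreamOutput outputStream = new BytesStreamOutput();
        out.writeTo(outputStream);

        ChunkStub in = new ChunkStub();
        in.readFrom(StreamInput.wrap(outputStream.bytes()));
        assert in.currentPos == out.currentPos && in.isLast == out.isLast;
    }
}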
From the class PutHeadChunkRunnable, the method run:
@Override
public void run() {
    FileInputStream fileInputStream = null;
    try {
        int bufSize = 4096;
        int bytesRead;
        int size;
        int maxFileGrowthWait = 5;
        int fileGrowthWaited = 0;
        byte[] buffer = new byte[bufSize];
        long remainingBytes = bytesToSend;
        File pendingFile;
        try {
            pendingFile = digestBlob.file();
            if (pendingFile == null) {
                pendingFile = digestBlob.getContainerFile();
            }
            fileInputStream = new FileInputStream(pendingFile);
        } catch (FileNotFoundException e) {
            // this happens if the file has already been moved from tmpDirectory to containerDirectory
            pendingFile = digestBlob.getContainerFile();
            fileInputStream = new FileInputStream(pendingFile);
        }
        while (remainingBytes > 0) {
            size = (int) Math.min(bufSize, remainingBytes);
            bytesRead = fileInputStream.read(buffer, 0, size);
            if (bytesRead < size) {
                waitUntilFileHasGrown(pendingFile);
                fileGrowthWaited++;
                if (fileGrowthWaited == maxFileGrowthWait) {
                    throw new HeadChunkFileTooSmallException(pendingFile.getAbsolutePath());
                }
                if (bytesRead < 1) {
                    continue;
                }
            }
            remainingBytes -= bytesRead;
            transportService.submitRequest(
                recipientNode,
                BlobHeadRequestHandler.Actions.PUT_BLOB_HEAD_CHUNK,
                new PutBlobHeadChunkRequest(transferId, new BytesArray(buffer, 0, bytesRead)),
                TransportRequestOptions.EMPTY,
                EmptyTransportResponseHandler.INSTANCE_SAME
            ).txGet();
        }
    } catch (IOException ex) {
        logger.error("IOException in PutHeadChunkRunnable", ex);
    } finally {
        blobTransferTarget.putHeadChunkTransferFinished(transferId);
        if (watcher != null) {
            try {
                watcher.close();
            } catch (IOException e) {
                logger.error("Error closing WatchService in {}", e, getClass().getSimpleName());
            }
        }
        if (fileInputStream != null) {
            try {
                fileInputStream.close();
            } catch (IOException e) {
                logger.error("Error closing HeadChunk", e);
            }
        }
    }
}
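Stripped of the transport call, the loop in run() is a bounded chunked read over a file that may still be growing: read up to bufSize bytes, and on a short read wait for the file to grow, giving up after a fixed number of attempts. A standalone sketch of just that pattern (names, the sleep-based wait, and the retry count are stand-ins, not crate's code):

import java.io.FileInputStream;
import java.io.IOException;

// Hypothetical sketch: read exactly bytesToSend bytes in bufSize chunks,
// retrying a bounded number of times while the file may still be growing.
final class ChunkedReadSketch {
    static void readAll(String path, long bytesToSend) throws IOException, InterruptedException {
        int bufSize = 4096;
        int retries = 0;
        byte[] buffer = new byte[bufSize];
        long remaining = bytesToSend;
        try (FileInputStream in = new FileInputStream(path)) {
            while (remaining > 0) {
                int size = (int) Math.min(bufSize, remaining);
                int bytesRead = in.read(buffer, 0, size);
                if (bytesRead < size) {
                    if (++retries == 5) {
                        throw new IOException("file stayed too small: " + path);
                    }
                    Thread.sleep(100);   // stand-in for waitUntilFileHasGrown()
                    if (bytesRead < 1) {
                        continue;        // nothing read at all; try again
                    }
                }
                remaining -= bytesRead;
                // a real implementation would ship buffer[0..bytesRead) here
            }
        }
    }
}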
From the class BlobRecoveryHandler, the method syncVarFiles:
private void syncVarFiles(AtomicReference<Exception> lastException) throws InterruptedException {
    for (byte prefix : BlobContainer.PREFIXES) {
        // byte[1] and byte[1] have different hashCodes,
        // so setA.removeAll(setB) wouldn't work with byte[]; that's why BytesArray is used here
        Set<BytesArray> remoteDigests = getExistingDigestsFromTarget(prefix);
        Set<BytesArray> localDigests = new HashSet<BytesArray>();
        for (byte[] digest : blobShard.currentDigests(prefix)) {
            localDigests.add(new BytesArray(digest));
        }
        Set<BytesArray> localButNotRemoteDigests = new HashSet<BytesArray>(localDigests);
        localButNotRemoteDigests.removeAll(remoteDigests);
        final CountDownLatch latch = new CountDownLatch(localButNotRemoteDigests.size());
        for (BytesArray digestBytes : localButNotRemoteDigests) {
            final String digest = Hex.encodeHexString(digestBytes.toBytes());
            logger.trace("[{}][{}] start to transfer file var/{} to {}",
                request.shardId().index().name(), request.shardId().id(),
                digest, request.targetNode().getName());
            recoverySettings.concurrentStreamPool().execute(
                new TransferFileRunnable(blobShard.blobContainer().getFile(digest), lastException, latch));
        }
        latch.await();
        remoteDigests.removeAll(localDigests);
        if (!remoteDigests.isEmpty()) {
            deleteFilesRequest(remoteDigests.toArray(new BytesArray[remoteDigests.size()]));
        }
    }
}
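The comment at the top of syncVarFiles deserves a concrete illustration: Java arrays inherit identity-based equals and hashCode from Object, so a Set of byte[] can never match two distinct arrays with equal content, while BytesArray compares by content. A small demonstration:

import java.util.HashSet;
import java.util.Set;
import org.elasticsearch.common.bytes.BytesArray;

public final class DigestSetDemo {
    public static void main(String[] args) {
        byte[] a = new byte[]{1};
        byte[] b = new byte[]{1};
        System.out.println(a.equals(b));                                 // false: identity equality
        System.out.println(new BytesArray(a).equals(new BytesArray(b))); // true: content equality

        Set<BytesArray> local = new HashSet<>();
        local.add(new BytesArray(a));
        Set<BytesArray> remote = new HashSet<>();
        remote.add(new BytesArray(b));
        local.removeAll(remote);         // works because BytesArray hashes by content
        System.out.println(local.isEmpty());                             // true: digests matched
    }
}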
From the class BlobRecoveryHandler, the method getExistingDigestsFromTarget:
private Set<BytesArray> getExistingDigestsFromTarget(byte prefix) {
    BlobStartPrefixResponse response = (BlobStartPrefixResponse) transportService.submitRequest(
        request.targetNode(),
        BlobRecoveryTarget.Actions.START_PREFIX,
        new BlobStartPrefixSyncRequest(request.recoveryId(), request.shardId(), prefix),
        TransportRequestOptions.EMPTY,
        new FutureTransportResponseHandler<TransportResponse>() {
            @Override
            public TransportResponse newInstance() {
                return new BlobStartPrefixResponse();
            }
        }
    ).txGet();
    Set<BytesArray> result = new HashSet<BytesArray>();
    for (byte[] digests : response.existingDigests) {
        result.add(new BytesArray(digests));
    }
    return result;
}