Use of org.apache.hyracks.storage.am.lsm.common.api.ILSMDiskComponent in project asterixdb by Apache.
The class LSMBTreeFilterMergeTestDriver, method runTest.
@Override
protected void runTest(ISerializerDeserializer[] fieldSerdes, int numKeys, BTreeLeafFrameType leafType, ITupleReference lowKey, ITupleReference highKey, ITupleReference prefixLowKey, ITupleReference prefixHighKey) throws Exception {
    OrderedIndexTestContext ctx = createTestContext(fieldSerdes, numKeys, leafType, true);
    ctx.getIndex().create();
    ctx.getIndex().activate();
    // We assume all fieldSerdes are of the same type. Check the first one to determine which field types to generate.
    if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadIntTuples(ctx, numTuplesToInsert, true, getRandom());
    } else if (fieldSerdes[0] instanceof UTF8StringSerializerDeserializer) {
        orderedIndexTestUtils.bulkLoadStringTuples(ctx, numTuplesToInsert, true, getRandom());
    }
    int maxTreesToMerge = AccessMethodTestsConfig.LSM_BTREE_MAX_TREES_TO_MERGE;
    ILSMIndexAccessor accessor = (ILSMIndexAccessor) ctx.getIndexAccessor();
    IBinaryComparator comp = ctx.getComparatorFactories()[0].createBinaryComparator();
    for (int i = 0; i < maxTreesToMerge; i++) {
        int flushed = 0;
        for (; flushed < i; flushed++) {
            Pair<ITupleReference, ITupleReference> minMax = null;
            if (fieldSerdes[0] instanceof IntegerSerializerDeserializer) {
                minMax = orderedIndexTestUtils.insertIntTuples(ctx, numTuplesToInsert, true, getRandom());
            } else {
                minMax = orderedIndexTestUtils.insertStringTuples(ctx, numTuplesToInsert, true, getRandom());
            }
            if (minMax != null) {
                ILSMComponentFilter f = ((LSMBTree) ctx.getIndex()).getCurrentMemoryComponent().getLSMComponentFilter();
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(f);
                Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }
            StubIOOperationCallback stub = new StubIOOperationCallback();
            BlockingIOOperationCallbackWrapper waiter = new BlockingIOOperationCallbackWrapper(stub);
            accessor.scheduleFlush(waiter);
            waiter.waitForIO();
            if (minMax != null) {
                Pair<ITupleReference, ITupleReference> obsMinMax = filterToMinMax(stub.getLastNewComponent().getLSMComponentFilter());
                Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(obsMinMax.getLeft(), minMax.getLeft(), comp));
                Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(obsMinMax.getRight(), minMax.getRight(), comp));
            }
        }
        List<ILSMDiskComponent> flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        MutablePair<ITupleReference, ITupleReference> expectedMergeMinMax = null;
        for (ILSMDiskComponent f : flushedComponents) {
            Pair<ITupleReference, ITupleReference> componentMinMax = filterToMinMax(f.getLSMComponentFilter());
            if (expectedMergeMinMax == null) {
                expectedMergeMinMax = MutablePair.of(componentMinMax.getLeft(), componentMinMax.getRight());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(), componentMinMax.getLeft(), comp) > 0) {
                expectedMergeMinMax.setLeft(componentMinMax.getLeft());
            }
            if (TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(), componentMinMax.getRight(), comp) < 0) {
                expectedMergeMinMax.setRight(componentMinMax.getRight());
            }
        }
        accessor.scheduleMerge(NoOpIOOperationCallbackFactory.INSTANCE.createIoOpCallback(), ((LSMBTree) ctx.getIndex()).getImmutableComponents());
        flushedComponents = ((LSMBTree) ctx.getIndex()).getImmutableComponents();
        Pair<ITupleReference, ITupleReference> mergedMinMax = filterToMinMax(flushedComponents.get(0).getLSMComponentFilter());
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getLeft(), mergedMinMax.getLeft(), comp));
        Assert.assertEquals(0, TreeIndexTestUtils.compareFilterTuples(expectedMergeMinMax.getRight(), mergedMinMax.getRight(), comp));
        orderedIndexTestUtils.checkPointSearches(ctx);
        orderedIndexTestUtils.checkScan(ctx);
        orderedIndexTestUtils.checkDiskOrderScan(ctx);
        orderedIndexTestUtils.checkRangeSearch(ctx, lowKey, highKey, true, true);
        if (prefixLowKey != null && prefixHighKey != null) {
            orderedIndexTestUtils.checkRangeSearch(ctx, prefixLowKey, prefixHighKey, true, true);
        }
    }
    ctx.getIndex().deactivate();
    ctx.getIndex().destroy();
}
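The key step in this test is deriving the expected filter of the merged component by folding the [min, max] pairs of every flushed component (the loop over flushedComponents above). The following is a minimal, self-contained sketch of that folding logic using plain long keys in place of ITupleReference tuples and a binary comparator; the Range and FilterRangeFolder names are hypothetical and not part of Hyracks.

import java.util.List;

final class FilterRangeFolder {

    // Hypothetical stand-in for one component's filter: the [min, max] key range it covers.
    record Range(long min, long max) {
    }

    // The merged component contains every tuple from the input components, so its filter
    // range is the element-wise minimum of the mins and maximum of the maxes.
    static Range expectedMergedRange(List<Range> componentRanges) {
        long min = Long.MAX_VALUE;
        long max = Long.MIN_VALUE;
        for (Range r : componentRanges) {
            min = Math.min(min, r.min());
            max = Math.max(max, r.max());
        }
        return new Range(min, max);
    }

    public static void main(String[] args) {
        Range merged = expectedMergedRange(List.of(new Range(10, 50), new Range(5, 20), new Range(30, 90)));
        System.out.println("expected merged filter range: " + merged); // Range[min=5, max=90]
    }
}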
Use of org.apache.hyracks.storage.am.lsm.common.api.ILSMDiskComponent in project asterixdb by Apache.
The class ReplicationManager, method processJob.
/**
 * Processes the replication job based on its specifications.
 *
 * @param job
 *            The replication job
 * @param replicasSockets
 *            The remote replicas' sockets to send the request to.
 * @param requestBuffer
 *            The buffer to use to send the request.
 * @throws IOException
 */
private void processJob(IReplicationJob job, Map<String, SocketChannel> replicasSockets, ByteBuffer requestBuffer) throws IOException {
    try {
        //all of the job's files belong to a single storage partition.
        //get any of them to determine the partition from the file path.
        String jobFile = job.getJobFiles().iterator().next();
        IndexFileProperties indexFileRef = localResourceRepo.getIndexFileRef(jobFile);
        if (!replicationStrategy.isMatch(indexFileRef.getDatasetId())) {
            return;
        }
        int jobPartitionId = indexFileRef.getPartitionId();
        ByteBuffer responseBuffer = null;
        LSMIndexFileProperties asterixFileProperties = new LSMIndexFileProperties();
        if (requestBuffer == null) {
            requestBuffer = ByteBuffer.allocate(INITIAL_BUFFER_SIZE);
        }
        boolean isLSMComponentFile = job.getJobType() == ReplicationJobType.LSM_COMPONENT;
        try {
            //if there isn't already a connection, establish a new one
            if (replicasSockets == null) {
                replicasSockets = getActiveRemoteReplicasSockets();
            }
            int remainingFiles = job.getJobFiles().size();
            if (job.getOperation() == ReplicationOperation.REPLICATE) {
                //if the replication job is an LSM_COMPONENT, its properties are sent first, then its files.
                ILSMIndexReplicationJob LSMComponentJob = null;
                if (job.getJobType() == ReplicationJobType.LSM_COMPONENT) {
                    //send LSMComponent properties
                    LSMComponentJob = (ILSMIndexReplicationJob) job;
                    LSMComponentProperties lsmCompProp = new LSMComponentProperties(LSMComponentJob, nodeId);
                    requestBuffer = ReplicationProtocol.writeLSMComponentPropertiesRequest(lsmCompProp, requestBuffer);
                    sendRequest(replicasSockets, requestBuffer);
                }
                for (String filePath : job.getJobFiles()) {
                    remainingFiles--;
                    Path path = Paths.get(filePath);
                    if (Files.notExists(path)) {
                        LOGGER.log(Level.SEVERE, "File deleted before replication: " + filePath);
                        continue;
                    }
                    LOGGER.log(Level.INFO, "Replicating file: " + filePath);
                    //open file for reading
                    try (RandomAccessFile fromFile = new RandomAccessFile(filePath, "r");
                            FileChannel fileChannel = fromFile.getChannel()) {
                        long fileSize = fileChannel.size();
                        if (LSMComponentJob != null) {
                            /**
                             * Since this is an LSM_COMPONENT REPLICATE job, the job will contain
                             * only the component being replicated.
                             */
                            ILSMDiskComponent diskComponent = LSMComponentJob.getLSMIndexOperationContext().getComponentsToBeReplicated().get(0);
                            long lsnOffset = LSMIndexUtil.getComponentFileLSNOffset(LSMComponentJob.getLSMIndex(), diskComponent, filePath);
                            asterixFileProperties.initialize(filePath, fileSize, nodeId, isLSMComponentFile, lsnOffset, remainingFiles == 0);
                        } else {
                            asterixFileProperties.initialize(filePath, fileSize, nodeId, isLSMComponentFile, -1L, remainingFiles == 0);
                        }
                        requestBuffer = ReplicationProtocol.writeFileReplicationRequest(requestBuffer, asterixFileProperties, ReplicationRequestType.REPLICATE_FILE);
                        Iterator<Map.Entry<String, SocketChannel>> iterator = replicasSockets.entrySet().iterator();
                        while (iterator.hasNext()) {
                            Map.Entry<String, SocketChannel> entry = iterator.next();
                            //if the remote replica is not interested in this partition, skip it.
                            if (!replica2PartitionsMap.get(entry.getKey()).contains(jobPartitionId)) {
                                continue;
                            }
                            SocketChannel socketChannel = entry.getValue();
                            //transfer request header & file
                            try {
                                NetworkingUtil.transferBufferToChannel(socketChannel, requestBuffer);
                                NetworkingUtil.sendFile(fileChannel, socketChannel);
                                if (asterixFileProperties.requiresAck()) {
                                    ReplicationRequestType responseType = waitForResponse(socketChannel, responseBuffer);
                                    if (responseType != ReplicationRequestType.ACK) {
                                        throw new IOException("Could not receive ACK from replica " + entry.getKey());
                                    }
                                }
                            } catch (IOException e) {
                                handleReplicationFailure(socketChannel, e);
                                iterator.remove();
                            } finally {
                                requestBuffer.position(0);
                            }
                        }
                    }
                }
            } else if (job.getOperation() == ReplicationOperation.DELETE) {
                for (String filePath : job.getJobFiles()) {
                    remainingFiles--;
                    asterixFileProperties.initialize(filePath, -1, nodeId, isLSMComponentFile, -1L, remainingFiles == 0);
                    ReplicationProtocol.writeFileReplicationRequest(requestBuffer, asterixFileProperties, ReplicationRequestType.DELETE_FILE);
                    Iterator<Map.Entry<String, SocketChannel>> iterator = replicasSockets.entrySet().iterator();
                    while (iterator.hasNext()) {
                        Map.Entry<String, SocketChannel> entry = iterator.next();
                        //if the remote replica is not interested in this partition, skip it.
                        if (!replica2PartitionsMap.get(entry.getKey()).contains(jobPartitionId)) {
                            continue;
                        }
                        SocketChannel socketChannel = entry.getValue();
                        try {
                            sendRequest(replicasSockets, requestBuffer);
                            if (asterixFileProperties.requiresAck()) {
                                waitForResponse(socketChannel, responseBuffer);
                            }
                        } catch (IOException e) {
                            handleReplicationFailure(socketChannel, e);
                            iterator.remove();
                        } finally {
                            requestBuffer.position(0);
                        }
                    }
                }
            }
        } finally {
            //if sync, close sockets with replicas since they won't be reused
            if (job.getExecutionType() == ReplicationExecutionType.SYNC) {
                closeReplicaSockets(replicasSockets);
            }
        }
    } finally {
        exitReplicatedLSMComponent(job);
    }
}
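The REPLICATE branch above follows a simple wire pattern: write a request header into a ByteBuffer, send it over the replica's SocketChannel, then stream the file body from a FileChannel. The sketch below shows the same pattern using only standard JDK NIO; the FileSender class and its header layout are hypothetical and are not the AsterixDB ReplicationProtocol or NetworkingUtil.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.SocketChannel;
import java.nio.charset.StandardCharsets;

final class FileSender {

    static void sendHeaderThenFile(SocketChannel socket, String filePath) throws IOException {
        try (RandomAccessFile file = new RandomAccessFile(filePath, "r");
                FileChannel fileChannel = file.getChannel()) {
            long fileSize = fileChannel.size();
            // Hypothetical fixed-layout header: file size, then the UTF-8 path length and bytes.
            byte[] pathBytes = filePath.getBytes(StandardCharsets.UTF_8);
            ByteBuffer header = ByteBuffer.allocate(Long.BYTES + Integer.BYTES + pathBytes.length);
            header.putLong(fileSize).putInt(pathBytes.length).put(pathBytes).flip();
            while (header.hasRemaining()) {
                socket.write(header);
            }
            // Stream the file body; transferTo may send fewer bytes than requested, so loop.
            long position = 0;
            while (position < fileSize) {
                position += fileChannel.transferTo(position, fileSize - position, socket);
            }
        }
    }
}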
Use of org.apache.hyracks.storage.am.lsm.common.api.ILSMDiskComponent in project asterixdb by Apache.
The class AbstractLSMIndex, method getOperationalComponents.
@Override
public void getOperationalComponents(ILSMIndexOperationContext ctx) throws HyracksDataException {
    List<ILSMDiskComponent> immutableComponents = diskComponents;
    List<ILSMComponent> operationalComponents = ctx.getComponentHolder();
    int cmc = currentMutableComponentId.get();
    ctx.setCurrentMutableComponentId(cmc);
    operationalComponents.clear();
    switch (ctx.getOperation()) {
        case UPDATE:
        case PHYSICALDELETE:
        case FLUSH:
        case DELETE:
        case UPSERT:
            operationalComponents.add(memoryComponents.get(cmc));
            break;
        case INSERT:
            addOperationalMutableComponents(operationalComponents);
            operationalComponents.addAll(immutableComponents);
            break;
        case SEARCH:
            if (memoryComponentsAllocated) {
                addOperationalMutableComponents(operationalComponents);
            }
            if (filterManager != null) {
                for (ILSMComponent c : immutableComponents) {
                    if (c.getLSMComponentFilter().satisfy(((AbstractSearchPredicate) ctx.getSearchPredicate()).getMinFilterTuple(), ((AbstractSearchPredicate) ctx.getSearchPredicate()).getMaxFilterTuple(), ctx.getFilterCmp())) {
                        operationalComponents.add(c);
                    }
                }
            } else {
                operationalComponents.addAll(immutableComponents);
            }
            break;
        case MERGE:
            operationalComponents.addAll(ctx.getComponentsToBeMerged());
            break;
        case FULL_MERGE:
            operationalComponents.addAll(immutableComponents);
            break;
        case REPLICATE:
            operationalComponents.addAll(ctx.getComponentsToBeReplicated());
            break;
        default:
            throw new UnsupportedOperationException("Operation " + ctx.getOperation() + " not supported.");
    }
}
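For SEARCH, the filter check above prunes any disk component whose filter range cannot intersect the query's min/max filter tuples. The following self-contained sketch illustrates that pruning decision over plain long ranges; the ComponentPruner and ComponentRange types are hypothetical stand-ins, not Hyracks classes.

import java.util.ArrayList;
import java.util.List;

final class ComponentPruner {

    // Hypothetical stand-in for a disk component's filter over long keys.
    record ComponentRange(String name, long min, long max) {
    }

    // A null query bound is treated as unbounded, mirroring a missing min/max filter tuple.
    static List<ComponentRange> operationalFor(List<ComponentRange> diskComponents, Long queryMin, Long queryMax) {
        List<ComponentRange> result = new ArrayList<>();
        for (ComponentRange c : diskComponents) {
            boolean belowRange = queryMin != null && c.max() < queryMin;
            boolean aboveRange = queryMax != null && c.min() > queryMax;
            if (!belowRange && !aboveRange) {
                result.add(c); // the ranges intersect, so the component may hold matches
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<ComponentRange> components = List.of(new ComponentRange("c1", 0, 99), new ComponentRange("c2", 100, 199));
        System.out.println(operationalFor(components, 120L, 150L)); // only c2
    }
}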
Use of org.apache.hyracks.storage.am.lsm.common.api.ILSMDiskComponent in project asterixdb by Apache.
The class LSMHarness, method merge.
@Override
public void merge(ILSMIndexOperationContext ctx, ILSMIOOperation operation) throws HyracksDataException {
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("Started a merge operation for index: " + lsmIndex + " ...");
    }
    ILSMDiskComponent newComponent = null;
    try {
        newComponent = lsmIndex.merge(operation);
        operation.getCallback().afterOperation(LSMOperationType.MERGE, ctx.getComponentHolder(), newComponent);
        lsmIndex.markAsValid(newComponent);
    } catch (Throwable e) {
        e.printStackTrace();
        throw e;
    } finally {
        exitComponents(ctx, LSMOperationType.MERGE, newComponent, false);
        operation.getCallback().afterFinalize(LSMOperationType.MERGE, newComponent);
    }
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("Finished the merge operation for index: " + lsmIndex);
    }
}
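The structure above keeps the callback lifecycle consistent: afterOperation fires only when the merge succeeds, while exitComponents and afterFinalize always run, even if lsmIndex.merge throws. The sketch below isolates that try/finally discipline with hypothetical MergeCallback and MergeableIndex interfaces; it is not the Hyracks API.

interface MergeCallback {
    void afterOperation(Object newComponent);

    void afterFinalize(Object newComponent);
}

interface MergeableIndex {
    Object merge() throws Exception;
}

final class MergeRunner {

    static Object runMerge(MergeableIndex index, MergeCallback callback) throws Exception {
        Object newComponent = null;
        try {
            newComponent = index.merge();
            callback.afterOperation(newComponent); // success path only
            return newComponent;
        } finally {
            // Always signal completion so waiters and bookkeeping are released,
            // even when merge() threw and newComponent is still null.
            callback.afterFinalize(newComponent);
        }
    }
}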
Use of org.apache.hyracks.storage.am.lsm.common.api.ILSMDiskComponent in project asterixdb by Apache.
The class PrefixMergePolicy, method getMergableImmutableComponentCount.
/**
 * This method returns the number of mergable components among the given list of immutable
 * components, which are ordered from the latest component to older ones. The caller needs
 * to make sure the list is in this order.
 *
 * @param immutableComponents
 * @return the number of mergable components
 */
private int getMergableImmutableComponentCount(List<ILSMDiskComponent> immutableComponents) {
    int count = 0;
    for (ILSMComponent c : immutableComponents) {
        long componentSize = ((ILSMDiskComponent) c).getComponentSize();
        //stop when the first non-mergable component is found.
        if (c.getState() != ComponentState.READABLE_UNWRITABLE || componentSize > maxMergableComponentSize) {
            break;
        }
        ++count;
    }
    return count;
}
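In other words, the policy counts the newest-to-oldest prefix of components that are both in the READABLE_UNWRITABLE state and no larger than maxMergableComponentSize. The sketch below reproduces that counting rule with hypothetical Component and State types over plain sizes; it is not the Hyracks PrefixMergePolicy.

import java.util.List;

final class PrefixCount {

    enum State {
        READABLE_UNWRITABLE, READABLE_WRITABLE, UNREADABLE_UNWRITABLE
    }

    // Hypothetical stand-in for a disk component: its state and on-disk size in bytes.
    record Component(State state, long sizeBytes) {
    }

    static int mergableCount(List<Component> newestFirst, long maxMergableComponentSize) {
        int count = 0;
        for (Component c : newestFirst) {
            if (c.state() != State.READABLE_UNWRITABLE || c.sizeBytes() > maxMergableComponentSize) {
                break; // stop at the first non-mergable component
            }
            count++;
        }
        return count;
    }

    public static void main(String[] args) {
        List<Component> components = List.of(new Component(State.READABLE_UNWRITABLE, 10),
                new Component(State.READABLE_UNWRITABLE, 20), new Component(State.READABLE_UNWRITABLE, 500));
        System.out.println(mergableCount(components, 100)); // 2
    }
}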