Example 31 with ITypeTraits

Use of org.apache.hyracks.api.dataflow.value.ITypeTraits in project asterixdb by apache.

From the class RTreeTestContext, method create:

public static RTreeTestContext create(IBufferCache bufferCache, IFileMapProvider fileMapProvider, FileReference file, ISerializerDeserializer[] fieldSerdes, IPrimitiveValueProviderFactory[] valueProviderFactories, int numKeyFields, RTreePolicyType rtreePolicyType, IPageManagerFactory pageManagerFactory) throws Exception {
    // Derive per-field type traits and key comparators from the serdes; the first numKeyFields fields form the R-tree key.
    ITypeTraits[] typeTraits = SerdeUtils.serdesToTypeTraits(fieldSerdes);
    IBinaryComparatorFactory[] cmpFactories = SerdeUtils.serdesToComparatorFactories(fieldSerdes, numKeyFields);
    RTree rtree = RTreeUtils.createRTree(bufferCache, fileMapProvider, typeTraits, valueProviderFactories, cmpFactories, rtreePolicyType, file, false, pageManagerFactory);
    RTreeTestContext testCtx = new RTreeTestContext(fieldSerdes, rtree);
    return testCtx;
}
Also used: AbstractRTreeTestContext (org.apache.hyracks.storage.am.rtree.AbstractRTreeTestContext), ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits), IBinaryComparatorFactory (org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory), RTree (org.apache.hyracks.storage.am.rtree.impls.RTree)
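
For orientation, here is a hedged sketch of how this helper might be called. It is not from the original test suite: the "harness" accessors are hypothetical placeholders for whatever fixture the real tests use, while SerdeUtils, RTreeUtils.createPrimitiveValueProviderFactories, IntegerSerializerDeserializer, and RTreePolicyType are Hyracks utilities.

// A minimal usage sketch, assuming a test fixture ("harness") that supplies the
// buffer cache, file map provider, file reference, and page manager factory.
ISerializerDeserializer[] fieldSerdes = new ISerializerDeserializer[] {
        IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE,
        IntegerSerializerDeserializer.INSTANCE, IntegerSerializerDeserializer.INSTANCE,
        IntegerSerializerDeserializer.INSTANCE };
int numKeyFields = 4; // a 2D key: (xmin, ymin, xmax, ymax); the fifth field is the payload
IPrimitiveValueProviderFactory[] valueProviderFactories =
        RTreeUtils.createPrimitiveValueProviderFactories(numKeyFields, IntegerPointable.FACTORY);
RTreeTestContext testCtx = RTreeTestContext.create(harness.getBufferCache(), harness.getFileMapProvider(),
        harness.getFileReference(), fieldSerdes, valueProviderFactories, numKeyFields,
        RTreePolicyType.RTREE, harness.getPageManagerFactory());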

Example 32 with ITypeTraits

Use of org.apache.hyracks.api.dataflow.value.ITypeTraits in project asterixdb by apache.

From the class FixedSizeFrameTupleTest, method singleFieldTest:

/**
 * Verifies the correct behavior of the FixedSizeFrameTuple class.
 * Frames containing FixedSizeFrameTuples require neither tuple slots nor
 * field slots. The test inserts generated data into a frame until the
 * frame is full, and then verifies the frame's contents.
 */
@Test
public void singleFieldTest() throws Exception {
    ByteBuffer buffer = ByteBuffer.allocate(FRAME_SIZE);
    ITypeTraits[] fields = new ITypeTraits[1];
    fields[0] = IntegerPointable.TYPE_TRAITS;
    FixedSizeFrameTupleAppender ftapp = new FixedSizeFrameTupleAppender(FRAME_SIZE, fields);
    FixedSizeFrameTupleAccessor ftacc = new FixedSizeFrameTupleAccessor(FRAME_SIZE, fields);
    boolean frameHasSpace = true;
    ArrayList<Integer> check = new ArrayList<Integer>();
    ftapp.reset(buffer, true);
    while (frameHasSpace) {
        // rnd is a java.util.Random instance declared elsewhere in the test class.
        int val = rnd.nextInt();
        frameHasSpace = ftapp.append(val);
        if (frameHasSpace) {
            check.add(val);
            ftapp.incrementTupleCount(1);
        }
    }
    ftacc.reset(buffer);
    for (int i = 0; i < ftacc.getTupleCount(); i++) {
        int val = IntegerPointable.getInteger(ftacc.getBuffer().array(), ftacc.getTupleStartOffset(i));
        Assert.assertEquals(check.get(i).intValue(), val);
    }
}
Also used: ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits), ArrayList (java.util.ArrayList), FixedSizeFrameTupleAccessor (org.apache.hyracks.storage.am.lsm.invertedindex.ondisk.FixedSizeFrameTupleAccessor), ByteBuffer (java.nio.ByteBuffer), FixedSizeFrameTupleAppender (org.apache.hyracks.storage.am.lsm.invertedindex.ondisk.FixedSizeFrameTupleAppender), Test (org.junit.Test)
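
The fixed-size layout works because every field's ITypeTraits reports a fixed length, letting the appender compute tuple offsets by plain multiplication instead of writing slot tables. A minimal sketch of that contract, assuming ITypeTraits exposes isFixedLength() and getFixedLength(); the anonymous class below is illustrative, equivalent in spirit to IntegerPointable.TYPE_TRAITS, not code from the project:

// Hand-rolled trait for a 4-byte integer field; illustrative only.
ITypeTraits intTraits = new ITypeTraits() {
    private static final long serialVersionUID = 1L;

    @Override
    public boolean isFixedLength() {
        return true; // every value occupies exactly getFixedLength() bytes
    }

    @Override
    public int getFixedLength() {
        return 4; // width of a Java int
    }
};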

Example 33 with ITypeTraits

Use of org.apache.hyracks.api.dataflow.value.ITypeTraits in project asterixdb by apache.

From the class OnDiskInvertedIndexLifecycleTest, method setup:

@Override
public void setup() throws Exception {
    harness.setUp();
    ITypeTraits[] tokenTypeTraits = new ITypeTraits[] { UTF8StringPointable.TYPE_TRAITS };
    IBinaryComparatorFactory[] tokenCmpFactories = new IBinaryComparatorFactory[] { PointableBinaryComparatorFactory.of(UTF8StringPointable.FACTORY) };
    ITypeTraits[] invListTypeTraits = new ITypeTraits[] { IntegerPointable.TYPE_TRAITS };
    IBinaryComparatorFactory[] invListCmpFactories = new IBinaryComparatorFactory[] { PointableBinaryComparatorFactory.of(IntegerPointable.FACTORY) };
    IInvertedListBuilder invListBuilder = new FixedSizeElementInvertedListBuilder(invListTypeTraits);
    FileReference btreeFile = harness.getIOManager().resolveAbsolutePath(harness.getInvListsFileRef().getFile().getAbsolutePath() + "_btree");
    index = new OnDiskInvertedIndex(harness.getDiskBufferCache(), harness.getDiskFileMapProvider(), invListBuilder, invListTypeTraits, invListCmpFactories, tokenTypeTraits, tokenCmpFactories, harness.getInvListsFileRef(), btreeFile, harness.getMetadataPageManagerFactory());
}
Also used: ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits), IBinaryComparatorFactory (org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory), IInvertedListBuilder (org.apache.hyracks.storage.am.lsm.invertedindex.api.IInvertedListBuilder), FileReference (org.apache.hyracks.api.io.FileReference)
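
Lifecycle tests like this one typically drive the freshly constructed index through its create/activate/deactivate/destroy states. A hedged sketch of that sequence, assuming the standard Hyracks IIndex lifecycle methods (the comments describe the usual semantics, not assertions from this particular test):

index.create();     // allocate the on-disk artifacts (here: the inverted-lists file plus the "_btree" file)
index.activate();   // open the index so accessors can be created against it
// ... exercise the index ...
index.deactivate(); // close the index and release buffer cache resources
index.destroy();    // delete the on-disk artifacts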

Example 34 with ITypeTraits

Use of org.apache.hyracks.api.dataflow.value.ITypeTraits in project asterixdb by apache.

From the class QueryTranslator, method handleCreateIndexStatement:

protected void handleCreateIndexStatement(MetadataProvider metadataProvider, Statement stmt, IHyracksClientConnection hcc) throws Exception {
    ProgressState progress = ProgressState.NO_PROGRESS;
    CreateIndexStatement stmtCreateIndex = (CreateIndexStatement) stmt;
    String dataverseName = getActiveDataverse(stmtCreateIndex.getDataverseName());
    String datasetName = stmtCreateIndex.getDatasetName().getValue();
    List<Integer> keySourceIndicators = stmtCreateIndex.getFieldSourceIndicators();
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    MetadataLockManager.INSTANCE.createIndexBegin(metadataProvider.getLocks(), dataverseName, dataverseName + "." + datasetName);
    String indexName = null;
    JobSpecification spec = null;
    Dataset ds = null;
    // For external datasets
    List<ExternalFile> externalFilesSnapshot = null;
    boolean firstExternalDatasetIndex = false;
    boolean filesIndexReplicated = false;
    Index filesIndex = null;
    boolean datasetLocked = false;
    Index index = null;
    try {
        ds = metadataProvider.findDataset(dataverseName, datasetName);
        if (ds == null) {
            throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
        }
        indexName = stmtCreateIndex.getIndexName().getValue();
        index = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, indexName);
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(), ds.getItemTypeDataverseName(), ds.getItemTypeName());
        ARecordType aRecordType = (ARecordType) dt.getDatatype();
        ARecordType metaRecordType = null;
        if (ds.hasMetaPart()) {
            Datatype metaDt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(), ds.getMetaItemTypeDataverseName(), ds.getMetaItemTypeName());
            metaRecordType = (ARecordType) metaDt.getDatatype();
        }
        List<List<String>> indexFields = new ArrayList<>();
        List<IAType> indexFieldTypes = new ArrayList<>();
        int keyIndex = 0;
        for (Pair<List<String>, TypeExpression> fieldExpr : stmtCreateIndex.getFieldExprs()) {
            IAType fieldType = null;
            ARecordType subType = KeyFieldTypeUtil.chooseSource(keySourceIndicators, keyIndex, aRecordType, metaRecordType);
            boolean isOpen = subType.isOpen();
            int i = 0;
            if (fieldExpr.first.size() > 1 && !isOpen) {
                while (i < fieldExpr.first.size() - 1 && !isOpen) {
                    subType = (ARecordType) subType.getFieldType(fieldExpr.first.get(i));
                    i++;
                    isOpen = subType.isOpen();
                }
            }
            if (fieldExpr.second == null) {
                fieldType = subType.getSubFieldType(fieldExpr.first.subList(i, fieldExpr.first.size()));
            } else {
                if (!stmtCreateIndex.isEnforced()) {
                    throw new AlgebricksException("Cannot create typed index on \"" + fieldExpr.first + "\" field without enforcing it's type");
                }
                if (!isOpen) {
                    throw new AlgebricksException("Typed index on \"" + fieldExpr.first + "\" field could be created only for open datatype");
                }
                if (stmtCreateIndex.hasMetaField()) {
                    throw new AlgebricksException("Typed open index can only be created on the record part");
                }
                Map<TypeSignature, IAType> typeMap = TypeTranslator.computeTypes(mdTxnCtx, fieldExpr.second, indexName, dataverseName);
                TypeSignature typeSignature = new TypeSignature(dataverseName, indexName);
                fieldType = typeMap.get(typeSignature);
            }
            if (fieldType == null) {
                throw new AlgebricksException("Unknown type " + (fieldExpr.second == null ? fieldExpr.first : fieldExpr.second));
            }
            indexFields.add(fieldExpr.first);
            indexFieldTypes.add(fieldType);
            ++keyIndex;
        }
        ValidateUtil.validateKeyFields(aRecordType, metaRecordType, indexFields, keySourceIndicators, indexFieldTypes, stmtCreateIndex.getIndexType());
        if (index != null) {
            if (stmtCreateIndex.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("An index with this name " + indexName + " already exists.");
            }
        }
        // For keyword and n-gram inverted indexes, verify that every primary key field is fixed-length; otherwise emit an error message and stop.
        if (stmtCreateIndex.getIndexType() == IndexType.SINGLE_PARTITION_WORD_INVIX || stmtCreateIndex.getIndexType() == IndexType.SINGLE_PARTITION_NGRAM_INVIX || stmtCreateIndex.getIndexType() == IndexType.LENGTH_PARTITIONED_WORD_INVIX || stmtCreateIndex.getIndexType() == IndexType.LENGTH_PARTITIONED_NGRAM_INVIX) {
            List<List<String>> partitioningKeys = ds.getPrimaryKeys();
            for (List<String> partitioningKey : partitioningKeys) {
                IAType keyType = aRecordType.getSubFieldType(partitioningKey);
                ITypeTraits typeTrait = TypeTraitProvider.INSTANCE.getTypeTrait(keyType);
                // If the primary key field is not of fixed length
                if (typeTrait.getFixedLength() < 0) {
                    throw new AlgebricksException("The keyword or ngram index -" + indexName + " cannot be created on the dataset -" + datasetName + " due to its variable-length primary key field - " + partitioningKey);
                }
            }
        }
        if (ds.getDatasetType() == DatasetType.INTERNAL) {
            validateIfResourceIsActiveInFeed(ds);
        } else {
            // Check if the dataset is indexible
            if (!ExternalIndexingOperations.isIndexible((ExternalDatasetDetails) ds.getDatasetDetails())) {
                throw new AlgebricksException("dataset using " + ((ExternalDatasetDetails) ds.getDatasetDetails()).getAdapter() + " Adapter can't be indexed");
            }
            // Check if the name of the index is valid
            if (!ExternalIndexingOperations.isValidIndexName(datasetName, indexName)) {
                throw new AlgebricksException("external dataset index name is invalid");
            }
            // Check if the files index exist
            filesIndex = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, IndexingConstants.getFilesIndexName(datasetName));
            firstExternalDatasetIndex = filesIndex == null;
            // Lock external dataset
            ExternalDatasetsRegistry.INSTANCE.buildIndexBegin(ds, firstExternalDatasetIndex);
            datasetLocked = true;
            if (firstExternalDatasetIndex) {
                // Verify that no one has created an index before we acquire the lock
                filesIndex = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, IndexingConstants.getFilesIndexName(datasetName));
                if (filesIndex != null) {
                    ExternalDatasetsRegistry.INSTANCE.buildIndexEnd(ds, firstExternalDatasetIndex);
                    firstExternalDatasetIndex = false;
                    ExternalDatasetsRegistry.INSTANCE.buildIndexBegin(ds, firstExternalDatasetIndex);
                }
            }
            if (firstExternalDatasetIndex) {
                // Get snapshot from External File System
                externalFilesSnapshot = ExternalIndexingOperations.getSnapshotFromExternalFileSystem(ds);
                // Add an entry for the files index
                filesIndex = new Index(dataverseName, datasetName, IndexingConstants.getFilesIndexName(datasetName), IndexType.BTREE, ExternalIndexingOperations.FILE_INDEX_FIELD_NAMES, null, ExternalIndexingOperations.FILE_INDEX_FIELD_TYPES, false, false, MetadataUtil.PENDING_ADD_OP);
                MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), filesIndex);
                // Add files to the external files index
                for (ExternalFile file : externalFilesSnapshot) {
                    MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
                }
                // This is the first index for the external dataset, replicate the files index
                spec = ExternalIndexingOperations.buildFilesIndexCreateJobSpec(ds, externalFilesSnapshot, metadataProvider);
                if (spec == null) {
                    throw new CompilationException("Failed to create job spec for replicating Files Index For external dataset");
                }
                filesIndexReplicated = true;
                JobUtils.runJob(hcc, spec, true);
            }
        }
        // check whether there exists another enforced index on the same field
        if (stmtCreateIndex.isEnforced()) {
            List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
            for (Index existingIndex : indexes) {
                if (existingIndex.getKeyFieldNames().equals(indexFields) && !existingIndex.getKeyFieldTypes().equals(indexFieldTypes) && existingIndex.isEnforcingKeyFileds()) {
                    throw new CompilationException("Cannot create index " + indexName + " , enforced index " + existingIndex.getIndexName() + " on field \"" + StringUtils.join(indexFields, ',') + "\" is already defined with type \"" + existingIndex.getKeyFieldTypes() + "\"");
                }
            }
        }
        // #. add a new index with PendingAddOp
        index = new Index(dataverseName, datasetName, indexName, stmtCreateIndex.getIndexType(), indexFields, keySourceIndicators, indexFieldTypes, stmtCreateIndex.getGramLength(), stmtCreateIndex.isEnforced(), false, MetadataUtil.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), index);
        // #. prepare to create the index artifact in NC.
        spec = IndexUtil.buildSecondaryIndexCreationJobSpec(ds, index, metadataProvider);
        if (spec == null) {
            throw new CompilationException("Failed to create job spec for creating index '" + stmtCreateIndex.getDatasetName() + "." + stmtCreateIndex.getIndexName() + "'");
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
        // #. create the index artifact in NC.
        JobUtils.runJob(hcc, spec, true);
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        // #. load data into the index in NC.
        spec = IndexUtil.buildSecondaryIndexLoadingJobSpec(ds, index, metadataProvider);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
        bActiveTxn = false;
        JobUtils.runJob(hcc, spec, true);
        // #. begin new metadataTxn
        mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
        bActiveTxn = true;
        metadataProvider.setMetadataTxnContext(mdTxnCtx);
        // #. add another new index with PendingNoOp after deleting the index with PendingAddOp
        MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, indexName);
        index.setPendingOp(MetadataUtil.PENDING_NO_OP);
        MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), index);
        // For the first external dataset index, also replace the files index entry added with PendingAddOp by one with PendingNoOp
        if (firstExternalDatasetIndex) {
            MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, filesIndex.getIndexName());
            filesIndex.setPendingOp(MetadataUtil.PENDING_NO_OP);
            MetadataManager.INSTANCE.addIndex(metadataProvider.getMetadataTxnContext(), filesIndex);
            // update transaction timestamp
            ((ExternalDatasetDetails) ds.getDatasetDetails()).setRefreshTimestamp(new Date());
            MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        // If files index was replicated for external dataset, it should be cleaned up on NC side
        if (filesIndexReplicated) {
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            try {
                JobSpecification jobSpec = ExternalIndexingOperations.buildDropFilesIndexJobSpec(metadataProvider, ds);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                JobUtils.runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }
        }
        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // #. execute compensation operations
            // remove the index in NC
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                JobSpecification jobSpec = IndexUtil.buildDropIndexJobSpec(index, metadataProvider, ds);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                JobUtils.runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }
            if (firstExternalDatasetIndex) {
                mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
                metadataProvider.setMetadataTxnContext(mdTxnCtx);
                try {
                    // Drop External Files from metadata
                    MetadataManager.INSTANCE.dropDatasetExternalFiles(mdTxnCtx, ds);
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                } catch (Exception e2) {
                    e.addSuppressed(e2);
                    abort(e, e2, mdTxnCtx);
                    throw new IllegalStateException("System is inconsistent state: pending files for(" + dataverseName + "." + datasetName + ") couldn't be removed from the metadata", e);
                }
                mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
                metadataProvider.setMetadataTxnContext(mdTxnCtx);
                try {
                    // Drop the files index from metadata
                    MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, IndexingConstants.getFilesIndexName(datasetName));
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                } catch (Exception e2) {
                    e.addSuppressed(e2);
                    abort(e, e2, mdTxnCtx);
                    throw new IllegalStateException("System is inconsistent state: pending index(" + dataverseName + "." + datasetName + "." + IndexingConstants.getFilesIndexName(datasetName) + ") couldn't be removed from the metadata", e);
                }
            }
            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, indexName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is in inconsistent state: pending index(" + dataverseName + "." + datasetName + "." + indexName + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
        if (datasetLocked) {
            ExternalDatasetsRegistry.INSTANCE.buildIndexEnd(ds, firstExternalDatasetIndex);
        }
    }
}
Also used: ProgressState (org.apache.asterix.common.utils.JobUtils.ProgressState), ArrayList (java.util.ArrayList), MetadataTransactionContext (org.apache.asterix.metadata.MetadataTransactionContext), Index (org.apache.asterix.metadata.entities.Index), Datatype (org.apache.asterix.metadata.entities.Datatype), TypeSignature (org.apache.asterix.om.types.TypeSignature), ExternalDatasetDetails (org.apache.asterix.metadata.entities.ExternalDatasetDetails), List (java.util.List), JobSpecification (org.apache.hyracks.api.job.JobSpecification), CompilationException (org.apache.asterix.common.exceptions.CompilationException), ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits), IHyracksDataset (org.apache.hyracks.api.dataset.IHyracksDataset), IDataset (org.apache.asterix.common.metadata.IDataset), Dataset (org.apache.asterix.metadata.entities.Dataset), TypeExpression (org.apache.asterix.lang.common.expression.TypeExpression), AlgebricksException (org.apache.hyracks.algebricks.common.exceptions.AlgebricksException), CreateIndexStatement (org.apache.asterix.lang.common.statement.CreateIndexStatement), ExternalFile (org.apache.asterix.external.indexing.ExternalFile), DatasetNodegroupCardinalityHint (org.apache.asterix.metadata.dataset.hints.DatasetHints.DatasetNodegroupCardinalityHint), AlgebricksAbsolutePartitionConstraint (org.apache.hyracks.algebricks.common.constraints.AlgebricksAbsolutePartitionConstraint), Date (java.util.Date), ACIDException (org.apache.asterix.common.exceptions.ACIDException), MetadataException (org.apache.asterix.metadata.MetadataException), HyracksDataException (org.apache.hyracks.api.exceptions.HyracksDataException), IOException (java.io.IOException), RemoteException (java.rmi.RemoteException), AsterixException (org.apache.asterix.common.exceptions.AsterixException), ARecordType (org.apache.asterix.om.types.ARecordType), IAType (org.apache.asterix.om.types.IAType)
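
Stripped of the external-dataset and compensation branches, the method above follows the metadata pending-operation protocol: record the index with PENDING_ADD_OP in one metadata transaction, run the Hyracks jobs outside any metadata transaction, then swap the entry to PENDING_NO_OP in a final transaction. A schematic sketch of that skeleton, using only calls that appear above; createSpec and loadSpec are hypothetical names standing in for the job specifications built by IndexUtil:

// Schematic only; error handling, locking, and the external-dataset path are omitted.
MetadataTransactionContext txn = MetadataManager.INSTANCE.beginTransaction();
MetadataManager.INSTANCE.addIndex(txn, index); // recorded with MetadataUtil.PENDING_ADD_OP
MetadataManager.INSTANCE.commitTransaction(txn);

JobUtils.runJob(hcc, createSpec, true); // create the index artifact on the NCs
JobUtils.runJob(hcc, loadSpec, true);   // bulk-load it from the dataset

txn = MetadataManager.INSTANCE.beginTransaction();
MetadataManager.INSTANCE.dropIndex(txn, dataverseName, datasetName, indexName);
index.setPendingOp(MetadataUtil.PENDING_NO_OP); // replace the pending entry with a committed one
MetadataManager.INSTANCE.addIndex(txn, index);
MetadataManager.INSTANCE.commitTransaction(txn);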

Example 35 with ITypeTraits

Use of org.apache.hyracks.api.dataflow.value.ITypeTraits in project asterixdb by apache.

From the class SecondaryRTreeOperationsHelper, method setSecondaryRecDescAndComparators:

@Override
protected void setSecondaryRecDescAndComparators() throws AlgebricksException {
    List<List<String>> secondaryKeyFields = index.getKeyFieldNames();
    int numSecondaryKeys = secondaryKeyFields.size();
    boolean isEnforcingKeyTypes = index.isEnforcingKeyFileds();
    if (numSecondaryKeys != 1) {
        throw new AsterixException("Cannot use " + numSecondaryKeys + " fields as a key for the R-tree index. " + "There can be only one field as a key for the R-tree index.");
    }
    Pair<IAType, Boolean> spatialTypePair = Index.getNonNullableOpenFieldType(index.getKeyFieldTypes().get(0), secondaryKeyFields.get(0), itemType);
    IAType spatialType = spatialTypePair.first;
    anySecondaryKeyIsNullable = spatialTypePair.second;
    if (spatialType == null) {
        throw new AsterixException("Could not find field " + secondaryKeyFields.get(0) + " in the schema.");
    }
    isPointMBR = spatialType.getTypeTag() == ATypeTag.POINT || spatialType.getTypeTag() == ATypeTag.POINT3D;
    int numDimensions = NonTaggedFormatUtil.getNumDimensions(spatialType.getTypeTag());
    numNestedSecondaryKeyFields = numDimensions * 2;
    int recordColumn = dataset.getDatasetType() == DatasetType.INTERNAL ? numPrimaryKeys : 0;
    secondaryFieldAccessEvalFactories = metadataProvider.getFormat().createMBRFactory(isEnforcingKeyTypes ? enforcedItemType : itemType, secondaryKeyFields.get(0), recordColumn, numDimensions, filterFieldName, isPointMBR);
    secondaryComparatorFactories = new IBinaryComparatorFactory[numNestedSecondaryKeyFields];
    valueProviderFactories = new IPrimitiveValueProviderFactory[numNestedSecondaryKeyFields];
    ISerializerDeserializer[] secondaryRecFields = new ISerializerDeserializer[numPrimaryKeys + numNestedSecondaryKeyFields + numFilterFields];
    ISerializerDeserializer[] enforcedRecFields = new ISerializerDeserializer[1 + numPrimaryKeys + numFilterFields];
    secondaryTypeTraits = new ITypeTraits[numNestedSecondaryKeyFields + numPrimaryKeys];
    ITypeTraits[] enforcedTypeTraits = new ITypeTraits[1 + numPrimaryKeys];
    IAType nestedKeyType = NonTaggedFormatUtil.getNestedSpatialType(spatialType.getTypeTag());
    keyType = nestedKeyType.getTypeTag();
    for (int i = 0; i < numNestedSecondaryKeyFields; i++) {
        ISerializerDeserializer keySerde = SerializerDeserializerProvider.INSTANCE.getSerializerDeserializer(nestedKeyType);
        secondaryRecFields[i] = keySerde;
        secondaryComparatorFactories[i] = BinaryComparatorFactoryProvider.INSTANCE.getBinaryComparatorFactory(nestedKeyType, true);
        secondaryTypeTraits[i] = TypeTraitProvider.INSTANCE.getTypeTrait(nestedKeyType);
        valueProviderFactories[i] = metadataProvider.getStorageComponentProvider().getPrimitiveValueProviderFactory();
    }
    // Add serializers and comparators for primary index fields.
    if (dataset.getDatasetType() == DatasetType.INTERNAL) {
        for (int i = 0; i < numPrimaryKeys; i++) {
            secondaryRecFields[numNestedSecondaryKeyFields + i] = primaryRecDesc.getFields()[i];
            secondaryTypeTraits[numNestedSecondaryKeyFields + i] = primaryRecDesc.getTypeTraits()[i];
            enforcedRecFields[i] = primaryRecDesc.getFields()[i];
            enforcedTypeTraits[i] = primaryRecDesc.getTypeTraits()[i];
        }
    } else {
        for (int i = 0; i < numPrimaryKeys; i++) {
            secondaryRecFields[numNestedSecondaryKeyFields + i] = IndexingConstants.getSerializerDeserializer(i);
            secondaryTypeTraits[numNestedSecondaryKeyFields + i] = IndexingConstants.getTypeTraits(i);
            enforcedRecFields[i] = IndexingConstants.getSerializerDeserializer(i);
            enforcedTypeTraits[i] = IndexingConstants.getTypeTraits(i);
        }
    }
    enforcedRecFields[numPrimaryKeys] = SerializerDeserializerProvider.INSTANCE.getSerializerDeserializer(itemType);
    enforcedRecDesc = new RecordDescriptor(enforcedRecFields, enforcedTypeTraits);
    if (numFilterFields > 0) {
        rtreeFields = new int[numNestedSecondaryKeyFields + numPrimaryKeys];
        for (int i = 0; i < rtreeFields.length; i++) {
            rtreeFields[i] = i;
        }
        Pair<IAType, Boolean> typePair = Index.getNonNullableKeyFieldType(filterFieldName, itemType);
        IAType type = typePair.first;
        ISerializerDeserializer serde = SerializerDeserializerProvider.INSTANCE.getSerializerDeserializer(type);
        secondaryRecFields[numPrimaryKeys + numNestedSecondaryKeyFields] = serde;
    }
    secondaryRecDesc = new RecordDescriptor(secondaryRecFields);
    primaryKeyFields = new int[numPrimaryKeys];
    for (int i = 0; i < primaryKeyFields.length; i++) {
        primaryKeyFields[i] = i + numNestedSecondaryKeyFields;
    }
    if (isPointMBR) {
        int numNestedSecondaryKeyFieldForPointMBR = numNestedSecondaryKeyFields / 2;
        ISerializerDeserializer[] recFieldsForPointMBR = new ISerializerDeserializer[numPrimaryKeys + numNestedSecondaryKeyFieldForPointMBR + numFilterFields];
        int idx = 0;
        for (int i = 0; i < numNestedSecondaryKeyFieldForPointMBR; i++) {
            recFieldsForPointMBR[idx++] = secondaryRecFields[i];
        }
        for (int i = 0; i < numPrimaryKeys + numFilterFields; i++) {
            recFieldsForPointMBR[idx++] = secondaryRecFields[numNestedSecondaryKeyFields + i];
        }
        secondaryRecDescForPointMBR = new RecordDescriptor(recFieldsForPointMBR);
    }
}
Also used: ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits), RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor), ISerializerDeserializer (org.apache.hyracks.api.dataflow.value.ISerializerDeserializer), AsterixException (org.apache.asterix.common.exceptions.AsterixException), List (java.util.List), IAType (org.apache.asterix.om.types.IAType)
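
The sizing logic above rests on one piece of arithmetic: an n-dimensional spatial key expands into 2*n nested keys (the low and high MBR coordinates per dimension), and the secondary record lays those out first, followed by the primary keys and any filter field. A small worked sketch of that layout, with values chosen purely for illustration:

// Layout arithmetic for the R-tree secondary record, mirroring the method above.
int numDimensions = 2;                               // e.g. an APoint or ARectangle key
int numNestedSecondaryKeyFields = numDimensions * 2; // MBR columns: xmin, ymin, xmax, ymax
int numPrimaryKeys = 1;
int numFilterFields = 0;
int secondaryRecordWidth = numNestedSecondaryKeyFields + numPrimaryKeys + numFilterFields; // 5 fields
// Primary keys follow the MBR columns, matching primaryKeyFields[i] = i + numNestedSecondaryKeyFields.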

Aggregations

ITypeTraits (org.apache.hyracks.api.dataflow.value.ITypeTraits): 67 uses
IBinaryComparatorFactory (org.apache.hyracks.api.dataflow.value.IBinaryComparatorFactory): 45 uses
ISerializerDeserializer (org.apache.hyracks.api.dataflow.value.ISerializerDeserializer): 34 uses
Test (org.junit.Test): 22 uses
ArrayTupleBuilder (org.apache.hyracks.dataflow.common.comm.io.ArrayTupleBuilder): 21 uses
ArrayTupleReference (org.apache.hyracks.dataflow.common.comm.io.ArrayTupleReference): 18 uses
ITreeIndex (org.apache.hyracks.storage.am.common.api.ITreeIndex): 17 uses
HyracksDataException (org.apache.hyracks.api.exceptions.HyracksDataException): 16 uses
RecordDescriptor (org.apache.hyracks.api.dataflow.value.RecordDescriptor): 15 uses
IIndexAccessor (org.apache.hyracks.storage.common.IIndexAccessor): 15 uses
IAType (org.apache.asterix.om.types.IAType): 14 uses
UTF8StringSerializerDeserializer (org.apache.hyracks.dataflow.common.data.marshalling.UTF8StringSerializerDeserializer): 11 uses
IPrimitiveValueProviderFactory (org.apache.hyracks.storage.am.common.api.IPrimitiveValueProviderFactory): 10 uses
List (java.util.List): 9 uses
CompilationException (org.apache.asterix.common.exceptions.CompilationException): 9 uses
ARecordType (org.apache.asterix.om.types.ARecordType): 8 uses
ITypeTraitProvider (org.apache.hyracks.algebricks.data.ITypeTraitProvider): 8 uses
IStorageManager (org.apache.hyracks.storage.common.IStorageManager): 8 uses
IFileSplitProvider (org.apache.hyracks.dataflow.std.file.IFileSplitProvider): 7 uses
IIndexDataflowHelperFactory (org.apache.hyracks.storage.am.common.dataflow.IIndexDataflowHelperFactory): 7 uses