use of org.apache.asterix.metadata.entities.ExternalDatasetDetails in project asterixdb by apache.
the class QueryTranslator method handleExternalDatasetRefreshStatement.
protected void handleExternalDatasetRefreshStatement(MetadataProvider metadataProvider, Statement stmt, IHyracksClientConnection hcc) throws Exception {
RefreshExternalDatasetStatement stmtRefresh = (RefreshExternalDatasetStatement) stmt;
String dataverseName = getActiveDataverse(stmtRefresh.getDataverseName());
String datasetName = stmtRefresh.getDatasetName().getValue();
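// A refresh starts from the committed (clean) state; the flag below advances as the protocol moves through its phases.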
TransactionState transactionState = TransactionState.COMMIT;
MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
boolean bActiveTxn = true;
metadataProvider.setMetadataTxnContext(mdTxnCtx);
JobSpecification spec = null;
Dataset ds = null;
List<ExternalFile> metadataFiles = null;
List<ExternalFile> deletedFiles = null;
List<ExternalFile> addedFiles = null;
List<ExternalFile> appendedFiles = null;
List<Index> indexes = null;
Dataset transactionDataset = null;
boolean lockAcquired = false;
boolean success = false;
MetadataLockManager.INSTANCE.refreshDatasetBegin(metadataProvider.getLocks(), dataverseName, dataverseName + "." + datasetName);
try {
ds = metadataProvider.findDataset(dataverseName, datasetName);
// Does the dataset exist?
if (ds == null) {
throw new AlgebricksException("There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
}
// Is the dataset external?
if (ds.getDatasetType() != DatasetType.EXTERNAL) {
throw new AlgebricksException("dataset " + datasetName + " in dataverse " + dataverseName + " is not an external dataset");
}
// Does the dataset have indexes?
indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
if (indexes.isEmpty()) {
throw new AlgebricksException("External dataset " + datasetName + " in dataverse " + dataverseName + " doesn't have any index");
}
// Record transaction time
Date txnTime = new Date();
// Acquire the refresh lock on the dataset
ExternalDatasetsRegistry.INSTANCE.refreshBegin(ds);
lockAcquired = true;
// Get the files currently recorded in the metadata (the last snapshot)
metadataFiles = MetadataManager.INSTANCE.getDatasetExternalFiles(mdTxnCtx, ds);
deletedFiles = new ArrayList<>();
addedFiles = new ArrayList<>();
appendedFiles = new ArrayList<>();
// Compare the metadata snapshot with the external file system
if (ExternalIndexingOperations.isDatasetUptodate(ds, metadataFiles, addedFiles, deletedFiles, appendedFiles)) {
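// No changes detected: just bump the refresh timestamp and commit.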
((ExternalDatasetDetails) ds.getDatasetDetails()).setRefreshTimestamp(txnTime);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
// latch will be released in the finally clause
return;
}
// At this point we know the data in the external file system has changed; record a transaction in the metadata and start the update
transactionDataset = ExternalIndexingOperations.createTransactionDataset(ds);
// Remove the old dataset record and replace it with the transaction dataset record
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);
// Add delta files to the metadata
for (ExternalFile file : addedFiles) {
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
}
for (ExternalFile file : appendedFiles) {
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
}
for (ExternalFile file : deletedFiles) {
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
}
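// Each delta file carries a pending-op flag (ADD_OP, APPEND_OP, or DROP_OP) that the final commit phase below inspects.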
// Create the files index update job
spec = ExternalIndexingOperations.buildFilesIndexUpdateOp(ds, metadataFiles, addedFiles, appendedFiles, metadataProvider);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
transactionState = TransactionState.BEGIN;
// run the files update job
JobUtils.runJob(hcc, spec, true);
for (Index index : indexes) {
if (!ExternalIndexingOperations.isFileIndex(index)) {
spec = ExternalIndexingOperations.buildIndexUpdateOp(ds, index, metadataFiles, addedFiles, appendedFiles, metadataProvider);
// run the index update job
JobUtils.runJob(hcc, spec, true);
}
}
// All index updates have completed successfully; record the transaction state
spec = ExternalIndexingOperations.buildCommitJob(ds, indexes, metadataProvider);
// Acquire the write latch again: start a transaction and record the decision to commit
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
metadataProvider.setMetadataTxnContext(mdTxnCtx);
bActiveTxn = true;
((ExternalDatasetDetails) transactionDataset.getDatasetDetails()).setState(TransactionState.READY_TO_COMMIT);
((ExternalDatasetDetails) transactionDataset.getDatasetDetails()).setRefreshTimestamp(txnTime);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
transactionState = TransactionState.READY_TO_COMMIT;
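// The commit decision is now durable; if we fail past this point, global recovery rolls the refresh forward.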
// We don't release the latch since this job is expected to be quick
JobUtils.runJob(hcc, spec, true);
// Start a new metadata transaction to record the final state of the transaction
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
metadataProvider.setMetadataTxnContext(mdTxnCtx);
bActiveTxn = true;
for (ExternalFile file : metadataFiles) {
if (file.getPendingOp() == ExternalFilePendingOp.DROP_OP) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
} else if (file.getPendingOp() == ExternalFilePendingOp.NO_OP) {
Iterator<ExternalFile> iterator = appendedFiles.iterator();
while (iterator.hasNext()) {
ExternalFile appendedFile = iterator.next();
if (file.getFileName().equals(appendedFile.getFileName())) {
// delete existing file
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
// delete existing appended file
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, appendedFile);
// add the original file with appended information
appendedFile.setFileNumber(file.getFileNumber());
appendedFile.setPendingOp(ExternalFilePendingOp.NO_OP);
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, appendedFile);
iterator.remove();
}
}
}
}
// remove the deleted files delta
for (ExternalFile file : deletedFiles) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
}
// replace the pending added-file records with committed (NO_OP) records
for (ExternalFile file : addedFiles) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
file.setPendingOp(ExternalFilePendingOp.NO_OP);
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
}
// mark the transaction as complete
((ExternalDatasetDetails) transactionDataset.getDatasetDetails()).setState(TransactionState.COMMIT);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, transactionDataset);
// commit metadata transaction
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
success = true;
} catch (Exception e) {
if (bActiveTxn) {
abort(e, e, mdTxnCtx);
}
if (transactionState == TransactionState.READY_TO_COMMIT) {
throw new IllegalStateException("System is inconsistent state: commit of (" + dataverseName + "." + datasetName + ") refresh couldn't carry out the commit phase", e);
}
if (transactionState == TransactionState.COMMIT) {
// Nothing to do; everything should already be clean
throw e;
}
if (transactionState == TransactionState.BEGIN) {
// The transaction failed; clean the NCs by removing the transaction components
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
bActiveTxn = true;
metadataProvider.setMetadataTxnContext(mdTxnCtx);
spec = ExternalIndexingOperations.buildAbortOp(ds, indexes, metadataProvider);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
bActiveTxn = false;
try {
JobUtils.runJob(hcc, spec, true);
} catch (Exception e2) {
// This should never happen; surface it as an illegal state
e.addSuppressed(e2);
throw new IllegalStateException("System is in inconsistent state. Failed to abort refresh", e);
}
// return the state of the dataset to committed
try {
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
for (ExternalFile file : deletedFiles) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
}
for (ExternalFile file : addedFiles) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
}
for (ExternalFile file : appendedFiles) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
}
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, ds);
// commit metadata transaction
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
} catch (Exception e2) {
abort(e, e2, mdTxnCtx);
e.addSuppressed(e2);
throw new IllegalStateException("System is in inconsistent state. Failed to drop delta files", e);
}
}
} finally {
if (lockAcquired) {
ExternalDatasetsRegistry.INSTANCE.refreshEnd(ds, success);
}
metadataProvider.getLocks().unlock();
}
}
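The method above drives a three-phase refresh protocol: delta files are recorded and index-update jobs run under BEGIN, the decision to commit is made durable as READY_TO_COMMIT, and a final metadata transaction returns the dataset to COMMIT. The following is a minimal, self-contained sketch of that progression; the enum and class here are illustrative stand-ins, not AsterixDB types:
public class RefreshStateSketch {
    // Stand-in for AsterixDB's TransactionState; same three phases as above.
    enum State { COMMIT, BEGIN, READY_TO_COMMIT }

    public static void main(String[] args) {
        State state = State.COMMIT; // clean: no refresh in flight
        state = State.BEGIN; // deltas recorded, index-update jobs running
        state = State.READY_TO_COMMIT; // commit decision persisted before the commit job
        state = State.COMMIT; // deltas folded into the metadata, refresh done
        System.out.println("refresh finished in state " + state);
        // Failure under BEGIN is rolled backward (abort job, drop deltas);
        // failure under READY_TO_COMMIT must be rolled forward by recovery.
    }
}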
use of org.apache.asterix.metadata.entities.ExternalDatasetDetails in project asterixdb by apache.
the class GlobalRecoveryManager method recoverDataset.
private MetadataTransactionContext recoverDataset(ICcApplicationContext appCtx, MetadataTransactionContext mdTxnCtx, Dataverse dataverse) throws Exception {
if (!dataverse.getDataverseName().equals(MetadataConstants.METADATA_DATAVERSE_NAME)) {
MetadataProvider metadataProvider = new MetadataProvider(appCtx, dataverse, componentProvider);
try {
List<Dataset> datasets = MetadataManager.INSTANCE.getDataverseDatasets(mdTxnCtx, dataverse.getDataverseName());
for (Dataset dataset : datasets) {
if (dataset.getDatasetType() == DatasetType.EXTERNAL) {
// External dataset
// Get indexes
List<Index> indexes = MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName());
// Get the state of the dataset
ExternalDatasetDetails dsd = (ExternalDatasetDetails) dataset.getDatasetDetails();
TransactionState datasetState = dsd.getState();
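// The persisted state tells recovery which phase of a refresh the failure interrupted.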
if (!indexes.isEmpty()) {
if (datasetState == TransactionState.BEGIN) {
List<ExternalFile> files = MetadataManager.INSTANCE.getDatasetExternalFiles(mdTxnCtx, dataset);
// 1. delete all pending files
for (ExternalFile file : files) {
if (file.getPendingOp() != ExternalFilePendingOp.NO_OP) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
}
}
}
// 2. clean artifacts in NCs
metadataProvider.setMetadataTxnContext(mdTxnCtx);
JobSpecification jobSpec = ExternalIndexingOperations.buildAbortOp(dataset, indexes, metadataProvider);
executeHyracksJob(jobSpec);
// 3. correct the dataset state
((ExternalDatasetDetails) dataset.getDatasetDetails()).setState(TransactionState.COMMIT);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, dataset);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
} else if (datasetState == TransactionState.READY_TO_COMMIT) {
List<ExternalFile> files = MetadataManager.INSTANCE.getDatasetExternalFiles(mdTxnCtx, dataset);
// if ready to commit, roll forward
// 1. commit indexes in NCs
metadataProvider.setMetadataTxnContext(mdTxnCtx);
JobSpecification jobSpec = ExternalIndexingOperations.buildRecoverOp(dataset, indexes, metadataProvider);
executeHyracksJob(jobSpec);
// 2. add pending files in metadata
for (ExternalFile file : files) {
if (file.getPendingOp() == ExternalFilePendingOp.ADD_OP) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
file.setPendingOp(ExternalFilePendingOp.NO_OP);
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, file);
} else if (file.getPendingOp() == ExternalFilePendingOp.DROP_OP) {
// find original file
for (ExternalFile originalFile : files) {
if (originalFile.getFileName().equals(file.getFileName())) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, originalFile);
break;
}
}
} else if (file.getPendingOp() == ExternalFilePendingOp.APPEND_OP) {
// find original file
for (ExternalFile originalFile : files) {
if (originalFile.getFileName().equals(file.getFileName())) {
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, file);
MetadataManager.INSTANCE.dropExternalFile(mdTxnCtx, originalFile);
originalFile.setSize(file.getSize());
MetadataManager.INSTANCE.addExternalFile(mdTxnCtx, originalFile);
}
}
}
// 3. correct the dataset state
((ExternalDatasetDetails) dataset.getDatasetDetails()).setState(TransactionState.COMMIT);
MetadataManager.INSTANCE.updateDataset(mdTxnCtx, dataset);
MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
}
}
}
}
} finally {
metadataProvider.getLocks().unlock();
}
}
return mdTxnCtx;
}
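Recovery is the mirror image of the refresh protocol: a crash under BEGIN is presumed aborted and rolled backward (pending file records dropped, NC artifacts aborted), while a crash under READY_TO_COMMIT is rolled forward (indexes committed, pending file records applied). A compact sketch of that dispatch, again with stand-in types rather than the real AsterixDB classes:
public class RecoveryDispatchSketch {
    enum State { COMMIT, BEGIN, READY_TO_COMMIT }

    static String recover(State state) {
        switch (state) {
            case BEGIN:
                // presumed abort: drop pending file records, abort artifacts in the NCs
                return "rolled backward";
            case READY_TO_COMMIT:
                // the commit decision was durable: commit indexes, apply pending file records
                return "rolled forward";
            default:
                // COMMIT: no refresh was in flight
                return "nothing to do";
        }
    }

    public static void main(String[] args) {
        for (State s : State.values()) {
            System.out.println(s + " -> " + recover(s));
        }
    }
}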
use of org.apache.asterix.metadata.entities.ExternalDatasetDetails in project asterixdb by apache.
the class MetadataProvider method buildExternalDataLookupRuntime.
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildExternalDataLookupRuntime(JobSpecification jobSpec, Dataset dataset, int[] ridIndexes, boolean retainInput, IVariableTypeEnvironment typeEnv, IOperatorSchema opSchema, JobGenContext context, MetadataProvider metadataProvider, boolean retainMissing) throws AlgebricksException {
try {
// Get data type
ARecordType itemType = (ARecordType) MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(), dataset.getDataverseName(), dataset.getItemTypeName()).getDatatype();
ExternalDatasetDetails datasetDetails = (ExternalDatasetDetails) dataset.getDatasetDetails();
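// The external dataset details carry the adapter configuration used to read the underlying files.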
LookupAdapterFactory<?> adapterFactory = AdapterFactoryProvider.getLookupAdapterFactory(getApplicationContext().getServiceContext(), datasetDetails.getProperties(), itemType, ridIndexes, retainInput, retainMissing, context.getMissingWriterFactory());
String fileIndexName = IndexingConstants.getFilesIndexName(dataset.getDatasetName());
Pair<IFileSplitProvider, AlgebricksPartitionConstraint> spPc = metadataProvider.getSplitProviderAndConstraints(dataset, fileIndexName);
Index fileIndex = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataset.getDataverseName(), dataset.getDatasetName(), fileIndexName);
// Create the file index data flow helper
IIndexDataflowHelperFactory indexDataflowHelperFactory = new IndexDataflowHelperFactory(storaegComponentProvider.getStorageManager(), spPc.first);
// Create the output record descriptor for the files index
RecordDescriptor outRecDesc = JobGenHelper.mkRecordDescriptor(typeEnv, opSchema, context);
ISearchOperationCallbackFactory searchOpCallbackFactory = dataset.getSearchCallbackFactory(storaegComponentProvider, fileIndex, jobId, IndexOperation.SEARCH, null);
// Create the operator
ExternalLookupOperatorDescriptor op = new ExternalLookupOperatorDescriptor(jobSpec, adapterFactory, outRecDesc, indexDataflowHelperFactory, searchOpCallbackFactory, ExternalDatasetsRegistry.INSTANCE.getAndLockDatasetVersion(dataset, this));
return new Pair<>(op, spPc.second);
} catch (Exception e) {
throw new AlgebricksException(e);
}
}
use of org.apache.asterix.metadata.entities.ExternalDatasetDetails in project asterixdb by apache.
the class DatasetTupleTranslator method createDatasetFromARecord.
protected Dataset createDatasetFromARecord(ARecord datasetRecord) throws HyracksDataException {
String dataverseName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATAVERSENAME_FIELD_INDEX)).getStringValue();
String datasetName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETNAME_FIELD_INDEX)).getStringValue();
String typeName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATATYPENAME_FIELD_INDEX)).getStringValue();
String typeDataverseName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATATYPEDATAVERSENAME_FIELD_INDEX)).getStringValue();
DatasetType datasetType = DatasetType.valueOf(((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETTYPE_FIELD_INDEX)).getStringValue());
IDatasetDetails datasetDetails = null;
int datasetId = ((AInt32) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETID_FIELD_INDEX)).getIntegerValue();
int pendingOp = ((AInt32) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_PENDINGOP_FIELD_INDEX)).getIntegerValue();
String nodeGroupName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_GROUPNAME_FIELD_INDEX)).getStringValue();
String compactionPolicy = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_COMPACTION_POLICY_FIELD_INDEX)).getStringValue();
IACursor cursor = ((AOrderedList) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_COMPACTION_POLICY_PROPERTIES_FIELD_INDEX)).getCursor();
Map<String, String> compactionPolicyProperties = new LinkedHashMap<>();
String key;
String value;
while (cursor.next()) {
ARecord field = (ARecord) cursor.get();
key = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_NAME_FIELD_INDEX)).getStringValue();
value = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_VALUE_FIELD_INDEX)).getStringValue();
compactionPolicyProperties.put(key, value);
}
switch(datasetType) {
case INTERNAL:
{
ARecord datasetDetailsRecord = (ARecord) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_INTERNALDETAILS_FIELD_INDEX);
FileStructure fileStructure = FileStructure.valueOf(((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_FILESTRUCTURE_FIELD_INDEX)).getStringValue());
PartitioningStrategy partitioningStrategy = PartitioningStrategy.valueOf(((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_PARTITIONSTRATEGY_FIELD_INDEX)).getStringValue());
cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_PARTITIONKEY_FIELD_INDEX)).getCursor();
List<List<String>> partitioningKey = new ArrayList<>();
List<IAType> partitioningKeyType = new ArrayList<>();
AOrderedList fieldNameList;
while (cursor.next()) {
fieldNameList = (AOrderedList) cursor.get();
IACursor nestedFieldNameCursor = (fieldNameList.getCursor());
List<String> nestedFieldName = new ArrayList<>();
while (nestedFieldNameCursor.next()) {
nestedFieldName.add(((AString) nestedFieldNameCursor.get()).getStringValue());
}
partitioningKey.add(nestedFieldName);
partitioningKeyType.add(BuiltinType.ASTRING);
}
boolean autogenerated = ((ABoolean) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_AUTOGENERATED_FIELD_INDEX)).getBoolean();
// Check if there is a filter field.
List<String> filterField = null;
int filterFieldPos = datasetDetailsRecord.getType().getFieldIndex(InternalDatasetDetails.FILTER_FIELD_NAME);
if (filterFieldPos >= 0) {
filterField = new ArrayList<>();
cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(filterFieldPos)).getCursor();
while (cursor.next()) {
filterField.add(((AString) cursor.get()).getStringValue());
}
}
// Read the field-source-indicator field, if present.
List<Integer> keyFieldSourceIndicator = new ArrayList<>();
int keyFieldSourceIndicatorIndex = datasetDetailsRecord.getType().getFieldIndex(InternalDatasetDetails.KEY_FILD_SOURCE_INDICATOR_FIELD_NAME);
if (keyFieldSourceIndicatorIndex >= 0) {
cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(keyFieldSourceIndicatorIndex)).getCursor();
while (cursor.next()) {
keyFieldSourceIndicator.add((int) ((AInt8) cursor.get()).getByteValue());
}
} else {
for (int index = 0; index < partitioningKey.size(); ++index) {
keyFieldSourceIndicator.add(0);
}
}
// A temporary dataset only lives in the compiler, so the temp field is false.
// DatasetTupleTranslator always reads from the metadata node, so the temp flag should always be false.
datasetDetails = new InternalDatasetDetails(fileStructure, partitioningStrategy, partitioningKey, partitioningKey, keyFieldSourceIndicator, partitioningKeyType, autogenerated, filterField, false);
break;
}
case EXTERNAL:
ARecord datasetDetailsRecord = (ARecord) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_EXTERNALDETAILS_FIELD_INDEX);
String adapter = ((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_DATASOURCE_ADAPTER_FIELD_INDEX)).getStringValue();
cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_PROPERTIES_FIELD_INDEX)).getCursor();
Map<String, String> properties = new HashMap<>();
while (cursor.next()) {
ARecord field = (ARecord) cursor.get();
key = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_NAME_FIELD_INDEX)).getStringValue();
value = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_VALUE_FIELD_INDEX)).getStringValue();
properties.put(key, value);
}
// Timestamp
Date timestamp = new Date((((ADateTime) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_LAST_REFRESH_TIME_FIELD_INDEX))).getChrononTime());
// State
TransactionState state = TransactionState.values()[((AInt32) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_TRANSACTION_STATE_FIELD_INDEX)).getIntegerValue()];
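// Note: decoding the state by ordinal ties the stored metadata format to the enum's declaration order.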
datasetDetails = new ExternalDatasetDetails(adapter, properties, timestamp, state);
}
Map<String, String> hints = getDatasetHints(datasetRecord);
String metaTypeDataverseName = null;
String metaTypeName = null;
int metaTypeDataverseNameIndex = datasetRecord.getType().getFieldIndex(MetadataRecordTypes.FIELD_NAME_METADATA_DATAVERSE);
if (metaTypeDataverseNameIndex >= 0) {
metaTypeDataverseName = ((AString) datasetRecord.getValueByPos(metaTypeDataverseNameIndex)).getStringValue();
int metaTypeNameIndex = datasetRecord.getType().getFieldIndex(MetadataRecordTypes.FIELD_NAME_METATYPE_NAME);
metaTypeName = ((AString) datasetRecord.getValueByPos(metaTypeNameIndex)).getStringValue();
}
// Read the rebalance count if there is one.
int rebalanceCountIndex = datasetRecord.getType().getFieldIndex(REBALANCE_ID_FIELD_NAME);
long rebalanceCount = rebalanceCountIndex >= 0 ? ((AInt64) datasetRecord.getValueByPos(rebalanceCountIndex)).getLongValue() : 0;
return new Dataset(dataverseName, datasetName, typeDataverseName, typeName, metaTypeDataverseName, metaTypeName, nodeGroupName, compactionPolicy, compactionPolicyProperties, datasetDetails, hints, datasetType, datasetId, pendingOp, rebalanceCount);
}
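The while-loop that turns a list of { Name, Value } records into a map appears twice in this translator (compaction-policy properties and external-details properties). A helper along these lines could factor it out; this is a hypothetical refactoring, not part of DatasetTupleTranslator, built only from calls that already appear in the snippet (imports as in the translator itself):
private static Map<String, String> readPropertyMap(IACursor cursor) {
    // Each list element is a { Name, Value } record; collect the pairs in order.
    Map<String, String> properties = new LinkedHashMap<>();
    while (cursor.next()) {
        ARecord field = (ARecord) cursor.get();
        String key = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_NAME_FIELD_INDEX)).getStringValue();
        String value = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_VALUE_FIELD_INDEX)).getStringValue();
        properties.put(key, value);
    }
    return properties;
}
Both loops above would then reduce to single calls, e.g. compactionPolicyProperties = readPropertyMap(cursor).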
use of org.apache.asterix.metadata.entities.ExternalDatasetDetails in project asterixdb by apache.
the class DatasetDataSource method buildDatasourceScanRuntime.
@Override
public Pair<IOperatorDescriptor, AlgebricksPartitionConstraint> buildDatasourceScanRuntime(MetadataProvider metadataProvider, IDataSource<DataSourceId> dataSource, List<LogicalVariable> scanVariables, List<LogicalVariable> projectVariables, boolean projectPushed, List<LogicalVariable> minFilterVars, List<LogicalVariable> maxFilterVars, IOperatorSchema opSchema, IVariableTypeEnvironment typeEnv, JobGenContext context, JobSpecification jobSpec, Object implConfig) throws AlgebricksException {
switch(dataset.getDatasetType()) {
case EXTERNAL:
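// External datasets are scanned through their configured adapter rather than a primary index.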
Dataset externalDataset = ((DatasetDataSource) dataSource).getDataset();
String itemTypeName = externalDataset.getItemTypeName();
IAType itemType = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(), externalDataset.getItemTypeDataverseName(), itemTypeName).getDatatype();
ExternalDatasetDetails edd = (ExternalDatasetDetails) externalDataset.getDatasetDetails();
IAdapterFactory adapterFactory = metadataProvider.getConfiguredAdapterFactory(externalDataset, edd.getAdapter(), edd.getProperties(), (ARecordType) itemType, null);
return metadataProvider.buildExternalDatasetDataScannerRuntime(jobSpec, itemType, adapterFactory, NonTaggedDataFormat.INSTANCE);
case INTERNAL:
DataSourceId id = getId();
String dataverseName = id.getDataverseName();
String datasetName = id.getDatasourceName();
Index primaryIndex = MetadataManager.INSTANCE.getIndex(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName, datasetName);
int[] minFilterFieldIndexes = createFilterIndexes(minFilterVars, opSchema);
int[] maxFilterFieldIndexes = createFilterIndexes(maxFilterVars, opSchema);
return metadataProvider.buildBtreeRuntime(jobSpec, opSchema, typeEnv, context, true, false, ((DatasetDataSource) dataSource).getDataset(), primaryIndex.getIndexName(), null, null, true, true, minFilterFieldIndexes, maxFilterFieldIndexes);
default:
throw new AlgebricksException("Unknown datasource type");
}
}