Use of org.apache.asterix.metadata.IDatasetDetails in project asterixdb by apache.
In class DatasetTupleTranslator, method createDatasetFromARecord:
protected Dataset createDatasetFromARecord(ARecord datasetRecord) throws HyracksDataException {
    String dataverseName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATAVERSENAME_FIELD_INDEX)).getStringValue();
    String datasetName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETNAME_FIELD_INDEX)).getStringValue();
    String typeName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATATYPENAME_FIELD_INDEX)).getStringValue();
    String typeDataverseName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATATYPEDATAVERSENAME_FIELD_INDEX)).getStringValue();
    DatasetType datasetType = DatasetType.valueOf(((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETTYPE_FIELD_INDEX)).getStringValue());
    IDatasetDetails datasetDetails = null;
    int datasetId = ((AInt32) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_DATASETID_FIELD_INDEX)).getIntegerValue();
    int pendingOp = ((AInt32) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_PENDINGOP_FIELD_INDEX)).getIntegerValue();
    String nodeGroupName = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_GROUPNAME_FIELD_INDEX)).getStringValue();
    String compactionPolicy = ((AString) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_COMPACTION_POLICY_FIELD_INDEX)).getStringValue();
    IACursor cursor = ((AOrderedList) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_COMPACTION_POLICY_PROPERTIES_FIELD_INDEX)).getCursor();
    Map<String, String> compactionPolicyProperties = new LinkedHashMap<>();
    String key;
    String value;
    while (cursor.next()) {
        ARecord field = (ARecord) cursor.get();
        key = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_NAME_FIELD_INDEX)).getStringValue();
        value = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_VALUE_FIELD_INDEX)).getStringValue();
        compactionPolicyProperties.put(key, value);
    }
    switch (datasetType) {
        case INTERNAL: {
            ARecord datasetDetailsRecord = (ARecord) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_INTERNALDETAILS_FIELD_INDEX);
            FileStructure fileStructure = FileStructure.valueOf(((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_FILESTRUCTURE_FIELD_INDEX)).getStringValue());
            PartitioningStrategy partitioningStrategy = PartitioningStrategy.valueOf(((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_PARTITIONSTRATEGY_FIELD_INDEX)).getStringValue());
            cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_PARTITIONKEY_FIELD_INDEX)).getCursor();
            List<List<String>> partitioningKey = new ArrayList<>();
            List<IAType> partitioningKeyType = new ArrayList<>();
            AOrderedList fieldNameList;
            while (cursor.next()) {
                fieldNameList = (AOrderedList) cursor.get();
                IACursor nestedFieldNameCursor = fieldNameList.getCursor();
                List<String> nestedFieldName = new ArrayList<>();
                while (nestedFieldNameCursor.next()) {
                    nestedFieldName.add(((AString) nestedFieldNameCursor.get()).getStringValue());
                }
                partitioningKey.add(nestedFieldName);
                partitioningKeyType.add(BuiltinType.ASTRING);
            }
            boolean autogenerated = ((ABoolean) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.INTERNAL_DETAILS_ARECORD_AUTOGENERATED_FIELD_INDEX)).getBoolean();
            // Check whether there is a filter field (an optional, open field).
            List<String> filterField = null;
            int filterFieldPos = datasetDetailsRecord.getType().getFieldIndex(InternalDatasetDetails.FILTER_FIELD_NAME);
            if (filterFieldPos >= 0) {
                filterField = new ArrayList<>();
                cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(filterFieldPos)).getCursor();
                while (cursor.next()) {
                    filterField.add(((AString) cursor.get()).getStringValue());
                }
            }
            // Read the key-source-indicator field if present; otherwise default every key to source 0.
            List<Integer> keyFieldSourceIndicator = new ArrayList<>();
            int keyFieldSourceIndicatorIndex = datasetDetailsRecord.getType().getFieldIndex(InternalDatasetDetails.KEY_FILD_SOURCE_INDICATOR_FIELD_NAME);
            if (keyFieldSourceIndicatorIndex >= 0) {
                cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(keyFieldSourceIndicatorIndex)).getCursor();
                while (cursor.next()) {
                    keyFieldSourceIndicator.add((int) ((AInt8) cursor.get()).getByteValue());
                }
            } else {
                for (int index = 0; index < partitioningKey.size(); ++index) {
                    keyFieldSourceIndicator.add(0);
                }
            }
            // Temporary datasets only live in the compiler, and DatasetTupleTranslator always
            // reads from the metadata node, so the temp flag is always false here.
            datasetDetails = new InternalDatasetDetails(fileStructure, partitioningStrategy, partitioningKey, partitioningKey, keyFieldSourceIndicator, partitioningKeyType, autogenerated, filterField, false);
            break;
        }
        case EXTERNAL:
            ARecord datasetDetailsRecord = (ARecord) datasetRecord.getValueByPos(MetadataRecordTypes.DATASET_ARECORD_EXTERNALDETAILS_FIELD_INDEX);
            String adapter = ((AString) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_DATASOURCE_ADAPTER_FIELD_INDEX)).getStringValue();
            cursor = ((AOrderedList) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_PROPERTIES_FIELD_INDEX)).getCursor();
            Map<String, String> properties = new HashMap<>();
            while (cursor.next()) {
                ARecord field = (ARecord) cursor.get();
                key = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_NAME_FIELD_INDEX)).getStringValue();
                value = ((AString) field.getValueByPos(MetadataRecordTypes.PROPERTIES_VALUE_FIELD_INDEX)).getStringValue();
                properties.put(key, value);
            }
            // Timestamp
            Date timestamp = new Date(((ADateTime) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_LAST_REFRESH_TIME_FIELD_INDEX)).getChrononTime());
            // State
            TransactionState state = TransactionState.values()[((AInt32) datasetDetailsRecord.getValueByPos(MetadataRecordTypes.EXTERNAL_DETAILS_ARECORD_TRANSACTION_STATE_FIELD_INDEX)).getIntegerValue()];
            datasetDetails = new ExternalDatasetDetails(adapter, properties, timestamp, state);
    }
    Map<String, String> hints = getDatasetHints(datasetRecord);
    String metaTypeDataverseName = null;
    String metaTypeName = null;
    int metaTypeDataverseNameIndex = datasetRecord.getType().getFieldIndex(MetadataRecordTypes.FIELD_NAME_METADATA_DATAVERSE);
    if (metaTypeDataverseNameIndex >= 0) {
        metaTypeDataverseName = ((AString) datasetRecord.getValueByPos(metaTypeDataverseNameIndex)).getStringValue();
        int metaTypeNameIndex = datasetRecord.getType().getFieldIndex(MetadataRecordTypes.FIELD_NAME_METATYPE_NAME);
        metaTypeName = ((AString) datasetRecord.getValueByPos(metaTypeNameIndex)).getStringValue();
    }
    // Read the rebalance count if there is one.
    int rebalanceCountIndex = datasetRecord.getType().getFieldIndex(REBALANCE_ID_FIELD_NAME);
    long rebalanceCount = rebalanceCountIndex >= 0 ? ((AInt64) datasetRecord.getValueByPos(rebalanceCountIndex)).getLongValue() : 0;
    return new Dataset(dataverseName, datasetName, typeDataverseName, typeName, metaTypeDataverseName, metaTypeName, nodeGroupName, compactionPolicy, compactionPolicyProperties, datasetDetails, hints, datasetType, datasetId, pendingOp, rebalanceCount);
}
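A pattern worth noting in this translator: fields added to the Dataset metadata record over time (the filter field, the meta type, the rebalance count) are optional open fields, so they are probed by name with getFieldIndex rather than read at a fixed position. A minimal sketch of that probe, using the same asterix-om accessors seen above; the helper name getOptionalString is hypothetical, not part of asterixdb:

private static String getOptionalString(ARecord record, String fieldName) {
    // Open fields are absent from records written by older versions, so
    // probe by name first and treat a missing field as null.
    int pos = record.getType().getFieldIndex(fieldName);
    return pos >= 0 ? ((AString) record.getValueByPos(pos)).getStringValue() : null;
}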
Use of org.apache.asterix.metadata.IDatasetDetails in project asterixdb by apache.
In class QueryTranslator, method handleCreateDatasetStatement:
public void handleCreateDatasetStatement(MetadataProvider metadataProvider, Statement stmt, IHyracksClientConnection hcc) throws CompilationException, Exception {
    MutableObject<ProgressState> progress = new MutableObject<>(ProgressState.NO_PROGRESS);
    DatasetDecl dd = (DatasetDecl) stmt;
    String dataverseName = getActiveDataverse(dd.getDataverse());
    String datasetName = dd.getName().getValue();
    DatasetType dsType = dd.getDatasetType();
    String itemTypeDataverseName = getActiveDataverse(dd.getItemTypeDataverse());
    String itemTypeName = dd.getItemTypeName().getValue();
    String metaItemTypeDataverseName = getActiveDataverse(dd.getMetaItemTypeDataverse());
    String metaItemTypeName = dd.getMetaItemTypeName().getValue();
    Identifier ngNameId = dd.getNodegroupName();
    String nodegroupName = ngNameId == null ? null : ngNameId.getValue();
    String compactionPolicy = dd.getCompactionPolicy();
    Map<String, String> compactionPolicyProperties = dd.getCompactionPolicyProperties();
    boolean defaultCompactionPolicy = compactionPolicy == null;
    boolean temp = dd.getDatasetDetailsDecl().isTemp();
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    MetadataLockManager.INSTANCE.createDatasetBegin(metadataProvider.getLocks(), dataverseName, itemTypeDataverseName, itemTypeDataverseName + "." + itemTypeName, metaItemTypeDataverseName, metaItemTypeDataverseName + "." + metaItemTypeName, nodegroupName, compactionPolicy, dataverseName + "." + datasetName, defaultCompactionPolicy);
    Dataset dataset = null;
    try {
        IDatasetDetails datasetDetails = null;
        Dataset ds = metadataProvider.findDataset(dataverseName, datasetName);
        if (ds != null) {
            if (dd.getIfNotExists()) {
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                return;
            } else {
                throw new AlgebricksException("A dataset with name " + datasetName + " already exists.");
            }
        }
        Datatype dt = MetadataManager.INSTANCE.getDatatype(metadataProvider.getMetadataTxnContext(), itemTypeDataverseName, itemTypeName);
        if (dt == null) {
            throw new AlgebricksException("Type " + itemTypeName + " could not be found.");
        }
        String ngName = ngNameId != null ? ngNameId.getValue() : configureNodegroupForDataset(appCtx, dd.getHints(), dataverseName, datasetName, metadataProvider);
        if (compactionPolicy == null) {
            compactionPolicy = GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME;
            compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
        } else {
            validateCompactionPolicy(compactionPolicy, compactionPolicyProperties, mdTxnCtx, false);
        }
        switch (dd.getDatasetType()) {
            case INTERNAL:
                IAType itemType = dt.getDatatype();
                if (itemType.getTypeTag() != ATypeTag.OBJECT) {
                    throw new AlgebricksException("Dataset type has to be a record type.");
                }
                IAType metaItemType = null;
                if (metaItemTypeDataverseName != null && metaItemTypeName != null) {
                    metaItemType = metadataProvider.findType(metaItemTypeDataverseName, metaItemTypeName);
                }
                if (metaItemType != null && metaItemType.getTypeTag() != ATypeTag.OBJECT) {
                    throw new AlgebricksException("Dataset meta type has to be a record type.");
                }
                ARecordType metaRecType = (ARecordType) metaItemType;
                List<List<String>> partitioningExprs = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getPartitioningExprs();
                List<Integer> keySourceIndicators = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getKeySourceIndicators();
                boolean autogenerated = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).isAutogenerated();
                ARecordType aRecordType = (ARecordType) itemType;
                List<IAType> partitioningTypes = ValidateUtil.validatePartitioningExpressions(aRecordType, metaRecType, partitioningExprs, keySourceIndicators, autogenerated);
                List<String> filterField = ((InternalDetailsDecl) dd.getDatasetDetailsDecl()).getFilterField();
                if (filterField != null) {
                    ValidateUtil.validateFilterField(aRecordType, filterField);
                }
                // If the dataset has a filter and the user didn't specify a merge policy,
                // pick correlated-prefix as the default merge policy. Note: we test the
                // defaultCompactionPolicy flag captured earlier, since compactionPolicy
                // itself was already defaulted above and can no longer be null here.
                if (defaultCompactionPolicy && filterField != null) {
                    compactionPolicy = GlobalConfig.DEFAULT_FILTERED_DATASET_COMPACTION_POLICY_NAME;
                    compactionPolicyProperties = GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES;
                }
                datasetDetails = new InternalDatasetDetails(InternalDatasetDetails.FileStructure.BTREE, InternalDatasetDetails.PartitioningStrategy.HASH, partitioningExprs, partitioningExprs, keySourceIndicators, partitioningTypes, autogenerated, filterField, temp);
                break;
            case EXTERNAL:
                String adapter = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getAdapter();
                Map<String, String> properties = ((ExternalDetailsDecl) dd.getDatasetDetailsDecl()).getProperties();
                datasetDetails = new ExternalDatasetDetails(adapter, properties, new Date(), TransactionState.COMMIT);
                break;
            default:
                throw new CompilationException("Unknown dataset type " + dd.getDatasetType());
        }
        // #. initialize DatasetIdFactory if it is not initialized.
        if (!DatasetIdFactory.isInitialized()) {
            DatasetIdFactory.initialize(MetadataManager.INSTANCE.getMostRecentDatasetId());
        }
        // #. add a new dataset with PendingAddOp
        dataset = new Dataset(dataverseName, datasetName, itemTypeDataverseName, itemTypeName, metaItemTypeDataverseName, metaItemTypeName, ngName, compactionPolicy, compactionPolicyProperties, datasetDetails, dd.getHints(), dsType, DatasetIdFactory.generateDatasetId(), MetadataUtil.PENDING_ADD_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);
        if (dd.getDatasetType() == DatasetType.INTERNAL) {
            JobSpecification jobSpec = DatasetUtil.createDatasetJobSpec(dataset, metadataProvider);
            // #. make metadataTxn commit before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress.setValue(ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA);
            // #. runJob
            JobUtils.runJob(hcc, jobSpec, true);
            // #. begin new metadataTxn
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
        }
        // #. add a new dataset with PendingNoOp after deleting the dataset with PendingAddOp
        MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
        dataset.setPendingOp(MetadataUtil.PENDING_NO_OP);
        MetadataManager.INSTANCE.addDataset(metadataProvider.getMetadataTxnContext(), dataset);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (progress.getValue() == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // #. execute compensation operations
            // Remove the index in the NC.
            // [Notice]
            // Since we already updated (and committed) the metadata, we must undo any
            // effect of the job, because the exception occurred during runJob.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                JobSpecification jobSpec = DatasetUtil.dropDatasetJobSpec(dataset, metadataProvider);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                bActiveTxn = false;
                JobUtils.runJob(hcc, jobSpec, true);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                if (bActiveTxn) {
                    abort(e, e2, mdTxnCtx);
                }
            }
            // Remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropDataset(metadataProvider.getMetadataTxnContext(), dataverseName, datasetName);
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is in an inconsistent state: pending dataset (" + dataverseName + "." + datasetName + ") could not be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
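The try/catch above is an instance of AsterixDB's pending-operation protocol: record the intent in metadata (PENDING_ADD_OP), commit, run the physical Hyracks job outside any metadata transaction, then replace the record with a PENDING_NO_OP version; on failure after the first commit, compensate by undoing the physical work and dropping the pending record. A condensed, hedged sketch of that shape, with the asterixdb calls replaced by hypothetical Runnable hooks for illustration:

static void runWithPendingOp(Runnable addPendingRecord, Runnable runPhysicalJob,
        Runnable promoteToNoOp, Runnable compensate) {
    addPendingRecord.run();   // txn 1: persist intent (PENDING_ADD_OP) and commit
    try {
        runPhysicalJob.run(); // physical work happens outside any metadata txn
        promoteToNoOp.run();  // txn 2: re-add the record with PENDING_NO_OP and commit
    } catch (RuntimeException e) {
        compensate.run();     // undo the physical work, then drop the pending record
        throw e;
    }
}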
Use of org.apache.asterix.metadata.IDatasetDetails in project asterixdb by apache.
In class MetadataBootstrap, method insertMetadataDatasets:
/**
 * Inserts the metadata datasets into the physical dataset index.
 * Should be performed only when bootstrapping a new universe.
 *
 * @param mdTxnCtx
 * @param indexes
 * @throws MetadataException
 */
public static void insertMetadataDatasets(MetadataTransactionContext mdTxnCtx, IMetadataIndex[] indexes) throws MetadataException {
    for (int i = 0; i < indexes.length; i++) {
        IDatasetDetails id = new InternalDatasetDetails(FileStructure.BTREE, PartitioningStrategy.HASH,
                indexes[i].getPartitioningExpr(), indexes[i].getPartitioningExpr(), null,
                indexes[i].getPartitioningExprType(), false, null, false);
        MetadataManager.INSTANCE.addDataset(mdTxnCtx,
                new Dataset(indexes[i].getDataverseName(), indexes[i].getIndexedDatasetName(),
                        indexes[i].getDataverseName(), indexes[i].getPayloadRecordType().getTypeName(),
                        indexes[i].getNodeGroupName(), GlobalConfig.DEFAULT_COMPACTION_POLICY_NAME,
                        GlobalConfig.DEFAULT_COMPACTION_POLICY_PROPERTIES, id, new HashMap<String, String>(),
                        DatasetType.INTERNAL, indexes[i].getDatasetId().getId(), MetadataUtil.PENDING_NO_OP));
    }
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("Finished inserting initial datasets.");
    }
}
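For context, a minimal sketch of how this helper might be driven during bootstrap; the primaryIndexes name is hypothetical and the transaction and error handling are simplified:

static void bootstrapMetadataDatasets(IMetadataIndex[] primaryIndexes) throws Exception {
    // "primaryIndexes" is a hypothetical name for the array of built-in
    // metadata indexes assembled elsewhere during bootstrap.
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    try {
        insertMetadataDatasets(mdTxnCtx, primaryIndexes);
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        MetadataManager.INSTANCE.abortTransaction(mdTxnCtx); // roll back on failure
        throw e;
    }
}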