Use of org.apache.asterix.active.ActiveLifecycleListener in project asterixdb by apache.
From class FeedEventsListener, method finish():
private void finish() throws Exception {
    IHyracksClientConnection hcc = appCtx.getHcc();
    JobStatus status = hcc.getJobStatus(jobId);
    // Map the terminal Hyracks job status onto the entity's activity state.
    state = status.equals(JobStatus.FAILURE) ? ActivityState.FAILED : ActivityState.STOPPED;
    // Deregister this listener now that the feed's job has terminated.
    ActiveLifecycleListener activeLcListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    activeLcListener.getNotificationHandler().removeListener(this);
}
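The removal above has a matching registration when the entity starts. A minimal sketch of the pair, assuming registerListener is the counterpart on ActiveJobNotificationHandler (only removeListener appears in this snippet, so the registration call is an assumption):

// Sketch, not AsterixDB source; registerListener is an assumed counterpart
// of the removeListener call seen in finish() above.
static void runWithRegistration(ICcApplicationContext appCtx,
        IActiveEntityEventsListener eventsListener) throws Exception {
    ActiveLifecycleListener activeLcListener =
            (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler handler = activeLcListener.getNotificationHandler();
    handler.registerListener(eventsListener); // assumed: register when the entity starts
    // ... the entity's job runs ...
    handler.removeListener(eventsListener);   // as in finish(), once the job terminates
}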
Use of org.apache.asterix.active.ActiveLifecycleListener in project asterixdb by apache.
From class ActivePartitionMessage, method handle():
@Override
public void handle(ICcApplicationContext appCtx) throws HyracksDataException, InterruptedException {
    // Route this partition event to the CC-side active-entity dispatcher.
    ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    activeListener.receive(this);
}
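handle(...) runs on the cluster controller, so receive(...) is the single funnel through which partition events reach the active-entity machinery. A minimal sketch of dispatching an already-built message to the listener directly, e.g. from a test; deliver is an illustrative helper, not AsterixDB API:

// Illustrative helper: same path handle() takes, factored out.
static void deliver(ICcApplicationContext appCtx, ActivePartitionMessage msg)
        throws HyracksDataException, InterruptedException {
    ActiveLifecycleListener activeListener =
            (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    activeListener.receive(msg); // forward to the CC-side notification machinery
}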
Use of org.apache.asterix.active.ActiveLifecycleListener in project asterixdb by apache.
From class CCApplication, method start():
@Override
public void start(IServiceContext serviceCtx, String[] args) throws Exception {
    if (args.length > 0) {
        throw new IllegalArgumentException("Unrecognized argument(s): " + Arrays.toString(args));
    }
    final ClusterControllerService controllerService = (ClusterControllerService) serviceCtx.getControllerService();
    this.ccServiceCtx = (ICCServiceContext) serviceCtx;
    ccServiceCtx.setMessageBroker(new CCMessageBroker(controllerService));
    configureLoggingLevel(ccServiceCtx.getAppConfig().getLoggingLevel(ExternalProperties.Option.LOG_LEVEL));
    if (LOGGER.isLoggable(Level.INFO)) {
        LOGGER.info("Starting Asterix cluster controller");
    }
    ccServiceCtx.setThreadFactory(
            new AsterixThreadFactory(ccServiceCtx.getThreadFactory(), new LifeCycleComponentManager()));
    ILibraryManager libraryManager = new ExternalLibraryManager();
    ResourceIdManager resourceIdManager = new ResourceIdManager();
    IReplicationStrategy repStrategy = ClusterProperties.INSTANCE.getReplicationStrategy();
    IFaultToleranceStrategy ftStrategy =
            FaultToleranceStrategyFactory.create(ClusterProperties.INSTANCE.getCluster(), repStrategy, ccServiceCtx);
    ExternalLibraryUtils.setUpExternaLibraries(libraryManager, false);
    componentProvider = new StorageComponentProvider();
    GlobalRecoveryManager.instantiate(ccServiceCtx, getHcc(), componentProvider);
    statementExecutorCtx = new StatementExecutorContext();
    // The ActiveLifecycleListener is created here and handed to the application
    // context, which is where the rest of the system retrieves it from.
    appCtx = new CcApplicationContext(ccServiceCtx, getHcc(), libraryManager, resourceIdManager,
            () -> MetadataManager.INSTANCE, GlobalRecoveryManager.instance(), ftStrategy,
            new ActiveLifecycleListener(), componentProvider);
    ClusterStateManager.INSTANCE.setCcAppCtx(appCtx);
    ccExtensionManager = new CCExtensionManager(getExtensions());
    appCtx.setExtensionManager(ccExtensionManager);
    final CCConfig ccConfig = controllerService.getCCConfig();
    if (System.getProperty("java.rmi.server.hostname") == null) {
        System.setProperty("java.rmi.server.hostname", ccConfig.getClusterListenAddress());
    }
    MetadataProperties metadataProperties = appCtx.getMetadataProperties();
    setAsterixStateProxy(AsterixStateProxy.registerRemoteObject(metadataProperties.getMetadataCallbackPort()));
    ccServiceCtx.setDistributedState(proxy);
    MetadataManager.initialize(proxy, metadataProperties);
    // Subscribe the active listener to Hyracks job lifecycle callbacks.
    ccServiceCtx.addJobLifecycleListener(appCtx.getActiveLifecycleListener());
    // create event loop groups
    webManager = new WebManager();
    configureServers();
    webManager.start();
    ClusterManagerProvider.getClusterManager().registerSubscriber(GlobalRecoveryManager.instance());
    ccServiceCtx.addClusterLifecycleListener(new ClusterLifecycleListener(appCtx));
    jobCapacityController = new JobCapacityController(controllerService.getResourceManager());
}
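Of everything start() does, only two lines concern ActiveLifecycleListener: the constructor argument to CcApplicationContext, which is what later makes appCtx.getActiveLifecycleListener() return it, and the addJobLifecycleListener call, which subscribes it to Hyracks job lifecycle notifications. The wiring in isolation, assuming the surrounding objects are built as above:

// Sketch: just the ActiveLifecycleListener wiring from start(), nothing else.
ActiveLifecycleListener activeListener = new ActiveLifecycleListener();
appCtx = new CcApplicationContext(ccServiceCtx, getHcc(), libraryManager, resourceIdManager,
        () -> MetadataManager.INSTANCE, GlobalRecoveryManager.instance(), ftStrategy,
        activeListener, componentProvider);
// From here on, the listener is reachable through the app context and receives
// job lifecycle callbacks (job creation, start, finish) from Hyracks.
ccServiceCtx.addJobLifecycleListener(appCtx.getActiveLifecycleListener());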
Use of org.apache.asterix.active.ActiveLifecycleListener in project asterixdb by apache.
From class QueryTranslator, method handleIndexDropStatement():
protected void handleIndexDropStatement(MetadataProvider metadataProvider, Statement stmt,
        IHyracksClientConnection hcc) throws Exception {
    IndexDropStatement stmtIndexDrop = (IndexDropStatement) stmt;
    String datasetName = stmtIndexDrop.getDatasetName().getValue();
    String dataverseName = getActiveDataverse(stmtIndexDrop.getDataverseName());
    ProgressState progress = ProgressState.NO_PROGRESS;
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    boolean bActiveTxn = true;
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    List<JobSpecification> jobsToExecute = new ArrayList<>();
    MetadataLockManager.INSTANCE.dropIndexBegin(metadataProvider.getLocks(), dataverseName,
            dataverseName + "." + datasetName);
    String indexName = null;
    // For external index
    boolean dropFilesIndex = false;
    try {
        Dataset ds = metadataProvider.findDataset(dataverseName, datasetName);
        if (ds == null) {
            throw new AlgebricksException(
                    "There is no dataset with this name " + datasetName + " in dataverse " + dataverseName);
        }
        // Refuse to drop an index while an active entity (e.g. a feed) is using the dataset.
        ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
        ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
        IActiveEntityEventsListener[] listeners = activeEventHandler.getEventListeners();
        StringBuilder builder = null;
        for (IActiveEntityEventsListener listener : listeners) {
            if (listener.isEntityUsingDataset(ds)) {
                if (builder == null) {
                    builder = new StringBuilder();
                }
                builder.append(new FeedConnectionId(listener.getEntityId(), datasetName) + "\n");
            }
        }
        if (builder != null) {
            throw new CompilationException("Dataset " + datasetName
                    + " is currently being fed into by the following active entities: " + builder.toString());
        }
        if (ds.getDatasetType() == DatasetType.INTERNAL) {
            indexName = stmtIndexDrop.getIndexName().getValue();
            Index index = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataverseName, datasetName, indexName);
            if (index == null) {
                if (stmtIndexDrop.getIfExists()) {
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                    return;
                } else {
                    throw new AlgebricksException("There is no index with this name " + indexName + ".");
                }
            }
            // #. prepare a job to drop the index in NC.
            jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(index, metadataProvider, ds));
            // #. mark PendingDropOp on the existing index
            MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
            MetadataManager.INSTANCE.addIndex(mdTxnCtx,
                    new Index(dataverseName, datasetName, indexName, index.getIndexType(),
                            index.getKeyFieldNames(), index.getKeyFieldSourceIndicators(),
                            index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index.isPrimaryIndex(),
                            MetadataUtil.PENDING_DROP_OP));
            // #. commit the existing transaction before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
            for (JobSpecification jobSpec : jobsToExecute) {
                JobUtils.runJob(hcc, jobSpec, true);
            }
            // #. begin a new transaction
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            // #. finally, delete the existing index
            MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
        } else {
            // External dataset
            indexName = stmtIndexDrop.getIndexName().getValue();
            Index index = MetadataManager.INSTANCE.getIndex(mdTxnCtx, dataverseName, datasetName, indexName);
            if (index == null) {
                if (stmtIndexDrop.getIfExists()) {
                    MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
                    return;
                } else {
                    throw new AlgebricksException("There is no index with this name " + indexName + ".");
                }
            } else if (ExternalIndexingOperations.isFileIndex(index)) {
                throw new AlgebricksException("Dropping a dataset's files index is not allowed.");
            }
            // #. prepare a job to drop the index in NC.
            jobsToExecute.add(IndexUtil.buildDropIndexJobSpec(index, metadataProvider, ds));
            List<Index> datasetIndexes =
                    MetadataManager.INSTANCE.getDatasetIndexes(mdTxnCtx, dataverseName, datasetName);
            if (datasetIndexes.size() == 2) {
                dropFilesIndex = true;
                // only one index + the files index, we need to delete both of the indexes
                for (Index externalIndex : datasetIndexes) {
                    if (ExternalIndexingOperations.isFileIndex(externalIndex)) {
                        jobsToExecute.add(
                                ExternalIndexingOperations.buildDropFilesIndexJobSpec(metadataProvider, ds));
                        // #. mark PendingDropOp on the existing files index
                        MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName,
                                externalIndex.getIndexName());
                        MetadataManager.INSTANCE.addIndex(mdTxnCtx,
                                new Index(dataverseName, datasetName, externalIndex.getIndexName(),
                                        externalIndex.getIndexType(), externalIndex.getKeyFieldNames(),
                                        externalIndex.getKeyFieldSourceIndicators(), index.getKeyFieldTypes(),
                                        index.isEnforcingKeyFileds(), externalIndex.isPrimaryIndex(),
                                        MetadataUtil.PENDING_DROP_OP));
                    }
                }
            }
            // #. mark PendingDropOp on the existing index
            MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
            MetadataManager.INSTANCE.addIndex(mdTxnCtx,
                    new Index(dataverseName, datasetName, indexName, index.getIndexType(),
                            index.getKeyFieldNames(), index.getKeyFieldSourceIndicators(),
                            index.getKeyFieldTypes(), index.isEnforcingKeyFileds(), index.isPrimaryIndex(),
                            MetadataUtil.PENDING_DROP_OP));
            // #. commit the existing transaction before calling runJob.
            MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            bActiveTxn = false;
            progress = ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA;
            for (JobSpecification jobSpec : jobsToExecute) {
                JobUtils.runJob(hcc, jobSpec, true);
            }
            // #. begin a new transaction
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            bActiveTxn = true;
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            // #. finally, delete the existing index
            MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName, indexName);
            if (dropFilesIndex) {
                // delete the files index too
                MetadataManager.INSTANCE.dropIndex(mdTxnCtx, dataverseName, datasetName,
                        IndexingConstants.getFilesIndexName(datasetName));
                MetadataManager.INSTANCE.dropDatasetExternalFiles(mdTxnCtx, ds);
                ExternalDatasetsRegistry.INSTANCE.removeDatasetInfo(ds);
            }
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        if (bActiveTxn) {
            abort(e, e, mdTxnCtx);
        }
        if (progress == ProgressState.ADDED_PENDINGOP_RECORD_TO_METADATA) {
            // remove all the indexes in NC
            try {
                for (JobSpecification jobSpec : jobsToExecute) {
                    JobUtils.runJob(hcc, jobSpec, true);
                }
            } catch (Exception e2) {
                // do not throw the exception, since the metadata still needs to be compensated
                e.addSuppressed(e2);
            }
            // remove the record from the metadata.
            mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
            metadataProvider.setMetadataTxnContext(mdTxnCtx);
            try {
                MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                        datasetName, indexName);
                if (dropFilesIndex) {
                    MetadataManager.INSTANCE.dropIndex(metadataProvider.getMetadataTxnContext(), dataverseName,
                            datasetName, IndexingConstants.getFilesIndexName(datasetName));
                }
                MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
            } catch (Exception e2) {
                e.addSuppressed(e2);
                abort(e, e2, mdTxnCtx);
                throw new IllegalStateException("System is in an inconsistent state: pending index ("
                        + dataverseName + "." + datasetName + "." + indexName
                        + ") couldn't be removed from the metadata", e);
            }
        }
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
        ExternalDatasetsRegistry.INSTANCE.releaseAcquiredLocks(metadataProvider);
    }
}
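The only ActiveLifecycleListener involvement in this long method is the guard near the top. Extracted as a standalone helper (the method name is illustrative, not AsterixDB API), the check reads:

// Illustrative helper: fail if any active entity (e.g. a connected feed) uses the dataset.
static void ensureNoActiveEntityUsesDataset(ICcApplicationContext appCtx, Dataset ds, String datasetName)
        throws CompilationException {
    ActiveLifecycleListener activeListener =
            (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler handler = activeListener.getNotificationHandler();
    StringBuilder users = new StringBuilder();
    for (IActiveEntityEventsListener listener : handler.getEventListeners()) {
        if (listener.isEntityUsingDataset(ds)) {
            users.append(new FeedConnectionId(listener.getEntityId(), datasetName)).append('\n');
        }
    }
    if (users.length() > 0) {
        throw new CompilationException("Dataset " + datasetName
                + " is currently being fed into by the following active entities: " + users);
    }
}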
Use of org.apache.asterix.active.ActiveLifecycleListener in project asterixdb by apache.
From class QueryTranslator, method handleDisconnectFeedStatement():
protected void handleDisconnectFeedStatement(MetadataProvider metadataProvider, Statement stmt) throws Exception {
    DisconnectFeedStatement cfs = (DisconnectFeedStatement) stmt;
    String dataverseName = getActiveDataverse(cfs.getDataverseName());
    String datasetName = cfs.getDatasetName().getValue();
    String feedName = cfs.getFeedName().getValue();
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    ActiveLifecycleListener activeListener = (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler activeEventHandler = activeListener.getNotificationHandler();
    // Check whether the feed is alive; connectivity cannot be changed while it is running.
    if (activeEventHandler
            .getActiveEntityListener(new EntityId(Feed.EXTENSION_NAME, dataverseName, feedName)) != null) {
        throw new CompilationException(ErrorCode.FEED_CHANGE_FEED_CONNECTIVITY_ON_ALIVE_FEED, feedName);
    }
    MetadataLockManager.INSTANCE.disconnectFeedBegin(metadataProvider.getLocks(), dataverseName,
            dataverseName + "." + datasetName, dataverseName + "." + cfs.getFeedName());
    try {
        FeedMetadataUtil.validateIfDatasetExists(metadataProvider, dataverseName,
                cfs.getDatasetName().getValue(), mdTxnCtx);
        FeedMetadataUtil.validateIfFeedExists(dataverseName, cfs.getFeedName().getValue(), mdTxnCtx);
        FeedConnection fc = MetadataManager.INSTANCE.getFeedConnection(metadataProvider.getMetadataTxnContext(),
                dataverseName, feedName, datasetName);
        if (fc == null) {
            throw new CompilationException("Feed " + feedName + " is currently not connected to "
                    + cfs.getDatasetName().getValue() + ". Invalid operation!");
        }
        MetadataManager.INSTANCE.dropFeedConnection(mdTxnCtx, dataverseName, feedName, datasetName);
        // Decrement the reference count of every function the connection applied.
        for (FunctionSignature functionSignature : fc.getAppliedFunctions()) {
            Function function = MetadataManager.INSTANCE.getFunction(mdTxnCtx, functionSignature);
            function.dereference();
            MetadataManager.INSTANCE.updateFunction(mdTxnCtx, function);
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        abort(e, e, mdTxnCtx);
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
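The liveness check at the top of this method is the idiomatic way to ask whether a feed is running: look up its EntityId in the notification handler. As a standalone sketch (isFeedAlive is an illustrative name, not AsterixDB API):

// Illustrative helper: a non-null entity listener means the feed's job is registered, i.e. alive.
static boolean isFeedAlive(ICcApplicationContext appCtx, String dataverseName, String feedName) {
    ActiveLifecycleListener activeListener =
            (ActiveLifecycleListener) appCtx.getActiveLifecycleListener();
    ActiveJobNotificationHandler handler = activeListener.getNotificationHandler();
    return handler.getActiveEntityListener(
            new EntityId(Feed.EXTENSION_NAME, dataverseName, feedName)) != null;
}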