Use of org.apache.hyracks.control.cc.ClusterControllerService in project asterixdb by apache.
The class AsterixHyracksIntegrationUtil, method init.
public void init(boolean deleteOldInstanceData) throws Exception {
final ICCApplication ccApplication = createCCApplication();
configManager = new ConfigManager();
ccApplication.registerConfig(configManager);
final CCConfig ccConfig = createCCConfig(configManager);
cc = new ClusterControllerService(ccConfig, ccApplication);
nodeNames = ccConfig.getConfigManager().getNodeNames();
if (deleteOldInstanceData) {
deleteTransactionLogs();
removeTestStorageFiles();
}
final List<NodeControllerService> nodeControllers = new ArrayList<>();
for (String nodeId : nodeNames) {
// mark this NC as virtual in the CC's config manager, so it doesn't try to contact the NCService...
configManager.set(nodeId, NCConfig.Option.VIRTUAL_NC, true);
final INCApplication ncApplication = createNCApplication();
ConfigManager ncConfigManager = new ConfigManager();
ncApplication.registerConfig(ncConfigManager);
nodeControllers.add(new NodeControllerService(fixupIODevices(createNCConfig(nodeId, ncConfigManager)), ncApplication));
}
cc.start();
// Start the NCs.
nodeNames = ccConfig.getConfigManager().getNodeNames();
List<Thread> startupThreads = new ArrayList<>();
for (NodeControllerService nc : nodeControllers) {
Thread ncStartThread = new Thread("IntegrationUtil-" + nc.getId()) {
@Override
public void run() {
try {
nc.start();
} catch (Exception e) {
LOGGER.log(Level.SEVERE, e.getMessage(), e);
}
}
};
ncStartThread.start();
startupThreads.add(ncStartThread);
}
//wait until all NCs complete their startup
for (Thread thread : startupThreads) {
thread.join();
}
// Wait until cluster becomes active
ClusterStateManager.INSTANCE.waitForState(ClusterState.ACTIVE);
hcc = new HyracksConnection(cc.getConfig().getClientListenAddress(), cc.getConfig().getClientListenPort());
this.ncs = nodeControllers.toArray(new NodeControllerService[nodeControllers.size()]);
}
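A typical caller of this utility is a test fixture that brings the mini cluster up once, runs its work through the HyracksConnection created above, and tears everything down afterwards. A minimal sketch of such a caller, assuming the utility also exposes a deinit(boolean) counterpart to init (that method name is an assumption, not shown above):
public class IntegrationUtilSmokeTest {
    public static void main(String[] args) throws Exception {
        AsterixHyracksIntegrationUtil integrationUtil = new AsterixHyracksIntegrationUtil();
        // true: delete transaction logs and storage files left over from a previous run
        integrationUtil.init(true);
        try {
            // ... submit jobs through the HyracksConnection that init() created ...
        } finally {
            // Assumed teardown counterpart to init(); true again wipes the instance data.
            integrationUtil.deinit(true);
        }
    }
}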
Use of org.apache.hyracks.control.cc.ClusterControllerService in project asterixdb by apache.
The class ExecuteStatementRequestMessage, method handle.
@Override
public void handle(ICcApplicationContext ccAppCtx) throws HyracksDataException, InterruptedException {
ICCServiceContext ccSrvContext = ccAppCtx.getServiceContext();
ClusterControllerService ccSrv = (ClusterControllerService) ccSrvContext.getControllerService();
CCApplication ccApp = (CCApplication) ccSrv.getApplication();
CCMessageBroker messageBroker = (CCMessageBroker) ccSrvContext.getMessageBroker();
CCExtensionManager ccExtMgr = (CCExtensionManager) ccAppCtx.getExtensionManager();
ILangCompilationProvider compilationProvider = ccExtMgr.getCompilationProvider(lang);
IStorageComponentProvider storageComponentProvider = ccAppCtx.getStorageComponentProvider();
IStatementExecutorFactory statementExecutorFactory = ccApp.getStatementExecutorFactory();
IStatementExecutorContext statementExecutorContext = ccApp.getStatementExecutorContext();
ccSrv.getExecutor().submit(() -> {
ExecuteStatementResponseMessage responseMsg = new ExecuteStatementResponseMessage(requestMessageId);
try {
final IClusterManagementWork.ClusterState clusterState = ClusterStateManager.INSTANCE.getState();
if (clusterState != IClusterManagementWork.ClusterState.ACTIVE) {
throw new IllegalStateException("Cannot execute request, cluster is " + clusterState);
}
IParser parser = compilationProvider.getParserFactory().createParser(statementsText);
List<Statement> statements = parser.parse();
StringWriter outWriter = new StringWriter(256);
PrintWriter outPrinter = new PrintWriter(outWriter);
SessionOutput.ResultDecorator resultPrefix = ResultUtil.createPreResultDecorator();
SessionOutput.ResultDecorator resultPostfix = ResultUtil.createPostResultDecorator();
SessionOutput.ResultAppender appendHandle = ResultUtil.createResultHandleAppender(handleUrl);
SessionOutput.ResultAppender appendStatus = ResultUtil.createResultStatusAppender();
SessionOutput sessionOutput = new SessionOutput(sessionConfig, outPrinter, resultPrefix, resultPostfix, appendHandle, appendStatus);
IStatementExecutor.ResultMetadata outMetadata = new IStatementExecutor.ResultMetadata();
MetadataManager.INSTANCE.init();
IStatementExecutor translator = statementExecutorFactory.create(ccAppCtx, statements, sessionOutput, compilationProvider, storageComponentProvider);
translator.compileAndExecute(ccAppCtx.getHcc(), null, delivery, outMetadata, new IStatementExecutor.Stats(), clientContextID, statementExecutorContext);
outPrinter.close();
responseMsg.setResult(outWriter.toString());
responseMsg.setMetadata(outMetadata);
} catch (AlgebricksException | HyracksException | TokenMgrError | org.apache.asterix.aqlplus.parser.TokenMgrError pe) {
// we trust that "our" exceptions are serializable and have a comprehensible error message
GlobalConfig.ASTERIX_LOGGER.log(Level.WARNING, pe.getMessage(), pe);
responseMsg.setError(pe);
} catch (Exception e) {
GlobalConfig.ASTERIX_LOGGER.log(Level.SEVERE, "Unexpected exception", e);
responseMsg.setError(new Exception(e.toString()));
}
try {
messageBroker.sendApplicationMessageToNC(responseMsg, requestNodeId);
} catch (Exception e) {
LOGGER.log(Level.WARNING, e.toString(), e);
}
});
}
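The structural points worth noting in this handler are that the statement work runs on the CC's executor rather than on the message-handling thread, and that a response is sent back to the requesting NC on both the success and the failure path. A self-contained sketch of that submit-and-reply shape using only java.util.concurrent; the names ResponseMsg and sendToRequester are placeholders standing in for the AsterixDB types above, not real APIs:
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class SubmitAndReplySketch {

    static final class ResponseMsg {
        String result;
        Throwable error;
    }

    private final ExecutorService executor = Executors.newSingleThreadExecutor();

    public void handle(String statementsText, String requestNodeId) {
        executor.submit(() -> {
            ResponseMsg response = new ResponseMsg();
            try {
                // Placeholder for parse() + compileAndExecute() in the real handler.
                response.result = "executed: " + statementsText;
            } catch (Exception e) {
                // Expected failures travel back to the requester instead of being swallowed.
                response.error = e;
            }
            try {
                sendToRequester(response, requestNodeId);
            } catch (Exception e) {
                // Nothing else to do but log; the requester will notice the missing reply.
                System.err.println("could not reply to " + requestNodeId + ": " + e);
            }
        });
    }

    private void sendToRequester(ResponseMsg response, String nodeId) {
        // Stand-in for CCMessageBroker.sendApplicationMessageToNC(responseMsg, requestNodeId).
        System.out.println("reply to " + nodeId + ": " + response.result);
    }
}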
Use of org.apache.hyracks.control.cc.ClusterControllerService in project asterixdb by apache.
The class ClusterStateManager, method getClusterStateDescription.
public synchronized ObjectNode getClusterStateDescription() {
ObjectMapper om = new ObjectMapper();
ObjectNode stateDescription = om.createObjectNode();
stateDescription.put("state", state.name());
stateDescription.put("metadata_node", currentMetadataNode);
ArrayNode ncs = om.createArrayNode();
stateDescription.set("ncs", ncs);
for (String node : new TreeSet<>(((ClusterControllerService) appCtx.getServiceContext().getControllerService()).getNodeManager().getAllNodeIds())) {
ObjectNode nodeJSON = om.createObjectNode();
nodeJSON.put("node_id", node);
boolean allActive = true;
boolean anyActive = false;
Set<Map<String, Object>> partitions = new HashSet<>();
if (node2PartitionsMap.containsKey(node)) {
for (ClusterPartition part : node2PartitionsMap.get(node)) {
HashMap<String, Object> partition = new HashMap<>();
partition.put("partition_id", "partition_" + part.getPartitionId());
partition.put("active", part.isActive());
partitions.add(partition);
allActive = allActive && part.isActive();
if (part.isActive()) {
anyActive = true;
}
}
}
nodeJSON.put("state", failedNodes.contains(node) ? "FAILED" : allActive && anyActive ? "ACTIVE" : anyActive ? "PARTIALLY_ACTIVE" : "INACTIVE");
nodeJSON.putPOJO("partitions", partitions);
ncs.add(nodeJSON);
}
return stateDescription;
}
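The method produces a JSON summary of per-node partition state. An illustrative sketch of rendering it, with the expected shape noted in a comment; the node and partition values are made up for illustration, the singleton access mirrors the snippets above, and the import of ClusterStateManager is omitted since its package depends on the AsterixDB version:
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class PrintClusterState {
    public static void main(String[] args) throws Exception {
        ObjectNode state = ClusterStateManager.INSTANCE.getClusterStateDescription();
        // Roughly:
        // { "state": "ACTIVE",
        //   "metadata_node": "asterix_nc1",
        //   "ncs": [ { "node_id": "asterix_nc1", "state": "ACTIVE",
        //              "partitions": [ { "partition_id": "partition_0", "active": true } ] } ] }
        System.out.println(new ObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(state));
    }
}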
Use of org.apache.hyracks.control.cc.ClusterControllerService in project asterixdb by apache.
The class HyracksUtils, method init.
public static void init() throws Exception {
CCConfig ccConfig = new CCConfig();
ccConfig.setClientListenAddress(CC_HOST);
ccConfig.setClusterListenAddress(CC_HOST);
ccConfig.setClusterListenPort(TEST_HYRACKS_CC_PORT);
ccConfig.setClientListenPort(TEST_HYRACKS_CC_CLIENT_PORT);
ccConfig.setJobHistorySize(0);
ccConfig.setProfileDumpPeriod(-1);
// cluster controller
cc = new ClusterControllerService(ccConfig);
cc.start();
// two node controllers
NCConfig ncConfig1 = new NCConfig(NC1_ID);
ncConfig1.setClusterAddress("localhost");
ncConfig1.setClusterListenAddress("localhost");
ncConfig1.setClusterPort(TEST_HYRACKS_CC_PORT);
ncConfig1.setDataListenAddress("127.0.0.1");
ncConfig1.setResultListenAddress("127.0.0.1");
nc1 = new NodeControllerService(ncConfig1);
nc1.start();
NCConfig ncConfig2 = new NCConfig(NC2_ID);
ncConfig2.setClusterAddress("localhost");
ncConfig2.setClusterListenAddress("localhost");
ncConfig2.setClusterPort(TEST_HYRACKS_CC_PORT);
ncConfig2.setDataListenAddress("127.0.0.1");
ncConfig2.setResultListenAddress("127.0.0.1");
nc2 = new NodeControllerService(ncConfig2);
nc2.start();
// hyracks connection
hcc = new HyracksConnection(CC_HOST, TEST_HYRACKS_CC_CLIENT_PORT);
}
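The matching teardown stops the node controllers before the cluster controller, so they can shut down while the CC is still reachable. A minimal sketch, assuming stop() lifecycle methods on both services:
public static void deinit() throws Exception {
    // Stop the NCs first, then the CC they are registered with.
    nc2.stop();
    nc1.stop();
    cc.stop();
}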
Use of org.apache.hyracks.control.cc.ClusterControllerService in project asterixdb by apache.
The class AlgebricksHyracksIntegrationUtil, method init.
public static void init() throws Exception {
FileUtils.deleteQuietly(new File(joinPath("target", "data")));
FileUtils.copyDirectory(new File("data"), new File(joinPath("target", "data")));
CCConfig ccConfig = new CCConfig();
ccConfig.setClientListenAddress("127.0.0.1");
ccConfig.setClientListenPort(TEST_HYRACKS_CC_CLIENT_NET_PORT);
ccConfig.setClusterListenAddress("127.0.0.1");
ccConfig.setClusterListenPort(TEST_HYRACKS_CC_CLUSTER_NET_PORT);
cc = new ClusterControllerService(ccConfig);
cc.start();
NCConfig ncConfig1 = new NCConfig(NC1_ID);
ncConfig1.setClusterAddress("localhost");
ncConfig1.setClusterPort(TEST_HYRACKS_CC_CLUSTER_NET_PORT);
ncConfig1.setClusterListenAddress("127.0.0.1");
ncConfig1.setDataListenAddress("127.0.0.1");
ncConfig1.setResultListenAddress("127.0.0.1");
ncConfig1.setIODevices(new String[] { joinPath(System.getProperty("user.dir"), "target", "data", "device0") });
FileUtils.forceMkdir(new File(ncConfig1.getIODevices()[0]));
nc1 = new NodeControllerService(ncConfig1);
nc1.start();
NCConfig ncConfig2 = new NCConfig(NC2_ID);
ncConfig2.setClusterAddress("localhost");
ncConfig2.setClusterPort(TEST_HYRACKS_CC_CLUSTER_NET_PORT);
ncConfig2.setClusterListenAddress("127.0.0.1");
ncConfig2.setDataListenAddress("127.0.0.1");
ncConfig2.setResultListenAddress("127.0.0.1");
ncConfig2.setIODevices(new String[] { joinPath(System.getProperty("user.dir"), "target", "data", "device1") });
FileUtils.forceMkdir(new File(ncConfig2.getIODevices()[0]));
nc2 = new NodeControllerService(ncConfig2);
nc2.start();
hcc = new HyracksConnection(ccConfig.getClientListenAddress(), ccConfig.getClientListenPort());
}
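With the mini cluster running, a test builds a JobSpecification and drives it through the client connection. A minimal sketch, assuming the standard Hyracks client job API (startJob/waitForCompletion):
import org.apache.hyracks.api.job.JobId;
import org.apache.hyracks.api.job.JobSpecification;

public static void runJob(JobSpecification spec) throws Exception {
    // Submit the job to the CC and block until it finishes (or fails).
    JobId jobId = hcc.startJob(spec);
    hcc.waitForCompletion(jobId);
}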