Use of org.apache.hyracks.api.config.IApplicationConfig in project asterixdb by apache.
From the class PropertiesAccessor, method configureNc.
private void configureNc(ConfigManager configManager, String ncId, MutableInt uniquePartitionId)
        throws AsterixException {
    // Now we assign the coredump and txnlog directories for this node.
    // QQQ Default values? Should they be specified here? Or should there
    // be a default.ini? Certainly wherever they are, they should be platform-dependent.
    IApplicationConfig nodeCfg = cfg.getNCEffectiveConfig(ncId);
    coredumpConfig.put(ncId, nodeCfg.getString(NodeProperties.Option.CORE_DUMP_DIR));
    transactionLogDirs.put(ncId, nodeCfg.getString(NodeProperties.Option.TXN_LOG_DIR));
    int partitionId = nodeCfg.getInt(NodeProperties.Option.STARTING_PARTITION_ID);
    if (partitionId != -1) {
        uniquePartitionId.setValue(partitionId);
    } else {
        configManager.set(ncId, NodeProperties.Option.STARTING_PARTITION_ID, uniquePartitionId.getValue());
    }
    // Now we create an array of ClusterPartitions for all the partitions
    // on this NC.
    String[] iodevices = nodeCfg.getStringArray(IODEVICES);
    String storageSubdir = nodeCfg.getString(STORAGE_SUBDIR);
    String[] nodeStores = new String[iodevices.length];
    ClusterPartition[] nodePartitions = new ClusterPartition[iodevices.length];
    for (int i = 0; i < nodePartitions.length; i++) {
        // Construct final storage path from iodevice dir + storage subdirs
        nodeStores[i] = iodevices[i] + File.separator + storageSubdir;
        // Create ClusterPartition instances for this NC.
        ClusterPartition partition = new ClusterPartition(uniquePartitionId.getAndIncrement(), ncId, i);
        ClusterPartition orig = clusterPartitions.put(partition.getPartitionId(), partition);
        if (orig != null) {
            throw AsterixException.create(ErrorCode.DUPLICATE_PARTITION_ID, partition.getPartitionId(), ncId,
                    orig.getNodeId());
        }
        nodePartitions[i] = partition;
    }
    stores.put(ncId, nodeStores);
    nodePartitionsMap.put(ncId, nodePartitions);
}
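To isolate the option-access pattern above, here is a minimal sketch (hypothetical helper, not asterixdb code) that resolves per-node storage directories through the same typed IApplicationConfig getters; the two IOption parameters stand in for the statically imported IODEVICES and STORAGE_SUBDIR constants used in configureNc, and nodeCfg is assumed to be the per-node view returned by cfg.getNCEffectiveConfig(ncId).

import java.io.File;

import org.apache.hyracks.api.config.IApplicationConfig;
import org.apache.hyracks.api.config.IOption;

// Hypothetical helper (a sketch, not part of PropertiesAccessor): builds the final
// storage paths for one NC the same way configureNc does, using only the
// getStringArray/getString accessors shown in the snippet above.
static String[] resolveNodeStores(IApplicationConfig nodeCfg, IOption iodevicesOpt, IOption storageSubdirOpt) {
    String[] iodevices = nodeCfg.getStringArray(iodevicesOpt);
    String storageSubdir = nodeCfg.getString(storageSubdirOpt);
    String[] nodeStores = new String[iodevices.length];
    for (int i = 0; i < iodevices.length; i++) {
        // Final storage path = iodevice root + platform separator + storage subdirectory.
        nodeStores[i] = iodevices[i] + File.separator + storageSubdir;
    }
    return nodeStores;
}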
Use of org.apache.hyracks.api.config.IApplicationConfig in project asterixdb by apache.
From the class RegisterNodeWork, method doRun.
@Override
protected void doRun() throws Exception {
    String id = reg.getNodeId();
    IIPCHandle ncIPCHandle = ccs.getClusterIPC().getHandle(reg.getNodeControllerAddress());
    CCNCFunctions.NodeRegistrationResult result;
    Map<IOption, Object> ncConfiguration = new HashMap<>();
    try {
        INodeController nodeController = new NodeControllerRemoteProxy(ncIPCHandle);
        NodeControllerState state = new NodeControllerState(nodeController, reg);
        INodeManager nodeManager = ccs.getNodeManager();
        nodeManager.addNode(id, state);
        IApplicationConfig cfg = state.getNCConfig().getConfigManager().getNodeEffectiveConfig(id);
        for (IOption option : cfg.getOptions()) {
            ncConfiguration.put(option, cfg.get(option));
        }
        LOGGER.log(Level.INFO, "Registered INodeController: id = " + id);
        NodeParameters params = new NodeParameters();
        params.setClusterControllerInfo(ccs.getClusterControllerInfo());
        params.setDistributedState(ccs.getContext().getDistributedState());
        params.setHeartbeatPeriod(ccs.getCCConfig().getHeartbeatPeriod());
        params.setProfileDumpPeriod(ccs.getCCConfig().getProfileDumpPeriod());
        result = new CCNCFunctions.NodeRegistrationResult(params, null);
    } catch (Exception e) {
        result = new CCNCFunctions.NodeRegistrationResult(null, e);
    }
    ncIPCHandle.send(-1, result, null);
    ccs.getContext().notifyNodeJoin(id, ncConfiguration);
}
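The registration path above copies every effective option into a plain map before the node join is broadcast. A minimal sketch of that snapshot step in isolation (hypothetical utility, not asterixdb code), assuming only the getOptions()/get(IOption) accessors used in doRun:

import java.util.HashMap;
import java.util.Map;

import org.apache.hyracks.api.config.IApplicationConfig;
import org.apache.hyracks.api.config.IOption;

// Hypothetical utility (a sketch): captures every option exposed by an IApplicationConfig
// into a Map, mirroring the loop in doRun that builds ncConfiguration before
// notifyNodeJoin is invoked.
final class ConfigSnapshot {
    private ConfigSnapshot() {
    }

    static Map<IOption, Object> snapshot(IApplicationConfig cfg) {
        Map<IOption, Object> values = new HashMap<>();
        for (IOption option : cfg.getOptions()) {
            values.put(option, cfg.get(option));
        }
        return values;
    }
}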