Use of org.apache.asterix.event.schema.cluster.Cluster in the Apache asterixdb project.
Class AbstractLSMBaseExperimentBuilder, method doBuild:
/**
 * Assembles the full action sequence for one LSM experiment run and installs it as
 * the body of {@code e}: (re)create the Asterix instance, run DDL and ingestion AQL,
 * optionally start I/O stat collection, generate data, collect results and logs, and
 * finally tear the instance down.
 *
 * @param e the experiment whose body is populated with the assembled actions
 * @throws Exception if configuration parsing or action construction fails
 */
@Override
protected void doBuild(Experiment e) throws Exception {
    SequentialActionList execs = new SequentialActionList();
    String clusterConfigPath = localExperimentRoot.resolve(LSMExperimentConstants.CONFIG_DIR)
            .resolve(clusterConfigFileName).toString();
    String asterixConfigPath = localExperimentRoot.resolve(LSMExperimentConstants.CONFIG_DIR)
            .resolve(LSMExperimentConstants.ASTERIX_CONFIGURATION).toString();
    // Create a fresh instance: stop/delete any leftover instance of the same name first.
    execs.add(new StopAsterixManagixAction(managixHomePath, ASTERIX_INSTANCE_NAME));
    execs.add(new DeleteAsterixManagixAction(managixHomePath, ASTERIX_INSTANCE_NAME));
    execs.add(new SleepAction(30000));
    execs.add(new CreateAsterixManagixAction(managixHomePath, ASTERIX_INSTANCE_NAME, clusterConfigPath,
            asterixConfigPath));
    // Run DDL statements once the instance has had time to come up.
    execs.add(new SleepAction(15000));
    // TODO: implement retry handler
    execs.add(new RunAQLFileAction(httpClient, restHost, restPort,
            localExperimentRoot.resolve(LSMExperimentConstants.AQL_DIR).resolve(LSMExperimentConstants.BASE_TYPES)));
    doBuildDDL(execs);
    execs.add(new RunAQLFileAction(httpClient, restHost, restPort, localExperimentRoot
            .resolve(LSMExperimentConstants.AQL_DIR).resolve(LSMExperimentConstants.BASE_DIR)
            .resolve(ingestFileName)));
    // Derive the set of NC hosts from the datagen "host:port" pairs.
    Map<String, List<String>> dgenPairs =
            readDatagenPairs(localExperimentRoot.resolve(LSMExperimentConstants.DGEN_DIR).resolve(dgenFileName));
    final Set<String> ncHosts = new HashSet<>();
    for (List<String> ncHostList : dgenPairs.values()) {
        for (String ncHost : ncHostList) {
            ncHosts.add(ncHost.split(":")[0]);
        }
    }
    // Optionally start a detached "sar" I/O/CPU sampler on every NC host.
    if (statFile != null) {
        ParallelActionSet ioCountActions = new ParallelActionSet();
        for (String ncHost : ncHosts) {
            ioCountActions.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
                @Override
                protected String getCommand() {
                    return "screen -d -m sh -c \"sar -b -u 1 > " + statFile + "\"";
                }
            });
        }
        execs.add(ioCountActions);
    }
    // FIX: lsAction was referenced below without ever being declared — compile error.
    // NOTE(review): lsAction is populated but never added to execs; confirm whether it
    // should be enqueued (e.g. before data generation) or dropped entirely.
    SequentialActionList lsAction = new SequentialActionList();
    SequentialActionList postLSAction = new SequentialActionList();
    // Parse the cluster config to discover the storage roots on each NC.
    File file = new File(clusterConfigPath);
    JAXBContext ctx = JAXBContext.newInstance(Cluster.class);
    Unmarshaller unmarshaller = ctx.createUnmarshaller();
    final Cluster cluster = (Cluster) unmarshaller.unmarshal(file);
    String[] storageRoots = cluster.getIodevices().split(",");
    for (String ncHost : ncHosts) {
        for (final String sRoot : storageRoots) {
            lsAction.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
                @Override
                protected String getCommand() {
                    return "ls -Rl " + sRoot;
                }
            });
            postLSAction.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
                @Override
                protected String getCommand() {
                    return "ls -Rl " + sRoot;
                }
            });
        }
    }
    // Main experiment: data generation.
    doBuildDataGen(execs, dgenPairs);
    // if (statFile != null) {
    //     ParallelActionSet ioCountKillActions = new ParallelActionSet();
    //     for (String ncHost : ncHosts) {
    //         ioCountKillActions.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
    //             @Override
    //             protected String getCommand() {
    //                 return "screen -X -S `screen -list | grep Detached | awk '{print $1}'` quit";
    //             }
    //         });
    //     }
    //     execs.add(ioCountKillActions);
    // }
    execs.add(new SleepAction(10000));
    // Optionally run a count query to validate ingested record counts.
    if (countFileName != null) {
        execs.add(new RunAQLFileAction(httpClient, restHost, restPort,
                localExperimentRoot.resolve(LSMExperimentConstants.AQL_DIR).resolve(countFileName)));
    }
    execs.add(postLSAction);
    doPost(execs);
    // Tear down: kill NC drivers in parallel, then stop the managed instance.
    ParallelActionSet killCmds = new ParallelActionSet();
    for (String ncHost : ncHosts) {
        killCmds.add(new RemoteAsterixDriverKill(ncHost, username, sshKeyLocation));
    }
    //killCmds.add(new RemoteAsterixDriverKill(restHost, username, sshKeyLocation));
    execs.add(killCmds);
    execs.add(new StopAsterixManagixAction(managixHomePath, ASTERIX_INSTANCE_NAME));
    // Copy the sar stat files into the cluster log dir so LogAsterixManagixAction picks them up.
    if (statFile != null) {
        ParallelActionSet collectIOActions = new ParallelActionSet();
        for (String ncHost : ncHosts) {
            collectIOActions.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
                @Override
                protected String getCommand() {
                    return "cp " + statFile + " " + cluster.getLogDir();
                }
            });
        }
        execs.add(collectIOActions);
    }
    //collect profile information
    // if (ExperimentProfiler.PROFILE_MODE) {
    //     if (!SpatialIndexProfiler.PROFILE_HOME_DIR.contentEquals(cluster.getLogDir())) {
    //         ParallelActionSet collectProfileInfo = new ParallelActionSet();
    //         for (String ncHost : ncHosts) {
    //             collectProfileInfo.add(new AbstractRemoteExecutableAction(ncHost, username, sshKeyLocation) {
    //                 @Override
    //                 protected String getCommand() {
    //                     return "mv " + SpatialIndexProfiler.PROFILE_HOME_DIR + "*.txt " + cluster.getLogDir();
    //                 }
    //             });
    //         }
    //         execs.add(collectProfileInfo);
    //     }
    // }
    execs.add(new LogAsterixManagixAction(managixHomePath, ASTERIX_INSTANCE_NAME,
            localExperimentRoot.resolve(LSMExperimentConstants.LOG_DIR + "-" + logDirSuffix)
                    .resolve(getName()).toString()));
    // Spatial experiments 2 and 5 additionally scp the query-generator result files back.
    if (getName().contains("SpatialIndexExperiment2") || getName().contains("SpatialIndexExperiment5")) {
        //get query result file
        SequentialActionList getQueryResultFileActions = new SequentialActionList();
        final String queryResultFilePath = openStreetMapFilePath.substring(0,
                openStreetMapFilePath.lastIndexOf(File.separator)) + File.separator + "QueryGenResult-*.txt";
        for (final String qgenHost : dgenPairs.keySet()) {
            getQueryResultFileActions.add(new AbstractRemoteExecutableAction(restHost, username, sshKeyLocation) {
                @Override
                protected String getCommand() {
                    return "scp " + username + "@" + qgenHost + ":" + queryResultFilePath + " "
                            + localExperimentRoot.resolve(LSMExperimentConstants.LOG_DIR + "-" + logDirSuffix)
                                    .resolve(getName()).toString();
                }
            });
        }
        execs.add(getQueryResultFileActions);
    }
    e.addBody(execs);
}
Use of org.apache.asterix.event.schema.cluster.Cluster in the Apache asterixdb project.
Class ReplicationProperties, method getDataReplicationPort:
/**
 * Resolves the data replication port for {@code nodeId}: the node's own
 * configured port wins, then the cluster-wide high-availability replication
 * port, and finally the compile-time default when the node is unknown.
 *
 * @param nodeId id of the node to look up
 * @return the replication port to use for that node
 */
public int getDataReplicationPort(String nodeId) {
    final Cluster cluster = ClusterProperties.INSTANCE.getCluster();
    final Node node = ClusterProperties.INSTANCE.getNodeById(nodeId);
    if (node == null) {
        // Unknown node: fall back to the built-in default.
        return REPLICATION_DATAPORT_DEFAULT;
    }
    if (node.getReplicationPort() != null) {
        // Per-node override takes precedence.
        return node.getReplicationPort().intValue();
    }
    // Otherwise use the cluster-wide replication setting.
    return cluster.getHighAvailability().getDataReplication().getReplicationPort().intValue();
}
Use of org.apache.asterix.event.schema.cluster.Cluster in the Apache asterixdb project.
Class VerificationUtil, method getAsterixRuntimeState:
/**
 * Runs the verification script against the CC and every NC of the given instance and
 * parses its output into an {@link AsterixRuntimeState}.
 * <p>
 * Each output line is expected to look like {@code TYPE:HOST:NAME:PID}; a line that
 * fails to parse is treated as a process that is not running.
 *
 * @param instance the Asterix instance whose processes are probed
 * @return the observed runtime state (processes, failed NCs, CC liveness)
 * @throws Exception if the verification script cannot be executed
 */
public static AsterixRuntimeState getAsterixRuntimeState(AsterixInstance instance) throws Exception {
    Cluster cluster = instance.getCluster();
    List<String> args = new ArrayList<>();
    args.add(instance.getName());
    args.add(instance.getCluster().getMasterNode().getClusterIp());
    for (Node node : cluster.getNode()) {
        args.add(node.getClusterIp());
        args.add(instance.getName() + "_" + node.getId());
    }
    // Give the processes a moment to settle before probing them.
    Thread.sleep(2000);
    String output = AsterixEventServiceUtil.executeLocalScript(VERIFY_SCRIPT_PATH, args);
    boolean ccRunning = true;
    List<String> failedNCs = new ArrayList<>();
    List<ProcessInfo> processes = new ArrayList<>();
    for (String line : output.split("\n")) {
        String[] infoFields = line.split(":");
        try {
            int pid = Integer.parseInt(infoFields[3]);
            String nodeid;
            if (infoFields[0].equals("NC")) {
                // NC process name is "<instance>_<nodeId>"; extract the node id.
                nodeid = infoFields[2].split("_")[1];
            } else {
                nodeid = instance.getCluster().getMasterNode().getId();
            }
            processes.add(new ProcessInfo(infoFields[0], infoFields[1], nodeid, pid));
        } catch (Exception e) {
            // Parse failure means the corresponding process is not running.
            if (infoFields[0].equalsIgnoreCase("CC")) {
                ccRunning = false;
            } else if (infoFields.length > 1) {
                // FIX: guard infoFields[1] — a line without ':' (e.g. blank or garbage)
                // splits to a single element and previously threw
                // ArrayIndexOutOfBoundsException straight out of this catch block.
                failedNCs.add(infoFields[1]);
            }
        }
    }
    return new AsterixRuntimeState(processes, failedNCs, ccRunning);
}
Use of org.apache.asterix.event.schema.cluster.Cluster in the Apache asterixdb project.
Class EventDriver, method main:
/**
 * Entry point of the event driver. Requires the {@code EVENT_HOME} environment
 * variable to locate the events directory, parses command-line options into an
 * {@code EventConfig}, and — unless this is a dry run — prepares and then cleans
 * up the cluster described by the supplied cluster config path.
 *
 * @param args command-line arguments parsed by args4j
 * @throws Exception if environment setup fails before argument parsing begins
 */
public static void main(String[] args) throws Exception {
    String eventsHome = System.getenv("EVENT_HOME");
    if (eventsHome == null) {
        throw new IllegalStateException("EVENT_HOME is not set");
    }
    eventsDir = eventsHome + File.separator + EventUtil.EVENTS_DIR;
    EventConfig config = new EventConfig();
    CmdLineParser cmdParser = new CmdLineParser(config);
    try {
        cmdParser.parseArgument(args);
        if (config.help) {
            cmdParser.printUsage(System.out);
        }
        if (config.seed > 0) {
            // Seed the randomizer singleton for reproducible runs.
            Randomizer.getInstance(config.seed);
        }
        Cluster cluster = initializeCluster(config.clusterPath);
        if (!config.dryRun) {
            prepare(cluster);
            cleanup(cluster);
        }
    } catch (Exception e) {
        e.printStackTrace();
        cmdParser.printUsage(System.err);
    }
}
Use of org.apache.asterix.event.schema.cluster.Cluster in the Apache asterixdb project.
Class PatternCreator, method getHDFSBackUpAsterixPattern:
/**
 * Builds the event patterns that back up an Asterix instance to HDFS: one
 * "backup" event per cluster node, each carrying a space-separated argument
 * string with the working dir, instance name, iodevices, store, metadata root,
 * txn-log dir, backup id, HDFS target, and Hadoop version.
 *
 * @param instance   the instance being backed up
 * @param backupConf backup configuration (HDFS url/version and backup dir)
 * @return the patterns to execute, one per node
 * @throws Exception if the backup/restore configuration fails verification
 */
private Patterns getHDFSBackUpAsterixPattern(AsterixInstance instance, Backup backupConf) throws Exception {
    final Cluster cluster = instance.getCluster();
    final String hdfsUrl = backupConf.getHdfs().getUrl();
    final String hadoopVersion = backupConf.getHdfs().getVersion();
    final String hdfsBackupDir = backupConf.getBackupDir();
    VerificationUtil.verifyBackupRestoreConfiguration(hdfsUrl, hadoopVersion, hdfsBackupDir);
    final String workingDir = cluster.getWorkingDir().getDir();
    // Backup id is the ordinal of this backup for the instance.
    final String backupId = Integer.toString(instance.getBackupInfo().size());
    final String store = cluster.getStore();
    final List<Pattern> patternList = new ArrayList<>();
    for (Node node : cluster.getNode()) {
        final Nodeid nodeid = new Nodeid(new Value(null, node.getId()));
        // A node without its own iodevices inherits the cluster-wide setting.
        final String iodevices = node.getIodevices() == null
                ? instance.getCluster().getIodevices()
                : node.getIodevices();
        final String pargs = workingDir + " " + instance.getName() + " " + iodevices + " " + store + " "
                + StorageConstants.METADATA_ROOT + " " + AsterixEventServiceUtil.TXN_LOG_DIR + " " + backupId
                + " " + hdfsBackupDir + " " + "hdfs" + " " + node.getId() + " " + hdfsUrl + " " + hadoopVersion;
        patternList.add(new Pattern(null, 1, null, new Event("backup", nodeid, pargs)));
    }
    return new Patterns(patternList);
}
Aggregations