Use of org.nd4j.nativeblas.NativeOps in project deeplearning4j by deeplearning4j.
The class BaseStatsListener, method iterationDone:
@Override
public void iterationDone(Model model, int iteration) {
    StatsUpdateConfiguration config = updateConfig;
    ModelInfo modelInfo = getModelInfo(model);
    boolean backpropParamsOnly = backpropParamsOnly(model);
    long currentTime = getTime();
    if (modelInfo.iterCount == 0) {
        modelInfo.initTime = currentTime;
        doInit(model);
    }
    if (config.collectPerformanceStats()) {
        updateExamplesMinibatchesCounts(model);
    }
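    //Report only on iterations that match the configured reporting frequency (skipping iteration 0 when the
    //frequency is greater than 1); otherwise just record the iteration count and return without reporting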
    if (config.reportingFrequency() > 1 && (iteration == 0 || iteration % config.reportingFrequency() != 0)) {
        modelInfo.iterCount = iteration;
        return;
    }
    StatsReport report = getNewStatsReport();
    //TODO support NTP time
    report.reportIDs(getSessionID(model), TYPE_ID, workerID, System.currentTimeMillis());

    //--- Performance and System Stats ---
    if (config.collectPerformanceStats()) {
        //Stats to collect: total runtime, total examples, total minibatches, iterations/second, examples/second
        double examplesPerSecond;
        double minibatchesPerSecond;
        if (modelInfo.iterCount == 0) {
            //Not possible to work out perf/second: first iteration...
            examplesPerSecond = 0.0;
            minibatchesPerSecond = 0.0;
        } else {
            long deltaTimeMS = currentTime - modelInfo.lastReportTime;
            examplesPerSecond = 1000.0 * modelInfo.examplesSinceLastReport / deltaTimeMS;
            minibatchesPerSecond = 1000.0 * modelInfo.minibatchesSinceLastReport / deltaTimeMS;
        }
        long totalRuntimeMS = currentTime - modelInfo.initTime;
        report.reportPerformance(totalRuntimeMS, modelInfo.totalExamples, modelInfo.totalMinibatches,
                        examplesPerSecond, minibatchesPerSecond);
        modelInfo.examplesSinceLastReport = 0;
        modelInfo.minibatchesSinceLastReport = 0;
    }
    if (config.collectMemoryStats()) {
        Runtime runtime = Runtime.getRuntime();
        long jvmTotal = runtime.totalMemory();
        long jvmMax = runtime.maxMemory();

        //Off-heap memory
        long offheapTotal = Pointer.totalBytes();
        long offheapMax = Pointer.maxBytes();

        //GPU
        long[] gpuCurrentBytes = null;
        long[] gpuMaxBytes = null;
        NativeOps nativeOps = NativeOpsHolder.getInstance().getDeviceNativeOps();
        int nDevices = nativeOps.getAvailableDevices();
        if (nDevices > 0) {
            gpuCurrentBytes = new long[nDevices];
            gpuMaxBytes = new long[nDevices];
            for (int i = 0; i < nDevices; i++) {
                try {
                    Pointer p = getDevicePointer(i);
                    if (p == null) {
                        gpuMaxBytes[i] = 0;
                        gpuCurrentBytes[i] = 0;
                    } else {
                        gpuMaxBytes[i] = nativeOps.getDeviceTotalMemory(p);
                        gpuCurrentBytes[i] = gpuMaxBytes[i] - nativeOps.getDeviceFreeMemory(p);
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }
        report.reportMemoryUse(jvmTotal, jvmMax, offheapTotal, offheapMax, gpuCurrentBytes, gpuMaxBytes);
    }
    if (config.collectGarbageCollectionStats()) {
        if (modelInfo.lastReportIteration == -1 || gcBeans == null) {
            //Haven't reported GC stats before...
            gcBeans = ManagementFactory.getGarbageCollectorMXBeans();
            gcStatsAtLastReport = new HashMap<>();
            for (GarbageCollectorMXBean bean : gcBeans) {
                long count = bean.getCollectionCount();
                long timeMs = bean.getCollectionTime();
                gcStatsAtLastReport.put(bean.getName(), new Pair<>(count, timeMs));
            }
        } else {
            for (GarbageCollectorMXBean bean : gcBeans) {
                long count = bean.getCollectionCount();
                long timeMs = bean.getCollectionTime();
                Pair<Long, Long> lastStats = gcStatsAtLastReport.get(bean.getName());
                long deltaGCCount = count - lastStats.getFirst();
                long deltaGCTime = timeMs - lastStats.getSecond();
                lastStats.setFirst(count);
                lastStats.setSecond(timeMs);
                report.reportGarbageCollection(bean.getName(), (int) deltaGCCount, (int) deltaGCTime);
            }
        }
    }
    //--- General ---
    //Always report score
    report.reportScore(model.score());

    if (config.collectLearningRates()) {
        Map<String, Double> lrs = new HashMap<>();
        if (model instanceof MultiLayerNetwork) {
            //Need to append "0_", "1_" etc to param names from layers...
            int layerIdx = 0;
            for (Layer l : ((MultiLayerNetwork) model).getLayers()) {
                NeuralNetConfiguration conf = l.conf();
                Map<String, Double> layerLrs = conf.getLearningRateByParam();
                Set<String> backpropParams = l.paramTable(true).keySet();
                for (Map.Entry<String, Double> entry : layerLrs.entrySet()) {
                    if (!backpropParams.contains(entry.getKey()))
                        continue; //Skip pretrain params
                    lrs.put(layerIdx + "_" + entry.getKey(), entry.getValue());
                }
                layerIdx++;
            }
        } else if (model instanceof ComputationGraph) {
            for (Layer l : ((ComputationGraph) model).getLayers()) {
                //Need to append layer name
                NeuralNetConfiguration conf = l.conf();
                Map<String, Double> layerLrs = conf.getLearningRateByParam();
                String layerName = conf.getLayer().getLayerName();
                Set<String> backpropParams = l.paramTable(true).keySet();
                for (Map.Entry<String, Double> entry : layerLrs.entrySet()) {
                    if (!backpropParams.contains(entry.getKey()))
                        continue; //Skip pretrain params
                    lrs.put(layerName + "_" + entry.getKey(), entry.getValue());
                }
            }
        } else if (model instanceof Layer) {
            Layer l = (Layer) model;
            Map<String, Double> map = l.conf().getLearningRateByParam();
            lrs.putAll(map);
        }
        report.reportLearningRates(lrs);
    }
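    //--- Histograms and summary statistics (mean, stdev, mean magnitude) for parameters, gradients, updates and activations ---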
    if (config.collectHistograms(StatsType.Parameters)) {
        Map<String, Histogram> paramHistograms = getHistograms(model.paramTable(backpropParamsOnly),
                        config.numHistogramBins(StatsType.Parameters));
        report.reportHistograms(StatsType.Parameters, paramHistograms);
    }
    if (config.collectHistograms(StatsType.Gradients)) {
        Map<String, Histogram> gradientHistograms = getHistograms(gradientsPreUpdateMap,
                        config.numHistogramBins(StatsType.Gradients));
        report.reportHistograms(StatsType.Gradients, gradientHistograms);
    }
    if (config.collectHistograms(StatsType.Updates)) {
        Map<String, Histogram> updateHistograms = getHistograms(model.gradient().gradientForVariable(),
                        config.numHistogramBins(StatsType.Updates));
        report.reportHistograms(StatsType.Updates, updateHistograms);
    }
    if (config.collectHistograms(StatsType.Activations)) {
        Map<String, Histogram> activationHistograms = getHistograms(activationsMap,
                        config.numHistogramBins(StatsType.Activations));
        report.reportHistograms(StatsType.Activations, activationHistograms);
    }
    if (config.collectMean(StatsType.Parameters)) {
        Map<String, Double> meanParams = calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.Mean);
        report.reportMean(StatsType.Parameters, meanParams);
    }
    if (config.collectMean(StatsType.Gradients)) {
        Map<String, Double> meanGradients = calculateSummaryStats(gradientsPreUpdateMap, StatType.Mean);
        report.reportMean(StatsType.Gradients, meanGradients);
    }
    if (config.collectMean(StatsType.Updates)) {
        Map<String, Double> meanUpdates = calculateSummaryStats(model.gradient().gradientForVariable(), StatType.Mean);
        report.reportMean(StatsType.Updates, meanUpdates);
    }
    if (config.collectMean(StatsType.Activations)) {
        Map<String, Double> meanActivations = calculateSummaryStats(activationsMap, StatType.Mean);
        report.reportMean(StatsType.Activations, meanActivations);
    }
    if (config.collectStdev(StatsType.Parameters)) {
        Map<String, Double> stdevParams = calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.Stdev);
        report.reportStdev(StatsType.Parameters, stdevParams);
    }
    if (config.collectStdev(StatsType.Gradients)) {
        Map<String, Double> stdevGradient = calculateSummaryStats(gradientsPreUpdateMap, StatType.Stdev);
        report.reportStdev(StatsType.Gradients, stdevGradient);
    }
    if (config.collectStdev(StatsType.Updates)) {
        Map<String, Double> stdevUpdates = calculateSummaryStats(model.gradient().gradientForVariable(), StatType.Stdev);
        report.reportStdev(StatsType.Updates, stdevUpdates);
    }
    if (config.collectStdev(StatsType.Activations)) {
        Map<String, Double> stdevActivations = calculateSummaryStats(activationsMap, StatType.Stdev);
        report.reportStdev(StatsType.Activations, stdevActivations);
    }
    if (config.collectMeanMagnitudes(StatsType.Parameters)) {
        Map<String, Double> meanMagParams = calculateSummaryStats(model.paramTable(backpropParamsOnly), StatType.MeanMagnitude);
        report.reportMeanMagnitudes(StatsType.Parameters, meanMagParams);
    }
    if (config.collectMeanMagnitudes(StatsType.Gradients)) {
        Map<String, Double> meanMagGradients = calculateSummaryStats(gradientsPreUpdateMap, StatType.MeanMagnitude);
        report.reportMeanMagnitudes(StatsType.Gradients, meanMagGradients);
    }
    if (config.collectMeanMagnitudes(StatsType.Updates)) {
        Map<String, Double> meanMagUpdates = calculateSummaryStats(model.gradient().gradientForVariable(), StatType.MeanMagnitude);
        report.reportMeanMagnitudes(StatsType.Updates, meanMagUpdates);
    }
    if (config.collectMeanMagnitudes(StatsType.Activations)) {
        Map<String, Double> meanMagActivations = calculateSummaryStats(activationsMap, StatType.MeanMagnitude);
        report.reportMeanMagnitudes(StatsType.Activations, meanMagActivations);
    }
    long endTime = getTime();
    //Amount of time required to calculate all histograms, means etc.
    report.reportStatsCollectionDurationMS((int) (endTime - currentTime));
    modelInfo.lastReportTime = currentTime;
    modelInfo.lastReportIteration = iteration;
    report.reportIterationCount(iteration);
    this.router.putUpdate(report);
    modelInfo.iterCount = iteration;
    activationsMap = null;
}
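For orientation, a minimal usage sketch (not part of the snippet above): StatsListener, a concrete subclass of BaseStatsListener in the deeplearning4j-ui module, is registered as a training listener so that iterationDone runs after every fit iteration and routes its StatsReport to the supplied storage. The tiny network configuration and the commented-out trainingIterator are illustrative assumptions only.

import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.ui.stats.StatsListener;
import org.deeplearning4j.ui.storage.InMemoryStatsStorage;

public class StatsListenerUsageSketch {
    public static void main(String[] args) {
        //Minimal network; the exact architecture is irrelevant to the listener
        MultiLayerNetwork net = new MultiLayerNetwork(new NeuralNetConfiguration.Builder().list()
                        .layer(0, new OutputLayer.Builder().nIn(4).nOut(3).build()).build());
        net.init();

        //InMemoryStatsStorage acts as the StatsStorageRouter that receives the reports built in iterationDone(...)
        InMemoryStatsStorage statsStorage = new InMemoryStatsStorage();
        net.setListeners(new StatsListener(statsStorage));

        //Each training iteration now triggers iterationDone(model, iteration)
        //net.fit(trainingIterator); //supply a DataSetIterator here for real training
    }
}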
Use of org.nd4j.nativeblas.NativeOps in project deeplearning4j by deeplearning4j.
The class BaseStatsListener, method doInit:
private void doInit(Model model) {
    boolean backpropParamsOnly = backpropParamsOnly(model);
    //TODO support NTP
    long initTime = System.currentTimeMillis();
    StatsInitializationReport initReport = getNewInitializationReport();
    initReport.reportIDs(getSessionID(model), TYPE_ID, workerID, initTime);

    if (initConfig.collectSoftwareInfo()) {
        OperatingSystemMXBean osBean = ManagementFactory.getOperatingSystemMXBean();
        RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
        String arch = osBean.getArch();
        String osName = osBean.getName();
        String jvmName = runtime.getVmName();
        String jvmVersion = System.getProperty("java.version");
        String jvmSpecVersion = runtime.getSpecVersion();
        String nd4jBackendClass = Nd4j.getNDArrayFactory().getClass().getName();
        String nd4jDataTypeName = DataTypeUtil.getDtypeFromContext().name();
        String hostname = System.getenv("COMPUTERNAME");
        if (hostname == null || hostname.isEmpty()) {
            try {
                Process proc = Runtime.getRuntime().exec("hostname");
                try (InputStream stream = proc.getInputStream()) {
                    hostname = IOUtils.toString(stream);
                }
            } catch (Exception e) {
            }
        }
        Properties p = Nd4j.getExecutioner().getEnvironmentInformation();
        Map<String, String> envInfo = new HashMap<>();
        for (Map.Entry<Object, Object> e : p.entrySet()) {
            Object v = e.getValue();
            String value = (v == null ? "" : v.toString());
            envInfo.put(e.getKey().toString(), value);
        }
        initReport.reportSoftwareInfo(arch, osName, jvmName, jvmVersion, jvmSpecVersion, nd4jBackendClass,
                        nd4jDataTypeName, hostname, UIDProvider.getJVMUID(), envInfo);
    }
    if (initConfig.collectHardwareInfo()) {
        int availableProcessors = Runtime.getRuntime().availableProcessors();
        NativeOps nativeOps = NativeOpsHolder.getInstance().getDeviceNativeOps();
        int nDevices = nativeOps.getAvailableDevices();
        long[] deviceTotalMem = null;
        String[] deviceDescription = null; //TODO
        if (nDevices > 0) {
            deviceTotalMem = new long[nDevices];
            deviceDescription = new String[nDevices];
            for (int i = 0; i < nDevices; i++) {
                try {
                    Pointer p = getDevicePointer(i);
                    if (p == null) {
                        deviceTotalMem[i] = 0;
                        deviceDescription[i] = "Device(" + i + ")";
                    } else {
                        deviceTotalMem[i] = nativeOps.getDeviceTotalMemory(p);
                        deviceDescription[i] = nativeOps.getDeviceName(p);
                        if (nDevices > 1) {
                            deviceDescription[i] = deviceDescription[i] + " (" + i + ")";
                        }
                    }
                } catch (Exception e) {
                    log.debug("Error getting device info", e);
                }
            }
        }
        long jvmMaxMemory = Runtime.getRuntime().maxMemory();
        long offheapMaxMemory = Pointer.maxBytes();
        initReport.reportHardwareInfo(availableProcessors, nDevices, jvmMaxMemory, offheapMaxMemory, deviceTotalMem,
                        deviceDescription, UIDProvider.getHardwareUID());
    }
    if (initConfig.collectModelInfo()) {
        String jsonConf;
        int numLayers;
        int numParams;
        if (model instanceof MultiLayerNetwork) {
            MultiLayerNetwork net = ((MultiLayerNetwork) model);
            jsonConf = net.getLayerWiseConfigurations().toJson();
            numLayers = net.getnLayers();
            numParams = net.numParams();
        } else if (model instanceof ComputationGraph) {
            ComputationGraph cg = ((ComputationGraph) model);
            jsonConf = cg.getConfiguration().toJson();
            numLayers = cg.getNumLayers();
            numParams = cg.numParams();
        } else if (model instanceof Layer) {
            Layer l = (Layer) model;
            jsonConf = l.conf().toJson();
            numLayers = 1;
            numParams = l.numParams();
        } else {
            throw new RuntimeException("Invalid model: Expected MultiLayerNetwork or ComputationGraph. Got: "
                            + (model == null ? null : model.getClass()));
        }
        Map<String, INDArray> paramMap = model.paramTable(backpropParamsOnly);
        String[] paramNames = new String[paramMap.size()];
        int i = 0;
        for (String s : paramMap.keySet()) {
            //Assuming sensible iteration order - LinkedHashMaps are used in MLN/CG for example
            paramNames[i++] = s;
        }
        initReport.reportModelInfo(model.getClass().getName(), jsonConf, paramNames, numLayers, numParams);
    }

    StorageMetaData meta = getNewStorageMetaData(initTime, getSessionID(model), workerID);
    router.putStorageMetaData(meta);
    //TODO error handling
    router.putStaticInfo(initReport);
}
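For context, a hedged standalone sketch of the NativeOps memory queries used in both snippets above (getAvailableDevices, getDeviceTotalMemory, getDeviceFreeMemory, getDeviceName). The getDevicePointer helper below is an assumption standing in for BaseStatsListener's private helper of the same name: it reflectively wraps the device index in the CUDA backend's CudaPointer class and returns null when that class is unavailable (e.g. on a CPU-only backend).

import org.bytedeco.javacpp.Pointer;
import org.nd4j.nativeblas.NativeOps;
import org.nd4j.nativeblas.NativeOpsHolder;

public class DeviceMemoryProbe {
    public static void main(String[] args) {
        NativeOps nativeOps = NativeOpsHolder.getInstance().getDeviceNativeOps();
        int nDevices = nativeOps.getAvailableDevices();
        System.out.println("Devices: " + nDevices + ", off-heap max bytes: " + Pointer.maxBytes());
        for (int i = 0; i < nDevices; i++) {
            Pointer p = getDevicePointer(i);
            if (p == null) {
                continue; //No per-device pointer available (CPU backend)
            }
            long totalBytes = nativeOps.getDeviceTotalMemory(p);
            long usedBytes = totalBytes - nativeOps.getDeviceFreeMemory(p);
            System.out.println(nativeOps.getDeviceName(p) + ": " + usedBytes + " / " + totalBytes + " bytes in use");
        }
    }

    //Assumption: mirrors the listener's reflective lookup of the CUDA backend's device pointer class
    private static Pointer getDevicePointer(int device) {
        try {
            Class<?> c = Class.forName("org.nd4j.jita.allocator.pointers.CudaPointer");
            return (Pointer) c.getConstructor(long.class).newInstance((long) device);
        } catch (Throwable t) {
            return null; //CUDA classes not on the classpath
        }
    }
}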