Example usage of org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration in the nd4j project (deeplearning4j): class WorkspaceProviderTests, method testUnboundedLoop2.
/**
 * Checks OVER_TIME learning: the workspace must stay uninitialized for the first
 * {@code cyclesBeforeInitialization} cycles, then size itself from the observed
 * allocation (100 elements) with the 4.0 overallocation coefficient applied.
 *
 * @throws Exception never; declared for test-harness uniformity
 */
@Test
public void testUnboundedLoop2() throws Exception {
    WorkspaceConfiguration configuration = WorkspaceConfiguration.builder()
            .initialSize(0)
            .policyReset(ResetPolicy.ENDOFBUFFER_REACHED)
            .policyAllocation(AllocationPolicy.OVERALLOCATE)
            .overallocationLimit(4.0)
            .policyLearning(LearningPolicy.OVER_TIME)
            .cyclesBeforeInitialization(5)
            .build();

    Nd4jWorkspace ws1 = (Nd4jWorkspace) Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(configuration, "ITER");

    long requiredMemory = 100 * Nd4j.sizeOfDataType();
    // Hoisted: the original computed (long) (requiredMemory * 1.3) twice.
    // shiftedSize = 1.3x the raw requirement, rounded up to the next 8-byte boundary
    // (note: if the value is already 8-aligned this still adds a full 8 bytes,
    // matching the workspace's internal sizing — do not "fix" without checking).
    long overallocated = (long) (requiredMemory * 1.3);
    long shiftedSize = overallocated + (8 - (overallocated % 8));

    for (int x = 0; x < 100; x++) {
        try (Nd4jWorkspace wsI = (Nd4jWorkspace) Nd4j.getWorkspaceManager().getWorkspaceForCurrentThread(configuration, "ITER").notifyScopeEntered()) {
            INDArray array = Nd4j.create(100);
        }

        // only checking after workspace is initialized; x == 4 is the cycle on
        // which initialization itself happens, so it is deliberately not asserted
        if (x > 4) {
            assertEquals(shiftedSize, ws1.getInitialBlockSize());
            assertEquals(5 * shiftedSize, ws1.getCurrentSize());
        } else if (x < 4) {
            // we're making sure we're not initialize early
            assertEquals("Failed on iteration " + x, 0, ws1.getCurrentSize());
        }
    }

    // maximum allocation amount is 100 elements during learning, and additional coefficient is 4.0. result is workspace of 500 elements
    assertEquals(5 * shiftedSize, ws1.getCurrentSize());
    assertNull(Nd4j.getMemoryManager().getCurrentWorkspace());
}
Example usage of org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration in the nd4j project (deeplearning4j): class WorkspaceProviderTests, method testUnboundedLoop1.
@Test
public void testUnboundedLoop1() throws Exception {
    // STRICT workspace pre-sized for exactly 100 iterations of 100 elements each;
    // ENDOFBUFFER_REACHED means the offset keeps growing until the buffer is full.
    final WorkspaceConfiguration config = WorkspaceConfiguration.builder()
            .initialSize(100 * 100 * Nd4j.sizeOfDataType())
            .policyReset(ResetPolicy.ENDOFBUFFER_REACHED)
            .policyAllocation(AllocationPolicy.STRICT)
            .build();

    for (int iteration = 0; iteration < 100; iteration++) {
        try (Nd4jWorkspace scoped = (Nd4jWorkspace) Nd4j.getWorkspaceManager()
                .getWorkspaceForCurrentThread(config, "ITER").notifyScopeEntered()) {
            INDArray array = Nd4j.create(100);
        }

        // host offset must advance by exactly one allocation per iteration
        Nd4jWorkspace workspace = (Nd4jWorkspace) Nd4j.getWorkspaceManager()
                .getWorkspaceForCurrentThread(config, "ITER");
        assertEquals((iteration + 1) * 100 * Nd4j.sizeOfDataType(), workspace.getHostOffset());
    }

    Nd4jWorkspace workspace = (Nd4jWorkspace) Nd4j.getWorkspaceManager()
            .getWorkspaceForCurrentThread(config, "ITER");
    assertEquals(100 * 100 * Nd4j.sizeOfDataType(), workspace.getHostOffset());

    // just to trigger reset
    workspace.notifyScopeEntered();
    // confirming reset
    // assertEquals(0, workspace.getHostOffset());
    workspace.notifyScopeLeft();

    assertNull(Nd4j.getMemoryManager().getCurrentWorkspace());
}
Example usage of org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration in the nd4j project (deeplearning4j): class DoubleDataBufferTest, method testReallocationWorkspace.
@Test
public void testReallocationWorkspace() {
    // 10 MB STRICT workspace, no learning — fixed size for the whole test
    WorkspaceConfiguration cfg = WorkspaceConfiguration.builder()
            .initialSize(10 * 1024L * 1024L)
            .policyAllocation(AllocationPolicy.STRICT)
            .policyLearning(LearningPolicy.NONE)
            .build();

    MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(cfg, "SOME_ID");

    DataBuffer buffer = Nd4j.createBuffer(new double[] { 1, 2, 3, 4 });
    double[] snapshot = buffer.asDouble();

    // buffer created inside an active workspace must be attached to it
    assertTrue(buffer.isAttached());
    assertEquals(4, buffer.capacity());

    buffer.reallocate(6);
    assertEquals(6, buffer.capacity());
    // contents must survive an in-workspace reallocation
    assertArrayEquals(snapshot, buffer.asDouble(), 1e-1);

    ws.close();
}
Example usage of org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration in the nd4j project (deeplearning4j): class BasicWorkspaceTests, method testAllocation4.
/**
 * Verifies SpillPolicy.FAIL on a STRICT, size-capped workspace: small allocations
 * advance the host offset normally, while an allocation that cannot fit throws
 * {@link ND4JIllegalStateException} without disturbing the offset.
 *
 * @throws Exception never; declared for test-harness uniformity
 */
@Test
public void testAllocation4() throws Exception {
    WorkspaceConfiguration failConfig = WorkspaceConfiguration.builder()
            .initialSize(1024 * 1024)
            .maxSize(1024 * 1024)
            .overallocationLimit(0.1)
            .policyAllocation(AllocationPolicy.STRICT)
            .policyLearning(LearningPolicy.FIRST_LOOP)
            .policyMirroring(MirroringPolicy.FULL)
            .policySpill(SpillPolicy.FAIL)
            .build();

    Nd4jWorkspace workspace = (Nd4jWorkspace) Nd4j.getWorkspaceManager().createNewWorkspace(failConfig);
    Nd4j.getMemoryManager().setCurrentWorkspace(workspace);
    assertNotEquals(null, Nd4j.getMemoryManager().getCurrentWorkspace());
    assertEquals(0, workspace.getHostOffset());

    INDArray array = Nd4j.create(new int[] { 1, 5 }, 'c');

    // checking if allocation actually happened (5 elements, 8-byte aligned)
    long reqMem = 5 * Nd4j.sizeOfDataType();
    assertEquals(reqMem + reqMem % 8, workspace.getHostOffset());

    // an allocation far beyond maxSize must be rejected under SpillPolicy.FAIL;
    // AssertionError replaces the old assertTrue(false) anti-pattern and carries a message
    try {
        INDArray array2 = Nd4j.create(10000000);
        throw new AssertionError("Expected ND4JIllegalStateException: oversized allocation must fail with SpillPolicy.FAIL");
    } catch (ND4JIllegalStateException e) {
        // expected — oversized allocation rejected
    }

    // the failed allocation must not have advanced the workspace offset
    assertEquals(reqMem + reqMem % 8, workspace.getHostOffset());

    // a subsequent small allocation still succeeds and advances the offset normally
    INDArray array2 = Nd4j.create(new int[] { 1, 5 }, 'c');
    assertEquals((reqMem + reqMem % 8) * 2, workspace.getHostOffset());
}
Example usage of org.nd4j.linalg.api.memory.conf.WorkspaceConfiguration in the nd4j project (deeplearning4j): class BasicWorkspaceTests, method testMmap1.
@Test
public void testMmap1() throws Exception {
    // we don't support MMAP on cuda yet
    if (Nd4j.getExecutioner().getClass().getName().toLowerCase().contains("cuda"))
        return;

    WorkspaceConfiguration mmapConfig = WorkspaceConfiguration.builder()
            .initialSize(1000000)
            .policyLocation(LocationPolicy.MMAP)
            .build();

    MemoryWorkspace ws = Nd4j.getWorkspaceManager().getAndActivateWorkspace(mmapConfig, "M2");

    // write 100 x 10.0 into the memory-mapped workspace and check the sum
    INDArray written = Nd4j.create(100);
    written.assign(10f);
    assertEquals(1000f, written.sumNumber().floatValue(), 1e-5);
    ws.close();

    // re-enter the same workspace: an uninitialized array over the mmap-backed
    // region should still observe the previously written contents
    ws.notifyScopeEntered();
    INDArray reread = Nd4j.createUninitialized(100);
    assertEquals(1000f, reread.sumNumber().floatValue(), 1e-5);
    ws.close();
}
End of aggregated WorkspaceConfiguration usage examples.