Use of org.apache.hadoop.mapreduce.checkpoint.CheckpointService.CheckpointWriteChannel in the Apache Hadoop project.
From the class TestFSCheckpointService, method checkpointCreate. The helper writes random slices of a ByteBuffer through a CheckpointWriteChannel and verifies the commit semantics (close, then rename of the temporary file) against a mocked FileSystem.
public void checkpointCreate(ByteBuffer b) throws Exception {
  final int WRITES = 128;
  // Mock the FileSystem; everything written to the checkpoint stream is
  // captured in an in-memory DataOutputBuffer.
  FileSystem fs = mock(FileSystem.class);
  DataOutputBuffer dob = new DataOutputBuffer();
  FSDataOutputStream hdfs = spy(new FSDataOutputStream(dob, null));
  // Second buffer, backed by an array, records the expected bytes for the
  // final verification.
  @SuppressWarnings("resource")
  DataOutputBuffer verif = new DataOutputBuffer();
  when(fs.create(isA(Path.class), eq((short) 1))).thenReturn(hdfs);
  when(fs.rename(isA(Path.class), isA(Path.class))).thenReturn(true);

  Path base = new Path("/chk");
  Path finalLoc = new Path("/chk/checkpoint_chk0");
  Path tmp = FSCheckpointService.tmpfile(finalLoc);

  FSCheckpointService chk = new FSCheckpointService(
      fs, base, new SimpleNamingService("chk0"), (short) 1);
  CheckpointWriteChannel out = chk.create();

  // Write WRITES random slices through the channel. BUFSIZE is a field on
  // the test class (1024 in the Hadoop source).
  Random r = new Random();
  final byte[] randBytes = new byte[BUFSIZE];
  for (int i = 0; i < WRITES; ++i) {
    r.nextBytes(randBytes);
    int s = r.nextInt(BUFSIZE - 1);
    int e = r.nextInt(BUFSIZE - s) + 1;
    verif.write(randBytes, s, e);
    b.clear();
    b.put(randBytes).flip();
    b.position(s).limit(b.position() + e);
    out.write(b);
  }
  // The checkpoint must not be moved to its final location before commit.
  verify(fs, never()).rename(any(Path.class), eq(finalLoc));
  CheckpointID cid = chk.commit(out);
  // Commit closes the stream and renames the temporary file into place.
  verify(hdfs).close();
  verify(fs).rename(eq(tmp), eq(finalLoc));
  // The bytes that reached the mocked stream must match the expected bytes.
  assertArrayEquals(
      Arrays.copyOfRange(verif.getData(), 0, verif.getLength()),
      Arrays.copyOfRange(dob.getData(), 0, dob.getLength()));
}
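Because checkpointCreate takes the ByteBuffer as a parameter, it can be driven with both heap and direct buffers. A minimal driver sketch, assuming JUnit 4 and the same test class (BUFSIZE and the imports from the snippet above); the method names here are illustrative, not necessarily those in the Hadoop source:

@Test
public void testCheckpointCreate() throws Exception {
  // Heap-backed buffer: put/flip/position/limit operate on the array copy.
  checkpointCreate(ByteBuffer.allocate(BUFSIZE));
}

@Test
public void testCheckpointCreateDirect() throws Exception {
  // Direct buffer: exercises the channel's handling of off-heap memory.
  checkpointCreate(ByteBuffer.allocateDirect(BUFSIZE));
}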