Usage example of org.apache.hadoop.ozone.client.io.OzoneInputStream from the Apache Ozone project.
Class TestCloseContainerHandlingByClient, method testMultiBlockWrites3.
@Test
public void testMultiBlockWrites3() throws Exception {
  String keyName = getKeyName();
  // Key sized to exactly four blocks so the client preallocates 4 block streams.
  int keyLen = 4 * blockSize;
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, keyLen);
  KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
  // With the initial size provided, it should have preallocated 4 blocks.
  Assert.assertEquals(4, keyOutputStream.getStreamEntries().size());
  // Write 3 blocks plus one extra chunk of data.
  byte[] writtenData =
      ContainerTestHelper.getFixedLengthString(keyString, keyLen).getBytes(UTF_8);
  byte[] data = Arrays.copyOfRange(writtenData, 0, 3 * blockSize + chunkSize);
  Assert.assertEquals(data.length, 3 * blockSize + chunkSize);
  key.write(data);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  // Build the lookup args before closing the containers so the key can be
  // re-read from OM afterwards.
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(THREE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  waitForContainerClose(key);
  // Write the remaining data. The write fails against the closed container and
  // a new block is allocated; this completes keyLen bytes written to the key.
  data = Arrays.copyOfRange(writtenData, 3 * blockSize + chunkSize, keyLen);
  key.write(data);
  key.close();
  // Read the key from OM again and match the length and the data.
  OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  List<OmKeyLocationInfo> keyLocationInfos =
      keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
  OzoneVolume volume = objectStore.getVolume(volumeName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  byte[] readData = new byte[keyLen];
  // BUG FIX: the original ignored the return value of read() — which may
  // deliver fewer than keyLen bytes — and never closed the stream. Loop until
  // the whole key is consumed and close via try-with-resources.
  try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
    int off = 0;
    while (off < keyLen) {
      int n = inputStream.read(readData, off, keyLen - off);
      if (n < 0) {
        break;
      }
      off += n;
    }
    Assert.assertEquals(keyLen, off);
  }
  Assert.assertArrayEquals(writtenData, readData);
  // Though we have written only block initially, the close will hit
  // closeContainerException and remaining data in the chunkOutputStream
  // buffer will be copied into a different allocated block and will be
  // committed.
  long length = 0;
  for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
    length += locationInfo.getLength();
  }
  Assert.assertEquals(4 * blockSize, length);
}
Usage example of org.apache.hadoop.ozone.client.io.OzoneInputStream from the Apache Ozone project.
Class TestOzoneFSWithObjectStoreCreate, method createKey.
/**
 * Creates a key via the object-store API, then verifies its first
 * {@code length} bytes both through the object-store read path and through
 * the o3fs filesystem view.
 *
 * @param ozoneBucket bucket to create the key in
 * @param key key name (also used as the o3fs path)
 * @param length number of bytes to verify on read-back
 * @param input payload; written in full plus its first 10 bytes again
 * @throws Exception on any client/filesystem failure
 */
private void createKey(OzoneBucket ozoneBucket, String key, int length, byte[] input) throws Exception {
  // Write the payload twice: the whole array followed by its first 10 bytes.
  // Only the first `length` bytes are verified below.
  try (OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, length)) {
    ozoneOutputStream.write(input);
    ozoneOutputStream.write(input, 0, 10);
  }
  String inputString = new String(input, UTF_8);
  // Read the key back through the object-store API. read() may return short
  // counts, so loop until the buffer is full or EOF; the original ignored the
  // return value, which can make the comparison flaky.
  byte[] read = new byte[length];
  try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key)) {
    int off = 0;
    while (off < length) {
      int n = ozoneInputStream.read(read, off, length - off);
      if (n < 0) {
        break;
      }
      off += n;
    }
  }
  Assert.assertEquals(inputString, new String(read, UTF_8));
  // Read the same key through the o3fs filesystem view.
  // BUG FIX: the original re-closed ozoneInputStream here and leaked
  // fsDataInputStream; try-with-resources now closes the correct stream.
  read = new byte[length];
  try (FSDataInputStream fsDataInputStream = o3fs.open(new Path(key))) {
    int off = 0;
    while (off < length) {
      int n = fsDataInputStream.read(read, off, length - off);
      if (n < 0) {
        break;
      }
      off += n;
    }
  }
  Assert.assertEquals(inputString, new String(read, UTF_8));
}
Aggregations