Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestDatanodeHddsVolumeFailureDetection, method testHddsVolumeFailureOnDbFileCorrupt.
@Test
public void testHddsVolumeFailureOnDbFileCorrupt() throws Exception {
    // write a file, which creates container1
    String keyName = UUID.randomUUID().toString();
    String value = "sample value";
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    // close container1
    HddsDatanodeService dn = datanodes.get(0);
    OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
    Container c1 = oc.getContainerSet().getContainer(1);
    c1.close();
    // create container2; container1 is evicted from the cache
    ContainerWithPipeline c2 = scmClient.createContainer(HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, c2.getContainerInfo().getState());
    // corrupt the db by renaming the directory to a file
    File metadataDir = new File(c1.getContainerFile().getParent());
    File dbDir = new File(metadataDir, "1" + OzoneConsts.DN_CONTAINER_DB);
    DatanodeTestUtils.injectDataDirFailure(dbDir);
    // simulate a bad volume by removing write permission on the root dir;
    // refer to HddsVolume.check()
    MutableVolumeSet volSet = oc.getVolumeSet();
    StorageVolume vol0 = volSet.getVolumesList().get(0);
    DatanodeTestUtils.simulateBadVolume(vol0);
    // read the written file to trigger checkVolumeAsync
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    try {
        is.read(fileContent);
        Assert.fail();
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IOException);
    } finally {
        is.close();
    }
    // checkVolumeAsync should run and the failed volume should be detected
    DatanodeTestUtils.waitForCheckVolume(volSet, 1L);
    DatanodeTestUtils.waitForHandleFailedVolume(volSet, 1);
    // restore everything
    DatanodeTestUtils.restoreBadVolume(vol0);
    DatanodeTestUtils.restoreDataDirFromFailure(dbDir);
}
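A minimal sketch, not part of the test above: the same failure expectation expressed with try-with-resources and assertThrows (assuming JUnit 4.13+ and the same bucket and keyName as in the test), so the stream is closed even when the read fails.

// Sketch under the assumptions above; buffer size is arbitrary.
@Test
public void readFromFailedVolumeSketch() throws Exception {
    byte[] buf = new byte[16];
    Assert.assertThrows(IOException.class, () -> {
        try (OzoneInputStream in = bucket.readKey(keyName)) {
            in.read(buf);  // expected to fail once the volume is marked bad
        }
    });
}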
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestHelper, method validateData.
public static void validateData(String keyName, byte[] data, ObjectStore objectStore, String volumeName, String bucketName) throws Exception {
    byte[] readData = new byte[data.length];
    OzoneInputStream is = objectStore.getVolume(volumeName).getBucket(bucketName).readKey(keyName);
    is.read(readData);
    MessageDigest sha1 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    sha1.update(data);
    MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    sha2.update(readData);
    Assert.assertArrayEquals(sha1.digest(), sha2.digest());
    is.close();
}
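One caveat: InputStream.read(byte[]) may return fewer bytes than the buffer holds, so a single read followed by a digest comparison can fail spuriously on larger keys. A hedged sketch of a defensive variant (the helper name fullyRead is made up for illustration; java.io.EOFException is assumed imported):

// Illustrative helper, not part of TestHelper: loop until the buffer is full
// or the stream ends, since one read() is not guaranteed to fill it.
private static void fullyRead(InputStream in, byte[] buf) throws IOException {
    int off = 0;
    while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) {
            throw new EOFException("stream ended after " + off + " bytes");
        }
        off += n;
    }
}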
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class ReadReplicas, method downloadReplicasAndCreateManifest.
private void downloadReplicasAndCreateManifest(String keyName, Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas, Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicasWithoutChecksum, String directoryName, JsonArray blocks) throws IOException {
    int blockIndex = 0;
    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block : replicas.entrySet()) {
        JsonObject blockJson = new JsonObject();
        JsonArray replicasJson = new JsonArray();
        blockIndex += 1;
        blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
        blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, block.getKey().getContainerID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, block.getKey().getLocalID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, block.getKey().getLength());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, block.getKey().getOffset());
        for (Map.Entry<DatanodeDetails, OzoneInputStream> replica : block.getValue().entrySet()) {
            JsonObject replicaJson = new JsonObject();
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, replica.getKey().getHostName());
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, replica.getKey().getUuidString());
            OzoneInputStream is = replica.getValue();
            String fileName = keyName + "_block" + blockIndex + "_" + replica.getKey().getHostName();
            System.out.println("Writing : " + fileName);
            File replicaFile = new File(outputDir + "/" + directoryName + "/" + fileName);
            try {
                Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
            } catch (IOException e) {
                Throwable cause = e.getCause();
                replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
                if (cause instanceof OzoneChecksumException) {
                    BlockID blockID = block.getKey().getBlockID();
                    String datanodeUUID = replica.getKey().getUuidString();
                    is = getInputStreamWithoutChecksum(replicasWithoutChecksum, datanodeUUID, blockID);
                    Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                } else if (cause instanceof StatusRuntimeException) {
                    break;
                }
            } finally {
                is.close();
            }
            replicasJson.add(replicaJson);
        }
        blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
        blocks.add(blockJson);
    }
}
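For orientation, a hedged sketch of how the assembled JsonArray could be serialized to a manifest file with Gson. This is not the actual ReadReplicas output step; the manifest file name and its placement under outputDir/directoryName are assumptions for illustration.

// Sketch: wrap the blocks array and pretty-print it to disk with Gson.
JsonObject manifest = new JsonObject();
manifest.addProperty("keyName", keyName);
manifest.add("blocks", blocks);
Gson gson = new GsonBuilder().setPrettyPrinting().create();
Files.write(Paths.get(outputDir, directoryName, "manifest.json"),
    gson.toJson(manifest).getBytes(StandardCharsets.UTF_8));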
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneManagerHA, method testCreateFile.
/**
 * Creates a file with createFile and verifies that it was created
 * successfully.
 *
 * @param ozoneBucket bucket to create the file in
 * @param keyName name of the file to create
 * @param data content to write
 * @param recursive whether to create missing parent directories
 * @param overwrite whether to overwrite an existing key
 * @throws Exception
 */
protected void testCreateFile(OzoneBucket ozoneBucket, String keyName, String data, boolean recursive, boolean overwrite) throws Exception {
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, overwrite, recursive);
    ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
    ozoneOutputStream.close();
    OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName);
    Assert.assertEquals(keyName, ozoneKeyDetails.getName());
    Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName());
    Assert.assertEquals(ozoneBucket.getVolumeName(), ozoneKeyDetails.getVolumeName());
    Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize());
    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    ozoneInputStream.read(fileContent);
    Assert.assertEquals(data, new String(fileContent, UTF_8));
}
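A brief usage sketch with hypothetical key names, showing how the recursive and overwrite flags are typically exercised: a nested path whose parents do not exist needs recursive=true, and writing the same key a second time needs overwrite=true.

// Hypothetical calls, assuming an existing ozoneBucket:
testCreateFile(ozoneBucket, "dir1/dir2/file1", "some data", true, false);  // creates parent dirs
testCreateFile(ozoneBucket, "dir1/dir2/file1", "new data", false, true);   // overwrites the key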
Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.
The class TestOzoneManagerHAWithData, method createMultipartKeyAndReadKey.
private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, String keyName, String uploadID) throws Exception {
    String value = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(keyName, value.length(), 1, uploadID);
    ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
    ozoneOutputStream.close();
    Map<Integer, String> partsMap = new HashMap<>();
    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap);
    Assert.assertNotNull(omMultipartUploadCompleteInfo);
    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    ozoneInputStream.read(fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
}
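The uploadID parameter comes from initiating a multipart upload on the bucket first. A minimal sketch of the round trip, assuming the OzoneBucket.initiateMultipartUpload API that the Ozone client exposes:

// Sketch: initiate the upload, then pass the returned upload ID to the
// helper above to write part 1 and complete the upload.
OmMultipartInfo multipartInfo = ozoneBucket.initiateMultipartUpload(keyName,
    ReplicationType.RATIS, ReplicationFactor.ONE);
createMultipartKeyAndReadKey(ozoneBucket, keyName, multipartInfo.getUploadID());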