Search in sources :

Example 26 with OzoneInputStream

use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestDatanodeHddsVolumeFailureDetection, method testHddsVolumeFailureOnDbFileCorrupt.

/**
 * Verifies that a datanode detects a volume failure when a container's
 * RocksDB directory has been corrupted and the volume root has been made
 * unwritable: a subsequent key read should fail with an IOException and
 * trigger the asynchronous volume check, which must report exactly one
 * failed volume. All injected failures are restored at the end.
 *
 * NOTE(review): the statement order matters — container1 must be closed
 * and evicted from the cache (by creating container2) before the db dir
 * is corrupted, so the read path re-opens the db and hits the failure.
 */
@Test
public void testHddsVolumeFailureOnDbFileCorrupt() throws Exception {
    // write a file, will create container1
    String keyName = UUID.randomUUID().toString();
    String value = "sample value";
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    // close container1
    HddsDatanodeService dn = datanodes.get(0);
    OzoneContainer oc = dn.getDatanodeStateMachine().getContainer();
    Container c1 = oc.getContainerSet().getContainer(1);
    c1.close();
    // create container2, and container1 is kicked out of cache
    ContainerWithPipeline c2 = scmClient.createContainer(HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    Assert.assertTrue(c2.getContainerInfo().getState().equals(HddsProtos.LifeCycleState.OPEN));
    // corrupt db by rename dir->file
    File metadataDir = new File(c1.getContainerFile().getParent());
    File dbDir = new File(metadataDir, "1" + OzoneConsts.DN_CONTAINER_DB);
    DatanodeTestUtils.injectDataDirFailure(dbDir);
    // simulate bad volume by removing write permission on root dir
    // refer to HddsVolume.check()
    MutableVolumeSet volSet = oc.getVolumeSet();
    StorageVolume vol0 = volSet.getVolumesList().get(0);
    DatanodeTestUtils.simulateBadVolume(vol0);
    // read written file to trigger checkVolumeAsync
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    try {
        is.read(fileContent);
        // the read must fail — the container db was corrupted above
        Assert.fail();
    } catch (Exception e) {
        Assert.assertTrue(e instanceof IOException);
    } finally {
        is.close();
    }
    // should trigger CheckVolumeAsync and
    // a failed volume should be detected
    DatanodeTestUtils.waitForCheckVolume(volSet, 1L);
    DatanodeTestUtils.waitForHandleFailedVolume(volSet, 1);
    // restore all
    DatanodeTestUtils.restoreBadVolume(vol0);
    DatanodeTestUtils.restoreDataDirFromFailure(dbDir);
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) IOException(java.io.IOException) StorageVolume(org.apache.hadoop.ozone.container.common.volume.StorageVolume) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) OzoneContainer(org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer) File(java.io.File) Test(org.junit.Test)

Example 27 with OzoneInputStream

use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestHelper, method validateData.

/**
 * Reads {@code keyName} back from the given bucket and asserts that its
 * content matches {@code data} by comparing message digests.
 *
 * @param keyName     key to read back
 * @param data        expected content bytes
 * @param objectStore object store to read from
 * @param volumeName  volume containing the bucket
 * @param bucketName  bucket containing the key
 * @throws Exception if the read or digest computation fails
 */
public static void validateData(String keyName, byte[] data, ObjectStore objectStore, String volumeName, String bucketName) throws Exception {
    byte[] readData = new byte[data.length];
    // try-with-resources: the original leaked the stream if anything
    // between readKey() and close() threw.
    try (OzoneInputStream is = objectStore.getVolume(volumeName).getBucket(bucketName).readKey(keyName)) {
        // InputStream.read(byte[]) may return fewer bytes than requested;
        // loop until the buffer is full (or EOF) so a short read does not
        // produce a spurious digest mismatch.
        int off = 0;
        while (off < readData.length) {
            int n = is.read(readData, off, readData.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
        Assert.assertEquals(readData.length, off);
    }
    MessageDigest sha1 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    sha1.update(data);
    MessageDigest sha2 = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    sha2.update(readData);
    // assertArrayEquals reports the first differing index on failure,
    // unlike assertTrue(Arrays.equals(...)).
    Assert.assertArrayEquals(sha1.digest(), sha2.digest());
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) MessageDigest(java.security.MessageDigest)

Example 28 with OzoneInputStream

use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class ReadReplicas, method downloadReplicasAndCreateManifest.

/**
 * Downloads every replica of every block of the given key into
 * {@code outputDir/directoryName} and records a JSON manifest entry
 * (container/local id, length, offset, per-replica host/uuid and any
 * read exception) into {@code blocks}.
 *
 * If a replica read fails with a checksum error, the download is retried
 * from {@code replicasWithoutChecksum}. If it fails with a
 * StatusRuntimeException, the remaining replicas of that block are
 * skipped (and that replica's JSON entry is not added — pre-existing
 * behavior).
 *
 * @param keyName                 key whose replicas are downloaded
 * @param replicas                per-block map of datanode -&gt; replica stream
 * @param replicasWithoutChecksum per-block replica streams with checksum
 *                                verification disabled, used for retry
 * @param directoryName           subdirectory of outputDir to write into
 * @param blocks                  JSON array the manifest entries are appended to
 * @throws IOException if a non-checksum, non-gRPC read failure occurs on retry
 */
private void downloadReplicasAndCreateManifest(String keyName, Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas, Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicasWithoutChecksum, String directoryName, JsonArray blocks) throws IOException {
    int blockIndex = 0;
    for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block : replicas.entrySet()) {
        JsonObject blockJson = new JsonObject();
        JsonArray replicasJson = new JsonArray();
        blockIndex += 1;
        blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
        blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, block.getKey().getContainerID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, block.getKey().getLocalID());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, block.getKey().getLength());
        blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, block.getKey().getOffset());
        for (Map.Entry<DatanodeDetails, OzoneInputStream> replica : block.getValue().entrySet()) {
            JsonObject replicaJson = new JsonObject();
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, replica.getKey().getHostName());
            replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, replica.getKey().getUuidString());
            OzoneInputStream is = replica.getValue();
            String fileName = keyName + "_block" + blockIndex + "_" + replica.getKey().getHostName();
            System.out.println("Writing : " + fileName);
            File replicaFile = new File(outputDir + "/" + directoryName + "/" + fileName);
            try {
                Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
            } catch (IOException e) {
                Throwable cause = e.getCause();
                replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
                if (cause instanceof OzoneChecksumException) {
                    BlockID blockID = block.getKey().getBlockID();
                    String datanodeUUID = replica.getKey().getUuidString();
                    // Close the failed stream before replacing it; the
                    // finally block only closes whatever `is` points to
                    // at that time, so without this the original stream
                    // leaked.
                    is.close();
                    is = getInputStreamWithoutChecksum(replicasWithoutChecksum, datanodeUUID, blockID);
                    Files.copy(is, replicaFile.toPath(), StandardCopyOption.REPLACE_EXISTING);
                } else if (cause instanceof StatusRuntimeException) {
                    // datanode unreachable — skip the rest of this
                    // block's replicas (finally still closes `is`)
                    break;
                }
            } finally {
                is.close();
            }
            replicasJson.add(replicaJson);
        }
        blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
        blocks.add(blockJson);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) JsonObject(com.google.gson.JsonObject) IOException(java.io.IOException) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) JsonArray(com.google.gson.JsonArray) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) StatusRuntimeException(org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException) BlockID(org.apache.hadoop.hdds.client.BlockID) Map(java.util.Map) File(java.io.File)

Example 29 with OzoneInputStream

use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneManagerHA, method testCreateFile.

/**
 * This method createFile and verifies the file is successfully created or
 * not.
 *
 * @param ozoneBucket
 * @param keyName
 * @param data
 * @param recursive
 * @param overwrite
 * @throws Exception
 */
/**
 * Creates a key via {@code createFile}, writes {@code data}, then reads
 * it back and verifies both the key metadata and the content.
 *
 * @param ozoneBucket bucket to create the file in
 * @param keyName     name of the key to create
 * @param data        content to write and verify
 * @param recursive   create missing parent directories
 * @param overwrite   overwrite an existing key
 * @throws Exception on any create/read/verify failure
 */
protected void testCreateFile(OzoneBucket ozoneBucket, String keyName, String data, boolean recursive, boolean overwrite) throws Exception {
    // NOTE(review): size/offset use data.length() (char count), which only
    // equals the UTF-8 byte count for single-byte characters — existing
    // callers pass ASCII data.
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, overwrite, recursive);
    ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
    ozoneOutputStream.close();
    OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName);
    Assert.assertEquals(keyName, ozoneKeyDetails.getName());
    Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName());
    Assert.assertEquals(ozoneBucket.getVolumeName(), ozoneKeyDetails.getVolumeName());
    Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize());
    byte[] fileContent = new byte[data.getBytes(UTF_8).length];
    // try-with-resources: the original never closed the input stream.
    try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
        // read() may return fewer bytes than requested; loop until the
        // buffer is full so a short read can't fail the content check.
        int off = 0;
        while (off < fileContent.length) {
            int n = ozoneInputStream.read(fileContent, off, fileContent.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
    }
    Assert.assertEquals(data, new String(fileContent, UTF_8));
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream)

Example 30 with OzoneInputStream

use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneManagerHAWithData, method createMultipartKeyAndReadKey.

/**
 * Writes a single-part multipart key, completes the upload, then reads
 * the key back and verifies its content.
 *
 * @param ozoneBucket bucket to write into
 * @param keyName     key to create
 * @param uploadID    multipart upload id previously initiated for the key
 * @throws Exception on any write/complete/read/verify failure
 */
private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, String keyName, String uploadID) throws Exception {
    String value = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey(keyName, value.length(), 1, uploadID);
    ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
    ozoneOutputStream.close();
    Map<Integer, String> partsMap = new HashMap<>();
    partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(keyName, uploadID, partsMap);
    Assert.assertNotNull(omMultipartUploadCompleteInfo);
    Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    // try-with-resources: the original never closed the input stream.
    try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
        // read() may return fewer bytes than requested; loop until the
        // buffer is full so a short read can't fail the content check.
        int off = 0;
        while (off < fileContent.length) {
            int n = ozoneInputStream.read(fileContent, off, fileContent.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
    }
    Assert.assertEquals(value, new String(fileContent, UTF_8));
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) HashMap(java.util.HashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream)

Aggregations

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)47 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)33 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)26 Test (org.junit.Test)26 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)22 OzoneKey (org.apache.hadoop.ozone.client.OzoneKey)17 IOException (java.io.IOException)15 OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails)13 Instant (java.time.Instant)12 HashMap (java.util.HashMap)11 LinkedHashMap (java.util.LinkedHashMap)10 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)8 ArrayList (java.util.ArrayList)7 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)7 OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs)7 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)7 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)6 RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo)6 File (java.io.File)5 HttpHeaders (javax.ws.rs.core.HttpHeaders)5