Search in sources:

Example 21 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From class ContainerUtils, method verifyChecksum.

/**
 * Verifies that the checksum persisted in {@code containerData} matches the
 * checksum recomputed from its current contents.
 *
 * @param containerData container metadata whose checksum is verified
 * @param conf configuration source consulted for the verification toggle
 * @throws StorageContainerException if the stored and computed checksums differ
 * @throws IOException if checksum computation fails
 */
public static void verifyChecksum(ContainerData containerData, ConfigurationSource conf) throws IOException {
    // Verification can be switched off entirely via configuration.
    if (!conf.getBoolean(HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED, HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT)) {
        return;
    }
    String persistedChecksum = containerData.getChecksum();
    // KeyValue containers with a positive replica index serialize differently,
    // so the matching YAML representation must be used when recomputing.
    boolean hasReplicaIndex = containerData instanceof KeyValueContainerData
        && ((KeyValueContainerData) containerData).getReplicaIndex() > 0;
    Yaml yaml = ContainerDataYaml.getYamlForContainerType(containerData.getContainerType(), hasReplicaIndex);
    containerData.computeAndSetChecksum(yaml);
    String recomputedChecksum = containerData.getChecksum();
    // A missing stored checksum is treated as a mismatch as well.
    if (persistedChecksum == null || !persistedChecksum.equals(recomputedChecksum)) {
        throw new StorageContainerException("Container checksum error for " + "ContainerID: " + containerData.getContainerID() + ". " + "\nStored Checksum: " + persistedChecksum + "\nExpected Checksum: " + recomputedChecksum, CONTAINER_CHECKSUM_ERROR);
    }
}
Also used : StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) ContainerDataYaml(org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml) Yaml(org.yaml.snakeyaml.Yaml) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)

Example 22 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From class TestContainerDataYaml, method testIncorrectChecksum.

/**
 * Verifies that a container file with an incorrect checksum is rejected
 * by {@code ContainerUtils.verifyChecksum}.
 */
@Test
public void testIncorrectChecksum() {
    Exception caught = null;
    try {
        KeyValueContainerData kvData = getKeyValueContainerData();
        ContainerUtils.verifyChecksum(kvData, conf);
    } catch (Exception ex) {
        caught = ex;
    }
    if (caught == null) {
        // verifyChecksum did not reject the bad checksum.
        fail("testIncorrectChecksum failed");
    }
    GenericTestUtils.assertExceptionContains("Container checksum error for " + "ContainerID:", caught);
}
Also used : KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) IOException(java.io.IOException) Test(org.junit.Test)

Example 23 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From class TestContainerDataYaml, method testChecksumInContainerFile.

/**
 * Test to verify {@link ContainerUtils#verifyChecksum(ContainerData, ConfigurationSource)}
 * accepts the checksum of a freshly written .container file.
 */
@Test
public void testChecksumInContainerFile() throws IOException {
    long containerID = testContainerID++;
    File containerFile = createContainerFile(containerID, 0);
    // Read from .container file, and verify data.
    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml.readContainerFile(containerFile);
    // Must not throw: the stored checksum was just computed by the writer.
    ContainerUtils.verifyChecksum(kvData, conf);
    cleanup();
}
Also used : File(java.io.File) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) Test(org.junit.Test)

Example 24 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From class TestContainerDeletionChoosingPolicy, method testRandomChoosingPolicy.

/**
 * Verifies that {@code RandomContainerDeletionChoosingPolicy} honors the
 * per-interval block limit and actually randomizes the order in which
 * containers are chosen.
 *
 * @throws IOException if container setup or policy invocation fails
 */
@Test
public void testRandomChoosingPolicy() throws IOException {
    File containerDir = new File(path);
    if (containerDir.exists()) {
        // Start from a clean directory so leftover containers cannot skew the test.
        FileUtils.deleteDirectory(containerDir);
    }
    Assert.assertTrue(containerDir.mkdirs());
    conf.set(ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, RandomContainerDeletionChoosingPolicy.class.getName());
    List<StorageLocation> pathLists = new LinkedList<>();
    pathLists.add(StorageLocation.parse(containerDir.getAbsolutePath()));
    containerSet = new ContainerSet();
    int numContainers = 10;
    for (int i = 0; i < numContainers; i++) {
        // Each container carries 20 pending deletion blocks and is closed,
        // making it eligible for the deletion-choosing policy.
        KeyValueContainerData data = new KeyValueContainerData(i, layout, ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), UUID.randomUUID().toString());
        data.incrPendingDeletionBlocks(20);
        data.closeContainer();
        KeyValueContainer container = new KeyValueContainer(data, conf);
        containerSet.addContainer(container);
        Assert.assertTrue(containerSet.getContainerMapCopy().containsKey(data.getContainerID()));
    }
    blockDeletingService = getBlockDeletingService();
    int blockLimitPerInterval = 5;
    ContainerDeletionChoosingPolicy deletionPolicy = new RandomContainerDeletionChoosingPolicy();
    List<ContainerBlockInfo> result0 = blockDeletingService.chooseContainerForBlockDeletion(blockLimitPerInterval, deletionPolicy);
    long totPendingBlocks = 0;
    for (ContainerBlockInfo pr : result0) {
        totPendingBlocks += pr.getBlocks();
    }
    // The policy must select enough containers to cover the requested limit.
    Assert.assertTrue(totPendingBlocks >= blockLimitPerInterval);
    // We expect different order at least once in 100 draws; otherwise the
    // "random" policy is effectively deterministic and the test fails.
    for (int j = 0; j < 100; j++) {
        List<ContainerBlockInfo> result1 = blockDeletingService.chooseContainerForBlockDeletion(50, deletionPolicy);
        List<ContainerBlockInfo> result2 = blockDeletingService.chooseContainerForBlockDeletion(50, deletionPolicy);
        for (int i = 0; i < result1.size(); i++) {
            if (result1.get(i).getContainerData().getContainerID() != result2.get(i).getContainerData().getContainerID()) {
                // The two draws came back in different orders: randomness observed.
                return;
            }
        }
    }
    Assert.fail("Chosen container results were same 100 times");
}
Also used : LinkedList(java.util.LinkedList) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) File(java.io.File) ContainerBlockInfo(org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService.ContainerBlockInfo) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) ContainerDeletionChoosingPolicy(org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy) Test(org.junit.Test)

Example 25 with KeyValueContainerData

Use of org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData in the Apache Ozone project.

From class TestContainerPersistence, method testUpdateContainer.

/**
 * Tries to update an existing and non-existing container. Verifies container
 * map and persistent data both updated.
 *
 * @throws IOException if container creation or file access fails
 */
@Test
public void testUpdateContainer() throws IOException {
    long testContainerID = ContainerTestHelper.getTestContainerID();
    KeyValueContainer container = (KeyValueContainer) addContainer(containerSet, testContainerID);
    File orgContainerFile = container.getContainerFile();
    Assert.assertTrue(orgContainerFile.exists());
    Map<String, String> newMetadata = Maps.newHashMap();
    newMetadata.put("VOLUME", "shire_new");
    newMetadata.put("owner", "bilbo_new");
    container.update(newMetadata, false);
    Assert.assertEquals(1, containerSet.getContainerMapCopy().size());
    Assert.assertTrue(containerSet.getContainerMapCopy().containsKey(testContainerID));
    // Verify in-memory map
    KeyValueContainerData actualNewData = (KeyValueContainerData) containerSet.getContainer(testContainerID).getContainerData();
    Assert.assertEquals("shire_new", actualNewData.getMetadata().get("VOLUME"));
    Assert.assertEquals("bilbo_new", actualNewData.getMetadata().get("owner"));
    // Verify container data on disk
    File containerBaseDir = new File(actualNewData.getMetadataPath()).getParentFile();
    File newContainerFile = ContainerUtils.getContainerFile(containerBaseDir);
    Assert.assertTrue("Container file should exist.", newContainerFile.exists());
    Assert.assertEquals("Container file should be in same location.", orgContainerFile.getAbsolutePath(), newContainerFile.getAbsolutePath());
    ContainerData actualContainerData = ContainerDataYaml.readContainerFile(newContainerFile);
    Assert.assertEquals("shire_new", actualContainerData.getMetadata().get("VOLUME"));
    Assert.assertEquals("bilbo_new", actualContainerData.getMetadata().get("owner"));
    // Test force update flag.
    // Close the container and then try to update without force update flag.
    container.close();
    try {
        container.update(newMetadata, false);
        // Bug fix: without this fail() the test silently passed when the
        // closed-container update was (incorrectly) allowed.
        Assert.fail("Update of a closed container without force flag should have thrown");
    } catch (StorageContainerException ex) {
        Assert.assertEquals("Updating a closed container without " + "force option is not allowed. ContainerID: " + testContainerID, ex.getMessage());
    }
    // Update with force flag, it should be success.
    newMetadata.put("VOLUME", "shire_new_1");
    newMetadata.put("owner", "bilbo_new_1");
    container.update(newMetadata, true);
    // Verify in-memory map
    actualNewData = (KeyValueContainerData) containerSet.getContainer(testContainerID).getContainerData();
    Assert.assertEquals("shire_new_1", actualNewData.getMetadata().get("VOLUME"));
    Assert.assertEquals("bilbo_new_1", actualNewData.getMetadata().get("owner"));
}
Also used : StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) File(java.io.File) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) Test(org.junit.Test)

Aggregations

KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData)87 Test (org.junit.Test)37 KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)33 DBHandle (org.apache.hadoop.ozone.container.common.interfaces.DBHandle)31 File (java.io.File)27 BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)19 IOException (java.io.IOException)17 ArrayList (java.util.ArrayList)15 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)12 HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume)12 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)11 ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet)11 Container (org.apache.hadoop.ozone.container.common.interfaces.Container)11 StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException)10 MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet)10 OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer)10 Table (org.apache.hadoop.hdds.utils.db.Table)9 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)9 RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy)9 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)9