Search in sources :

Example 11 with OzoneClientConfig

Use of org.apache.hadoop.hdds.scm.OzoneClientConfig in project ozone by apache.

The class TestInputStreamBase, method init.

/**
 * Spins up a mini Ozone cluster for the stream tests and creates a fresh,
 * randomly named volume/bucket pair before each test.
 *
 * @throws Exception if the cluster or the RPC client fails to start
 */
@Before
public void init() throws Exception {
    // Client-side checksum granularity for the streams under test.
    OzoneClientConfig clientConfig = new OzoneClientConfig();
    clientConfig.setBytesPerChecksum(BYTES_PER_CHECKSUM);
    conf.setFromObject(clientConfig);

    // Tight SCM timings so node-state transitions happen quickly in tests.
    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS);
    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
    conf.setQuietMode(false);
    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB);
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, containerLayout.toString());

    // Run the replication manager on a one-second cadence.
    ReplicationManagerConfiguration rmConf =
        conf.getObject(ReplicationManagerConfiguration.class);
    rmConf.setInterval(Duration.ofSeconds(1));
    conf.setFromObject(rmConf);

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(5)
        .setTotalPipelineNumLimit(5)
        .setBlockSize(BLOCK_SIZE)
        .setChunkSize(CHUNK_SIZE)
        .setStreamBufferFlushSize(FLUSH_SIZE)
        .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
        .setStreamBufferSizeUnit(StorageUnit.BYTES)
        .build();
    cluster.waitForClusterToBeReady();

    // the easiest way to create an open container is creating a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    volumeName = UUID.randomUUID().toString();
    bucketName = UUID.randomUUID().toString();
    keyString = UUID.randomUUID().toString();
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
}
Also used : ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) Before(org.junit.Before)

Example 12 with OzoneClientConfig

Use of org.apache.hadoop.hdds.scm.OzoneClientConfig in project ozone by apache.

The class TestOzoneClientRetriesOnExceptions, method init.

/**
 * Spins up a mini Ozone cluster sized for client retry testing and creates
 * the volume/bucket shared by the test cases.
 *
 * @throws Exception if the cluster or the RPC client fails to start
 */
@Before
public void init() throws Exception {
    // Buffer sizes are derived from each other: 100 / 200 / 400 / 800 bytes.
    chunkSize = 100;
    flushSize = 2 * chunkSize;
    maxFlushSize = 2 * flushSize;
    blockSize = 2 * maxFlushSize;

    // Client config: bounded retries, no checksums, eager flushes.
    OzoneClientConfig ocConfig = conf.getObject(OzoneClientConfig.class);
    ocConfig.setMaxRetryCount(MAX_RETRIES);
    ocConfig.setChecksumType(ChecksumType.NONE);
    ocConfig.setStreamBufferFlushDelay(false);
    conf.setFromObject(ocConfig);

    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
    conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 3);
    conf.setQuietMode(false);

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(7)
        .setTotalPipelineNumLimit(10)
        .setBlockSize(blockSize)
        .setChunkSize(chunkSize)
        .setStreamBufferFlushSize(flushSize)
        .setStreamBufferMaxSize(maxFlushSize)
        .setStreamBufferSizeUnit(StorageUnit.BYTES)
        .build();
    cluster.waitForClusterToBeReady();

    // the easiest way to create an open container is creating a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    xceiverClientManager = new XceiverClientManager(conf);
    keyString = UUID.randomUUID().toString();
    volumeName = "testblockoutputstreamwithretries";
    bucketName = volumeName;
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
}
Also used : OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) Before(org.junit.Before)

Example 13 with OzoneClientConfig

Use of org.apache.hadoop.hdds.scm.OzoneClientConfig in project ozone by apache.

The class TestContainerStateMachine, method setup.

/**
 * Starts a secure single-datanode mini Ozone cluster and prepares the
 * volume/bucket shared by the test cases.
 *
 * @throws Exception if the cluster or the RPC client fails to start
 */
@Before
public void setup() throws Exception {
    path = GenericTestUtils.getTempPath(TestContainerStateMachine.class.getSimpleName());
    File baseDir = new File(path);
    // NOTE(review): mkdirs() result is ignored — assumes the temp path is
    // always creatable in the test environment.
    baseDir.mkdirs();

    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
    conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
    // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
    conf.setQuietMode(false);
    OzoneManager.setTestSecureOmFlag(true);
    // Take a Ratis snapshot after every transaction.
    conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);

    OzoneClientConfig ocConfig = conf.getObject(OzoneClientConfig.class);
    ocConfig.setStreamBufferFlushDelay(false);
    conf.setFromObject(ocConfig);
    // conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.toString());

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(1)
        .setHbInterval(200)
        .setCertificateClient(new CertificateClientTestImpl(conf))
        .build();
    cluster.setWaitForClusterToBeReadyTimeout(300000);
    cluster.waitForClusterToBeReady();
    cluster.getOzoneManager().startSecretManager();

    // the easiest way to create an open container is creating a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    volumeName = "testcontainerstatemachinefailures";
    bucketName = volumeName;
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
}
Also used : CertificateClientTestImpl(org.apache.hadoop.ozone.client.CertificateClientTestImpl) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) File(java.io.File) Before(org.junit.Before)

Example 14 with OzoneClientConfig

Use of org.apache.hadoop.hdds.scm.OzoneClientConfig in project ozone by apache.

The class TestContainerStateMachineFailures, method init.

/**
 * One-time setup: starts a ten-datanode mini Ozone cluster with Ratis
 * timeouts tuned for failure injection, then creates the shared
 * volume/bucket.
 *
 * @throws Exception if the cluster or the RPC client fails to start
 */
@BeforeAll
public static void init() throws Exception {
    conf = new OzoneConfiguration();

    OzoneClientConfig ocConfig = conf.getObject(OzoneClientConfig.class);
    ocConfig.setStreamBufferFlushDelay(false);
    conf.setFromObject(ocConfig);

    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS);

    // Ratis client: generous write/watch timeouts so slow containers do not
    // fail requests before the scenario under test plays out.
    RatisClientConfig ratisClient = conf.getObject(RatisClientConfig.class);
    ratisClient.setWriteRequestTimeout(Duration.ofSeconds(20));
    ratisClient.setWatchRequestTimeout(Duration.ofSeconds(20));
    conf.setFromObject(ratisClient);

    // Ratis server: short timeouts so failures surface quickly.
    DatanodeRatisServerConfig ratisServer = conf.getObject(DatanodeRatisServerConfig.class);
    ratisServer.setRequestTimeOut(Duration.ofSeconds(3));
    ratisServer.setWatchTimeOut(Duration.ofSeconds(10));
    conf.setFromObject(ratisServer);

    RatisClientConfig.RaftConfig raftClient =
        conf.getObject(RatisClientConfig.RaftConfig.class);
    raftClient.setRpcRequestTimeout(Duration.ofSeconds(3));
    raftClient.setRpcWatchRequestTimeout(Duration.ofSeconds(20));
    conf.setFromObject(raftClient);

    // Take a Ratis snapshot after every transaction.
    conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
    conf.setQuietMode(false);

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(10)
        .setHbInterval(200)
        .build();
    cluster.waitForClusterToBeReady();
    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);

    // the easiest way to create an open container is creating a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    xceiverClientManager = new XceiverClientManager(conf);
    volumeName = "testcontainerstatemachinefailures";
    bucketName = volumeName;
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
    random = new Random();
}
Also used : RatisClientConfig(org.apache.hadoop.hdds.ratis.conf.RatisClientConfig) Random(java.util.Random) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) DatanodeRatisServerConfig(org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig) BeforeAll(org.junit.jupiter.api.BeforeAll)

Example 15 with OzoneClientConfig

Use of org.apache.hadoop.hdds.scm.OzoneClientConfig in project ozone by apache.

The class TestECKeyOutputStream, method init.

/**
 * One-time setup: starts a ten-datanode mini Ozone cluster tuned for the
 * EC key output stream tests and creates the shared volume/bucket.
 *
 * @throws Exception if the cluster or the RPC client fails to start
 */
@BeforeClass
public static void init() throws Exception {
    // Buffer sizes are derived from each other: 1024 / 2048 / 4096 / 8192 bytes.
    chunkSize = 1024;
    flushSize = 2 * chunkSize;
    maxFlushSize = 2 * flushSize;
    blockSize = 2 * maxFlushSize;

    OzoneClientConfig ocConfig = conf.getObject(OzoneClientConfig.class);
    ocConfig.setChecksumType(ContainerProtos.ChecksumType.NONE);
    ocConfig.setStreamBufferFlushDelay(false);
    conf.setFromObject(ocConfig);

    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
    // If SCM detects a dead node too quickly, the container would be moved
    // to the closed state and all in-progress writes would fail. Keep the
    // timeouts high; no test here depends on dead-node detection timing.
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 60, TimeUnit.SECONDS);
    conf.setTimeDuration("hdds.ratis.raft.server.rpc.slowness.timeout", 300, TimeUnit.SECONDS);
    conf.setTimeDuration("hdds.ratis.raft.server.notification.no-leader.timeout", 300, TimeUnit.SECONDS);
    conf.setQuietMode(false);
    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB);
    conf.setTimeDuration(HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL, 500, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);

    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(10)
        .setTotalPipelineNumLimit(10)
        .setBlockSize(blockSize)
        .setChunkSize(chunkSize)
        .setStreamBufferFlushSize(flushSize)
        .setStreamBufferMaxSize(maxFlushSize)
        .setStreamBufferSizeUnit(StorageUnit.BYTES)
        .build();
    cluster.waitForClusterToBeReady();

    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    keyString = UUID.randomUUID().toString();
    volumeName = "testeckeyoutputstream";
    bucketName = volumeName;
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
    initInputChunks();
}
Also used : OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) BeforeClass(org.junit.BeforeClass)

Aggregations

OzoneClientConfig (org.apache.hadoop.hdds.scm.OzoneClientConfig)18 DatanodeRatisServerConfig (org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig)7 RatisClientConfig (org.apache.hadoop.hdds.ratis.conf.RatisClientConfig)7 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)5 XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager)5 Before (org.junit.Before)5 BeforeClass (org.junit.BeforeClass)5 BeforeEach (org.junit.jupiter.api.BeforeEach)3 File (java.io.File)2 IOException (java.io.IOException)1 NoSuchFileException (java.nio.file.NoSuchFileException)1 Path (java.nio.file.Path)1 Properties (java.util.Properties)1 Random (java.util.Random)1 BlockID (org.apache.hadoop.hdds.client.BlockID)1 ReplicationManagerConfiguration (org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration)1 MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline)1 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)1 CertificateClientTestImpl (org.apache.hadoop.ozone.client.CertificateClientTestImpl)1 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)1