Example usage of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.
Taken from the setUp method of the class TestDecommissionAndMaintenance.
@Before
public void setUp() throws Exception {
  // Obtain a (possibly reused) mini cluster from the provider, then wire up
  // the SCM/OM manager references before any test data is created.
  cluster = clusterProvider.provide();
  setManagers();
  // Pre-create the volume/bucket pair the tests write into.
  bucket = TestDataUtil.createVolumeAndBucket(cluster, volName, bucketName);
  // Client for issuing container/node admin operations against SCM,
  // configured from the live cluster's configuration.
  final OzoneConfiguration clusterConf = cluster.getConf();
  scmClient = new ContainerOperationClient(clusterConf);
}
Example usage of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.
Taken from the setUp method of the class TestQueryNode.
@Before
public void setUp() throws Exception {
  // Build a configuration with aggressive timings so node-state transitions
  // (healthy -> stale -> dead) happen quickly enough for the tests to observe.
  final OzoneConfiguration conf = new OzoneConfiguration();

  // SCM processes heartbeats every 100 ms.
  final int heartbeatProcessMs = 100;
  conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, heartbeatProcessMs, TimeUnit.MILLISECONDS);

  // Datanodes report everything (heartbeat, pipeline, command status,
  // container, node) once per second.
  conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
  conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS);
  conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS);
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, SECONDS);
  conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);

  // A node is considered stale after 3 s without a heartbeat, dead after 6 s.
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
  conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);

  // Spin up the mini cluster with a pipeline limit of 1.5x the datanode count.
  final int pipelineLimit = numOfDatanodes + numOfDatanodes / 2;
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(numOfDatanodes)
      .setTotalPipelineNumLimit(pipelineLimit)
      .build();
  cluster.waitForClusterToBeReady();

  // Admin client used by the tests to query node state through SCM.
  scmClient = new ContainerOperationClient(conf);
}
Aggregations