Use of org.apache.hadoop.hdds.scm.ScmConfig in project ozone by apache.
The class TestDeleteWithSlowFollower, method init().
/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception if cluster setup fails
 */
@BeforeClass
public static void init() throws Exception {
conf = new OzoneConfiguration();
path = GenericTestUtils.getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
File baseDir = new File(path);
baseDir.mkdirs();
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
// Make the stale, dead and server failure timeouts higher so that a dead
// node is not detected at SCM and the pipeline close action is never
// initiated early at the Datanode in the test.
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 1000, TimeUnit.SECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 2000, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS);
conf.setTimeDuration(OZONE_SCM_PIPELINE_CREATION_INTERVAL, 1000, TimeUnit.SECONDS);
DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class);
ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(1000));
ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
conf.setFromObject(ratisServerConfig);
RatisClientConfig.RaftConfig raftClientConfig = conf.getObject(RatisClientConfig.RaftConfig.class);
raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
conf.setFromObject(raftClientConfig);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1));
conf.setFromObject(scmConfig);
DatanodeConfiguration datanodeConfiguration = conf.getObject(DatanodeConfiguration.class);
datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(100));
conf.setFromObject(datanodeConfiguration);
RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class);
ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30));
ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30));
conf.setFromObject(ratisClientConfig);
conf.setQuietMode(false);
int numOfDatanodes = 3;
cluster = MiniOzoneCluster.newBuilder(conf)
    .setNumDatanodes(numOfDatanodes)
    .setTotalPipelineNumLimit(numOfDatanodes + FACTOR_THREE_PIPELINE_COUNT)
    .setHbInterval(100)
    .build();
cluster.waitForClusterToBeReady();
cluster.waitForPipelineTobeReady(THREE, 60000);
// The easiest way to create an open container is to create a key.
client = OzoneClientFactory.getRpcClient(conf);
objectStore = client.getObjectStore();
xceiverClientManager = new XceiverClientManager(conf);
volumeName = "testcontainerstatemachinefailures";
bucketName = volumeName;
objectStore.createVolume(volumeName);
objectStore.getVolume(volumeName).createBucket(bucketName);
}
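The init method above leans on Ozone's typed-configuration pattern: getObject materializes a config POJO from the OzoneConfiguration, and setFromObject writes the mutated object back. A minimal standalone sketch of that round trip, assuming ScmConfig exposes a getBlockDeletionInterval getter mirroring the setter used above:

import java.time.Duration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfig;

public class ScmConfigRoundTrip {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Materialize the typed view, mutate it, and write it back so the
    // change is visible to any component reading the same configuration.
    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
    scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1));
    conf.setFromObject(scmConfig);
    // Re-reading the typed view yields the updated value (getter name
    // assumed to mirror the setter).
    Duration interval = conf.getObject(ScmConfig.class).getBlockDeletionInterval();
    System.out.println("block deletion interval: " + interval);
  }
}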
Use of org.apache.hadoop.hdds.scm.ScmConfig in project ozone by apache.
The class TestStorageContainerManager, method testBlockDeletionTransactions().
@Test
public void testBlockDeletionTransactions() throws Exception {
int numKeys = 5;
OzoneConfiguration conf = new OzoneConfiguration();
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 100, TimeUnit.MILLISECONDS);
conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 100, TimeUnit.MILLISECONDS);
conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS);
conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS);
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1));
conf.setFromObject(scmConfig);
// Reset container provision size, otherwise only one container
// is created by default.
conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys);
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).setHbInterval(100).build();
cluster.waitForClusterToBeReady();
try {
DeletedBlockLog delLog = cluster.getStorageContainerManager().getScmBlockManager().getDeletedBlockLog();
Assert.assertEquals(0, delLog.getNumOfValidTransactions());
// Create {numKeys} keys with random names.
TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf);
Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
// Wait for container report
Thread.sleep(1000);
for (OmKeyInfo keyInfo : keyLocations.values()) {
OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), cluster.getStorageContainerManager());
}
Map<Long, List<Long>> containerBlocks = createDeleteTXLog(cluster.getStorageContainerManager(), delLog, keyLocations, helper);
// Verify that a few TXs get created in the TX log.
Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
// Once TXs are written into the log, SCM starts to fetch TX entries
// from the log and schedule block deletions at each HB interval;
// after some time all the TXs should be processed, and by then the
// block lists of all known containers will be empty again.
GenericTestUtils.waitFor(() -> {
try {
if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
cluster.getStorageContainerManager().getScmHAManager().asSCMHADBTransactionBuffer().flush();
}
return delLog.getNumOfValidTransactions() == 0;
} catch (IOException e) {
return false;
}
}, 1000, 10000);
Assert.assertTrue(helper.verifyBlocksWithTxnTable(containerBlocks));
// Continue: add TXs that refer to known containers but unknown block IDs.
for (Long containerID : containerBlocks.keySet()) {
// Add 2 TXs per container.
Map<Long, List<Long>> deletedBlocks = new HashMap<>();
List<Long> blocks = new ArrayList<>();
blocks.add(RandomUtils.nextLong());
blocks.add(RandomUtils.nextLong());
deletedBlocks.put(containerID, blocks);
addTransactions(cluster.getStorageContainerManager(), delLog, deletedBlocks);
}
// Verify that a few TXs get created in the TX log.
Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);
// These blocks cannot be found in the containers, so deleting them is
// skipped; eventually these TXs will succeed.
GenericTestUtils.waitFor(() -> {
try {
if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
cluster.getStorageContainerManager().getScmHAManager().asSCMHADBTransactionBuffer().flush();
}
return delLog.getFailedTransactions().size() == 0;
} catch (IOException e) {
return false;
}
}, 1000, 20000);
} finally {
cluster.shutdown();
}
}
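The two waitFor blocks above repeat one polling idiom: flush the SCM HA transaction buffer when HA is enabled, then re-check the deleted-block log. A hedged sketch of a helper that factors this out, written as if it lived inside the test class above (same imports); the functional interface is introduced here purely for illustration:

@FunctionalInterface
interface DelLogCheck {
  boolean check(DeletedBlockLog log) throws IOException;
}

// Polls until the deleted-block log satisfies the given check, flushing
// the SCM HA transaction buffer first when HA is enabled, exactly as the
// inline lambdas above do.
private static void waitForDelLog(MiniOzoneCluster cluster,
    DeletedBlockLog delLog, DelLogCheck done, int timeoutMs) throws Exception {
  GenericTestUtils.waitFor(() -> {
    try {
      if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) {
        cluster.getStorageContainerManager().getScmHAManager()
            .asSCMHADBTransactionBuffer().flush();
      }
      return done.check(delLog);
    } catch (IOException e) {
      // Treat transient read failures as "not yet" and keep polling.
      return false;
    }
  }, 1000, timeoutMs);
}

// Usage, mirroring the two waits above:
//   waitForDelLog(cluster, delLog, d -> d.getNumOfValidTransactions() == 0, 10000);
//   waitForDelLog(cluster, delLog, d -> d.getFailedTransactions().size() == 0, 20000);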
Use of org.apache.hadoop.hdds.scm.ScmConfig in project ozone by apache.
The class StorageContainerManager, method loginAsSCMUserIfSecurityEnabled().
/**
 * Login as the configured user for SCM.
 *
 * @param scmhaNodeDetails SCM HA node details used to resolve the SCM address
 * @param conf configuration to read security settings from
 */
private static void loginAsSCMUserIfSecurityEnabled(SCMHANodeDetails scmhaNodeDetails, ConfigurationSource conf) throws IOException, AuthenticationException {
if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
if (LOG.isDebugEnabled()) {
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
LOG.debug("Ozone security is enabled. Attempting login for SCM user. " + "Principal: {}, keytab: {}", scmConfig.getKerberosPrincipal(), scmConfig.getKerberosKeytab());
}
Configuration hadoopConf = LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
if (SecurityUtil.getAuthenticationMethod(hadoopConf).equals(AuthenticationMethod.KERBEROS)) {
UserGroupInformation.setConfiguration(hadoopConf);
InetSocketAddress socketAddress = getScmAddress(scmhaNodeDetails, conf);
SecurityUtil.login(hadoopConf,
    ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
    ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
    socketAddress.getHostName());
} else {
throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(hadoopConf) + " authentication method not supported. " + "SCM user login failed.");
}
LOG.info("SCM login successful.");
}
}
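For completeness, here is a minimal sketch of wiring the Kerberos settings this method reads. The ConfigStrings keys and the ScmConfig getters are the ones used above; the principal and keytab values are placeholders:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfig;

public class ScmSecurityConfigSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Keys from ScmConfig.ConfigStrings, the same constants passed to
    // SecurityUtil.login above. Values are placeholders.
    conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
        "scm/_HOST@EXAMPLE.COM");
    conf.set(ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/scm.keytab");
    // The typed view exposes the same values, as in the debug log above.
    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
    System.out.println(scmConfig.getKerberosPrincipal());
    System.out.println(scmConfig.getKerberosKeytab());
  }
}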
Use of org.apache.hadoop.hdds.scm.ScmConfig in project ozone by apache.
The class PipelineChoosePolicyFactory, method getPolicy().
public static PipelineChoosePolicy getPolicy(ConfigurationSource conf) throws SCMException {
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
Class<? extends PipelineChoosePolicy> policyClass = getClass(scmConfig.getPipelineChoosePolicyName(), PipelineChoosePolicy.class);
try {
return createPipelineChoosePolicyFromClass(policyClass);
} catch (Exception e) {
if (policyClass != OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT) {
LOG.error("Met an exception while create pipeline choose policy " + "for the given class " + policyClass.getName() + ". Fallback to the default pipeline choose policy " + OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, e);
return createPipelineChoosePolicyFromClass(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT);
}
throw e;
}
}
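A hedged usage sketch of the factory: point ScmConfig at a policy implementation and resolve it. The setter name is assumed to mirror the getPipelineChoosePolicyName getter used above, and the fully qualified policy class name is illustrative:

static PipelineChoosePolicy resolveConfiguredPolicy() throws SCMException {
  OzoneConfiguration conf = new OzoneConfiguration();
  // Setter name assumed to mirror getPipelineChoosePolicyName(); the
  // policy class below is an example, not a verified constant.
  ScmConfig scmConfig = conf.getObject(ScmConfig.class);
  scmConfig.setPipelineChoosePolicyName(
      "org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.RandomPipelineChoosePolicy");
  conf.setFromObject(scmConfig);
  // Falls back to OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT if the
  // configured class cannot be instantiated (see the catch branch above).
  return PipelineChoosePolicyFactory.getPolicy(conf);
}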
Use of org.apache.hadoop.hdds.scm.ScmConfig in project ozone by apache.
The class TestPipelineChoosePolicyFactory, method setup().
@Before
public void setup() {
// initialize the configuration and the typed SCM config view
conf = new OzoneConfiguration();
scmConfig = conf.getObject(ScmConfig.class);
}