Example usage of org.talend.hadoop.distribution.component.HadoopComponent in the Talend project tbd-studio-se: class HDInsight40DefaultConfigurationTest, method testHBase_NotSupport.
@Test
public void testHBase_NotSupport() {
    // HDInsight 4.0 is expected to provide no default configuration for the
    // HBase category, so the lookup must return null.
    HadoopComponent hadoopComponent = getHadoopComponent();
    String defaultValue = hadoopComponent.getDefaultConfig(hadoopComponent.getDistribution(),
            EHadoopCategory.HBASE.getName());
    // assertNull states the intent directly instead of assertTrue(x == null).
    assertNull("Should not support for HBase", defaultValue);
}
Example usage of org.talend.hadoop.distribution.component.HadoopComponent in the Talend project tbd-studio-se: class DistributionHelperTest, method test_doSupportService_SparkStreamingComponent.
@Test
public void test_doSupportService_SparkStreamingComponent() {
    // Spark Streaming support is decided by the concrete component type carried
    // by the DistributionVersion, not by the distribution bean itself.
    DistributionBean distributionBean = Mockito.mock(DistributionBean.class);

    // A plain Hadoop component does not advertise Spark Streaming.
    HadoopComponent plainHadoop = Mockito.mock(HadoopComponent.class);
    DistributionVersion hadoopOnly = new DistributionVersion(plainHadoop, distributionBean, "ABC_10", "Abc 1.0");
    Assert.assertFalse(DistributionHelper.doSupportService(hadoopOnly, IHDConstants.SERVICE_SPARK_STREAMING));

    // Batch-only Spark support is still not enough for the streaming service.
    SparkComponent batchOnlySpark = Mockito.mock(SparkComponent.class);
    DistributionVersion sparkBatchOnly = new DistributionVersion(batchOnlySpark, distributionBean, "ABC_10", "Abc 1.0");
    Assert.assertFalse(DistributionHelper.doSupportService(sparkBatchOnly, IHDConstants.SERVICE_SPARK_STREAMING));

    // Only a SparkStreamingComponent enables SERVICE_SPARK_STREAMING.
    SparkStreamingComponent streamingCapable = Mockito.mock(SparkStreamingComponent.class);
    DistributionVersion streaming = new DistributionVersion(streamingCapable, distributionBean, "ABC_10", "Abc 1.0");
    Assert.assertTrue(DistributionHelper.doSupportService(streaming, IHDConstants.SERVICE_SPARK_STREAMING));
}
Example usage of org.talend.hadoop.distribution.component.HadoopComponent in the Talend project tbd-studio-se: class EMR5290DistributionTest, method testEMR5290Distribution.
@Test
public void testEMR5290Distribution() throws Exception {
    // Smoke-tests the capability matrix advertised by the EMR 5.29.0 distribution.
    HadoopComponent distribution = new EMR5290Distribution();

    // Basic identity of the distribution.
    assertNotNull(distribution.getDistributionName());
    assertNotNull(distribution.getVersionName(null));
    assertTrue(distribution.doSupportS3());
    assertEquals(EMR5290Distribution.DISTRIBUTION_NAME, distribution.getDistribution());
    assertEquals(EMR5290Distribution.VERSION, distribution.getVersion());
    assertEquals(EHadoopVersion.HADOOP_2, distribution.getHadoopVersion());
    assertTrue(distribution.doSupportKerberos());
    assertTrue(distribution.doSupportUseDatanodeHostname());
    assertFalse(distribution.doSupportGroup());
    assertFalse(distribution.doSupportOldImportMode());

    // Hoist the repeated casts into typed locals for readability.
    assertTrue(((HDFSComponent) distribution).doSupportSequenceFileShortType());
    MRComponent mrComponent = (MRComponent) distribution;
    assertFalse(mrComponent.isExecutedThroughWebHCat());
    assertTrue(mrComponent.doSupportCrossPlatformSubmission());
    assertTrue(mrComponent.doSupportImpersonation());
    // Fixed argument order: JUnit's assertEquals takes (expected, actual),
    // matching the convention used by the other tests in this file.
    assertEquals(DEFAULT_YARN_APPLICATION_CLASSPATH, mrComponent.getYarnApplicationClasspath());

    assertTrue(distribution instanceof HBaseComponent);
    assertTrue(distribution instanceof SqoopComponent);

    // Hive capabilities.
    HiveComponent hiveComponent = (HiveComponent) distribution;
    assertFalse(hiveComponent.doSupportEmbeddedMode());
    assertTrue(hiveComponent.doSupportStandaloneMode());
    assertFalse(hiveComponent.doSupportHive1());
    assertTrue(hiveComponent.doSupportHive2());
    assertFalse(hiveComponent.doSupportTezForHive());
    assertFalse(hiveComponent.doSupportHBaseForHive());
    assertTrue(hiveComponent.doSupportSSL());
    assertTrue(hiveComponent.doSupportORCFormat());
    assertTrue(hiveComponent.doSupportAvroFormat());
    assertTrue(hiveComponent.doSupportParquetFormat());
    assertTrue(hiveComponent.doSupportStoreAsParquet());
    assertFalse(hiveComponent.doSupportClouderaNavigator());

    assertTrue(distribution instanceof HCatalogComponent);
    assertFalse(distribution instanceof ImpalaComponent);

    // Sqoop and HBase capabilities.
    SqoopComponent sqoopComponent = (SqoopComponent) distribution;
    assertTrue(sqoopComponent.doJavaAPISqoopImportAllTablesSupportExcludeTable());
    assertTrue(sqoopComponent.doJavaAPISqoopImportSupportDeleteTargetDir());
    assertTrue(sqoopComponent.doJavaAPISupportStorePasswordInFile());
    assertTrue(((HBaseComponent) distribution).doSupportNewHBaseAPI());

    // Storage integrations.
    assertFalse(distribution.doSupportAzureDataLakeStorage());
    assertTrue(distribution.doSupportWebHDFS());
}
Example usage of org.talend.hadoop.distribution.component.HadoopComponent in the Talend project tbd-studio-se: class CustomDistributionTest, method testCustomDistribution.
@Test
public void testCustomDistribution() throws Exception {
    // Smoke-tests the capability matrix of the user-defined "custom" distribution,
    // which has no fixed version and therefore returns null for version lookups.
    HadoopComponent distribution = new CustomDistribution();

    // Basic identity: a custom distribution has a name but no pinned version.
    assertNotNull(distribution.getDistributionName());
    assertNull(distribution.getVersionName(null));
    assertTrue(distribution.doSupportS3());
    assertEquals(CustomDistribution.DISTRIBUTION_NAME, distribution.getDistribution());
    assertNull(distribution.getVersion());
    assertNull(distribution.getHadoopVersion());
    assertTrue(distribution.doSupportKerberos());
    assertTrue(distribution.doSupportUseDatanodeHostname());
    assertFalse(distribution.doSupportGroup());
    assertTrue(distribution.doSupportOldImportMode());

    // Hoist the repeated casts into typed locals for readability.
    assertTrue(((HDFSComponent) distribution).doSupportSequenceFileShortType());
    MRComponent mrComponent = (MRComponent) distribution;
    assertFalse(mrComponent.isExecutedThroughWebHCat());
    assertFalse(mrComponent.doSupportCrossPlatformSubmission());
    assertTrue(mrComponent.doSupportImpersonation());
    assertEquals(DEFAULT_YARN_APPLICATION_CLASSPATH, mrComponent.getYarnApplicationClasspath());

    assertFalse(((HBaseComponent) distribution).doSupportNewHBaseAPI());

    // Sqoop capabilities.
    SqoopComponent sqoopComponent = (SqoopComponent) distribution;
    assertTrue(sqoopComponent.doJavaAPISupportStorePasswordInFile());
    assertFalse(sqoopComponent.doJavaAPISqoopImportSupportDeleteTargetDir());
    assertTrue(sqoopComponent.doJavaAPISqoopImportAllTablesSupportExcludeTable());

    // Hive capabilities.
    HiveComponent hiveComponent = (HiveComponent) distribution;
    assertTrue(hiveComponent.doSupportEmbeddedMode());
    assertTrue(hiveComponent.doSupportStandaloneMode());
    assertTrue(hiveComponent.doSupportHive1());
    assertTrue(hiveComponent.doSupportHive2());
    assertTrue(hiveComponent.doSupportTezForHive());
    assertTrue(hiveComponent.doSupportHBaseForHive());
    assertTrue(hiveComponent.doSupportSSL());
    assertTrue(hiveComponent.doSupportORCFormat());
    assertTrue(hiveComponent.doSupportAvroFormat());
    assertTrue(hiveComponent.doSupportParquetFormat());

    // Spark batch: only Spark 1.3 is advertised.
    SparkBatchComponent sparkBatchComponent = (SparkBatchComponent) distribution;
    assertFalse(sparkBatchComponent.getSparkVersions().contains(ESparkVersion.SPARK_2_0));
    assertFalse(sparkBatchComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_6));
    assertFalse(sparkBatchComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_5));
    assertFalse(sparkBatchComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_4));
    assertTrue(sparkBatchComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_3));
    assertTrue(sparkBatchComponent.doSupportDynamicMemoryAllocation());
    assertFalse(sparkBatchComponent.isExecutedThroughSparkJobServer());
    assertTrue(sparkBatchComponent.doSupportSparkStandaloneMode());
    assertTrue(sparkBatchComponent.doSupportSparkYarnClientMode());

    // Spark streaming mirrors the batch version matrix.
    SparkStreamingComponent sparkStreamingComponent = (SparkStreamingComponent) distribution;
    assertFalse(sparkStreamingComponent.getSparkVersions().contains(ESparkVersion.SPARK_2_0));
    assertFalse(sparkStreamingComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_6));
    assertFalse(sparkStreamingComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_5));
    assertFalse(sparkStreamingComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_4));
    assertTrue(sparkStreamingComponent.getSparkVersions().contains(ESparkVersion.SPARK_1_3));
    assertTrue(sparkStreamingComponent.doSupportDynamicMemoryAllocation());
    assertFalse(sparkStreamingComponent.isExecutedThroughSparkJobServer());
    assertTrue(sparkStreamingComponent.doSupportCheckpointing());
    assertTrue(sparkStreamingComponent.doSupportSparkStandaloneMode());
    assertTrue(sparkStreamingComponent.doSupportSparkYarnClientMode());
    assertTrue(sparkStreamingComponent.doSupportBackpressure());

    assertFalse(hiveComponent.doSupportStoreAsParquet());
    assertFalse(hiveComponent.doSupportClouderaNavigator());
    assertTrue(distribution instanceof HCatalogComponent);
    assertTrue(distribution instanceof ImpalaComponent);
    assertTrue(distribution.doSupportCreateServiceConnection());
    // Clearer than the original "(x == null ? 0 : x.size()) == 0": the list of
    // necessary services must be absent or empty.
    assertTrue(distribution.getNecessaryServiceName() == null
            || distribution.getNecessaryServiceName().isEmpty());
    assertFalse(distribution.doSupportAzureDataLakeStorage());
    assertFalse(distribution.doSupportWebHDFS());
}
Example usage of org.talend.hadoop.distribution.component.HadoopComponent in the Talend project tbd-studio-se: class MapRStreamsCreateStreamProblem, method check.
@Override
public void check(Node node) {
    // Validates that a MapR Streams component is connected to a distribution
    // whose version supports the MapR Streams admin API; otherwise an ERROR
    // problem is reported on the node.
    if (!MAPRSTREAMS_COMPONENTS.equals(node.getComponent().getName())) {
        return;
    }
    INode connectionNode = ElementParameterParser.getLinkedNodeValue(node, "__CONNECTION__"); //$NON-NLS-1$
    if (connectionNode == null) {
        return;
    }
    IElementParameter distributionParam = connectionNode.getElementParameter("DISTRIBUTION"); //$NON-NLS-1$
    IElementParameter versionParam = connectionNode.getElementParameter("MAPRSTREAMS_VERSION"); //$NON-NLS-1$
    if (distributionParam == null || versionParam == null) {
        return;
    }
    try {
        HadoopComponent hadoopDistribution = DistributionFactory
                .buildDistribution((String) distributionParam.getValue(), (String) versionParam.getValue());
        if (distributionDoesNotSupportMapRStreamsAdminAPI(hadoopDistribution)) {
            Problems.add(ProblemStatus.ERROR, node,
                    Messages.getString("Node.checkMapRStreamsCreateStreamVersion")); //$NON-NLS-1$
        }
    } catch (java.lang.Exception e) {
        // buildDistribution may fail for unknown distribution/version pairs;
        // log instead of breaking the whole check pass.
        CommonExceptionHandler.process(e);
    }
}
Aggregations