Example 1 with IndexFilter

Use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

From the class Hive2CarbonExpressionTest, method testEqualOrGreaterThanHiveFilter, which builds the Hive predicate id >= 0, converts it to a Carbon IndexFilter, and verifies the split count after pruning:

@Test
public void testEqualOrGreaterThanHiveFilter() throws IOException {
    ExprNodeDesc column1 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    List<ExprNodeDesc> children1 = Lists.newArrayList();
    ExprNodeDesc constant1 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "0");
    children1.add(column1);
    children1.add(constant1);
    ExprNodeGenericFuncDesc node1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPEqualOrGreaterThan(), children1);
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration, new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(node1)));
    final Job job = new Job(new JobConf(configuration));
    final CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    List<InputSplit> list = format.getSplits(job);
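    // id >= 0 evidently matches the test data, so pruning removes nothing and a single split is returned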
    Assert.assertEquals(1, list.size());
}
Also used: ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), Configuration (org.apache.hadoop.conf.Configuration), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat), GenericUDFOPEqualOrGreaterThan (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), IndexFilter (org.apache.carbondata.core.index.IndexFilter), Job (org.apache.hadoop.mapreduce.Job), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)
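The tests on this page differ only in the predicate they build; everything around it is the same job setup. As a minimal sketch (not part of the original test class), that boilerplate could be factored into two helpers. The names buildBinaryPredicate and countSplits are hypothetical, and table is assumed to be the CarbonTable fixture that Hive2CarbonExpressionTest already initializes:

import java.io.IOException;
import java.util.List;
import com.google.common.collect.Lists;
import org.apache.carbondata.core.index.IndexFilter;
import org.apache.carbondata.hadoop.api.CarbonFileInputFormat;
import org.apache.carbondata.hadoop.api.CarbonInputFormat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Job;

// Hypothetical helper: builds the Hive expression tree for "column <op> constant".
private ExprNodeGenericFuncDesc buildBinaryPredicate(String column, String constant, GenericUDF op) {
    ExprNodeDesc columnDesc = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, column, null, false);
    ExprNodeDesc constantDesc = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, constant);
    List<ExprNodeDesc> children = Lists.newArrayList(columnDesc, constantDesc);
    return new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, op, children);
}

// Hypothetical helper: converts the Hive expression to a Carbon IndexFilter, configures a
// CarbonFileInputFormat against the test table, and returns the number of splits left after pruning.
private int countSplits(ExprNodeGenericFuncDesc predicate) throws IOException {
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration,
        new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(predicate)));
    Job job = new Job(new JobConf(configuration));
    CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    return format.getSplits(job).size();
}

With these helpers, Example 1 would reduce to a single line:

Assert.assertEquals(1, countSplits(buildBinaryPredicate("id", "0", new GenericUDFOPEqualOrGreaterThan())));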

Example 2 with IndexFilter

Use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

From the class Hive2CarbonExpressionTest, method testLessThanEqualsHiveFilter, which, despite its name, builds the strict Hive predicate id < 0 using GenericUDFOPLessThan:

@Test
public void testLessThanEqualsHiveFilter() throws IOException {
    ExprNodeDesc column1 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    List<ExprNodeDesc> children1 = Lists.newArrayList();
    ExprNodeDesc constant1 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "0");
    children1.add(column1);
    children1.add(constant1);
    ExprNodeGenericFuncDesc node1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPLessThan(), children1);
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration, new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(node1)));
    final Job job = new Job(new JobConf(configuration));
    final CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    List<InputSplit> list = format.getSplits(job);
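    // no row evidently satisfies id < 0, so pruning removes every block and no splits are returned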
    Assert.assertEquals(0, list.size());
}
Also used: ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), Configuration (org.apache.hadoop.conf.Configuration), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat), GenericUDFOPLessThan (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), IndexFilter (org.apache.carbondata.core.index.IndexFilter), Job (org.apache.hadoop.mapreduce.Job), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)

Example 3 with IndexFilter

Use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

From the class Hive2CarbonExpressionTest, method testOrHiveFilter, which ORs two equality predicates, id = 500 and id = 4999999:

@Test
public void testOrHiveFilter() throws IOException {
    ExprNodeDesc column1 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    ExprNodeDesc constant1 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "500");
    List<ExprNodeDesc> children1 = Lists.newArrayList();
    children1.add(column1);
    children1.add(constant1);
    ExprNodeGenericFuncDesc node1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPEqual(), children1);
    ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "4999999");
    List<ExprNodeDesc> children2 = Lists.newArrayList();
    children2.add(column2);
    children2.add(constant2);
    List<ExprNodeDesc> children3 = Lists.newArrayList();
    ExprNodeGenericFuncDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPEqual(), children2);
    children3.add(node1);
    children3.add(node2);
    ExprNodeGenericFuncDesc node3 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPOr(), children3);
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration, new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(node3)));
    final Job job = new Job(new JobConf(configuration));
    final CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    List<InputSplit> list = format.getSplits(job);
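    // at least one branch of the OR (evidently id = 500) matches the data, so one split survives pruning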
    Assert.assertEquals(1, list.size());
}
Also used: ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), Configuration (org.apache.hadoop.conf.Configuration), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), GenericUDFOPEqual (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), IndexFilter (org.apache.carbondata.core.index.IndexFilter), Job (org.apache.hadoop.mapreduce.Job), GenericUDFOPOr (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)
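The converter maps the Hive OR tree onto Carbon's own expression classes, so the same filter can also be built directly against that API, skipping the Hive layer entirely. A minimal sketch, assuming Carbon's ColumnExpression, LiteralExpression, EqualToExpression, and OrExpression classes and the same table fixture:

import org.apache.carbondata.core.index.IndexFilter;
import org.apache.carbondata.core.metadata.datatype.DataTypes;
import org.apache.carbondata.core.scan.expression.ColumnExpression;
import org.apache.carbondata.core.scan.expression.Expression;
import org.apache.carbondata.core.scan.expression.LiteralExpression;
import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
import org.apache.carbondata.core.scan.expression.logical.OrExpression;

// id = 500 OR id = 4999999, expressed directly as a Carbon expression tree
Expression idIs500 = new EqualToExpression(
    new ColumnExpression("id", DataTypes.INT),
    new LiteralExpression(500, DataTypes.INT));
Expression idIs4999999 = new EqualToExpression(
    new ColumnExpression("id", DataTypes.INT),
    new LiteralExpression(4999999, DataTypes.INT));
IndexFilter filter = new IndexFilter(table, new OrExpression(idIs500, idIs4999999));

The resulting filter can be registered with CarbonInputFormat.setFilterPredicates exactly as in the test above.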

Example 4 with IndexFilter

Use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

From the class Hive2CarbonExpressionTest, method testEqualOrLessThanEqualsHiveFilter, which builds the Hive predicate id <= 1000:

@Test
public void testEqualOrLessThanEqualsHiveFilter() throws IOException {
    ExprNodeDesc column1 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    List<ExprNodeDesc> children1 = Lists.newArrayList();
    ExprNodeDesc constant1 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "1000");
    children1.add(column1);
    children1.add(constant1);
    ExprNodeGenericFuncDesc node1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPEqualOrLessThan(), children1);
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration, new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(node1)));
    final Job job = new Job(new JobConf(configuration));
    final CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    List<InputSplit> list = format.getSplits(job);
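    // id <= 1000 evidently covers the test data, so one split is returned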
    Assert.assertEquals(1, list.size());
}
Also used: ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), GenericUDFOPEqualOrLessThan (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan), Configuration (org.apache.hadoop.conf.Configuration), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), IndexFilter (org.apache.carbondata.core.index.IndexFilter), Job (org.apache.hadoop.mapreduce.Job), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)
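Examples 1 and 4 establish a lower and an upper bound on id; combining them under GenericUDFOPAnd yields a range predicate. A minimal sketch, reusing the hypothetical buildBinaryPredicate helper from the note after Example 1 (intTypeInfo on the AND node mirrors the convention these tests use for logical operators):

import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan;
import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan;

// id >= 0 AND id <= 1000, built from the two bound predicates
ExprNodeGenericFuncDesc lowerBound = buildBinaryPredicate("id", "0", new GenericUDFOPEqualOrGreaterThan());
ExprNodeGenericFuncDesc upperBound = buildBinaryPredicate("id", "1000", new GenericUDFOPEqualOrLessThan());
List<ExprNodeDesc> bounds = Lists.newArrayList();
bounds.add(lowerBound);
bounds.add(upperBound);
ExprNodeGenericFuncDesc range = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPAnd(), bounds);

Converting range with Hive2CarbonExpression.convertExprHive2Carbon and wrapping it in an IndexFilter then follows the same pattern as every example on this page.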

Example 5 with IndexFilter

Use of org.apache.carbondata.core.index.IndexFilter in project carbondata by apache.

From the class Hive2CarbonExpressionTest, method testGreaterThanHiveFilter, which builds the Hive predicate id > 1001:

@Test
public void testGreaterThanHiveFilter() throws IOException {
    ExprNodeDesc column1 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "id", null, false);
    List<ExprNodeDesc> children1 = Lists.newArrayList();
    ExprNodeDesc constant1 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, "1001");
    children1.add(column1);
    children1.add(constant1);
    ExprNodeGenericFuncDesc node1 = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, new GenericUDFOPGreaterThan(), children1);
    Configuration configuration = new Configuration();
    CarbonInputFormat.setFilterPredicates(configuration, new IndexFilter(table, Hive2CarbonExpression.convertExprHive2Carbon(node1)));
    final Job job = new Job(new JobConf(configuration));
    final CarbonFileInputFormat format = new CarbonFileInputFormat();
    format.setTableInfo(job.getConfiguration(), table.getTableInfo());
    format.setTablePath(job.getConfiguration(), table.getTablePath());
    format.setTableName(job.getConfiguration(), table.getTableName());
    format.setDatabaseName(job.getConfiguration(), table.getDatabaseName());
    List<InputSplit> list = format.getSplits(job);
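    // id > 1001 evidently matches no rows, so pruning removes every block and no splits are returned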
    Assert.assertEquals(0, list.size());
}
Also used: GenericUDFOPGreaterThan (org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan), ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc), Configuration (org.apache.hadoop.conf.Configuration), ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc), CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat), ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc), ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc), IndexFilter (org.apache.carbondata.core.index.IndexFilter), Job (org.apache.hadoop.mapreduce.Job), JobConf (org.apache.hadoop.mapred.JobConf), InputSplit (org.apache.hadoop.mapreduce.InputSplit), Test (org.junit.Test)

Aggregations

IndexFilter (org.apache.carbondata.core.index.IndexFilter): 27 usages
Configuration (org.apache.hadoop.conf.Configuration): 16 usages
InputSplit (org.apache.hadoop.mapreduce.InputSplit): 16 usages
JobConf (org.apache.hadoop.mapred.JobConf): 15 usages
Job (org.apache.hadoop.mapreduce.Job): 15 usages
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc): 12 usages
Test (org.junit.Test): 12 usages
CarbonFileInputFormat (org.apache.carbondata.hadoop.api.CarbonFileInputFormat): 11 usages
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc): 11 usages
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc): 11 usages
IOException (java.io.IOException): 9 usages
ExprNodeConstantDesc (org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc): 9 usages
ArrayList (java.util.ArrayList): 8 usages
List (java.util.List): 5 usages
CarbonTable (org.apache.carbondata.core.metadata.schema.table.CarbonTable): 5 usages
CarbonInputSplit (org.apache.carbondata.hadoop.CarbonInputSplit): 5 usages
CarbonTableInputFormat (org.apache.carbondata.hadoop.api.CarbonTableInputFormat): 5 usages
HashMap (java.util.HashMap): 4 usages
CarbonTablePath (org.apache.carbondata.core.util.path.CarbonTablePath): 4 usages
Map (java.util.Map): 3 usages