Use of org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx in project hive by apache.
From the class TestFileSinkOperator, method getFileSink:
private FileSinkOperator getFileSink(AcidUtils.Operation writeType, boolean dynamic,
    long txnId) throws IOException, HiveException {
  TableDesc tableDesc = null;
  switch (writeType) {
    case DELETE:
    case UPDATE:
    case INSERT:
      tableDesc = acidTableDescriptor;
      break;
    case NOT_ACID:
      tableDesc = nonAcidTableDescriptor;
      break;
  }
  FileSinkDesc desc = null;
  if (dynamic) {
    ArrayList<ExprNodeDesc> partCols = new ArrayList<ExprNodeDesc>(1);
    partCols.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, PARTCOL_NAME, "a", true));
    Map<String, String> partColMap = new LinkedHashMap<String, String>(1);
    partColMap.put(PARTCOL_NAME, null);
    DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(null, partColMap, "Sunday", 100);
    // todo: does this need the finalDestination?
    desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null);
  } else {
    desc = new FileSinkDesc(basePath, tableDesc, false);
  }
  desc.setWriteType(writeType);
  desc.setGatherStats(true);
  if (txnId > 0) desc.setTransactionId(txnId);
  if (writeType != AcidUtils.Operation.NOT_ACID) desc.setTransactionId(1L);
  FileSinkOperator op = (FileSinkOperator) OperatorFactory.get(new CompilationOpContext(), FileSinkDesc.class);
  op.setConf(desc);
  op.initialize(jc, new ObjectInspector[] { inspector });
  return op;
}
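
For context, a caller might drive the operator built by this helper roughly as sketched below. This is an illustrative sketch, not code from the project: the test method name and the rows fixture are assumptions, and only the process/close calls on Operator are taken as given.

@Test
public void exerciseDynamicPartitionFileSink() throws Exception {
  // Hypothetical usage: build a dynamic-partition, ACID-insert sink, push rows through it, close it.
  FileSinkOperator op = getFileSink(AcidUtils.Operation.INSERT, true, 1L);
  for (Object row : rows) {   // 'rows' is an assumed fixture of writables matching 'inspector'
    op.process(row, 0);       // Operator.process(Object row, int tag)
  }
  op.close(false);            // false = normal (non-aborted) close
}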
Use of org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx in project hive by apache.
From the class TestUtilities, method getDynamicPartitionCtx:
private DynamicPartitionCtx getDynamicPartitionCtx(boolean dPEnabled) {
  DynamicPartitionCtx dpCtx = null;
  if (dPEnabled) {
    dpCtx = mock(DynamicPartitionCtx.class);
    when(dpCtx.getNumDPCols()).thenReturn(0);
    when(dpCtx.getNumBuckets()).thenReturn(NUM_BUCKETS);
  }
  return dpCtx;
}
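
If a real object were preferred over a Mockito mock, a DynamicPartitionCtx could be constructed directly with the same constructor used in the TestFileSinkOperator snippet above. The sketch below uses illustrative values for the column name, default partition name, and max-partitions limit; none of them come from the project.

// Sketch (not project code): building a real DynamicPartitionCtx instead of mocking it.
Map<String, String> partSpec = new LinkedHashMap<String, String>();
partSpec.put("partcol", null);  // a null value marks "partcol" as a dynamic partition column
DynamicPartitionCtx dpCtx =
    new DynamicPartitionCtx(null, partSpec, "__HIVE_DEFAULT_PARTITION__", 100);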
Use of org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx in project hive by apache.
From the class TestUtilities, method runRemoveTempOrDuplicateFilesTestCase:
private List<Path> runRemoveTempOrDuplicateFilesTestCase(String executionEngine, boolean dPEnabled)
    throws Exception {
  Configuration hconf = new HiveConf(this.getClass());
  // do this to verify that Utilities.removeTempOrDuplicateFiles does not revert to default scheme information
  hconf.set("fs.defaultFS", "hdfs://should-not-be-used/");
  hconf.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, executionEngine);
  FileSystem localFs = FileSystem.getLocal(hconf);
  DynamicPartitionCtx dpCtx = getDynamicPartitionCtx(dPEnabled);
  Path tempDirPath = setupTempDirWithSingleOutputFile(hconf);
  FileSinkDesc conf = getFileSinkDesc(tempDirPath);
  List<Path> paths = Utilities.removeTempOrDuplicateFiles(localFs, tempDirPath, dpCtx, conf, hconf);
  String expectedScheme = tempDirPath.toUri().getScheme();
  String expectedAuthority = tempDirPath.toUri().getAuthority();
  assertPathsMatchSchemeAndAuthority(expectedScheme, expectedAuthority, paths);
  return paths;
}
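
A test method exercising this helper might look something like the sketch below. The test name, the execution-engine string, and the assertion are assumptions about how the helper could be called, not code from the project.

@Test
public void testRemoveTempOrDuplicateFilesWithDynamicPartitions() throws Exception {
  // Run the helper with dynamic partitioning enabled on the "mr" execution engine;
  // the scheme/authority checks happen inside runRemoveTempOrDuplicateFilesTestCase.
  List<Path> paths = runRemoveTempOrDuplicateFilesTestCase("mr", true);
  assertNotNull(paths);
}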