Use of org.apache.hadoop.hive.metastore.api.Order in project hive by apache.
Class TestWorker, method sortedPartition.
@Test
public void sortedPartition() throws Exception {
// Declare a single sort column "b", ascending (Order's second field is the direction; 1 = ascending).
List<Order> sortCols = new ArrayList<Order>(1);
sortCols.add(new Order("b", 1));
Table t = newTable("default", "sp", true, new HashMap<String, String>(), sortCols, false);
Partition p = newPartition(t, "today", sortCols);
// One base file plus three delta files covering transactions 20-24.
addBaseFile(t, p, 20L, 20);
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
addDeltaFile(t, p, 21L, 24L, 4);
// Open and commit 25 transactions so the files above are visible.
burnThroughTransactions(25);
CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
txnHandler.compact(rqst);
startWorker();
// The partition's table is sorted, so the Worker declines to compact it; all four directories should remain.
FileSystem fs = FileSystem.get(conf);
FileStatus[] stat = fs.listStatus(new Path(p.getSd().getLocation()));
Assert.assertEquals(4, stat.length);
}
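For reference, Order is a plain thrift struct pairing a column name with a sort direction. A minimal standalone sketch, assuming Hive's usual convention that 1 means ascending (the class name OrderDemo is hypothetical):
import org.apache.hadoop.hive.metastore.api.Order;
public class OrderDemo {
public static void main(String[] args) {
// Column "b", ascending (1); thrift generates accessors for both fields.
Order sortCol = new Order("b", 1);
System.out.println(sortCol.getCol() + " " + sortCol.getOrder()); // prints: b 1
}
}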
Use of org.apache.hadoop.hive.metastore.api.Order in project hive by apache.
Class TestWorker, method sortedTable.
@Test
public void sortedTable() throws Exception {
List<Order> sortCols = new ArrayList<Order>(1);
sortCols.add(new Order("b", 1));
Table t = newTable("default", "st", false, new HashMap<String, String>(), sortCols, false);
// Same layout as sortedPartition: one base file plus three deltas, then 25 committed transactions.
addBaseFile(t, null, 20L, 20);
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
addDeltaFile(t, null, 21L, 24L, 4);
burnThroughTransactions(25);
CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR);
txnHandler.compact(rqst);
startWorker();
// The table is sorted, so the Worker declines to compact it; all four directories should remain.
FileSystem fs = FileSystem.get(conf);
FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
Assert.assertEquals(4, stat.length);
}
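When a compaction does run, the Worker writes its output as new base/delta directories alongside the originals. A hedged sketch of narrowing the listing to delta directories with a name filter (org.apache.hadoop.fs.PathFilter; "delta_" is the usual prefix in Hive's ACID layout):
FileStatus[] deltas = fs.listStatus(new Path(t.getSd().getLocation()), new PathFilter() {
@Override
public boolean accept(Path path) {
// Keep only directories following the ACID delta naming convention.
return path.getName().startsWith("delta_");
}
});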
Use of org.apache.hadoop.hive.metastore.api.Order in project hive by apache.
Class TestHBaseStore, method dropIndex.
@Test
public void dropIndex() throws Exception {
String tableName = "mytable";
int startTime = (int) (System.currentTimeMillis() / 1000);
List<FieldSchema> cols = new ArrayList<FieldSchema>();
cols.add(new FieldSchema("col1", "int", ""));
SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
store.createTable(table);
String indexName = "myindex";
Index index = new Index(indexName, null, "default", tableName, startTime, startTime,
tableName + "__" + indexName + "__", sd, emptyParameters, false);
store.addIndex(index);
store.dropIndex("default", tableName, indexName);
}
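The test only exercises the drop path. A hedged follow-up assertion, assuming this RawStore implementation returns null for a missing index (other implementations may throw instead):
// Assumption: the store returns null once the index is gone.
Assert.assertNull(store.getIndex("default", tableName, indexName));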
Use of org.apache.hadoop.hive.metastore.api.Order in project hive by apache.
Class TestHBaseStoreBitVector, method createMockTableAndPartition.
private Table createMockTableAndPartition(String partType, String partVal) throws Exception {
List<FieldSchema> cols = new ArrayList<FieldSchema>();
cols.add(new FieldSchema("col1", partType, ""));
List<String> vals = new ArrayList<String>();
vals.add(partVal);
SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
int currentTime = (int) (System.currentTimeMillis() / 1000);
Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, emptyParameters, null, null, null);
store.createTable(table);
Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters);
store.addPartition(part);
return table;
}
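A hypothetical call site for the helper, reusing the test class's DB and TBL constants and assuming a string-typed partition column:
// Hypothetical usage: create the table and partition, then read the
// partition back by its value.
Table t = createMockTableAndPartition("string", "val1");
Partition fetched = store.getPartition(DB, TBL, Arrays.asList("val1"));
Assert.assertEquals("val1", fetched.getValues().get(0));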
Use of org.apache.hadoop.hive.metastore.api.Order in project hive by apache.
Class TestHBaseStore, method createTable.
@Test
public void createTable() throws Exception {
String tableName = "mytable";
int startTime = (int) (System.currentTimeMillis() / 1000);
List<FieldSchema> cols = new ArrayList<FieldSchema>();
cols.add(new FieldSchema("col1", "int", ""));
SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
Map<String, String> params = new HashMap<String, String>();
params.put("key", "value");
StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, emptyParameters, null, null, null);
store.createTable(table);
Table t = store.getTable("default", tableName);
// Columns round-trip.
Assert.assertEquals(1, t.getSd().getColsSize());
Assert.assertEquals("col1", t.getSd().getCols().get(0).getName());
Assert.assertEquals("int", t.getSd().getCols().get(0).getType());
Assert.assertEquals("", t.getSd().getCols().get(0).getComment());
// SerDe round-trip.
Assert.assertEquals("serde", t.getSd().getSerdeInfo().getName());
Assert.assertEquals("seriallib", t.getSd().getSerdeInfo().getSerializationLib());
// StorageDescriptor basics.
Assert.assertEquals("file:/tmp", t.getSd().getLocation());
Assert.assertEquals("input", t.getSd().getInputFormat());
Assert.assertEquals("output", t.getSd().getOutputFormat());
Assert.assertFalse(t.getSd().isCompressed());
Assert.assertEquals(17, t.getSd().getNumBuckets());
// Bucket columns.
Assert.assertEquals(1, t.getSd().getBucketColsSize());
Assert.assertEquals("bucketcol", t.getSd().getBucketCols().get(0));
// Sort columns: the Order's column name and direction both survive.
Assert.assertEquals(1, t.getSd().getSortColsSize());
Assert.assertEquals("sortcol", t.getSd().getSortCols().get(0).getCol());
Assert.assertEquals(1, t.getSd().getSortCols().get(0).getOrder());
// StorageDescriptor parameters.
Assert.assertEquals(1, t.getSd().getParametersSize());
Assert.assertEquals("value", t.getSd().getParameters().get("key"));
// Table-level metadata.
Assert.assertEquals("me", t.getOwner());
Assert.assertEquals("default", t.getDbName());
Assert.assertEquals(tableName, t.getTableName());
Assert.assertEquals(0, t.getParametersSize());
}
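The sort-column assertions above confirm the Order survives the round trip with direction 1 (ascending). By the same assumed convention, a descending column would carry 0:
// Assumed convention: 0 = descending.
Order desc = new Order("sortcol", 0);
Assert.assertEquals(0, desc.getOrder());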