Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
The class ObjectStore, method convertToMIndex.
private MIndex convertToMIndex(Index index) throws InvalidObjectException, MetaException {
  StorageDescriptor sd = index.getSd();
  if (sd == null) {
    throw new InvalidObjectException("Storage descriptor is not defined for index.");
  }
  // Convert the Thrift-level storage descriptor to its JDO model counterpart.
  MStorageDescriptor msd = this.convertToMStorageDescriptor(sd);
  // The index must reference an existing base table.
  MTable origTable = getMTable(index.getDbName(), index.getOrigTableName());
  if (origTable == null) {
    throw new InvalidObjectException("Original table does not exist for the given index.");
  }
  // The underlying index table is resolved through its qualified (db, table) name.
  String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
  MTable indexTable = getMTable(qualified[0], qualified[1]);
  if (indexTable == null) {
    throw new InvalidObjectException("Underlying index table does not exist for the given index.");
  }
  return new MIndex(HiveStringUtils.normalizeIdentifier(index.getIndexName()), origTable,
      index.getCreateTime(), index.getLastAccessTime(), index.getParameters(), indexTable, msd,
      index.getIndexHandlerClass(), index.isDeferredRebuild());
}
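For orientation, the sketch below shows how the Thrift-level Index consumed by convertToMIndex might be assembled. The constructor argument order is taken from the dropIndex test further down; the concrete names, the sd variable, and emptyParameters are illustrative assumptions, not code from the Hive source.

// Illustrative sketch only: field meanings follow the Index constructor
// as used in the dropIndex test below; all values here are made up.
int now = (int) (System.currentTimeMillis() / 1000);
Index index = new Index(
    "myindex",              // indexName
    null,                   // indexHandlerClass (none in this sketch)
    "default",              // dbName
    "mytable",              // origTableName: the base table being indexed
    now,                    // createTime
    now,                    // lastAccessTime
    "mytable__myindex__",   // indexTableName: the underlying index table
    sd,                     // StorageDescriptor, e.g. built as in the tests below
    emptyParameters,        // index parameters
    false);                 // deferredRebuild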
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
The class TestHBaseStore, method dropTable.
@Test
public void dropTable() throws Exception {
  String tableName = "dtable";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
      serde, null, null, emptyParameters);
  Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
      emptyParameters, null, null, null);
  store.createTable(table);
  // The table should be readable after creation and gone after the drop.
  Table t = store.getTable("default", tableName);
  Assert.assertNotNull(t);
  store.dropTable("default", tableName);
  Assert.assertNull(store.getTable("default", tableName));
}
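The ten positional arguments of that StorageDescriptor constructor are easy to misread, so here is an annotated rendering of the same call. The parameter names are our reading of the Thrift struct, inferred from the field order visible across these snippets, not quoted documentation.

StorageDescriptor sd = new StorageDescriptor(
    cols,             // cols: column schema
    "file:/tmp",      // location
    "input",          // inputFormat class name
    "output",         // outputFormat class name
    false,            // compressed
    0,                // numBuckets (0 = unbucketed here)
    serde,            // serdeInfo
    null,             // bucketCols (none)
    null,             // sortCols (none)
    emptyParameters); // parameters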
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
The class TestHBaseStore, method createPartition.
@Test
public void createPartition() throws Exception {
  String tableName = "myparttable";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
      serde, null, null, emptyParameters);
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("pc", "string", ""));
  Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols,
      emptyParameters, null, null, null);
  store.createTable(table);
  List<String> vals = Arrays.asList("fred");
  // Copy the table's storage descriptor and point it at the partition's directory.
  StorageDescriptor psd = new StorageDescriptor(sd);
  psd.setLocation("file:/tmp/pc=fred");
  Partition part = new Partition(vals, DB, tableName, startTime, startTime, psd, emptyParameters);
  store.addPartition(part);
  // Everything except the location should match the table's descriptor.
  Partition p = store.getPartition(DB, tableName, vals);
  Assert.assertEquals(1, p.getSd().getColsSize());
  Assert.assertEquals("col1", p.getSd().getCols().get(0).getName());
  Assert.assertEquals("int", p.getSd().getCols().get(0).getType());
  Assert.assertEquals("nocomment", p.getSd().getCols().get(0).getComment());
  Assert.assertEquals("serde", p.getSd().getSerdeInfo().getName());
  Assert.assertEquals("seriallib", p.getSd().getSerdeInfo().getSerializationLib());
  Assert.assertEquals("file:/tmp/pc=fred", p.getSd().getLocation());
  Assert.assertEquals("input", p.getSd().getInputFormat());
  Assert.assertEquals("output", p.getSd().getOutputFormat());
  Assert.assertEquals(DB, p.getDbName());
  Assert.assertEquals(tableName, p.getTableName());
  Assert.assertEquals(1, p.getValuesSize());
  Assert.assertEquals("fred", p.getValues().get(0));
  // Existence checks: the created partition is found, an unknown one is not.
  Assert.assertTrue(store.doesPartitionExist(DB, tableName, vals));
  Assert.assertFalse(store.doesPartitionExist(DB, tableName, Arrays.asList("bob")));
}
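The copy-then-relocate step in this test is a recurring pattern: the partition inherits the table's descriptor wholesale, and only the location changes. A minimal hypothetical helper capturing it (the helper name is ours, not Hive's):

// Hypothetical helper: clone a table's StorageDescriptor via the Thrift copy
// constructor (a deep copy, as the test above relies on) and override the
// location so the partition points at its own directory.
private static StorageDescriptor sdForPartition(StorageDescriptor tableSd, String partLocation) {
  StorageDescriptor psd = new StorageDescriptor(tableSd);
  psd.setLocation(partLocation);
  return psd;
}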
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
The class TestHBaseStore, method dropIndex.
@Test
public void dropIndex() throws Exception {
  String tableName = "mytable";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", ""));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  Map<String, String> params = new HashMap<String, String>();
  params.put("key", "value");
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
      serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
  Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null,
      emptyParameters, null, null, null);
  store.createTable(table);
  String indexName = "myindex";
  Index index = new Index(indexName, null, "default", tableName, startTime, startTime,
      tableName + "__" + indexName + "__", sd, emptyParameters, false);
  store.addIndex(index);
  store.dropIndex("default", tableName, indexName);
}
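Unlike the earlier descriptors, this one exercises bucketing and sorting. As a hedged reading of the arguments: 17 is numBuckets, "bucketcol" names the bucketing column, and Order("sortcol", 1) requests an ascending sort (1 being Hive's ascending code in the Thrift Order struct).

// The bucketed, sorted descriptor from the test above, annotated with our reading:
StorageDescriptor sd = new StorageDescriptor(
    cols, "file:/tmp", "input", "output",
    false,                                  // compressed
    17,                                     // numBuckets
    serde,
    Arrays.asList("bucketcol"),             // bucketCols
    Arrays.asList(new Order("sortcol", 1)), // sortCols: 1 = ascending
    params);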
Use of org.apache.hadoop.hive.metastore.api.StorageDescriptor in project hive by apache.
The class TestHBaseStoreBitVector, method createMockTableAndPartition.
private Table createMockTableAndPartition(String partType, String partVal) throws Exception {
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", partType, ""));
  List<String> vals = new ArrayList<String>();
  vals.add(partVal);
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  Map<String, String> params = new HashMap<String, String>();
  params.put("key", "value");
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17,
      serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params);
  int currentTime = (int) (System.currentTimeMillis() / 1000);
  Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, emptyParameters,
      null, null, null);
  store.createTable(table);
  // Note: unlike createPartition above, the partition reuses the table's
  // StorageDescriptor instance directly rather than a relocated copy.
  Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters);
  store.addPartition(part);
  return table;
}