Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
From class TestHBaseStoreIntegration, method alterPartitions.
@Test
public void alterPartitions() throws Exception {
  String dbName = "default";
  String tableName = "alterParts";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters);
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("pc", "string", ""));
  Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
  store.createTable(table);
  List<String> partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
  List<Partition> partitions = new ArrayList<Partition>();
  List<List<String>> allVals = new ArrayList<List<String>>();
  for (String val : partVals) {
    List<String> vals = new ArrayList<String>();
    allVals.add(vals);
    vals.add(val);
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/pc=" + val);
    Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, emptyParameters);
    partitions.add(part);
  }
  store.addPartitions(dbName, tableName, partitions);
  for (Partition p : partitions) p.setLastAccessTime(startTime + 10);
  store.alterPartitions(dbName, tableName, allVals, partitions);
  partitions = store.getPartitions(dbName, tableName, -1);
  for (Partition part : partitions) {
    Assert.assertEquals(startTime + 10, part.getLastAccessTime());
  }
}
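Note that alterPartitions pairs its two list arguments positionally: the i-th entry of allVals holds the original partition values used to look up the existing partition, while the i-th Partition carries the new state to write. A minimal sketch of the equivalent one-at-a-time calls, assuming the single-partition RawStore method alterPartition takes the same lookup-values-plus-new-partition shape:

for (int i = 0; i < partitions.size(); i++) {
  List<String> oldVals = allVals.get(i); // identifies the existing partition
  Partition newPart = partitions.get(i); // carries the updated metadata
  store.alterPartition(dbName, tableName, oldVals, newPart);
}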
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
From class TestHBaseStoreIntegration, method getPartitions.
@Test
public void getPartitions() throws Exception {
  String dbName = "default";
  String tableName = "manyParts";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters);
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("pc", "string", ""));
  Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
  store.createTable(table);
  List<String> partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
  for (String val : partVals) {
    List<String> vals = new ArrayList<String>();
    vals.add(val);
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/pc=" + val);
    Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, emptyParameters);
    store.addPartition(part);
    Partition p = store.getPartition(dbName, tableName, vals);
    Assert.assertEquals("file:/tmp/pc=" + val, p.getSd().getLocation());
  }
  List<Partition> parts = store.getPartitions(dbName, tableName, -1);
  Assert.assertEquals(5, parts.size());
  String[] pv = new String[5];
  for (int i = 0; i < 5; i++) pv[i] = parts.get(i).getValues().get(0);
  Arrays.sort(pv);
  Assert.assertArrayEquals(pv, partVals.toArray(new String[5]));
}
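The -1 passed to getPartitions is a max-results cap, with a negative value meaning "return everything." A hedged illustration, assuming this store honors the cap the way the RawStore contract describes:

// Illustrative only: cap the result at two of the five partitions.
List<Partition> firstTwo = store.getPartitions(dbName, tableName, 2);
Assert.assertEquals(2, firstTwo.size());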
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
From class TestHBaseStoreIntegration, method addPartitions.
@Test
public void addPartitions() throws Exception {
  String dbName = "default";
  String tableName = "addParts";
  int startTime = (int) (System.currentTimeMillis() / 1000);
  List<FieldSchema> cols = new ArrayList<FieldSchema>();
  cols.add(new FieldSchema("col1", "int", "nocomment"));
  SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
  StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters);
  List<FieldSchema> partCols = new ArrayList<FieldSchema>();
  partCols.add(new FieldSchema("pc", "string", ""));
  Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
  store.createTable(table);
  List<String> partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
  List<Partition> partitions = new ArrayList<Partition>();
  for (String val : partVals) {
    List<String> vals = new ArrayList<String>();
    vals.add(val);
    StorageDescriptor psd = new StorageDescriptor(sd);
    psd.setLocation("file:/tmp/pc=" + val);
    Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, emptyParameters);
    partitions.add(part);
  }
  store.addPartitions(dbName, tableName, partitions);
  List<String> partNames = store.listPartitionNames(dbName, tableName, (short) -1);
  Assert.assertEquals(5, partNames.size());
  String[] names = partNames.toArray(new String[partNames.size()]);
  Arrays.sort(names);
  String[] canonicalNames = partVals.toArray(new String[partVals.size()]);
  for (int i = 0; i < canonicalNames.length; i++) canonicalNames[i] = "pc=" + canonicalNames[i];
  Assert.assertArrayEquals(canonicalNames, names);
}
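The expected names are built by prefixing each value with "pc=", mirroring the metastore's <column>=<value> partition-name convention. A sketch of deriving the same names through the metastore's own helper, assuming Warehouse.makePartName's escaping is a no-op for these simple ASCII values:

import org.apache.hadoop.hive.metastore.Warehouse;
// partCols is the single-column partition schema built above
String name = Warehouse.makePartName(partCols, Arrays.asList("alan"));
Assert.assertEquals("pc=alan", name);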
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
From class TestHBaseImport, method importAll.
@Test
public void importAll() throws Exception {
  RawStore rdbms = new ObjectStore();
  rdbms.setConf(conf);
  String[] dbNames = new String[] { "alldb1", "alldb2" };
  String[] roles = new String[] { "allrole1", "allrole2" };
  String[] tokenIds = new String[] { "alltokenid1", "alltokenid2" };
  String[] tokens = new String[] { "alltoken1", "alltoken2" };
  String[] masterKeys = new String[] { "allmk1", "allmk2" };
  int now = (int) (System.currentTimeMillis() / 1000);
  setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now);
  int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size();
  int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size();
  HBaseImport importer = new HBaseImport("-a");
  importer.setConnections(rdbms, store);
  importer.run();
  for (int i = 0; i < roles.length; i++) {
    Role role = store.getRole(roles[i]);
    Assert.assertNotNull(role);
    Assert.assertEquals(roles[i], role.getRoleName());
  }
  // Make sure there aren't any extra roles
  Assert.assertEquals(baseNumRoles + 2, store.listRoleNames().size());
  for (int i = 0; i < dbNames.length; i++) {
    Database db = store.getDatabase(dbNames[i]);
    Assert.assertNotNull(db);
    // check one random value in the db rather than every value
    Assert.assertEquals("file:/tmp", db.getLocationUri());
    Table table = store.getTable(db.getName(), tableNames[0]);
    Assert.assertNotNull(table);
    Assert.assertEquals(now, table.getLastAccessTime());
    Assert.assertEquals("input", table.getSd().getInputFormat());
    table = store.getTable(db.getName(), tableNames[1]);
    Assert.assertNotNull(table);
    for (int j = 0; j < partVals.length; j++) {
      Partition part = store.getPartition(dbNames[i], tableNames[1], Arrays.asList(partVals[j]));
      Assert.assertNotNull(part);
      Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation());
    }
    Assert.assertEquals(4, store.getPartitions(dbNames[i], tableNames[1], -1).size());
    // Including the two index tables
    Assert.assertEquals(4, store.getAllTables(dbNames[i]).size());
    Assert.assertEquals(2, store.getIndexes(dbNames[i], tableNames[0], -1).size());
    Assert.assertEquals(0, store.getIndexes(dbNames[i], tableNames[1], -1).size());
    Assert.assertEquals(2, store.getFunctions(dbNames[i], "*").size());
    for (int j = 0; j < funcNames.length; j++) {
      Assert.assertNotNull(store.getFunction(dbNames[i], funcNames[j]));
    }
  }
  Assert.assertEquals(baseNumDbs + 2, store.getAllDatabases().size());
  // Verify the delegation tokens and master keys made it across
  for (int i = 0; i < tokenIds.length; i++) {
    Assert.assertEquals(tokens[i], store.getToken(tokenIds[i]));
  }
  String[] hbaseKeys = store.getMasterKeys();
  Set<String> keys = new HashSet<>(Arrays.asList(hbaseKeys));
  for (int i = 0; i < masterKeys.length; i++) {
    Assert.assertTrue(keys.contains(masterKeys[i]));
  }
}
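HBaseImport's constructor appears to mirror its command-line usage: "-a" imports everything from the source RawStore, while "-d <dbname>" (used in importOneDb below) restricts the import to a single database. A sketch of the two invocations side by side, reusing the rdbms and store connections from this test:

HBaseImport importEverything = new HBaseImport("-a");
importEverything.setConnections(rdbms, store);
importEverything.run();

HBaseImport importOne = new HBaseImport("-d", "alldb1"); // one database only
importOne.setConnections(rdbms, store);
importOne.run();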
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
From class TestHBaseImport, method importOneDb.
@Test
public void importOneDb() throws Exception {
  RawStore rdbms = new ObjectStore();
  rdbms.setConf(conf);
  String[] dbNames = new String[] { "onedbdb1", "onedbdb2" };
  String[] roles = new String[] { "onedbrole1", "onedbrole2" };
  String[] tokenIds = new String[] { "onedbtokenid1", "onedbtokenid2" };
  String[] tokens = new String[] { "onedbtoken1", "onedbtoken2" };
  String[] masterKeys = new String[] { "onedbmk1", "onedbmk2" };
  int now = (int) (System.currentTimeMillis() / 1000);
  setupObjectStore(rdbms, roles, dbNames, tokenIds, tokens, masterKeys, now);
  int baseNumRoles = store.listRoleNames() == null ? 0 : store.listRoleNames().size();
  int baseNumDbs = store.getAllDatabases() == null ? 0 : store.getAllDatabases().size();
  int baseNumToks = store.getAllTokenIdentifiers() == null ? 0 : store.getAllTokenIdentifiers().size();
  int baseNumKeys = store.getMasterKeys() == null ? 0 : store.getMasterKeys().length;
  HBaseImport importer = new HBaseImport("-d", dbNames[0]);
  importer.setConnections(rdbms, store);
  importer.run();
  // Make sure there aren't any extra roles
  Assert.assertEquals(baseNumRoles, store.listRoleNames().size());
  Database db = store.getDatabase(dbNames[0]);
  Assert.assertNotNull(db);
  // check one random value in the db rather than every value
  Assert.assertEquals("file:/tmp", db.getLocationUri());
  Table table = store.getTable(db.getName(), tableNames[0]);
  Assert.assertNotNull(table);
  Assert.assertEquals(now, table.getLastAccessTime());
  Assert.assertEquals("input", table.getSd().getInputFormat());
  table = store.getTable(db.getName(), tableNames[1]);
  Assert.assertNotNull(table);
  for (int j = 0; j < partVals.length; j++) {
    Partition part = store.getPartition(dbNames[0], tableNames[1], Arrays.asList(partVals[j]));
    Assert.assertNotNull(part);
    Assert.assertEquals("file:/tmp/region=" + partVals[j], part.getSd().getLocation());
  }
  Assert.assertEquals(4, store.getPartitions(dbNames[0], tableNames[1], -1).size());
  // Including the two index tables
  Assert.assertEquals(4, store.getAllTables(dbNames[0]).size());
  Assert.assertEquals(2, store.getIndexes(dbNames[0], tableNames[0], -1).size());
  Assert.assertEquals(0, store.getIndexes(dbNames[0], tableNames[1], -1).size());
  Assert.assertEquals(2, store.getFunctions(dbNames[0], "*").size());
  for (int j = 0; j < funcNames.length; j++) {
    Assert.assertNotNull(store.getFunction(dbNames[0], funcNames[j]));
  }
  Assert.assertEquals(baseNumDbs + 1, store.getAllDatabases().size());
  Assert.assertEquals(baseNumToks, store.getAllTokenIdentifiers().size());
  String[] hbaseKeys = store.getMasterKeys();
  Assert.assertEquals(baseNumKeys, hbaseKeys.length);
  // Have to do this last as it will throw an exception
  thrown.expect(NoSuchObjectException.class);
  store.getDatabase(dbNames[1]);
}
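The thrown field used at the end is not shown in the snippet; in JUnit 4 it is conventionally an ExpectedException rule declared on the test class, which is why the expectation is registered immediately before the call that should fail. A minimal sketch of such a declaration (assumed, not visible in this excerpt):

import org.junit.Rule;
import org.junit.rules.ExpectedException;

@Rule
public ExpectedException thrown = ExpectedException.none();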