Example usage of org.apache.hadoop.hive.metastore.api.FieldSchema in the Apache Hive project: class TestHBaseStoreCached, method listGetDropPartitionNames.
/**
 * Exercises the partition lifecycle against the store: creates a table with
 * two partition keys, adds two partitions, lists their names, fetches the
 * partitions by name, then drops them and verifies none remain.
 */
@Test
public void listGetDropPartitionNames() throws Exception {
    String dbName = "default";
    String tableName = "listParts";
    int startTime = (int) (System.currentTimeMillis() / 1000);
    List<FieldSchema> cols = new ArrayList<FieldSchema>();
    cols.add(new FieldSchema("col1", "int", "nocomment"));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters);
    List<FieldSchema> partCols = new ArrayList<FieldSchema>();
    partCols.add(new FieldSchema("pc", "string", ""));
    partCols.add(new FieldSchema("region", "string", ""));
    Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, emptyParameters, null, null, null);
    store.createTable(table);
    String[][] partVals = new String[][] { { "today", "north america" }, { "tomorrow", "europe" } };
    for (String[] pv : partVals) {
        List<String> vals = new ArrayList<String>();
        for (String v : pv) vals.add(v);
        // Each partition gets its own storage descriptor with a per-partition location.
        StorageDescriptor psd = new StorageDescriptor(sd);
        psd.setLocation("file:/tmp/pc=" + pv[0] + "/region=" + pv[1]);
        Partition part = new Partition(vals, dbName, tableName, startTime, startTime, psd, emptyParameters);
        store.addPartition(part);
    }
    List<String> names = store.listPartitionNames(dbName, tableName, (short) -1);
    Assert.assertEquals(2, names.size());
    String[] resultNames = names.toArray(new String[names.size()]);
    Arrays.sort(resultNames);
    // JUnit's assertArrayEquals takes (expected, actual); the literal array is the
    // expected value, so it must come first for accurate failure messages.
    Assert.assertArrayEquals(new String[] { "pc=today/region=north america", "pc=tomorrow/region=europe" }, resultNames);
    // NOTE(review): the next three lines assume getPartitionsByNames preserves the
    // order of the (unsorted) names list and that listPartitionNames returns the
    // partitions in insertion order — TODO confirm against the store implementation.
    List<Partition> parts = store.getPartitionsByNames(dbName, tableName, names);
    Assert.assertArrayEquals(partVals[0], parts.get(0).getValues().toArray(new String[2]));
    Assert.assertArrayEquals(partVals[1], parts.get(1).getValues().toArray(new String[2]));
    store.dropPartitions(dbName, tableName, names);
    // After the drop, a full partition listing must come back empty.
    List<Partition> afterDropParts = store.getPartitions(dbName, tableName, -1);
    Assert.assertEquals(0, afterDropParts.size());
}
Example usage of org.apache.hadoop.hive.metastore.api.FieldSchema in the Apache Hive project: class TestHiveMetaStoreChecker, method createPartitionsDirectoriesOnFS.
/**
 * Creates partition sub-directories under the given table's base location on
 * the file system without registering them in the metastore. Used to simulate
 * the case where partitions exist on disk but are missing from the metastore db.
 *
 * @param table - supplies the base location and the partition key schema
 * @param numPartitions - number of partition directories to create
 * @throws IOException
 */
private void createPartitionsDirectoriesOnFS(Table table, int numPartitions) throws IOException {
    String baseLocation = table.getDataLocation().toString();
    fs = table.getPath().getFileSystem(hive.getConf());
    int keyCount = table.getPartitionKeys().size();
    for (int partIdx = 0; partIdx < numPartitions; partIdx++) {
        StringBuilder dirPath = new StringBuilder(baseLocation);
        // Build <base>/<key0>=val_<i>/<key1>=val_<i>/... — every key of one
        // partition shares the same synthetic value "val_<i>".
        for (int keyIdx = 0; keyIdx < keyCount; keyIdx++) {
            dirPath.append(Path.SEPARATOR);
            FieldSchema partKey = table.getPartitionKeys().get(keyIdx);
            dirPath.append(partKey.getName()).append('=').append("val_").append(partIdx);
        }
        createDirectory(dirPath.toString());
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.FieldSchema in the Apache Hive project: class TestHive, method testIndex.
/**
 * Tests creating a simple index on a simple table: creates the table, creates
 * a compact-summary index on one column, validates the index metadata, drops
 * the index, verifies it is gone, and finally drops the table.
 *
 * @throws Throwable
 */
public void testIndex() throws Throwable {
    try {
        // create a simple table
        String tableName = "table_for_testindex";
        String qTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + tableName;
        try {
            hm.dropTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
        } catch (HiveException e) {
            e.printStackTrace();
            // fail(msg) is the idiomatic JUnit form of assertTrue(msg, false)
            fail("Unable to drop table");
        }
        Table tbl = new Table(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
        List<FieldSchema> fields = tbl.getCols();
        fields.add(new FieldSchema("col1", serdeConstants.INT_TYPE_NAME, "int -- first column"));
        fields.add(new FieldSchema("col2", serdeConstants.STRING_TYPE_NAME, "string -- second column"));
        fields.add(new FieldSchema("col3", serdeConstants.DOUBLE_TYPE_NAME, "double -- thrift column"));
        tbl.setFields(fields);
        tbl.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
        tbl.setInputFormatClass(SequenceFileInputFormat.class);
        // create table
        try {
            hm.createTable(tbl);
        } catch (HiveException e) {
            e.printStackTrace();
            fail("Unable to create table: " + tableName);
        }
        // Create a simple index
        String indexName = "index_on_table_for_testindex";
        String indexHandlerClass = HiveIndex.IndexType.COMPACT_SUMMARY_TABLE.getHandlerClsName();
        List<String> indexedCols = new ArrayList<String>();
        indexedCols.add("col1");
        String indexTableName = "index_on_table_for_testindex_table";
        String qIndexTableName = MetaStoreUtils.DEFAULT_DATABASE_NAME + "." + indexTableName;
        boolean deferredRebuild = true;
        String inputFormat = SequenceFileInputFormat.class.getName();
        String outputFormat = SequenceFileOutputFormat.class.getName();
        // All the optional index properties are left unset for this simple case.
        String serde = null;
        String storageHandler = null;
        String location = null;
        String collItemDelim = null;
        String fieldDelim = null;
        String fieldEscape = null;
        String lineDelim = null;
        String mapKeyDelim = null;
        String indexComment = null;
        Map<String, String> indexProps = null;
        Map<String, String> tableProps = null;
        Map<String, String> serdeProps = new HashMap<String, String>();
        hm.createIndex(qTableName, indexName, indexHandlerClass, indexedCols, qIndexTableName, deferredRebuild, inputFormat, outputFormat, serde, storageHandler, location, indexProps, tableProps, serdeProps, collItemDelim, fieldDelim, fieldEscape, lineDelim, mapKeyDelim, indexComment);
        // Retrieve and validate the index
        Index index = null;
        try {
            index = hm.getIndex(tableName, indexName);
            assertNotNull("Unable to fetch index", index);
            index.validate();
            assertEquals("Index names don't match for index: " + indexName, indexName, index.getIndexName());
            assertEquals("Table names don't match for index: " + indexName, tableName, index.getOrigTableName());
            assertEquals("Index table names didn't match for index: " + indexName, indexTableName, index.getIndexTableName());
            assertEquals("Index handler classes didn't match for index: " + indexName, indexHandlerClass, index.getIndexHandlerClass());
            assertEquals("Deferred rebuild didn't match for index: " + indexName, deferredRebuild, index.isDeferredRebuild());
        } catch (HiveException e) {
            System.err.println(StringUtils.stringifyException(e));
            fail("Unable to fetch index correctly: " + indexName);
        }
        // Drop index
        try {
            hm.dropIndex(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName, indexName, false, true);
        } catch (HiveException e) {
            System.err.println(StringUtils.stringifyException(e));
            fail("Unable to drop index: " + indexName);
        }
        // Fetching the dropped index must now fail.
        boolean dropIndexException = false;
        try {
            hm.getIndex(tableName, indexName);
        } catch (HiveException e) {
            // Expected since it was just dropped
            dropIndexException = true;
        }
        assertTrue("Unable to drop index: " + indexName, dropIndexException);
        // Drop table
        try {
            hm.dropTable(tableName);
            Table droppedTable = hm.getTable(tableName, false);
            assertNull("Unable to drop table " + tableName, droppedTable);
        } catch (HiveException e) {
            System.err.println(StringUtils.stringifyException(e));
            fail("Unable to drop table: " + tableName);
        }
    } catch (Throwable e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("testIndex failed");
        throw e;
    }
}
Example usage of org.apache.hadoop.hive.metastore.api.FieldSchema in the Apache Hive project: class DelimitedInputWriter, method getCols.
/**
 * Returns the lower-cased names of the table's storage-descriptor columns,
 * in column order.
 *
 * @param table table whose column names are extracted
 * @return list of column names, each lower-cased
 */
private ArrayList<String> getCols(Table table) {
    List<FieldSchema> fieldSchemas = table.getSd().getCols();
    ArrayList<String> lowerCaseNames = new ArrayList<String>(fieldSchemas.size());
    // NOTE(review): toLowerCase() uses the default locale — presumably fine for
    // ASCII column names, but verify if non-English locales are a concern.
    for (int i = 0; i < fieldSchemas.size(); i++) {
        lowerCaseNames.add(fieldSchemas.get(i).getName().toLowerCase());
    }
    return lowerCaseNames;
}
Example usage of org.apache.hadoop.hive.metastore.api.FieldSchema in the Apache Hive project: class TestMetastoreAuthorizationProvider, method testSimplePrivileges.
/**
 * End-to-end test of the metastore authorization provider. Each operation
 * (create database/table, add partition, drop table) is attempted both
 * without and with the corresponding privilege, verifying rejection in the
 * former case and success in the latter. The sequence of allow*/disallow*
 * calls is strictly order-dependent: each grant/revoke changes the privilege
 * state seen by every subsequent driver.run() or msc call.
 */
public void testSimplePrivileges() throws Exception {
if (!isTestEnabled()) {
System.out.println("Skipping test " + this.getClass().getName());
return;
}
String dbName = getTestDbName();
String tblName = getTestTableName();
String userName = setupUser();
// Phase 1: with the create-database privilege granted, database creation succeeds.
allowCreateDatabase(userName);
CommandProcessorResponse ret = driver.run("create database " + dbName);
assertEquals(0, ret.getResponseCode());
Database db = msc.getDatabase(dbName);
String dbLocn = db.getLocationUri();
validateCreateDb(db, dbName);
// Phase 2: revoke create privileges; table creation in the db must now fail
// (response code 1), with or without an explicit table location.
disallowCreateInDb(dbName, userName, dbLocn);
disallowCreateDatabase(userName);
driver.run("use " + dbName);
ret = driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
assertEquals(1, ret.getResponseCode());
// Even if table location is specified table creation should fail
String tblNameLoc = tblName + "_loc";
String tblLocation = new Path(dbLocn).getParent().toUri() + "/" + tblNameLoc;
driver.run("use " + dbName);
ret = driver.run(String.format("create table %s (a string) partitioned by (b string) location '" + tblLocation + "'", tblNameLoc));
assertEquals(1, ret.getResponseCode());
// failure from not having permissions to create table
// Build a Table object by hand to exercise the direct metastore API path
// (msc.createTable) as well as the HiveQL path above.
ArrayList<FieldSchema> fields = new ArrayList<FieldSchema>(2);
fields.add(new FieldSchema("a", serdeConstants.STRING_TYPE_NAME, ""));
Table ttbl = new Table();
ttbl.setDbName(dbName);
ttbl.setTableName(tblName);
StorageDescriptor sd = new StorageDescriptor();
ttbl.setSd(sd);
sd.setCols(fields);
sd.setParameters(new HashMap<String, String>());
sd.getParameters().put("test_param_1", "Use this for comments etc");
sd.setSerdeInfo(new SerDeInfo());
sd.getSerdeInfo().setName(ttbl.getTableName());
sd.getSerdeInfo().setParameters(new HashMap<String, String>());
sd.getSerdeInfo().getParameters().put(org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT, "1");
sd.getSerdeInfo().setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
sd.setInputFormat(HiveInputFormat.class.getName());
sd.setOutputFormat(HiveOutputFormat.class.getName());
ttbl.setPartitionKeys(new ArrayList<FieldSchema>());
// The direct API call must be rejected with a MetaException carrying a
// no-privilege message (checked by assertNoPrivileges).
MetaException me = null;
try {
msc.createTable(ttbl);
} catch (MetaException e) {
me = e;
}
assertNoPrivileges(me);
// Phase 3: re-grant create-in-db; both table creations now succeed.
allowCreateInDb(dbName, userName, dbLocn);
driver.run("use " + dbName);
ret = driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName));
// now it succeeds.
assertEquals(0, ret.getResponseCode());
Table tbl = msc.getTable(dbName, tblName);
validateCreateTable(tbl, tblName, dbName);
// Table creation should succeed even if location is specified
driver.run("use " + dbName);
ret = driver.run(String.format("create table %s (a string) partitioned by (b string) location '" + tblLocation + "'", tblNameLoc));
assertEquals(0, ret.getResponseCode());
Table tblLoc = msc.getTable(dbName, tblNameLoc);
validateCreateTable(tblLoc, tblNameLoc, dbName);
// Phase 4: impersonate a user with no privileges; creation via both HiveQL
// and the direct API must fail for them.
String fakeUser = "mal";
List<String> fakeGroupNames = new ArrayList<String>();
fakeGroupNames.add("groupygroup");
InjectableDummyAuthenticator.injectUserName(fakeUser);
InjectableDummyAuthenticator.injectGroupNames(fakeGroupNames);
InjectableDummyAuthenticator.injectMode(true);
ret = driver.run(String.format("create table %s (a string) partitioned by (b string)", tblName + "mal"));
assertEquals(1, ret.getResponseCode());
ttbl.setTableName(tblName + "mal");
me = null;
try {
msc.createTable(ttbl);
} catch (MetaException e) {
me = e;
}
assertNoPrivileges(me);
// Phase 5: still impersonated, and table-level create revoked — adding a
// partition must fail through HiveQL and through the direct API.
disallowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation());
ret = driver.run("alter table " + tblName + " add partition (b='2011')");
assertEquals(1, ret.getResponseCode());
List<String> ptnVals = new ArrayList<String>();
ptnVals.add("b=2011");
Partition tpart = new Partition();
tpart.setDbName(dbName);
tpart.setTableName(tblName);
tpart.setValues(ptnVals);
tpart.setParameters(new HashMap<String, String>());
tpart.setSd(tbl.getSd().deepCopy());
tpart.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
tpart.getSd().setLocation(tbl.getSd().getLocation() + "/tpart");
me = null;
try {
msc.add_partition(tpart);
} catch (MetaException e) {
me = e;
}
assertNoPrivileges(me);
// Phase 6: back to the real user with table-level create granted — the same
// add-partition now succeeds.
InjectableDummyAuthenticator.injectMode(false);
allowCreateInTbl(tbl.getTableName(), userName, tbl.getSd().getLocation());
ret = driver.run("alter table " + tblName + " add partition (b='2011')");
assertEquals(0, ret.getResponseCode());
// Phase 7: with drop privileges granted, the whole database can be dropped.
allowDropOnTable(tblName, userName, tbl.getSd().getLocation());
allowDropOnDb(dbName, userName, db.getLocationUri());
ret = driver.run("drop database if exists " + getTestDbName() + " cascade");
assertEquals(0, ret.getResponseCode());
// Phase 8: recreate db/table as the test user, then revoke drop on the table
// and confirm DROP TABLE is rejected.
InjectableDummyAuthenticator.injectUserName(userName);
InjectableDummyAuthenticator.injectGroupNames(Arrays.asList(ugi.getGroupNames()));
InjectableDummyAuthenticator.injectMode(true);
allowCreateDatabase(userName);
driver.run("create database " + dbName);
allowCreateInDb(dbName, userName, dbLocn);
tbl.setTableType("EXTERNAL_TABLE");
msc.createTable(tbl);
disallowDropOnTable(tblName, userName, tbl.getSd().getLocation());
ret = driver.run("drop table " + tbl.getTableName());
assertEquals(1, ret.getResponseCode());
}
Aggregations