Usage of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in the Apache Accumulo project: the RenameTable class, method call.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    Instance instance = master.getInstance();
    Pair<String,String> qualifiedOldTableName = Tables.qualify(oldTableName);
    Pair<String,String> qualifiedNewTableName = Tables.qualify(newTableName);

    // Ensure no attempt is made to rename across namespaces: if the new name is
    // qualified, its namespace must resolve to the namespace already reserved for
    // this operation.
    if (newTableName.contains(".") && !namespaceId.equals(Namespaces.getNamespaceId(instance, qualifiedNewTableName.getFirst())))
        throw new AcceptableThriftTableOperationException(tableId.canonicalID(), oldTableName, TableOperation.RENAME,
                TableOperationExceptionType.INVALID_NAME, "Namespace in new table name does not match the old table name");

    IZooReaderWriter zoo = ZooReaderWriter.getInstance();

    // Serialize table-name changes so the does-not-exist check and the ZooKeeper
    // mutation below happen atomically with respect to other renames/creates.
    Utils.tableNameLock.lock();
    try {
        Utils.checkTableDoesNotExist(instance, newTableName, tableId, TableOperation.RENAME);

        final String newName = qualifiedNewTableName.getSecond();
        final String oldName = qualifiedOldTableName.getSecond();
        final String tap = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAME;

        zoo.mutate(tap, null, null, new Mutator() {
            @Override
            public byte[] mutate(byte[] current) throws Exception {
                final String currentName = new String(current, UTF_8);
                if (currentName.equals(newName))
                    // assume in this case the operation is running again, so we are done
                    return null;
                if (!currentName.equals(oldName)) {
                    // Fix: report the table id instead of null, consistent with the
                    // other throws in this class (exception type/message unchanged).
                    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), oldTableName, TableOperation.RENAME,
                            TableOperationExceptionType.NOTFOUND, "Name changed while processing");
                }
                return newName.getBytes(UTF_8);
            }
        });

        // Invalidate cached id<->name mappings so clients observe the new name.
        Tables.clearCache(instance);
    } finally {
        Utils.tableNameLock.unlock();
        // Release the reservations taken when this FATE operation started.
        Utils.unreserveTable(tableId, tid, true);
        Utils.unreserveNamespace(namespaceId, tid, false);
    }

    LoggerFactory.getLogger(RenameTable.class).debug("Renamed table {} {} {}", tableId, oldTableName, newTableName);

    // Returning null signals this FATE operation is complete (no next step).
    return null;
}
Usage of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in the Apache Accumulo project: the Utils class, method reserveTable.
/**
 * Attempts to reserve the FATE lock for the given table without blocking.
 *
 * @return 0 when the lock was acquired (after optionally verifying the table's
 *         ZooKeeper node exists), or 100 as a retry delay when the lock is held
 *         by another operation.
 * @throws AcceptableThriftTableOperationException
 *           if {@code tableMustExist} is set and the table node is absent
 */
public static long reserveTable(Table.ID tableId, long tid, boolean writeLock, boolean tableMustExist, TableOperation op) throws Exception {
    // Non-blocking attempt; a positive return tells the FATE runner to retry later.
    if (!getLock(tableId, tid, writeLock).tryLock())
        return 100;

    if (tableMustExist) {
        Instance instance = HdfsZooInstance.getInstance();
        IZooReaderWriter zk = ZooReaderWriter.getInstance();
        String tablePath = ZooUtil.getRoot(instance) + Constants.ZTABLES + "/" + tableId;
        if (!zk.exists(tablePath))
            throw new AcceptableThriftTableOperationException(tableId.canonicalID(), "", op, TableOperationExceptionType.NOTFOUND, "Table does not exist");
    }

    log.info("table {} ({}) locked for {} operation: {}", tableId, Long.toHexString(tid), (writeLock ? "write" : "read"), op);
    return 0;
}
Usage of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in the Apache Accumulo project: the Utils class, method reserveNamespace.
/**
 * Attempts to reserve the FATE lock for the given namespace without blocking.
 *
 * @return 0 on success (after optionally checking the namespace's ZooKeeper node
 *         exists), or 100 as a retry delay when the lock could not be taken.
 * @throws AcceptableThriftTableOperationException
 *           if {@code mustExist} is set and the namespace node is absent
 */
public static long reserveNamespace(Namespace.ID namespaceId, long id, boolean writeLock, boolean mustExist, TableOperation op) throws Exception {
    boolean acquired = getLock(namespaceId, id, writeLock).tryLock();
    if (!acquired)
        return 100;

    if (mustExist) {
        // Verify the namespace still exists now that we hold its lock.
        Instance instance = HdfsZooInstance.getInstance();
        IZooReaderWriter zk = ZooReaderWriter.getInstance();
        String nsPath = ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId;
        if (!zk.exists(nsPath))
            throw new AcceptableThriftTableOperationException(namespaceId.canonicalID(), "", op, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Namespace does not exist");
    }

    log.info("namespace {} ({}) locked for {} operation: {}", namespaceId, Long.toHexString(id), (writeLock ? "write" : "read"), op);
    return 0;
}
Usage of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in the Apache Accumulo project: the WriteExportFiles class, method call.
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
    final String tableName = tableInfo.tableName;
    final String exportDir = tableInfo.exportDir;

    // Write the export files; any I/O failure is converted into a FATE-acceptable
    // exception so the operation fails cleanly.
    try {
        exportTable(master.getFileSystem(), master, tableName, tableInfo.tableID, exportDir);
    } catch (IOException ioe) {
        // NOTE(review): the cause is flattened into the message rather than chained —
        // presumably because this exception crosses a thrift boundary; confirm.
        throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(), tableName, TableOperation.EXPORT,
                TableOperationExceptionType.OTHER, "Failed to create export files " + ioe.getMessage());
    }

    // Release the namespace, table, and HDFS-directory reservations taken earlier.
    Utils.unreserveNamespace(tableInfo.namespaceID, tid, false);
    Utils.unreserveTable(tableInfo.tableID, tid, false);
    Utils.unreserveHdfsDirectory(new Path(exportDir).toString(), tid);

    // Null means this FATE operation has no further step.
    return null;
}
Usage of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in the Apache Accumulo project: the WriteExportFiles class, method isReady.
@Override
public long isReady(long tid, Master master) throws Exception {
    // Both reservations must succeed (each returns 0 on success, a retry delay otherwise).
    long retryDelay = Utils.reserveNamespace(tableInfo.namespaceID, tid, false, true, TableOperation.EXPORT)
            + Utils.reserveTable(tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
    if (retryDelay > 0)
        return retryDelay;

    Connector conn = master.getConnector();
    checkOffline(conn);

    Scanner scanner = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    scanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetadataRange());

    // scan for locations
    scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
    scanner.fetchColumnFamily(TabletsSection.FutureLocationColumnFamily.NAME);
    boolean tabletsStillHosted = scanner.iterator().hasNext();
    if (tabletsStillHosted)
        return 500;

    // use the same range to check for walogs that we used to check for hosted (or future hosted) tablets
    // this is done as a separate scan after we check for locations, because walogs are okay only if there is no location
    scanner.clearColumns();
    scanner.fetchColumnFamily(LogColumnFamily.NAME);
    if (scanner.iterator().hasNext())
        throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonicalID(), tableInfo.tableName, TableOperation.EXPORT,
                TableOperationExceptionType.OTHER, "Write ahead logs found for table");

    // Ready to run: no hosted tablets and no write-ahead logs remain.
    return 0;
}
Aggregations