Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.
Example: the call method of the PopulateMetadataTable class.
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
// Reads the exported metadata file out of the export zip and rewrites its
// entries into this instance's metadata table under the new table id,
// remapping data-file names and assigning a fresh directory per tablet.
Path path = new Path(tableInfo.exportFile);
VolumeManager fs = manager.getVolumeManager();
try (BatchWriter mbw = manager.getContext().createBatchWriter(MetadataTable.NAME);
ZipInputStream zis = new ZipInputStream(fs.open(path))) {
// Old file name -> new volume-qualified path, gathered from each import
// dir's mapping file via readMappingFile.
Map<String, String> fileNameMappings = new HashMap<>();
for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
log.info("importDir is " + dm.importDir);
// mappings are prefixed with the proper volume information, e.g:
// hdfs://localhost:8020/path/to/accumulo/tables/...
readMappingFile(fs, tableInfo, dm.importDir, fileNameMappings);
}
ZipEntry zipEntry;
while ((zipEntry = zis.getNextEntry()) != null) {
if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
// NOTE: 'in' is deliberately left open — closing it would close the
// enclosing ZipInputStream.
DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
Key key = new Key();
Value val = new Value();
Mutation m = null;
Text currentRow = null;
int dirCount = 0;
// Stream serialized Key/Value pairs; loop exits via the last-row break below.
while (true) {
key.readFields(in);
val.readFields(in);
// Re-key the entry from the exported table's id to the new table's id.
Text endRow = KeyExtent.fromMetaRow(key.getRow()).endRow();
Text metadataRow = new KeyExtent(tableInfo.tableId, endRow, null).toMetaRow();
Text cq;
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
// Data-file entries must point at the imported copy of the file.
String oldName = new Path(key.getColumnQualifier().toString()).getName();
String newName = fileNameMappings.get(oldName);
if (newName == null) {
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File " + oldName + " does not exist in import dir");
}
cq = new Text(newName);
} else {
cq = key.getColumnQualifier();
}
// Starting a new tablet row: flush the previous row's mutation and seed
// the new one with its directory entry.
if (m == null || !currentRow.equals(metadataRow)) {
if (m != null) {
mbw.addMutation(m);
}
// Make a unique directory inside the table's dir. Cannot import multiple tables
// into one table, so don't need to use unique allocator
String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
m = new Mutation(metadataRow);
ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(tabletDir));
currentRow = metadataRow;
}
m.put(key.getColumnFamily(), cq, val);
// Default (null) end row plus a prev-row column marks the final entry.
if (endRow == null && TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
mbw.addMutation(m);
// its the last column in the last row
break;
}
}
break;
}
}
return new MoveExportedFiles(tableInfo);
} catch (IOException ioe) {
log.warn("{}", ioe.getMessage(), ioe);
throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error reading " + path + " " + ioe.getMessage());
}
}
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.
Example: the call method of the ImportPopulateZookeeper class.
@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
  // Reserve the imported table's name in zookeeper, failing if it is taken.
  Utils.getTableNameLock().lock();
  try {
    // Record tableName & tableId in zookeeper.
    Utils.checkTableDoesNotExist(env.getContext(), tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
    String namespace = TableNameUtil.qualify(tableInfo.tableName).getFirst();
    NamespaceId namespaceId = Namespaces.getNamespaceId(env.getContext(), namespace);
    env.getTableManager().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName);
    env.getContext().clearTableListCache();
  } finally {
    Utils.getTableNameLock().unlock();
  }
  // Apply every property captured in the export to the new table; reject the
  // import if any property cannot be set.
  VolumeManager volumeManager = env.getVolumeManager();
  for (Entry<String, String> exported : getExportedProps(volumeManager).entrySet()) {
    boolean applied = TablePropUtil.setTableProperty(env.getContext(), tableInfo.tableId, exported.getKey(), exported.getValue());
    if (!applied) {
      throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Invalid table property " + exported.getKey());
    }
  }
  return new CreateImportDir(tableInfo);
}
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.
Example: the checkVersions method of the ImportTable class.
@SuppressFBWarnings(value = "OS_OPEN_STREAM", justification = "closing intermediate readers would close the ZipInputStream")
public void checkVersions(Manager env) throws AcceptableThriftTableOperationException {
  // Locates the export file among the configured export directories and verifies
  // that the export-format version and data version recorded in it are ones this
  // build can import.
  //
  // Throws AcceptableThriftTableOperationException if the export file cannot be
  // read, or either version is missing or newer than what this build supports.
  Set<String> exportDirs = tableInfo.directories.stream().map(dm -> dm.exportDir).collect(Collectors.toSet());
  log.debug("Searching for export file in {}", exportDirs);
  Integer exportVersion = null;
  Integer dataVersion = null;
  try {
    Path exportFilePath = TableOperationsImpl.findExportFile(env.getContext(), exportDirs);
    tableInfo.exportFile = exportFilePath.toString();
    log.info("Export file is {}", tableInfo.exportFile);
    // FIX: the ZipInputStream was previously never closed, leaking the underlying
    // file handle. Intermediate readers are still intentionally left open (closing
    // them would close the zip stream — see the SuppressFBWarnings justification).
    try (ZipInputStream zis = new ZipInputStream(env.getVolumeManager().open(exportFilePath))) {
      ZipEntry zipEntry;
      while ((zipEntry = zis.getNextEntry()) != null) {
        if (zipEntry.getName().equals(Constants.EXPORT_INFO_FILE)) {
          // The info file is "key:value" lines; pull out the two version properties.
          BufferedReader in = new BufferedReader(new InputStreamReader(zis, UTF_8));
          String line = null;
          while ((line = in.readLine()) != null) {
            String[] sa = line.split(":", 2);
            if (sa[0].equals(ExportTable.EXPORT_VERSION_PROP)) {
              exportVersion = Integer.parseInt(sa[1]);
            } else if (sa[0].equals(ExportTable.DATA_VERSION_PROP)) {
              dataVersion = Integer.parseInt(sa[1]);
            }
          }
          break;
        }
      }
    }
  } catch (IOException | AccumuloException e) {
    log.warn("{}", e.getMessage(), e);
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Failed to read export metadata " + e.getMessage());
  }
  if (exportVersion == null || exportVersion > ExportTable.VERSION) {
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible export version " + exportVersion);
  }
  if (dataVersion == null || dataVersion > AccumuloDataVersion.get()) {
    throw new AcceptableThriftTableOperationException(null, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Incompatible data version " + dataVersion);
  }
}
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.
Example: the call method of the RenameTable class.
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
  Pair<String, String> oldParts = TableNameUtil.qualify(oldTableName);
  Pair<String, String> newParts = TableNameUtil.qualify(newTableName);
  // A rename must never move the table into a different namespace.
  boolean qualified = newTableName.contains(".");
  if (qualified && !namespaceId.equals(Namespaces.getNamespaceId(manager.getContext(), newParts.getFirst()))) {
    throw new AcceptableThriftTableOperationException(tableId.canonical(), oldTableName, TableOperation.RENAME, TableOperationExceptionType.INVALID_NAME, "Namespace in new table name does not match the old table name");
  }
  ZooReaderWriter zooWriter = manager.getContext().getZooReaderWriter();
  Utils.getTableNameLock().lock();
  try {
    Utils.checkTableDoesNotExist(manager.getContext(), newTableName, tableId, TableOperation.RENAME);
    final String newName = newParts.getSecond();
    final String oldName = oldParts.getSecond();
    final String namePath = manager.getZooKeeperRoot() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_NAME;
    // Atomically swap the stored name; tolerate this repo running more than once.
    zooWriter.mutateExisting(namePath, storedBytes -> {
      final String storedName = new String(storedBytes, UTF_8);
      if (storedName.equals(newName)) {
        // assume in this case the operation is running again, so we are done
        return null;
      }
      if (!storedName.equals(oldName)) {
        throw new AcceptableThriftTableOperationException(null, oldTableName, TableOperation.RENAME, TableOperationExceptionType.NOTFOUND, "Name changed while processing");
      }
      return newName.getBytes(UTF_8);
    });
    manager.getContext().clearTableListCache();
  } finally {
    Utils.getTableNameLock().unlock();
    Utils.unreserveTable(manager, tableId, tid, true);
    Utils.unreserveNamespace(manager, namespaceId, tid, false);
  }
  LoggerFactory.getLogger(RenameTable.class).debug("Renamed table {} {} {}", tableId, oldTableName, newTableName);
  return null;
}
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in the Apache Accumulo project.
Example: the getNextId method of the Utils class.
public static <T extends AbstractId<T>> T getNextId(String name, ServerContext context, Function<String, T> newIdFunction) throws AcceptableThriftTableOperationException {
  // Allocates the next id by atomically incrementing the counter stored at the
  // tables node in zookeeper, then converts it via the supplied factory.
  try {
    ZooReaderWriter zooWriter = context.getZooReaderWriter();
    final String counterPath = context.getZooKeeperRoot() + Constants.ZTABLES;
    // The counter is stored as a string in the maximum radix (base 36).
    byte[] assigned = zooWriter.mutateOrCreate(counterPath, ZERO_BYTE, existing -> {
      BigInteger next = new BigInteger(new String(existing, UTF_8), Character.MAX_RADIX).add(BigInteger.ONE);
      return next.toString(Character.MAX_RADIX).getBytes(UTF_8);
    });
    return newIdFunction.apply(new String(assigned, UTF_8));
  } catch (Exception e) {
    // Log with the full cause, then surface a thrift-acceptable failure.
    log.error("Failed to assign id to " + name, e);
    throw new AcceptableThriftTableOperationException(null, name, TableOperation.CREATE, TableOperationExceptionType.OTHER, e.getMessage());
  }
}
Aggregations