Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.
The class WriteExportFiles, method call().
@Override
public Repo<Manager> call(long tid, Manager manager) throws Exception {
  try {
    exportTable(manager.getVolumeManager(), manager.getContext(), tableInfo.tableName,
        tableInfo.tableID, tableInfo.exportDir);
  } catch (IOException ioe) {
    throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonical(),
        tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
        "Failed to create export files " + ioe.getMessage());
  }
  Utils.unreserveNamespace(manager, tableInfo.namespaceID, tid, false);
  Utils.unreserveTable(manager, tableInfo.tableID, tid, false);
  Utils.unreserveHdfsDirectory(manager, new Path(tableInfo.exportDir).toString(), tid);
  return null;
}
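Across the call sites on this page the exception is always constructed the same way. The annotated throw below is a sketch of the argument roles as inferred from these usages, not from the class's Javadoc:

// Argument roles inferred from the call sites shown on this page:
throw new AcceptableThriftTableOperationException(
    tableInfo.tableID.canonical(),     // table ID as a canonical String
    tableInfo.tableName,               // table name; may be null when unknown (see TableRangeOp below)
    TableOperation.EXPORT,             // the table operation that failed
    TableOperationExceptionType.OTHER, // failure classification returned to the client
    "Failed to create export files " + ioe.getMessage()); // human-readable description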
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.
The class WriteExportFiles, method isReady().
@Override
public long isReady(long tid, Manager manager) throws Exception {
  long reserved = Utils.reserveNamespace(manager, tableInfo.namespaceID, tid, false, true,
      TableOperation.EXPORT)
      + Utils.reserveTable(manager, tableInfo.tableID, tid, false, true, TableOperation.EXPORT);
  if (reserved > 0)
    return reserved;
  AccumuloClient client = manager.getContext();
  checkOffline(manager.getContext());
  Scanner metaScanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
  metaScanner.setRange(new KeyExtent(tableInfo.tableID, null, null).toMetaRange());
  // scan for locations
  metaScanner.fetchColumnFamily(CurrentLocationColumnFamily.NAME);
  metaScanner.fetchColumnFamily(FutureLocationColumnFamily.NAME);
  if (metaScanner.iterator().hasNext()) {
    return 500;
  }
  // use the same range to check for walogs that we used to check for hosted (or future hosted)
  // tablets; this is done as a separate scan after we check for locations, because walogs are
  // okay only if there is no location
  metaScanner.clearColumns();
  metaScanner.fetchColumnFamily(LogColumnFamily.NAME);
  if (metaScanner.iterator().hasNext()) {
    throw new AcceptableThriftTableOperationException(tableInfo.tableID.canonical(),
        tableInfo.tableName, TableOperation.EXPORT, TableOperationExceptionType.OTHER,
        "Write ahead logs found for table");
  }
  return 0;
}
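This isReady() follows the FATE convention visible in the snippet: a positive return value is a back-off in milliseconds before the manager re-polls the step, and 0 means call() may run. A minimal sketch of that contract, with preconditionsMet() as a hypothetical stand-in check:

// Hypothetical FATE step illustrating the isReady() contract used above.
// preconditionsMet() is a stand-in, not an Accumulo API.
@Override
public long isReady(long tid, Manager manager) throws Exception {
  if (!preconditionsMet(manager)) {
    return 500; // not ready; ask the manager to poll this step again in ~500 ms
  }
  return 0; // ready; the manager may now invoke call()
}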
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.
The class TableRangeOp, method call().
@Override
public Repo<Manager> call(long tid, Manager env) throws Exception {
  if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
    log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.",
        RootTable.NAME);
  }
  Text start = startRow.length == 0 ? null : new Text(startRow);
  Text end = endRow.length == 0 ? null : new Text(endRow);
  if (start != null && end != null)
    if (start.compareTo(end) >= 0)
      throw new AcceptableThriftTableOperationException(tableId.canonical(), null,
          TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE,
          "start row must be less than end row");
  env.mustBeOnline(tableId);
  MergeInfo info = env.getMergeInfo(tableId);
  if (info.getState() == MergeState.NONE) {
    KeyExtent range = new KeyExtent(tableId, end, start);
    env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
  }
  return new TableRangeOpWait(namespaceId, tableId);
}
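Note how TableRangeOp decodes the Thrift-supplied rows: a zero-length byte array stands for "no bound" and becomes a null Text, which KeyExtent interprets as an open end. A hedged round-trip sketch of that convention (the literal values are made up):

// Assumption: callers serialize a missing row bound as a zero-length byte array.
byte[] startRow = new byte[0];       // "no start row" from the client
byte[] endRow = "m".getBytes(UTF_8); // end row "m" from the client

Text start = startRow.length == 0 ? null : new Text(startRow); // null = unbounded
Text end = endRow.length == 0 ? null : new Text(endRow);

// Only a fully bounded range can be inverted; a null bound always passes.
if (start != null && end != null && start.compareTo(end) >= 0) {
  // rejected with TableOperationExceptionType.BAD_RANGE, as in call() above
}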
Use of org.apache.accumulo.core.clientImpl.AcceptableThriftTableOperationException in project accumulo by apache.
The class MapImportFileNames, method call().
@Override
public Repo<Manager> call(long tid, Manager environment) throws Exception {
  for (ImportedTableInfo.DirectoryMapping dm : tableInfo.directories) {
    Path path = new Path(dm.importDir, "mappings.txt");
    BufferedWriter mappingsWriter = null;
    try {
      VolumeManager fs = environment.getVolumeManager();
      fs.mkdirs(new Path(dm.importDir));
      FileStatus[] files = fs.listStatus(new Path(dm.exportDir));
      UniqueNameAllocator namer = environment.getContext().getUniqueNameAllocator();
      mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
      for (FileStatus fileStatus : files) {
        String fileName = fileStatus.getPath().getName();
        log.info("filename " + fileStatus.getPath());
        String[] sa = fileName.split("\\.");
        String extension = "";
        if (sa.length > 1) {
          extension = sa[sa.length - 1];
          if (!FileOperations.getValidExtensions().contains(extension)) {
            continue;
          }
        } else {
          // assume it is a map file
          extension = Constants.MAPFILE_EXTENSION;
        }
        String newName = "I" + namer.getNextName() + "." + extension;
        mappingsWriter.append(fileName);
        mappingsWriter.append(':');
        mappingsWriter.append(newName);
        mappingsWriter.newLine();
      }
      mappingsWriter.close();
      mappingsWriter = null;
    } catch (IOException ioe) {
      log.warn("{}", ioe.getMessage(), ioe);
      throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonical(),
          tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
          "Error writing mapping file " + path + " " + ioe.getMessage());
    } finally {
      if (mappingsWriter != null)
        try {
          mappingsWriter.close();
        } catch (IOException ioe) {
          log.warn("Failed to close " + path, ioe);
        }
    }
  }
  return new PopulateMetadataTable(tableInfo);
}
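Each mappings.txt line written above has the form originalFileName:newFileName. The reader below is a sketch of how such a file could be parsed back into a map; it is an illustration under that assumption, not the actual PopulateMetadataTable code, and fs and importDir are stand-ins for a Hadoop FileSystem and the import directory path:

// Sketch: parse a mappings.txt produced by the loop above.
// Assumes the one-pair-per-line "oldName:newName" format shown in the writer.
Map<String,String> renames = new HashMap<>();
try (BufferedReader in = new BufferedReader(
    new InputStreamReader(fs.open(new Path(importDir, "mappings.txt")), UTF_8))) {
  String line;
  while ((line = in.readLine()) != null) {
    int sep = line.lastIndexOf(':');
    renames.put(line.substring(0, sep), line.substring(sep + 1));
  }
}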