Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The call method of the RenameNamespace class.
@Override
public Repo<Master> call(long id, Master master) throws Exception {
  Instance instance = master.getInstance();
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  Utils.tableNameLock.lock();
  try {
    Utils.checkNamespaceDoesNotExist(instance, newName, namespaceId, TableOperation.RENAME);
    final String tap = ZooUtil.getRoot(instance) + Constants.ZNAMESPACES + "/" + namespaceId + Constants.ZNAMESPACE_NAME;
    zoo.mutate(tap, null, null, new Mutator() {
      @Override
      public byte[] mutate(byte[] current) throws Exception {
        final String currentName = new String(current);
        if (currentName.equals(newName))
          // assume in this case the operation is running again, so we are done
          return null;
        if (!currentName.equals(oldName)) {
          throw new AcceptableThriftTableOperationException(null, oldName, TableOperation.RENAME, TableOperationExceptionType.NAMESPACE_NOTFOUND, "Name changed while processing");
        }
        return newName.getBytes();
      }
    });
    Tables.clearCache(instance);
  } finally {
    Utils.tableNameLock.unlock();
    Utils.unreserveNamespace(namespaceId, id, true);
  }
  LoggerFactory.getLogger(RenameNamespace.class).debug("Renamed namespace {} {} {}", namespaceId, oldName, newName);
  return null;
}
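The Mutator callback is Accumulo's retryable read-modify-write hook for ZooKeeper; returning null tells mutate() not to write anything, which is what makes the rename idempotent when the FATE step is replayed. A minimal sketch of the same compare-and-swap against the plain ZooKeeper client (the helper class below is illustrative, not part of Accumulo's API):

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

// Hypothetical standalone equivalent of the Mutator above.
class IdempotentRename {
  static void rename(ZooKeeper zk, String path, String oldName, String newName) throws Exception {
    while (true) {
      Stat stat = new Stat();
      String current = new String(zk.getData(path, false, stat), UTF_8);
      if (current.equals(newName))
        return; // a replay of the same step; nothing left to do
      if (!current.equals(oldName))
        throw new IllegalStateException("Name changed while processing: " + current);
      try {
        // setData fails with BadVersion if another writer won the race
        zk.setData(path, newName.getBytes(UTF_8), stat.getVersion());
        return;
      } catch (KeeperException.BadVersionException e) {
        // lost the race; re-read and re-check
      }
    }
  }
}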
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The call method of the TableRangeOp class.
@Override
public Repo<Master> call(long tid, Master env) throws Exception {
  if (RootTable.ID.equals(tableId) && Operation.MERGE.equals(op)) {
    log.warn("Attempt to merge tablets for {} does nothing. It is not splittable.", RootTable.NAME);
  }
  Text start = startRow.length == 0 ? null : new Text(startRow);
  Text end = endRow.length == 0 ? null : new Text(endRow);
  if (start != null && end != null)
    if (start.compareTo(end) >= 0)
      throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.MERGE, TableOperationExceptionType.BAD_RANGE, "start row must be less than end row");
  env.mustBeOnline(tableId);
  MergeInfo info = env.getMergeInfo(tableId);
  if (info.getState() == MergeState.NONE) {
    KeyExtent range = new KeyExtent(tableId, end, start);
    env.setMergeState(new MergeInfo(range, op), MergeState.STARTED);
  }
  return new TableRangeOpWait(namespaceId, tableId);
}
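The two length == 0 checks implement a sentinel convention: an empty row array stands for an unbounded endpoint, so only a fully bounded range gets the ordering check. Note also the argument order in new KeyExtent(tableId, end, start): KeyExtent takes the end row before the previous end row. A self-contained restatement of the validation (class and names are illustrative):

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.hadoop.io.Text;

// Illustrative sketch of the range check above, not Accumulo API.
class RangeCheck {
  static void validate(byte[] startRow, byte[] endRow) {
    // empty arrays stand in for unbounded endpoints
    Text start = startRow.length == 0 ? null : new Text(startRow);
    Text end = endRow.length == 0 ? null : new Text(endRow);
    if (start != null && end != null && start.compareTo(end) >= 0)
      throw new IllegalArgumentException("start row must be less than end row");
  }

  public static void main(String[] args) {
    validate(new byte[0], "m".getBytes(UTF_8));        // unbounded start: ok
    validate("a".getBytes(UTF_8), "m".getBytes(UTF_8)); // a < m: ok
    validate("m".getBytes(UTF_8), "m".getBytes(UTF_8)); // throws: empty range
  }
}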
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The getNextId method of the Utils class.
static <T extends AbstractId> T getNextId(String name, Instance instance, Function<String, T> newIdFunction) throws AcceptableThriftTableOperationException {
  try {
    IZooReaderWriter zoo = ZooReaderWriter.getInstance();
    final String ntp = ZooUtil.getRoot(instance) + Constants.ZTABLES;
    byte[] nid = zoo.mutate(ntp, ZERO_BYTE, ZooUtil.PUBLIC, currentValue -> {
      BigInteger nextId = new BigInteger(new String(currentValue, UTF_8), Character.MAX_RADIX);
      nextId = nextId.add(BigInteger.ONE);
      return nextId.toString(Character.MAX_RADIX).getBytes(UTF_8);
    });
    return newIdFunction.apply(new String(nid, UTF_8));
  } catch (Exception e1) {
    log.error("Failed to assign id to " + name, e1);
    throw new AcceptableThriftTableOperationException(null, name, TableOperation.CREATE, TableOperationExceptionType.OTHER, e1.getMessage());
  }
}
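Table ids are just a counter rendered in radix 36 (Character.MAX_RADIX) and stored as UTF-8 text under the ZTABLES node; the lambda is the same retryable Mutator pattern as above. The arithmetic in isolation (the demo class is hypothetical):

import static java.nio.charset.StandardCharsets.UTF_8;

import java.math.BigInteger;

// Standalone sketch of the counter arithmetic getNextId delegates to ZooKeeper:
// ids are a BigInteger rendered in base 36, so "z" increments to "10".
class NextIdDemo {
  static byte[] increment(byte[] currentValue) {
    BigInteger next = new BigInteger(new String(currentValue, UTF_8), Character.MAX_RADIX)
        .add(BigInteger.ONE);
    return next.toString(Character.MAX_RADIX).getBytes(UTF_8);
  }

  public static void main(String[] args) {
    byte[] id = "0".getBytes(UTF_8);
    for (int i = 0; i < 37; i++)
      id = increment(id);
    System.out.println(new String(id, UTF_8)); // prints "11" (37 in base 36)
  }
}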
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The call method of the CompactRange class.
@Override
public Repo<Master> call(final long tid, Master env) throws Exception {
  String zTablePath = Constants.ZROOT + "/" + env.getInstance().getInstanceID() + Constants.ZTABLES + "/" + tableId + Constants.ZTABLE_COMPACT_ID;
  IZooReaderWriter zoo = ZooReaderWriter.getInstance();
  byte[] cid;
  try {
    cid = zoo.mutate(zTablePath, null, null, new Mutator() {
      @Override
      public byte[] mutate(byte[] currentValue) throws Exception {
        String cvs = new String(currentValue, UTF_8);
        String[] tokens = cvs.split(",");
        long flushID = Long.parseLong(tokens[0]);
        flushID++;
        String txidString = String.format("%016x", tid);
        for (int i = 1; i < tokens.length; i++) {
          if (tokens[i].startsWith(txidString))
            // skip self
            continue;
          log.debug("txidString : {}", txidString);
          log.debug("tokens[{}] : {}", i, tokens[i]);
          throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.OTHER, "Another compaction with iterators and/or a compaction strategy is running");
        }
        StringBuilder encodedIterators = new StringBuilder();
        if (config != null) {
          Hex hex = new Hex();
          encodedIterators.append(",");
          encodedIterators.append(txidString);
          encodedIterators.append("=");
          encodedIterators.append(new String(hex.encode(config), UTF_8));
        }
        return (Long.toString(flushID) + encodedIterators).getBytes(UTF_8);
      }
    });
    return new CompactionDriver(Long.parseLong(new String(cid, UTF_8).split(",")[0]), namespaceId, tableId, startRow, endRow);
  } catch (NoNodeException nne) {
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.COMPACT, TableOperationExceptionType.NOTFOUND, null);
  }
}
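The znode holds a comma-separated value of the form "<compactId>[,<txid>=<hex-encoded config>]...": the first token is a counter bumped on every compaction, and any further token records a transaction that attached iterators or a compaction strategy, which is why the loop rejects a second config-bearing compaction while one is pending. A standalone restatement of that token handling (class and method names are illustrative):

import static java.nio.charset.StandardCharsets.UTF_8;

import org.apache.commons.codec.binary.Hex;

// Sketch of the value format the Mutator above manipulates; not Accumulo API.
class CompactIdDemo {
  static String bump(String currentValue, long tid, byte[] config) {
    String[] tokens = currentValue.split(",");
    long compactId = Long.parseLong(tokens[0]) + 1;
    String txidString = String.format("%016x", tid);
    for (int i = 1; i < tokens.length; i++)
      if (!tokens[i].startsWith(txidString)) // any entry other than our own
        throw new IllegalStateException("Another compaction with a config is running");
    StringBuilder sb = new StringBuilder(Long.toString(compactId));
    if (config != null)
      sb.append(',').append(txidString).append('=').append(new String(new Hex().encode(config), UTF_8));
    return sb.toString();
  }

  public static void main(String[] args) {
    System.out.println(bump("5", 0x2aL, "iters".getBytes(UTF_8)));
    // prints: 6,000000000000002a=6974657273
  }
}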
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
The call method of the MapImportFileNames class.
@Override
public Repo<Master> call(long tid, Master environment) throws Exception {
  Path path = new Path(tableInfo.importDir, "mappings.txt");
  BufferedWriter mappingsWriter = null;
  try {
    VolumeManager fs = environment.getFileSystem();
    fs.mkdirs(new Path(tableInfo.importDir));
    FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
    UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
    mappingsWriter = new BufferedWriter(new OutputStreamWriter(fs.create(path), UTF_8));
    for (FileStatus fileStatus : files) {
      String fileName = fileStatus.getPath().getName();
      log.info("filename " + fileStatus.getPath().toString());
      String[] sa = fileName.split("\\.");
      String extension = "";
      if (sa.length > 1) {
        extension = sa[sa.length - 1];
        if (!FileOperations.getValidExtensions().contains(extension)) {
          continue;
        }
      } else {
        // assume it is a map file
        extension = Constants.MAPFILE_EXTENSION;
      }
      String newName = "I" + namer.getNextName() + "." + extension;
      mappingsWriter.append(fileName);
      mappingsWriter.append(':');
      mappingsWriter.append(newName);
      mappingsWriter.newLine();
    }
    mappingsWriter.close();
    mappingsWriter = null;
    return new PopulateMetadataTable(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error writing mapping file " + path + " " + ioe.getMessage());
  } finally {
    if (mappingsWriter != null)
      try {
        mappingsWriter.close();
      } catch (IOException ioe) {
        log.warn("Failed to close " + path, ioe);
      }
  }
}
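Each line of mappings.txt pairs an original bulk-import file name with the freshly allocated one ("oldName:newName"); the follow-on PopulateMetadataTable step consumes it. A hypothetical reader for that format, purely to document it (not part of Accumulo):

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.BufferedReader;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;
import java.util.Map;

// Hypothetical parser for the mappings.txt written above.
class MappingsReader {
  static Map<String, String> read(Path mappings) throws IOException {
    Map<String, String> renames = new HashMap<>();
    try (BufferedReader in = Files.newBufferedReader(mappings, UTF_8)) {
      String line;
      while ((line = in.readLine()) != null) {
        int sep = line.indexOf(':'); // safe: HDFS file names cannot contain ':'
        renames.put(line.substring(0, sep), line.substring(sep + 1));
      }
    }
    return renames;
  }
}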