Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
In the class ImportPopulateZookeeper, the call method:
@Override
public Repo<Master> call(long tid, Master env) throws Exception {
  // reserve the table name in zookeeper or fail
  Utils.tableNameLock.lock();
  try {
    // write tableName & tableId to zookeeper
    Instance instance = env.getInstance();
    Utils.checkTableDoesNotExist(instance, tableInfo.tableName, tableInfo.tableId, TableOperation.CREATE);
    String namespace = Tables.qualify(tableInfo.tableName).getFirst();
    Namespace.ID namespaceId = Namespaces.getNamespaceId(instance, namespace);
    TableManager.getInstance().addTable(tableInfo.tableId, namespaceId, tableInfo.tableName, NodeExistsPolicy.OVERWRITE);
    Tables.clearCache(instance);
  } finally {
    Utils.tableNameLock.unlock();
  }
  for (Entry<String,String> entry : getExportedProps(env.getFileSystem()).entrySet()) {
    if (!TablePropUtil.setTableProperty(tableInfo.tableId, entry.getKey(), entry.getValue())) {
      throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Invalid table property " + entry.getKey());
    }
  }
  return new CreateImportDir(tableInfo);
}
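Every snippet on this page follows the same FATE shape: a Repo step does one idempotent unit of work, then either returns the next step to run or throws AcceptableThriftTableOperationException, which marks the failure as expected (user-facing) so FATE reports it back to the client instead of treating it as an internal error. Below is a minimal, self-contained sketch of the chaining idea; the interface and class names are illustrative, not the Accumulo API.

// Minimal illustration of the step-chaining pattern: each step returns the
// next step to run, or null when the operation is complete. In real FATE the
// framework persists progress so a restarted master resumes mid-operation.
interface Step {
  Step run(long tid) throws Exception;
}

public class RepoChain {
  public static void main(String[] args) throws Exception {
    Step reserveName = tid -> {
      System.out.println("reserve table name, tid=" + tid);
      return tid2 -> {
        System.out.println("create import dir, tid=" + tid2);
        return null; // end of the chain
      };
    };
    for (Step s = reserveName; s != null; ) {
      s = s.run(42L);
    }
  }
}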
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
In the class BulkImport, the call method:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  log.debug(" tid {} sourceDir {}", tid, sourceDir);
  Utils.getReadLock(tableId, tid).lock();
  // check that the error directory exists and is empty
  VolumeManager fs = master.getFileSystem();
  Path errorPath = new Path(errorDir);
  FileStatus errorStatus = null;
  try {
    errorStatus = fs.getFileStatus(errorPath);
  } catch (FileNotFoundException ex) {
    // ignored
  }
  if (errorStatus == null)
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " does not exist");
  if (!errorStatus.isDirectory())
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not a directory");
  if (fs.listStatus(errorPath).length != 0)
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, errorDir + " is not empty");
  ZooArbitrator.start(Constants.BULK_ARBITRATOR_TYPE, tid);
  master.updateBulkImportStatus(sourceDir, BulkImportState.MOVING);
  // move the files into the directory
  try {
    String bulkDir = prepareBulkImport(master, fs, sourceDir, tableId);
    log.debug(" tid {} bulkDir {}", tid, bulkDir);
    return new LoadFiles(tableId, sourceDir, bulkDir, errorDir, setTime);
  } catch (IOException ex) {
    log.error("error preparing the bulk import directory", ex);
    throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_INPUT_DIRECTORY, sourceDir + ": " + ex);
  }
}
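The three checks above guard against a bad error directory before any files move: it must exist, be a directory, and be empty. Here is a standalone sketch of the same checks against the plain Hadoop FileSystem API; the original goes through Accumulo's VolumeManager wrapper, and IllegalArgumentException stands in for the acceptable exception.

// Standalone sketch of the error-directory validation, using Hadoop directly.
import java.io.FileNotFoundException;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ErrorDirCheck {
  static void checkErrorDir(FileSystem fs, Path errorPath) throws IOException {
    FileStatus status = null;
    try {
      status = fs.getFileStatus(errorPath);
    } catch (FileNotFoundException ex) {
      // leave status null; reported below
    }
    if (status == null)
      throw new IllegalArgumentException(errorPath + " does not exist");
    if (!status.isDirectory())
      throw new IllegalArgumentException(errorPath + " is not a directory");
    if (fs.listStatus(errorPath).length != 0)
      throw new IllegalArgumentException(errorPath + " is not empty");
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    checkErrorDir(fs, new Path(args[0]));
  }
}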
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
In the class LoadFiles, the call method:
@Override
public Repo<Master> call(final long tid, final Master master) throws Exception {
  master.updateBulkImportStatus(source, BulkImportState.LOADING);
  ExecutorService executor = getThreadPool(master);
  final AccumuloConfiguration conf = master.getConfiguration();
  VolumeManager fs = master.getFileSystem();
  List<FileStatus> files = new ArrayList<>();
  for (FileStatus entry : fs.listStatus(new Path(bulk))) {
    files.add(entry);
  }
  log.debug("tid " + tid + " importing " + files.size() + " files");
  Path writable = new Path(this.errorDir, ".iswritable");
  if (!fs.createNewFile(writable)) {
    // Maybe this is a re-try... clear the flag and try again
    fs.delete(writable);
    if (!fs.createNewFile(writable))
      throw new AcceptableThriftTableOperationException(tableId.canonicalID(), null, TableOperation.BULK_IMPORT, TableOperationExceptionType.BULK_BAD_ERROR_DIRECTORY, "Unable to write to " + this.errorDir);
  }
  fs.delete(writable);
  final Set<String> filesToLoad = Collections.synchronizedSet(new HashSet<String>());
  for (FileStatus f : files) {
    filesToLoad.add(f.getPath().toString());
  }
  final int RETRIES = Math.max(1, conf.getCount(Property.MASTER_BULK_RETRIES));
  for (int attempt = 0; attempt < RETRIES && filesToLoad.size() > 0; attempt++) {
    List<Future<List<String>>> results = new ArrayList<>();
    if (master.onlineTabletServers().size() == 0)
log.warn("There are no tablet server to process bulk import, waiting (tid = " + tid + ")");
    while (master.onlineTabletServers().size() == 0) {
      sleepUninterruptibly(500, TimeUnit.MILLISECONDS);
    }
    // Use the threadpool to assign files one-at-a-time to the server
    final List<String> loaded = Collections.synchronizedList(new ArrayList<String>());
    final Random random = new Random();
    final TServerInstance[] servers;
    String prop = conf.get(Property.MASTER_BULK_TSERVER_REGEX);
    if (null == prop || "".equals(prop)) {
      servers = master.onlineTabletServers().toArray(new TServerInstance[0]);
    } else {
      Pattern regex = Pattern.compile(prop);
      List<TServerInstance> subset = new ArrayList<>();
      master.onlineTabletServers().forEach(t -> {
        if (regex.matcher(t.host()).matches()) {
          subset.add(t);
        }
      });
      if (0 == subset.size()) {
        log.warn("There are no tablet servers online that match supplied regex: {}", conf.get(Property.MASTER_BULK_TSERVER_REGEX));
      }
      servers = subset.toArray(new TServerInstance[0]);
    }
    if (servers.length > 0) {
      for (final String file : filesToLoad) {
        results.add(executor.submit(new Callable<List<String>>() {
          @Override
          public List<String> call() {
            List<String> failures = new ArrayList<>();
            ClientService.Client client = null;
            HostAndPort server = null;
            try {
              // get a connection to a random tablet server, do not prefer cached connections because
              // this is running on the master and there are lots of connections to tablet servers
              // serving the metadata tablets
              long timeInMillis = master.getConfiguration().getTimeInMillis(Property.MASTER_BULK_TIMEOUT);
              server = servers[random.nextInt(servers.length)].getLocation();
              client = ThriftUtil.getTServerClient(server, master, timeInMillis);
              List<String> attempt = Collections.singletonList(file);
              log.debug("Asking " + server + " to bulk import " + file);
              List<String> fail = client.bulkImportFiles(Tracer.traceInfo(), master.rpcCreds(), tid, tableId.canonicalID(), attempt, errorDir, setTime);
              if (fail.isEmpty()) {
                loaded.add(file);
              } else {
                failures.addAll(fail);
              }
            } catch (Exception ex) {
              log.error("rpc failed server:" + server + ", tid:" + tid + " " + ex);
            } finally {
              ThriftUtil.returnClient(client);
            }
            return failures;
          }
        }));
      }
    }
    Set<String> failures = new HashSet<>();
    for (Future<List<String>> f : results) {
      failures.addAll(f.get());
    }
    filesToLoad.removeAll(loaded);
    if (filesToLoad.size() > 0) {
      log.debug("tid " + tid + " attempt " + (attempt + 1) + " " + sampleList(filesToLoad, 10) + " failed");
      sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }
  FSDataOutputStream failFile = fs.create(new Path(errorDir, BulkImport.FAILURES_TXT), true);
  try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter(failFile, UTF_8))) {
    for (String f : filesToLoad) {
      out.write(f);
      out.write("\n");
    }
  }
  // return the next step, which will perform cleanup
  return new CompleteBulkImport(tableId, source, bulk, errorDir);
}
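The heart of LoadFiles is a bounded retry loop: every outstanding file is submitted to a thread pool, files that load successfully are removed from the shared set, and whatever remains is retried up to MASTER_BULK_RETRIES times before being written to failures.txt. A self-contained sketch of that loop shape follows; tryImport is a stand-in for the bulkImportFiles RPC.

// Generic sketch of the bounded-retry pattern: submit each outstanding item
// to a pool, collect successes, and retry only the remainder.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RetryLoop {
  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(4);
    Set<String> toLoad = Collections.synchronizedSet(new HashSet<>(Arrays.asList("f1", "f2", "f3")));
    final int RETRIES = 3; // LoadFiles reads this from MASTER_BULK_RETRIES
    for (int attempt = 0; attempt < RETRIES && !toLoad.isEmpty(); attempt++) {
      List<String> loaded = Collections.synchronizedList(new ArrayList<>());
      List<Future<?>> results = new ArrayList<>();
      for (String file : toLoad) {
        results.add(executor.submit(() -> {
          if (tryImport(file)) {
            loaded.add(file);
          }
        }));
      }
      for (Future<?> f : results) {
        f.get(); // surface any unexpected exception
      }
      toLoad.removeAll(loaded);
    }
    executor.shutdown();
    System.out.println("still failing after retries: " + toLoad);
  }

  static boolean tryImport(String file) {
    return !file.equals("f3"); // simulate one persistently failing file
  }
}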
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
In the class MoveExportedFiles, the call method:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  try {
    VolumeManager fs = master.getFileSystem();
    Map<String,String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
    for (String oldFileName : fileNameMappings.keySet()) {
      if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File referenced by exported table does not exist " + oldFileName);
      }
    }
    FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
    for (FileStatus fileStatus : files) {
      String newName = fileNameMappings.get(fileStatus.getPath().getName());
      if (newName != null) {
        fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
      }
    }
    return new FinishImportTable(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error renaming files " + ioe.getMessage());
  }
}
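This step validates every file named in the mapping before moving any of them, so a missing file fails the operation cleanly instead of leaving the import half-renamed. A self-contained sketch of the rename-by-mapping idea with the plain Hadoop API follows; note that Hadoop's FileSystem.rename reports failure by returning false rather than throwing, which the sketch checks explicitly (the snippet above ignores the result).

// Sketch: move every file in a source directory that appears in the mapping
// to its new name in the destination directory.
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RenameByMapping {
  static void renameAll(FileSystem fs, Path exportDir, Path importDir,
      Map<String,String> mappings) throws IOException {
    for (FileStatus fileStatus : fs.listStatus(exportDir)) {
      String newName = mappings.get(fileStatus.getPath().getName());
      if (newName != null) {
        // rename() returns false on failure rather than throwing; check it
        if (!fs.rename(fileStatus.getPath(), new Path(importDir, newName)))
          throw new IOException("could not rename " + fileStatus.getPath());
      }
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    renameAll(fs, new Path(args[0]), new Path(args[1]), Collections.singletonMap(args[2], args[3]));
  }
}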
Use of org.apache.accumulo.core.client.impl.AcceptableThriftTableOperationException in project accumulo by apache.
In the class PopulateMetadataTable, the call method:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
  BatchWriter mbw = null;
  ZipInputStream zis = null;
  try {
    VolumeManager fs = master.getFileSystem();
    mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    zis = new ZipInputStream(fs.open(path));
    Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
    log.info("importDir is " + tableInfo.importDir);
    // This is a directory already prefixed with proper volume information,
    // e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
    final String bulkDir = tableInfo.importDir;
    final String[] tableDirs = ServerConstants.getTablesDirs();
    ZipEntry zipEntry;
    while ((zipEntry = zis.getNextEntry()) != null) {
      if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
        DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
        Key key = new Key();
        Value val = new Value();
        Mutation m = null;
        Text currentRow = null;
        int dirCount = 0;
        while (true) {
          key.readFields(in);
          val.readFields(in);
          Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
          Text metadataRow = new KeyExtent(tableInfo.tableId, endRow, null).getMetadataEntry();
          Text cq;
          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
            String oldName = new Path(key.getColumnQualifier().toString()).getName();
            String newName = fileNameMappings.get(oldName);
            if (newName == null) {
              throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "File " + oldName + " does not exist in import dir");
            }
            cq = new Text(bulkDir + "/" + newName);
          } else {
            cq = key.getColumnQualifier();
          }
          if (m == null) {
            // Make a unique directory inside the table's dir. Cannot import multiple
            // tables into one table, so don't need to use the unique allocator.
            String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
            // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
            String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
            m = new Mutation(metadataRow);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
            currentRow = metadataRow;
          }
          if (!currentRow.equals(metadataRow)) {
            // flush the finished row and start a mutation for the new row,
            // giving it its own unique tablet directory
            mbw.addMutation(m);
            String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
            String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
            m = new Mutation(metadataRow);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
            currentRow = metadataRow;
          }
          m.put(key.getColumnFamily(), cq, val);
          if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
            mbw.addMutation(m);
            // it's the last column in the last row
            break;
          }
        }
        break;
      }
    }
    return new MoveExportedFiles(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER, "Error reading " + path + " " + ioe.getMessage());
  } finally {
    if (zis != null) {
      try {
        zis.close();
      } catch (IOException ioe) {
        log.warn("Failed to close zip file ", ioe);
      }
    }
    if (mbw != null) {
      mbw.close();
    }
  }
}
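PopulateMetadataTable rebuilds metadata mutations from the serialized Key/Value pairs stored inside the export zip. Below is a sketch of just the reading side, assuming the entry name Constants.EXPORT_METADATA_FILE resolves to "metadata.bin" and that the pair stream simply ends at EOF; the loop above instead stops when it sees the last column of the last row.

// Sketch: dump the Key/Value pairs from an exportMetadata.zip given on the
// command line.
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Value;

public class ReadExportMetadata {
  public static void main(String[] args) throws IOException {
    try (ZipInputStream zis = new ZipInputStream(new FileInputStream(args[0]))) {
      ZipEntry entry;
      while ((entry = zis.getNextEntry()) != null) {
        if (!entry.getName().equals("metadata.bin")) // assumed EXPORT_METADATA_FILE
          continue;
        DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
        Key key = new Key();
        Value val = new Value();
        try {
          while (true) {
            key.readFields(in);
            val.readFields(in);
            System.out.println(key + " -> " + val);
          }
        } catch (EOFException eof) {
          // end of the serialized pair stream
        }
      }
    }
  }
}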