Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
The class MoveExportedFiles, method call:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  try {
    VolumeManager fs = master.getFileSystem();
    Map<String, String> fileNameMappings = PopulateMetadataTable.readMappingFile(fs, tableInfo);
    for (String oldFileName : fileNameMappings.keySet()) {
      if (!fs.exists(new Path(tableInfo.exportDir, oldFileName))) {
        throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT,
            TableOperationExceptionType.OTHER, "File referenced by exported table does not exist " + oldFileName);
      }
    }
    FileStatus[] files = fs.listStatus(new Path(tableInfo.exportDir));
    for (FileStatus fileStatus : files) {
      String newName = fileNameMappings.get(fileStatus.getPath().getName());
      if (newName != null)
        fs.rename(fileStatus.getPath(), new Path(tableInfo.importDir, newName));
    }
    return new FinishImportTable(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT,
        TableOperationExceptionType.OTHER, "Error renaming files " + ioe.getMessage());
  }
}
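The rename pass above is driven by the old-name-to-new-name map written earlier in the import. For context, here is a minimal sketch of what parsing such a mapping file could look like, assuming a plain-text file of oldName:newName lines; the file name mappings.txt, the ':' separator, and the helper's signature are illustrative assumptions, not Accumulo's actual readMappingFile.

// Hypothetical sketch only: parse "oldName:newName" lines into a map.
// The file name and separator below are assumptions for illustration.
static Map<String, String> readMappingFileSketch(VolumeManager fs, String importDir) throws IOException {
  Map<String, String> map = new HashMap<>();
  try (BufferedReader in = new BufferedReader(
      new InputStreamReader(fs.open(new Path(importDir, "mappings.txt")), UTF_8))) {
    String line;
    while ((line = in.readLine()) != null) {
      String[] parts = line.split(":", 2);
      map.put(parts[0], parts[1]);
    }
  }
  return map;
}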
Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
The class PopulateMetadataTable, method call:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
  BatchWriter mbw = null;
  ZipInputStream zis = null;
  try {
    VolumeManager fs = master.getFileSystem();
    mbw = master.getConnector().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    zis = new ZipInputStream(fs.open(path));
    Map<String, String> fileNameMappings = readMappingFile(fs, tableInfo);
    log.info("importDir is " + tableInfo.importDir);
    // This is a directory already prefixed with proper volume information e.g. hdfs://localhost:8020/path/to/accumulo/tables/...
    final String bulkDir = tableInfo.importDir;
    final String[] tableDirs = ServerConstants.getTablesDirs();
    ZipEntry zipEntry;
    while ((zipEntry = zis.getNextEntry()) != null) {
      if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
        DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
        Key key = new Key();
        Value val = new Value();
        Mutation m = null;
        Text currentRow = null;
        int dirCount = 0;
        while (true) {
          key.readFields(in);
          val.readFields(in);
          Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
          Text metadataRow = new KeyExtent(tableInfo.tableId, endRow, null).getMetadataEntry();
          Text cq;
          if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
            String oldName = new Path(key.getColumnQualifier().toString()).getName();
            String newName = fileNameMappings.get(oldName);
            if (newName == null) {
              throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT,
                  TableOperationExceptionType.OTHER, "File " + oldName + " does not exist in import dir");
            }
            cq = new Text(bulkDir + "/" + newName);
          } else {
            cq = key.getColumnQualifier();
          }
          if (m == null) {
            // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
            String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
            // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXXX
            String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
            m = new Mutation(metadataRow);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
            currentRow = metadataRow;
          }
          if (!currentRow.equals(metadataRow)) {
            mbw.addMutation(m);
            // Make a unique directory inside the table's dir. Cannot import multiple tables into one table, so don't need to use unique allocator
            String tabletDir = new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES), UTF_8);
            // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXXX
            String absolutePath = getClonedTabletDir(master, tableDirs, tabletDir);
            m = new Mutation(metadataRow);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8)));
            // track the row now being built so later columns for this row land in the same mutation
            currentRow = metadataRow;
          }
          m.put(key.getColumnFamily(), cq, val);
          if (endRow == null && TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key)) {
            mbw.addMutation(m);
            // it's the last column in the last row
            break;
          }
        }
        break;
      }
    }
    return new MoveExportedFiles(tableInfo);
  } catch (IOException ioe) {
    log.warn("{}", ioe.getMessage(), ioe);
    throw new AcceptableThriftTableOperationException(tableInfo.tableId.canonicalID(), tableInfo.tableName, TableOperation.IMPORT,
        TableOperationExceptionType.OTHER, "Error reading " + path + " " + ioe.getMessage());
  } finally {
    if (zis != null) {
      try {
        zis.close();
      } catch (IOException ioe) {
        log.warn("Failed to close zip file ", ioe);
      }
    }
    if (mbw != null) {
      mbw.close();
    }
  }
}
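For context on the tablet directory names generated above: FastFormat.toZeroPaddedString renders the counter as a zero-padded base-16 string behind the clone prefix, so the cloned tablet dirs come out as c-00000000, c-00000001, and so on. A small illustration, assuming Constants.CLONE_PREFIX_BYTES is the UTF-8 encoding of "c-":

// Illustration of the directory names produced by the loop above.
byte[] prefix = "c-".getBytes(UTF_8); // assumed value of Constants.CLONE_PREFIX_BYTES
String first = new String(FastFormat.toZeroPaddedString(0, 8, 16, prefix), UTF_8);  // "c-00000000"
String tenth = new String(FastFormat.toZeroPaddedString(10, 8, 16, prefix), UTF_8); // "c-0000000a"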
Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
The class CopyFailed, method call:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  // This needs to execute after the arbiter is stopped
  master.updateBulkImportStatus(source, BulkImportState.COPY_FILES);
  VolumeManager fs = master.getFileSystem();
  if (!fs.exists(new Path(error, BulkImport.FAILURES_TXT)))
    return new CleanUpBulkImport(tableId, source, bulk, error);
  HashMap<FileRef, String> failures = new HashMap<>();
  HashMap<FileRef, String> loadedFailures = new HashMap<>();
  try (BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(error, BulkImport.FAILURES_TXT)), UTF_8))) {
    String line = null;
    while ((line = in.readLine()) != null) {
      Path path = new Path(line);
      if (!fs.exists(new Path(error, path.getName())))
        failures.put(new FileRef(line, path), line);
    }
  }
  /*
   * I thought I could move files that have no file references in the table. However, it's possible a clone references a file. Therefore only move files
   * that have no loaded markers.
   */
  // determine which failed files were loaded
  Connector conn = master.getConnector();
  try (Scanner mscanner = new IsolatedScanner(conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY))) {
    mscanner.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
    mscanner.fetchColumnFamily(TabletsSection.BulkFileColumnFamily.NAME);
    for (Entry<Key, Value> entry : mscanner) {
      if (Long.parseLong(entry.getValue().toString()) == tid) {
        FileRef loadedFile = new FileRef(fs, entry.getKey());
        String absPath = failures.remove(loadedFile);
        if (absPath != null) {
          loadedFailures.put(loadedFile, absPath);
        }
      }
    }
  }
  // move failed files that were not loaded
  for (String failure : failures.values()) {
    Path orig = new Path(failure);
    Path dest = new Path(error, orig.getName());
    fs.rename(orig, dest);
    log.debug("tid " + tid + " renamed " + orig + " to " + dest + ": import failed");
  }
  if (loadedFailures.size() > 0) {
    DistributedWorkQueue bifCopyQueue = new DistributedWorkQueue(Constants.ZROOT + "/" + master.getInstance().getInstanceID()
        + Constants.ZBULK_FAILED_COPYQ, master.getConfiguration());
    HashSet<String> workIds = new HashSet<>();
    for (String failure : loadedFailures.values()) {
      Path orig = new Path(failure);
      Path dest = new Path(error, orig.getName());
      if (fs.exists(dest))
        continue;
      bifCopyQueue.addWork(orig.getName(), (failure + "," + dest).getBytes(UTF_8));
      workIds.add(orig.getName());
      log.debug("tid " + tid + " added to copyq: " + orig + " to " + dest + ": failed");
    }
    bifCopyQueue.waitUntilDone(workIds);
  }
  fs.deleteRecursively(new Path(error, BulkImport.FAILURES_TXT));
  return new CleanUpBulkImport(tableId, source, bulk, error);
}
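Each work item queued above packs "sourcePath,destPath" into the payload as UTF-8 bytes. A consumer of that queue would split the payload back apart before copying; the lines below are a hypothetical sketch of that decoding, not Accumulo's actual worker code:

// Hypothetical worker-side decoding of a copy-queue payload (workData is the received byte[]).
String[] paths = new String(workData, UTF_8).split(",", 2);
Path src = new Path(paths[0]); // the loaded bulk file that failed
Path dst = new Path(paths[1]); // its destination under the error directory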
Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
The class CreateDir, method undo:
@Override
public void undo(long tid, Master master) throws Exception {
  VolumeManager fs = master.getFileSystem();
  fs.deleteRecursively(new Path(tableInfo.dir));
}
Use of org.apache.accumulo.server.fs.VolumeManager in project accumulo by apache.
The class CreateDir, method call:
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  VolumeManager fs = master.getFileSystem();
  fs.mkdirs(new Path(tableInfo.dir));
  return new PopulateMetadata(tableInfo);
}
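call and undo above form the usual matched pair for a fate step: call creates the table directory, and undo removes it recursively if a later step fails. A minimal standalone sketch of that round trip, using a made-up path for illustration:

// Sketch of the forward/compensating pair; the path is hypothetical.
VolumeManager fs = master.getFileSystem();
Path dir = new Path("hdfs://localhost:8020/accumulo/tables/1/default_tablet"); // illustrative only
fs.mkdirs(dir);            // forward step (call)
fs.deleteRecursively(dir); // compensating step (undo)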