Use of java.io.RandomAccessFile in project hadoop by apache.
The class Storage, method writeProperties.
public static void writeProperties(File to, Properties props) throws IOException {
  try (RandomAccessFile file = new RandomAccessFile(to, "rws");
       FileOutputStream out = new FileOutputStream(file.getFD())) {
    file.seek(0);
    /*
     * If the server is interrupted before this line,
     * the version file will remain unchanged.
     */
    props.store(out, null);
    /*
     * Now the new fields are flushed to the head of the file, but the file
     * length can still be larger than required, and therefore the file can
     * contain whole or corrupted fields from its old contents at the end.
     * If the server is interrupted here and restarted later, these extra fields
     * either should not affect server behavior or should be handled
     * by the server correctly.
     */
    file.setLength(out.getChannel().position());
  }
}
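A minimal standalone sketch of the same pattern using only JDK classes (the target file name and the stored property are made up for illustration): open the file in "rws" mode so content and metadata are written synchronously, rewrite the properties from offset 0, then truncate to the stream position so no stale bytes from the previous contents survive.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.Properties;

public class AtomicPropertiesWrite {
  public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    // Hypothetical property; any key/value pairs work the same way.
    props.setProperty("layoutVersion", "-63");
    // Hypothetical target file in the working directory.
    File to = new File("VERSION");
    try (RandomAccessFile file = new RandomAccessFile(to, "rws");
         FileOutputStream out = new FileOutputStream(file.getFD())) {
      // Overwrite from the beginning of the file.
      file.seek(0);
      props.store(out, null);
      // Drop any leftover bytes from an older, longer version of the file.
      file.setLength(out.getChannel().position());
    }
  }
}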
Use of java.io.RandomAccessFile in project hadoop by apache.
The class OfflineImageViewerPB, method run.
public static int run(String[] args) throws Exception {
  Options options = buildOptions();
  if (args.length == 0) {
    printUsage();
    return 0;
  }
  // print help and exit with zero exit code
  if (args.length == 1 && isHelpOption(args[0])) {
    printUsage();
    return 0;
  }
  CommandLineParser parser = new PosixParser();
  CommandLine cmd;
  try {
    cmd = parser.parse(options, args);
  } catch (ParseException e) {
    System.out.println("Error parsing command-line options: ");
    printUsage();
    return -1;
  }
  if (cmd.hasOption("h")) {
    // print help and exit with non zero exit code since
    // it is not expected to give help and other options together.
    printUsage();
    return -1;
  }
  String inputFile = cmd.getOptionValue("i");
  String processor = cmd.getOptionValue("p", "Web");
  String outputFile = cmd.getOptionValue("o", "-");
  String delimiter = cmd.getOptionValue("delimiter",
      PBImageDelimitedTextWriter.DEFAULT_DELIMITER);
  String tempPath = cmd.getOptionValue("t", "");
  Configuration conf = new Configuration();
  try (PrintStream out = outputFile.equals("-") ?
      System.out : new PrintStream(outputFile, "UTF-8")) {
    switch (processor) {
      case "FileDistribution":
        long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
        int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
        boolean formatOutput = cmd.hasOption("format");
        new FileDistributionCalculator(conf, maxSize, step, formatOutput, out)
            .visit(new RandomAccessFile(inputFile, "r"));
        break;
      case "XML":
        new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile, "r"));
        break;
      case "ReverseXML":
        try {
          OfflineImageReconstructor.run(inputFile, outputFile);
        } catch (Exception e) {
          System.err.println("OfflineImageReconstructor failed: " + e.getMessage());
          e.printStackTrace(System.err);
          System.exit(1);
        }
        break;
      case "Web":
        String addr = cmd.getOptionValue("addr", "localhost:5978");
        try (WebImageViewer viewer = new WebImageViewer(NetUtils.createSocketAddr(addr))) {
          viewer.start(inputFile);
        }
        break;
      case "Delimited":
        try (PBImageDelimitedTextWriter writer =
            new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
          writer.visit(new RandomAccessFile(inputFile, "r"));
        }
        break;
      default:
        System.err.println("Invalid processor specified : " + processor);
        printUsage();
        return -1;
    }
    return 0;
  } catch (EOFException e) {
    System.err.println("Input file ended unexpectedly. Exiting");
  } catch (IOException e) {
    System.err.println("Encountered exception. Exiting: " + e.getMessage());
    e.printStackTrace(System.err);
  }
  return -1;
}
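For context, a hypothetical invocation of this entry point: the fsimage path and output file are made up, and the import assumes the class resolves from the usual offlineImageViewer package with the Hadoop HDFS jars on the classpath.

import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewerPB;

public class OivExample {
  public static void main(String[] args) throws Exception {
    // "-p XML" selects the XML processor branch shown above.
    String[] viewerArgs = {
        "-i", "/tmp/fsimage_0000000000000000001",  // hypothetical input fsimage
        "-p", "XML",
        "-o", "fsimage.xml"                        // hypothetical output file
    };
    int exitCode = OfflineImageViewerPB.run(viewerArgs);
    System.out.println("oiv exit code: " + exitCode);
  }
}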
Use of java.io.RandomAccessFile in project hadoop by apache.
The class FSImageLoader, method load.
/**
 * Load fsimage into the memory.
 * @param inputFile the filepath of the fsimage to load.
 * @return FSImageLoader
 * @throws IOException if failed to load fsimage.
 */
static FSImageLoader load(String inputFile) throws IOException {
  Configuration conf = new Configuration();
  RandomAccessFile file = new RandomAccessFile(inputFile, "r");
  if (!FSImageUtil.checkFileFormat(file)) {
    throw new IOException("Unrecognized FSImage");
  }
  FsImageProto.FileSummary summary = FSImageUtil.loadSummary(file);
  try (FileInputStream fin = new FileInputStream(file.getFD())) {
    // Map to record INodeReference to the referred id
    ImmutableList<Long> refIdList = null;
    String[] stringTable = null;
    byte[][] inodes = null;
    Map<Long, long[]> dirmap = null;
    ArrayList<FsImageProto.FileSummary.Section> sections =
        Lists.newArrayList(summary.getSectionsList());
    Collections.sort(sections, new Comparator<FsImageProto.FileSummary.Section>() {
      @Override
      public int compare(FsImageProto.FileSummary.Section s1,
          FsImageProto.FileSummary.Section s2) {
        FSImageFormatProtobuf.SectionName n1 =
            FSImageFormatProtobuf.SectionName.fromString(s1.getName());
        FSImageFormatProtobuf.SectionName n2 =
            FSImageFormatProtobuf.SectionName.fromString(s2.getName());
        if (n1 == null) {
          return n2 == null ? 0 : -1;
        } else if (n2 == null) {
          return -1;
        } else {
          return n1.ordinal() - n2.ordinal();
        }
      }
    });
    for (FsImageProto.FileSummary.Section s : sections) {
      fin.getChannel().position(s.getOffset());
      InputStream is = FSImageUtil.wrapInputStreamForCompression(conf, summary.getCodec(),
          new BufferedInputStream(new LimitInputStream(fin, s.getLength())));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Loading section " + s.getName() + " length: " + s.getLength());
      }
      switch (FSImageFormatProtobuf.SectionName.fromString(s.getName())) {
        case STRING_TABLE:
          stringTable = loadStringTable(is);
          break;
        case INODE:
          inodes = loadINodeSection(is);
          break;
        case INODE_REFERENCE:
          refIdList = loadINodeReferenceSection(is);
          break;
        case INODE_DIR:
          dirmap = loadINodeDirectorySection(is, refIdList);
          break;
        default:
          break;
      }
    }
    return new FSImageLoader(stringTable, inodes, dirmap);
  }
}
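The RandomAccessFile usage here boils down to one idea: validate a header, then share the file descriptor with a FileInputStream whose channel is repositioned to each section's offset. A generic JDK-only sketch of that idea with a made-up magic header and no Hadoop dependencies:

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class SectionedFileReader {
  // Hypothetical 8-byte magic header, just for the illustration.
  private static final byte[] MAGIC = "IMGFILE1".getBytes(StandardCharsets.US_ASCII);

  static InputStream openSection(String path, long offset) throws IOException {
    RandomAccessFile file = new RandomAccessFile(path, "r");
    byte[] header = new byte[MAGIC.length];
    file.readFully(header);
    if (!Arrays.equals(header, MAGIC)) {
      file.close();
      throw new IOException("Unrecognized file format");
    }
    // Share the underlying descriptor with a stream and jump to the section offset.
    FileInputStream fin = new FileInputStream(file.getFD());
    fin.getChannel().position(offset);
    // Closing the returned stream closes the shared descriptor as well.
    return new BufferedInputStream(fin);
  }
}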
Use of java.io.RandomAccessFile in project hadoop by apache.
The class FSImageTestUtil, method getImageFileMD5IgnoringTxId.
/**
 * Calculate the md5sum of an image after zeroing out the transaction ID
 * field in the header. This is useful for tests that want to verify
 * that two checkpoints have identical namespaces.
 */
public static String getImageFileMD5IgnoringTxId(File imageFile) throws IOException {
  File tmpFile = File.createTempFile("hadoop_imagefile_tmp", "fsimage");
  tmpFile.deleteOnExit();
  try {
    Files.copy(imageFile, tmpFile);
    RandomAccessFile raf = new RandomAccessFile(tmpFile, "rw");
    try {
      raf.seek(IMAGE_TXID_POS);
      raf.writeLong(0);
    } finally {
      IOUtils.closeStream(raf);
    }
    return getFileMD5(tmpFile);
  } finally {
    tmpFile.delete();
  }
}
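The core trick is seek-and-overwrite on a throwaway copy: copy the file, zero an 8-byte field at a fixed offset, then hash the copy. A JDK-only sketch of that trick; the field offset and file names are illustrative, not Hadoop's actual image layout.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class ZeroFieldCopy {
  // Hypothetical offset of the 8-byte field to blank out before hashing.
  private static final long FIELD_POS = 24;

  static File copyWithFieldZeroed(File original) throws IOException {
    File tmp = File.createTempFile("image_tmp", ".bin");
    tmp.deleteOnExit();
    Files.copy(original.toPath(), tmp.toPath(), StandardCopyOption.REPLACE_EXISTING);
    try (RandomAccessFile raf = new RandomAccessFile(tmp, "rw")) {
      raf.seek(FIELD_POS);  // jump to the field's fixed offset
      raf.writeLong(0);     // overwrite it with zeros
    }
    return tmp;  // hash this copy instead of the original
  }
}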
Use of java.io.RandomAccessFile in project hadoop by apache.
The class TestFadvisedFileRegion, method testCustomShuffleTransfer.
@Test(timeout = 100000)
public void testCustomShuffleTransfer() throws IOException {
  File absLogDir = new File("target",
      TestFadvisedFileRegion.class.getSimpleName() + "LocDir").getAbsoluteFile();
  String testDirPath = StringUtils.join(Path.SEPARATOR,
      new String[] { absLogDir.getAbsolutePath(), "testCustomShuffleTransfer" });
  File testDir = new File(testDirPath);
  testDir.mkdirs();
  System.out.println(testDir.getAbsolutePath());
  File inFile = new File(testDir, "fileIn.out");
  File outFile = new File(testDir, "fileOut.out");
  // Initialize input file
  byte[] initBuff = new byte[FILE_SIZE];
  Random rand = new Random();
  rand.nextBytes(initBuff);
  FileOutputStream out = new FileOutputStream(inFile);
  try {
    out.write(initBuff);
  } finally {
    IOUtils.cleanup(LOG, out);
  }
  // define position and count to read from a file region.
  int position = 2 * 1024 * 1024;
  int count = 4 * 1024 * 1024 - 1;
  RandomAccessFile inputFile = null;
  RandomAccessFile targetFile = null;
  WritableByteChannel target = null;
  FadvisedFileRegion fileRegion = null;
  try {
    inputFile = new RandomAccessFile(inFile.getAbsolutePath(), "r");
    targetFile = new RandomAccessFile(outFile.getAbsolutePath(), "rw");
    target = targetFile.getChannel();
    Assert.assertEquals(FILE_SIZE, inputFile.length());
    // create FadvisedFileRegion
    fileRegion = new FadvisedFileRegion(inputFile, position, count, false, 0, null, null, 1024, false);
    // test corner cases
    customShuffleTransferCornerCases(fileRegion, target, count);
    long pos = 0;
    long size;
    while ((size = fileRegion.customShuffleTransfer(target, pos)) > 0) {
      pos += size;
    }
    // assert size
    Assert.assertEquals(count, (int) pos);
    Assert.assertEquals(count, targetFile.length());
  } finally {
    if (fileRegion != null) {
      fileRegion.releaseExternalResources();
    }
    IOUtils.cleanup(LOG, target);
    IOUtils.cleanup(LOG, targetFile);
    IOUtils.cleanup(LOG, inputFile);
  }
  // Read the target file and verify that copy is done correctly
  byte[] buff = new byte[FILE_SIZE];
  FileInputStream in = new FileInputStream(outFile);
  try {
    int total = in.read(buff, 0, count);
    Assert.assertEquals(count, total);
    for (int i = 0; i < count; i++) {
      Assert.assertEquals(initBuff[position + i], buff[i]);
    }
  } finally {
    IOUtils.cleanup(LOG, in);
  }
  // delete files and folders
  inFile.delete();
  outFile.delete();
  testDir.delete();
  absLogDir.delete();
}
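FadvisedFileRegion is Hadoop-specific, but the underlying region copy can be sketched with plain FileChannel.transferTo between two RandomAccessFile channels. The helper below is a made-up illustration of that approach, not the class under test.

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;

public class RegionCopy {
  // Copy `count` bytes starting at `position` from src to dst.
  static long copyRegion(String src, String dst, long position, long count) throws IOException {
    try (RandomAccessFile in = new RandomAccessFile(src, "r");
         RandomAccessFile out = new RandomAccessFile(dst, "rw")) {
      FileChannel source = in.getChannel();
      FileChannel target = out.getChannel();
      long transferred = 0;
      while (transferred < count) {
        long n = source.transferTo(position + transferred, count - transferred, target);
        if (n <= 0) {
          break;  // reached end of the source file
        }
        transferred += n;
      }
      return transferred;
    }
  }
}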