Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class TestIFile, method testIFileWriterWithCodec.
/**
 * Create an IFile.Writer using GzipCodec since this code does not
 * have a compressor when run via the tests (ie no native libraries).
 */
@Test
public void testIFileWriterWithCodec() throws Exception {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
  Path path = new Path(new Path("build/test.ifile"), "data");
  DefaultCodec codec = new GzipCodec();
  codec.setConf(conf);
  IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(
      conf, rfs.create(path), Text.class, Text.class, codec, null);
  writer.close();
}
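The test above only exercises the write path. A minimal companion sketch, reusing conf, rfs, path and codec from the method above and assuming IFile.Reader offers a (conf, fs, path, codec, readsCounter) constructor as its sibling reader test suggests, would re-open the file through the same raw local filesystem:

  // Sketch only: re-open the compressed IFile just written, via the raw
  // (non-checksummed) local filesystem, then close it again.
  IFile.Reader<Text, Text> reader =
      new IFile.Reader<Text, Text>(conf, rfs, path, codec, null);
  reader.close();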
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class TestReduceTask, method runValueIterator.
public void runValueIterator(Path tmpDir, Pair[] vals, Configuration conf,
    CompressionCodec codec) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  FileSystem rfs = ((LocalFileSystem) localFs).getRaw();
  Path path = new Path(tmpDir, "data.in");
  IFile.Writer<Text, Text> writer = new IFile.Writer<Text, Text>(
      conf, rfs.create(path), Text.class, Text.class, codec, null);
  for (Pair p : vals) {
    writer.append(new Text(p.key), new Text(p.value));
  }
  writer.close();
  @SuppressWarnings("unchecked")
  RawKeyValueIterator rawItr = Merger.merge(conf, rfs, Text.class, Text.class,
      codec, new Path[] { path }, false,
      conf.getInt(JobContext.IO_SORT_FACTOR, 100), tmpDir,
      new Text.Comparator(), new NullProgress(), null, null, null);
  // WritableComparators are not generic
  @SuppressWarnings("unchecked")
  ReduceTask.ValuesIterator valItr = new ReduceTask.ValuesIterator<Text, Text>(
      rawItr, WritableComparator.get(Text.class), Text.class, Text.class,
      conf, new NullProgress());
  int i = 0;
  while (valItr.more()) {
    Object key = valItr.getKey();
    String keyString = key.toString();
    // make sure it matches!
    assertEquals(vals[i].key, keyString);
    // must have at least 1 value!
    assertTrue(valItr.hasNext());
    while (valItr.hasNext()) {
      String valueString = valItr.next().toString();
      // make sure the values match
      assertEquals(vals[i].value, valueString);
      // make sure the keys match
      assertEquals(vals[i].key, valItr.getKey().toString());
      i += 1;
    }
    // make sure the key hasn't changed under the hood
    assertEquals(keyString, valItr.getKey().toString());
    valItr.nextKey();
  }
  assertEquals(vals.length, i);
  // make sure we have progress equal to 1.0
  assertEquals(1.0f, rawItr.getProgress().get(), 0.0000);
}
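For context, a caller drives this helper roughly as sketched below. The sketch is hypothetical: it assumes Pair is the test's simple (key, value) holder and that the keys are supplied in sorted order, since the assertions above walk vals[i] in the same order the merged iterator produces.

  // Hypothetical driver fragment (inside a test method that throws IOException).
  Configuration conf = new Configuration();
  Path tmpDir = new Path("build/test/test.reduce.task");
  // Keys must already be sorted so grouping lines up with vals[i].
  Pair[] vals = new Pair[] {
      new Pair("apple", "green"),
      new Pair("apple", "red"),
      new Pair("banana", "yellow")
  };
  // Pass null for an uncompressed run, or a configured codec (e.g. GzipCodec)
  // to exercise the compressed path.
  runValueIterator(tmpDir, vals, conf, null);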
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class TestFSInputChecker, method testFSInputChecker.
@Test
public void testFSInputChecker() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_SUM);
  rand.nextBytes(expected);
  // test DFS
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
  // test Local FS
  fileSys = FileSystem.getLocal(conf);
  try {
    testChecker(fileSys, true);
    testChecker(fileSys, false);
    testFileCorruption((LocalFileSystem) fileSys);
    testSeekAndRead(fileSys);
  } finally {
    fileSys.close();
  }
}
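testFileCorruption is only invoked for the local filesystem because LocalFileSystem is a ChecksumFileSystem: every data file has a sibling .crc file, and damage to the data underneath that layer should surface as a ChecksumException on the next checksummed read. The helper's body is not shown here; the following is a minimal sketch of that idea, assuming JUnit's fail plus Hadoop's IOUtils and ChecksumException, not the actual implementation:

  // Sketch only: write through the checksummed local FS, corrupt the raw data
  // file in place, and expect the checksum layer to catch it on re-read.
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  Path file = new Path("build/test/corruption-test.dat");
  try (FSDataOutputStream out = localFs.create(file, true)) {
    out.write(new byte[4096]);
  }
  // Flip a few bytes in the middle of the data file, bypassing the .crc file.
  try (RandomAccessFile raf = new RandomAccessFile(localFs.pathToFile(file), "rw")) {
    raf.seek(2048);
    raf.writeInt(0xdeadbeef);
  }
  try (FSDataInputStream in = localFs.open(file)) {
    IOUtils.readFully(in, new byte[4096], 0, 4096);
    fail("expected a ChecksumException on the corrupted file");
  } catch (ChecksumException expectedFailure) {
    // corruption detected by the checksum layer, as intended
  }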
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class S3AFileSystem, method innerCopyFromLocalFile.
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException IO problem
 * @throws FileAlreadyExistsException the destination file exists and
 * overwrite==false
 * @throws AmazonClientException failure in the AWS SDK
 */
private void innerCopyFromLocalFile(boolean delSrc, boolean overwrite,
    Path src, Path dst)
    throws IOException, FileAlreadyExistsException, AmazonClientException {
  incrementStatistic(INVOCATION_COPY_FROM_LOCAL_FILE);
  final String key = pathToKey(dst);
  if (!overwrite && exists(dst)) {
    throw new FileAlreadyExistsException(dst + " already exists");
  }
  LOG.debug("Copying local file from {} to {}", src, dst);
  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);
  final ObjectMetadata om = newObjectMetadata(srcfile.length());
  PutObjectRequest putObjectRequest = newPutObjectRequest(key, om, srcfile);
  Upload up = putObject(putObjectRequest);
  ProgressableProgressListener listener =
      new ProgressableProgressListener(this, key, up, null);
  up.addProgressListener(listener);
  try {
    up.waitForUploadResult();
  } catch (InterruptedException e) {
    throw new InterruptedIOException(
        "Interrupted copying " + src + " to " + dst + ", cancelling");
  }
  listener.uploadCompleted();
  // This will delete unnecessary fake parent directories
  finishedWrite(key);
  if (delSrc) {
    local.delete(src, false);
  }
}
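This private method is reached through the public FileSystem#copyFromLocalFile overload that S3AFileSystem overrides. A caller-side sketch follows; the bucket name and paths are made up for illustration:

  // Hypothetical caller: upload a local file to S3 via the S3A connector.
  Configuration conf = new Configuration();
  FileSystem s3a = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
  Path localSrc = new Path("file:///tmp/report.csv");
  Path remoteDst = new Path("s3a://example-bucket/reports/report.csv");
  // delSrc=false keeps the local copy; overwrite=true replaces any existing object.
  s3a.copyFromLocalFile(false, true, localSrc, remoteDst);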
Use of org.apache.hadoop.fs.LocalFileSystem in project hadoop by apache.
The class UpgradeUtilities, method createBlockPoolStorageDirs.
/**
 * Simulate the {@link DFSConfigKeys#DFS_DATANODE_DATA_DIR_KEY} of a
 * populated DFS filesystem.
 * This method populates each <code>parent/dirName</code> directory with the
 * contents of a block pool storage directory taken from a singleton datanode
 * master (which contains version and block files). If the destination
 * directory does not exist, it is created; if it already exists, it is first
 * deleted.
 *
 * @param parents parent directories under which {@code dirName} is created
 * @param dirName directory under which the storage directory is created
 * @param bpid block pool id for which the storage directory is created
 * @return the array of created directories
 */
public static File[] createBlockPoolStorageDirs(String[] parents,
    String dirName, String bpid) throws Exception {
  File[] retVal = new File[parents.length];
  Path bpCurDir = new Path(MiniDFSCluster.getBPDir(datanodeStorage, bpid,
      Storage.STORAGE_DIR_CURRENT));
  for (int i = 0; i < parents.length; i++) {
    File newDir = new File(parents[i] + "/current/" + bpid, dirName);
    createEmptyDirs(new String[] { newDir.toString() });
    LocalFileSystem localFS = FileSystem.getLocal(new HdfsConfiguration());
    localFS.copyToLocalFile(bpCurDir, new Path(newDir.toString()), false);
    retVal[i] = newDir;
  }
  return retVal;
}
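A hypothetical usage sketch follows. It assumes the class's one-time setup (UpgradeUtilities.initialize()) has already populated the singleton datanode master storage referenced by datanodeStorage; the parent paths and block pool id are made-up test values:

  // Hypothetical usage (paths and block pool id are invented for illustration).
  String[] parents = { "/tmp/dfs/data1", "/tmp/dfs/data2" };
  File[] bpDirs = UpgradeUtilities.createBlockPoolStorageDirs(
      parents, "current", "BP-1234567890-127.0.0.1-1400000000000");
  for (File dir : bpDirs) {
    System.out.println("created block pool storage dir: " + dir);
  }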