
Example 21 with Writer

Use of java.io.Writer in the Apache Hadoop project.

The class TestFsDatasetImpl, method getDfsUsedValueOfNewVolume.

private long getDfsUsedValueOfNewVolume(long cacheDfsUsed, long waitIntervalTime) throws IOException, InterruptedException {
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, BLOCK_POOL_IDS[0], 1));
    String CURRENT_DIR = "current";
    String DU_CACHE_FILE = BlockPoolSlice.DU_CACHE_FILE;
    String path = BASE_DIR + "/newData0";
    String pathUri = new Path(path).toUri().toString();
    StorageLocation loc = StorageLocation.parse(pathUri);
    Storage.StorageDirectory sd = createStorageDirectory(new File(path));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    String cacheFilePath = String.format("%s/%s/%s/%s/%s", path, CURRENT_DIR, BLOCK_POOL_IDS[0], CURRENT_DIR, DU_CACHE_FILE);
    File outFile = new File(cacheFilePath);
    if (!outFile.getParentFile().exists()) {
        outFile.getParentFile().mkdirs();
    }
    if (outFile.exists()) {
        outFile.delete();
    }
    FakeTimer timer = new FakeTimer();
    // Write the dfsUsed value and the (fake) current time to the cache file.
    // The method declares IOException, so a failed write fails the test loudly
    // rather than being swallowed by an empty catch block.
    try (Writer out = new OutputStreamWriter(new FileOutputStream(outFile), StandardCharsets.UTF_8)) {
        out.write(Long.toString(cacheDfsUsed) + " " + Long.toString(timer.now()));
        out.flush();
    }
    dataset.setTimer(timer);
    timer.advance(waitIntervalTime);
    dataset.addVolume(loc, nsInfos);
    // Get the last volume, i.e. the one that was just added
    FsVolumeImpl newVolume;
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        newVolume = (FsVolumeImpl) volumes.get(volumes.size() - 1);
    }
    long dfsUsed = newVolume.getDfsUsed();
    return dfsUsed;
}
Also used: Path(org.apache.hadoop.fs.Path), DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage), StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), Matchers.anyString(org.mockito.Matchers.anyString), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), Storage(org.apache.hadoop.hdfs.server.common.Storage), FileOutputStream(java.io.FileOutputStream), OutputStreamWriter(java.io.OutputStreamWriter), NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation), File(java.io.File), FakeTimer(org.apache.hadoop.util.FakeTimer), Writer(java.io.Writer)
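
The core Writer pattern here is writing a "<value> <timestamp>" pair to a cache file and reading it back, which is how BlockPoolSlice persists its dfsUsed figure across restarts. Below is a minimal, self-contained sketch of that round trip; the file name, values, and class name are hypothetical, not Hadoop APIs.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;

public class DfsUsedCacheSketch {
    public static void main(String[] args) throws IOException {
        // Hypothetical cache file; the real test derives the path from the block pool layout.
        File cacheFile = new File("dfsUsed");
        long dfsUsed = 1024L;
        long now = System.currentTimeMillis();
        // Write "<value> <timestamp>", the same format the test snippet produces;
        // try-with-resources closes the Writer even if write() throws.
        try (Writer out = new OutputStreamWriter(new FileOutputStream(cacheFile), StandardCharsets.UTF_8)) {
            out.write(dfsUsed + " " + now);
        }
        // Read the pair back and split on the single space separator.
        String[] parts = new String(Files.readAllBytes(cacheFile.toPath()), StandardCharsets.UTF_8).split(" ");
        System.out.println("cached dfsUsed=" + parts[0] + " writtenAt=" + parts[1]);
    }
}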

Example 22 with Writer

Use of java.io.Writer in the Apache Hadoop project.

The class KMSJSONWriter, method writeTo.

@Override
public void writeTo(Object obj, Class<?> aClass, Type type, Annotation[] annotations, MediaType mediaType, MultivaluedMap<String, Object> stringObjectMultivaluedMap, OutputStream outputStream) throws IOException, WebApplicationException {
    Writer writer = new OutputStreamWriter(outputStream, StandardCharsets.UTF_8);
    ObjectMapper jsonMapper = new ObjectMapper();
    jsonMapper.writerWithDefaultPrettyPrinter().writeValue(writer, obj);
}
Also used: OutputStreamWriter(java.io.OutputStreamWriter), MessageBodyWriter(javax.ws.rs.ext.MessageBodyWriter), Writer(java.io.Writer), ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper)
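
Stripped of the JAX-RS plumbing, the pattern is wrapping the response OutputStream in a UTF-8 OutputStreamWriter and handing it to Jackson. A minimal sketch, assuming only jackson-databind on the classpath; the Map payload and class name are illustrative.

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class JsonWriterSketch {
    public static void main(String[] args) throws IOException {
        Map<String, Object> payload = new LinkedHashMap<>();
        payload.put("name", "mykey");
        payload.put("versions", 3);
        // Wrap the raw stream in a Writer so Jackson emits UTF-8 text.
        Writer writer = new OutputStreamWriter(System.out, StandardCharsets.UTF_8);
        // writeValue() flushes (and by default closes) the target when done.
        new ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(writer, payload);
    }
}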

Example 23 with Writer

Use of java.io.Writer in the Apache Hadoop project.

The class TestHttpFSServer, method createHttpFSServer.

private void createHttpFSServer(boolean addDelegationTokenAuthHandler) throws Exception {
    File homeDir = TestDirHelper.getTestDir();
    Assert.assertTrue(new File(homeDir, "conf").mkdir());
    Assert.assertTrue(new File(homeDir, "log").mkdir());
    Assert.assertTrue(new File(homeDir, "temp").mkdir());
    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
    File secretFile = new File(new File(homeDir, "conf"), "secret");
    try (Writer w = new FileWriter(secretFile)) {
        w.write("secret");
    }
    // HDFS configuration
    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
    hadoopConfDir.mkdirs();
    Configuration hdfsConf = TestHdfsHelper.getHdfsConf();
    // Http Server's conf should be based on HDFS's conf
    Configuration conf = new Configuration(hdfsConf);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
    try (OutputStream os = new FileOutputStream(hdfsSite)) {
        conf.writeXml(os);
    }
    // HTTPFS configuration
    conf = new Configuration(false);
    if (addDelegationTokenAuthHandler) {
        conf.set("httpfs.authentication.type", HttpFSKerberosAuthenticationHandlerForTesting.class.getName());
    }
    conf.set("httpfs.services.ext", MockGroups.class.getName());
    conf.set("httpfs.admin.group", HadoopUsersConfTestHelper.getHadoopUserGroups(HadoopUsersConfTestHelper.getHadoopUsers()[0])[0]);
    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups", HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
    conf.set("httpfs.proxyuser." + HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts", HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
    conf.set("httpfs.authentication.signature.secret.file", secretFile.getAbsolutePath());
    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
    try (OutputStream os = new FileOutputStream(httpfsSite)) {
        conf.writeXml(os);
    }
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    URL url = cl.getResource("webapp");
    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
    Server server = TestJettyHelper.getJettyServer();
    server.setHandler(context);
    server.start();
    if (addDelegationTokenAuthHandler) {
        HttpFSServerWebApp.get().setAuthority(TestJettyHelper.getAuthority());
    }
}
Also used: WebAppContext(org.eclipse.jetty.webapp.WebAppContext), Configuration(org.apache.hadoop.conf.Configuration), Server(org.eclipse.jetty.server.Server), FileWriter(java.io.FileWriter), OutputStream(java.io.OutputStream), FileOutputStream(java.io.FileOutputStream), File(java.io.File), Writer(java.io.Writer), URL(java.net.URL), AuthenticatedURL(org.apache.hadoop.security.authentication.client.AuthenticatedURL)
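
Two Writer/stream idioms carry this setup: writing a small secret file through a FileWriter, and serializing a Hadoop Configuration to XML with writeXml(). A minimal sketch of both, assuming hadoop-common on the classpath; the file names and property key are illustrative.

import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.OutputStream;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;

public class ConfWriterSketch {
    public static void main(String[] args) throws IOException {
        // Signature secret of the kind consumed via httpfs.authentication.signature.secret.file.
        File secretFile = new File("secret");
        try (Writer w = new FileWriter(secretFile)) {
            w.write("secret");
        }
        // Serialize a Configuration as hdfs-site.xml-style XML.
        Configuration conf = new Configuration(false);
        conf.setBoolean("dfs.namenode.acls.enabled", true);
        try (OutputStream os = new FileOutputStream(new File("hdfs-site.xml"))) {
            conf.writeXml(os);
        }
    }
}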

Example 24 with Writer

Use of java.io.Writer in the Apache Hadoop project.

The class TestFsck, method testFsckFileNotFound.

/** Test fsck with FileNotFound. */
@Test
public void testFsckFileNotFound() throws Exception {
    // Number of replicas to actually start
    final short numReplicas = 1;
    NameNode namenode = mock(NameNode.class);
    NetworkTopology nettop = mock(NetworkTopology.class);
    Map<String, String[]> pmap = new HashMap<>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    FSNamesystem fsName = mock(FSNamesystem.class);
    FSDirectory fsd = mock(FSDirectory.class);
    BlockManager blockManager = mock(BlockManager.class);
    DatanodeManager dnManager = mock(DatanodeManager.class);
    INodesInPath iip = mock(INodesInPath.class);
    when(namenode.getNamesystem()).thenReturn(fsName);
    when(fsName.getBlockManager()).thenReturn(blockManager);
    when(fsName.getFSDirectory()).thenReturn(fsd);
    when(fsd.getFSNamesystem()).thenReturn(fsName);
    when(fsd.resolvePath(anyObject(), anyString(), any(DirOp.class))).thenReturn(iip);
    when(blockManager.getDatanodeManager()).thenReturn(dnManager);
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    String pathString = "/tmp/testFile";
    long length = 123L;
    boolean isDir = false;
    int blockReplication = 1;
    long blockSize = 128 * 1024L;
    long modTime = 123123123L;
    long accessTime = 123123120L;
    FsPermission perms = FsPermission.getDefault();
    String owner = "foo";
    String group = "bar";
    byte[] symlink = null;
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;
    byte storagePolicy = 0;
    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication, blockSize, modTime, accessTime, perms, owner, group, symlink, path, fileId, numChildren, null, storagePolicy, null);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    try {
        fsck.check(pathString, file, replRes, ecRes);
    } catch (Exception e) {
        fail("Unexpected exception " + e.getMessage());
    }
    assertTrue(replRes.isHealthy());
}
Also used: DirOp(org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp), HashMap(java.util.HashMap), Matchers.anyString(org.mockito.Matchers.anyString), ReplicationResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter(java.io.StringWriter), HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission(org.apache.hadoop.fs.permission.FsPermission), PrintWriter(java.io.PrintWriter), IOException(java.io.IOException), ChecksumException(org.apache.hadoop.fs.ChecksumException), TimeoutException(java.util.concurrent.TimeoutException), UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException(java.io.FileNotFoundException), AccessControlException(org.apache.hadoop.security.AccessControlException), DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), NetworkTopology(org.apache.hadoop.net.NetworkTopology), InetAddress(java.net.InetAddress), Writer(java.io.Writer), HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter), Test(org.junit.Test)
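
The Writer usage to note is the StringWriter/PrintWriter pair: NamenodeFsck writes its report to the PrintWriter, and the test can inspect the buffered text afterwards via result.toString(). A minimal sketch of that capture pattern, with a plain method standing in for the fsck; the helper names are hypothetical.

import java.io.PrintWriter;
import java.io.StringWriter;
import java.io.Writer;

public class CaptureOutputSketch {
    // Stand-in for code that reports through a PrintWriter, like NamenodeFsck.
    static void report(PrintWriter out) {
        out.println("Status: HEALTHY");
        out.println("Missing replicas: 0");
    }

    public static void main(String[] args) {
        Writer result = new StringWriter();
        // autoFlush=true, matching the tests above, so println() output is visible immediately.
        PrintWriter out = new PrintWriter(result, true);
        report(out);
        // The StringWriter buffers everything written; assert on or print it afterwards.
        String captured = result.toString();
        System.out.print(captured);
        assert captured.contains("HEALTHY");
    }
}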

Example 25 with Writer

Use of java.io.Writer in the Apache Hadoop project.

The class TestFsck, method testFsckMissingReplicas.

/**
   * Tests that the numbers of missing and expected block replicas reported
   * by fsck are correct.
   * @throws IOException
   */
@Test
public void testFsckMissingReplicas() throws IOException {
    // Desired replication factor
    // Set this higher than numReplicas so it's under-replicated
    final short replFactor = 2;
    // Number of replicas to actually start
    final short numReplicas = 1;
    // Number of blocks to write
    final short numBlocks = 3;
    // Set a small-ish blocksize
    final long blockSize = 512;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    DistributedFileSystem dfs = null;
    // Startup a minicluster
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numReplicas).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * numBlocks;
    DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1);
    // Set up the fsck against the minicluster's namenode
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager().getDatanodeManager().getNetworkTopology();
    Map<String, String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, numReplicas, remoteAddress);
    // Run the fsck and check the Result
    final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result replRes = new ReplicationResult(conf);
    Result ecRes = new ErasureCodingResult(conf);
    fsck.check(pathString, file, replRes, ecRes);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(replRes.missingReplicas, (numBlocks * replFactor) - (numBlocks * numReplicas));
    assertEquals(replRes.numExpectedReplicas, numBlocks * replFactor);
}
Also used: Path(org.apache.hadoop.fs.Path), MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster), HashMap(java.util.HashMap), Matchers.anyString(org.mockito.Matchers.anyString), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), ReplicationResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult), Result(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result), ErasureCodingResult(org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult), StringWriter(java.io.StringWriter), NetworkTopology(org.apache.hadoop.net.NetworkTopology), HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus), InetAddress(java.net.InetAddress), PrintWriter(java.io.PrintWriter), Writer(java.io.Writer), HostsFileWriter(org.apache.hadoop.hdfs.util.HostsFileWriter), Test(org.junit.Test)
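
To make the closing assertions concrete: with numBlocks = 3, replFactor = 2, and numReplicas = 1, the file should have numBlocks * replFactor = 6 replicas, but only numBlocks * numReplicas = 3 are created, so fsck should report 6 expected and 6 - 3 = 3 missing replicas. A tiny sketch of the same arithmetic:

public class ReplicaMathSketch {
    public static void main(String[] args) {
        // Same constants as the test above.
        final int numBlocks = 3, replFactor = 2, numReplicas = 1;
        int expected = numBlocks * replFactor;   // 6 replicas requested in total
        int actual = numBlocks * numReplicas;    // 3 replicas actually created
        // fsck should therefore report 6 expected and 3 missing replicas.
        System.out.println("expected=" + expected + ", missing=" + (expected - actual));
    }
}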

Aggregations

Writer (java.io.Writer): 1259
OutputStreamWriter (java.io.OutputStreamWriter): 512
IOException (java.io.IOException): 414
StringWriter (java.io.StringWriter): 300
File (java.io.File): 269
FileOutputStream (java.io.FileOutputStream): 196
BufferedWriter (java.io.BufferedWriter): 178
FileWriter (java.io.FileWriter): 174
PrintWriter (java.io.PrintWriter): 159
OutputStream (java.io.OutputStream): 120
Test (org.junit.Test): 109
InputStreamReader (java.io.InputStreamReader): 71
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 64
BufferedReader (java.io.BufferedReader): 62
Reader (java.io.Reader): 62
HashMap (java.util.HashMap): 59
Map (java.util.Map): 59
ArrayList (java.util.ArrayList): 58
InputStream (java.io.InputStream): 54
Properties (java.util.Properties): 39