Example 76 with FileInputStream

Use of java.io.FileInputStream in project hadoop by apache.

From the class Journal, method getPersistedPaxosData.

/**
   * Retrieve the persisted data for recovering the given segment from disk.
   */
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId) throws IOException {
    File f = storage.getPaxosFile(segmentTxId);
    if (!f.exists()) {
        // Default instance has no fields filled in (they're optional)
        return null;
    }
    InputStream in = new FileInputStream(f);
    try {
        PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in);
        Preconditions.checkState(ret != null && ret.getSegmentState().getStartTxId() == segmentTxId, "Bad persisted data for segment %s: %s", segmentTxId, ret);
        return ret;
    } finally {
        IOUtils.closeStream(in);
    }
}
Also used: PersistedRecoveryPaxosData(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) FileInputStream(java.io.FileInputStream) InputStream(java.io.InputStream) EditLogFile(org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile) PersistentLongFile(org.apache.hadoop.hdfs.util.PersistentLongFile) BestEffortLongFile(org.apache.hadoop.hdfs.util.BestEffortLongFile) File(java.io.File)
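
The read above relies on protobuf's length-delimited encoding: parseDelimitedFrom pairs with writeDelimitedTo on the write side. A minimal sketch of that write side, assuming direct file access via java.io.FileOutputStream (the helper name persistPaxosData is illustrative, not the Hadoop source, which may write through additional helpers):

private void persistPaxosData(File f, PersistedRecoveryPaxosData data) throws IOException {
    // writeDelimitedTo prefixes the message with its length, which is
    // exactly what parseDelimitedFrom in the method above expects to find
    try (OutputStream out = new FileOutputStream(f)) {
        data.writeDelimitedTo(out);
    }
}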

Example 77 with FileInputStream

Use of java.io.FileInputStream in project hadoop by apache.

From the class TransferFsImage, method writeFileToPutRequest.

private static void writeFileToPutRequest(Configuration conf, HttpURLConnection connection, File imageFile, Canceler canceler) throws IOException {
    connection.setRequestProperty(Util.CONTENT_TYPE, "application/octet-stream");
    connection.setRequestProperty(Util.CONTENT_TRANSFER_ENCODING, "binary");
    OutputStream output = connection.getOutputStream();
    FileInputStream input = new FileInputStream(imageFile);
    try {
        copyFileToStream(output, imageFile, input, ImageServlet.getThrottler(conf), canceler);
    } finally {
        IOUtils.closeStream(input);
        IOUtils.closeStream(output);
    }
}
Also used: OutputStream(java.io.OutputStream) FileInputStream(java.io.FileInputStream)
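
Outside Hadoop, the same pattern (streaming a file as the body of an HTTP PUT) can be written in plain Java with java.net.HttpURLConnection. A self-contained sketch; the method name, buffer size, and use of fixed-length streaming mode are illustrative choices, not taken from TransferFsImage:

static int putFile(URL url, File file) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    conn.setDoOutput(true);
    conn.setRequestProperty("Content-Type", "application/octet-stream");
    // stream the body instead of buffering it all in memory
    conn.setFixedLengthStreamingMode(file.length());
    try (InputStream in = new FileInputStream(file);
         OutputStream out = conn.getOutputStream()) {
        byte[] buf = new byte[64 * 1024];
        int n;
        while ((n = in.read(buf)) != -1) {
            out.write(buf, 0, n);
        }
    }
    return conn.getResponseCode();
}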

Example 78 with FileInputStream

Use of java.io.FileInputStream in project hadoop by apache.

From the class TestFSEditLogLoader, method getNonTrailerLength.

/**
   * Return the length in bytes of the given file after subtracting
   * the trailer of 0xFF (OP_INVALID) bytes.
   * This seeks to the end of the file and reads chunks backwards until
   * it finds a non-0xFF byte.
   * @throws IOException if the file cannot be read
   */
private static long getNonTrailerLength(File f) throws IOException {
    final int chunkSizeToRead = 256 * 1024;
    FileInputStream fis = new FileInputStream(f);
    try {
        byte[] buf = new byte[chunkSizeToRead];
        FileChannel fc = fis.getChannel();
        long size = fc.size();
        long pos = size - (size % chunkSizeToRead);
        while (pos >= 0) {
            fc.position(pos);
            int readLen = (int) Math.min(size - pos, chunkSizeToRead);
            IOUtils.readFully(fis, buf, 0, readLen);
            for (int i = readLen - 1; i >= 0; i--) {
                if (buf[i] != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
                    // + 1 since we count this byte!
                    return pos + i + 1;
                }
            }
            pos -= chunkSizeToRead;
        }
        return 0;
    } finally {
        fis.close();
    }
}
Also used: FileChannel(java.nio.channels.FileChannel) FileInputStream(java.io.FileInputStream)
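
The backward-scan technique generalizes to any trailing pad byte. A sketch under that assumption (the method name lengthWithoutTrailer and the 64 KB chunk size are illustrative). It leans on the same property as the code above: repositioning the FileChannel also moves the FileInputStream's read position, so the stream can be read in chunks from arbitrary offsets:

static long lengthWithoutTrailer(File f, byte padByte) throws IOException {
    final int chunkSize = 64 * 1024;
    try (FileInputStream fis = new FileInputStream(f)) {
        FileChannel fc = fis.getChannel();
        byte[] buf = new byte[chunkSize];
        long size = fc.size();
        for (long pos = size - (size % chunkSize); pos >= 0; pos -= chunkSize) {
            // moving the channel moves the stream's read position too
            fc.position(pos);
            int readLen = (int) Math.min(size - pos, chunkSize);
            int off = 0;
            while (off < readLen) {
                int n = fis.read(buf, off, readLen - off);
                if (n < 0) {
                    throw new IOException("Unexpected EOF at " + (pos + off));
                }
                off += n;
            }
            for (int i = readLen - 1; i >= 0; i--) {
                if (buf[i] != padByte) {
                    return pos + i + 1; // + 1 since this byte counts
                }
            }
        }
        return 0; // empty file, or nothing but padding
    }
}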

Example 79 with FileInputStream

Use of java.io.FileInputStream in project hadoop by apache.

From the class TestCredentials, method testReadWriteStorage.

@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testReadWriteStorage() throws IOException, NoSuchAlgorithmException {
    // create tokenStorage Object
    Credentials ts = new Credentials();
    Token<T> token1 = new Token<T>();
    Token<T> token2 = new Token<T>();
    Text service1 = new Text("service1");
    Text service2 = new Text("service2");
    Collection<Text> services = new ArrayList<Text>();
    services.add(service1);
    services.add(service2);
    token1.setService(service1);
    token2.setService(service2);
    ts.addToken(new Text("sometoken1"), token1);
    ts.addToken(new Text("sometoken2"), token2);
    // create keys and put it in
    final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
    String alias = "alias";
    Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
    for (int i = 0; i < 10; i++) {
        Key key = kg.generateKey();
        m.put(new Text(alias + i), key.getEncoded());
        ts.addSecretKey(new Text(alias + i), key.getEncoded());
    }
    // create file to store
    File tmpFileName = new File(tmpDir, "tokenStorageTest");
    DataOutputStream dos = new DataOutputStream(new FileOutputStream(tmpFileName));
    ts.write(dos);
    dos.close();
    // open and read it back
    DataInputStream dis = new DataInputStream(new FileInputStream(tmpFileName));
    ts = new Credentials();
    ts.readFields(dis);
    dis.close();
    // get the tokens and compare the services
    Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
    assertEquals("getAllTokens should return collection of size 2", list.size(), 2);
    boolean foundFirst = false;
    boolean foundSecond = false;
    for (Token<? extends TokenIdentifier> token : list) {
        if (token.getService().equals(service1)) {
            foundFirst = true;
        }
        if (token.getService().equals(service2)) {
            foundSecond = true;
        }
    }
    assertTrue("Tokens for services service1 and service2 must be present", foundFirst && foundSecond);
    // compare secret keys
    int mapLen = m.size();
    assertEquals("wrong number of keys in the Storage", mapLen, ts.numberOfSecretKeys());
    for (Text a : m.keySet()) {
        byte[] kTS = ts.getSecretKey(a);
        byte[] kLocal = m.get(a);
        assertTrue("keys don't match for " + a, WritableComparator.compareBytes(kTS, 0, kTS.length, kLocal, 0, kLocal.length) == 0);
    }
    tmpFileName.delete();
}
Also used: TokenIdentifier(org.apache.hadoop.security.token.TokenIdentifier) HashMap(java.util.HashMap) DataOutputStream(java.io.DataOutputStream) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.security.token.Token) Text(org.apache.hadoop.io.Text) DataInputStream(java.io.DataInputStream) FileInputStream(java.io.FileInputStream) FileOutputStream(java.io.FileOutputStream) KeyGenerator(javax.crypto.KeyGenerator) File(java.io.File) Credentials(org.apache.hadoop.security.Credentials) Key(java.security.Key) Test(org.junit.Test)
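
The write-then-read portion of the test is an instance of Hadoop's generic Writable round-trip: write(DataOutput) followed by readFields(DataInput) on a fresh instance. A minimal sketch of that pattern as a reusable helper (the name roundTrip is hypothetical); since Credentials implements Writable, the test's serialization step amounts to roundTrip(ts, restored, tmpFileName):

static void roundTrip(Writable written, Writable readBack, File f) throws IOException {
    // serialize to the file...
    try (DataOutputStream dos = new DataOutputStream(new FileOutputStream(f))) {
        written.write(dos);
    }
    // ...then populate a fresh instance from it
    try (DataInputStream dis = new DataInputStream(new FileInputStream(f))) {
        readBack.readFields(dis);
    }
}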

Example 80 with FileInputStream

Use of java.io.FileInputStream in project hadoop by apache.

From the class BlockSender, method sendPacket.

/**
   * Sends a packet with up to maxChunks chunks of data.
   * 
   * @param pkt buffer used for writing packet data
   * @param maxChunks maximum number of chunks to send
   * @param out stream to send data to
   * @param transferTo use transferTo to send data
   * @param throttler used for throttling data transfer bandwidth
   */
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, boolean transferTo, DataTransferThrottler throttler) throws IOException {
    int dataLen = (int) Math.min(endOffset - offset, (chunkSize * (long) maxChunks));
    // Number of chunks to be sent in the packet
    int numChunks = numberOfChunks(dataLen);
    int checksumDataLen = numChunks * checksumSize;
    int packetLen = dataLen + checksumDataLen + 4;
    boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
    // The packet buffer is organized as follows:
    // _______HHHHCCCCD?D?D?D?
    //        ^   ^
    //        |   \ checksumOff
    //        \ headerOff
    // _ padding, since the header is variable-length
    // H = header and length prefixes
    // C = checksums
    // D? = data, if transferTo is false.
    int headerLen = writePacketHeader(pkt, dataLen, packetLen);
    // Per above, the header doesn't start at the beginning of the
    // buffer
    int headerOff = pkt.position() - headerLen;
    int checksumOff = pkt.position();
    byte[] buf = pkt.array();
    if (checksumSize > 0 && ris.getChecksumIn() != null) {
        readChecksum(buf, checksumOff, checksumDataLen);
        // a write may still be in progress on this block; if so, overwrite
        // the final checksum with the up-to-date value for the last chunk
        if (lastDataPacket && lastChunkChecksum != null) {
            int start = checksumOff + checksumDataLen - checksumSize;
            byte[] updatedChecksum = lastChunkChecksum.getChecksum();
            if (updatedChecksum != null) {
                System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
            }
        }
    }
    int dataOff = checksumOff + checksumDataLen;
    if (!transferTo) {
        // normal transfer
        ris.readDataFully(buf, dataOff, dataLen);
        if (verifyChecksum) {
            verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
        }
    }
    try {
        if (transferTo) {
            SocketOutputStream sockOut = (SocketOutputStream) out;
            // First write header and checksums
            sockOut.write(buf, headerOff, dataOff - headerOff);
            // no need to flush since we know out is not a buffered stream
            FileChannel fileCh = ((FileInputStream) ris.getDataIn()).getChannel();
            LongWritable waitTime = new LongWritable();
            LongWritable transferTime = new LongWritable();
            fileIoProvider.transferToSocketFully(ris.getVolumeRef().getVolume(), sockOut, fileCh, blockInPosition, dataLen, waitTime, transferTime);
            datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
            datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
            blockInPosition += dataLen;
        } else {
            // normal transfer
            out.write(buf, headerOff, dataOff + dataLen - headerOff);
        }
    } catch (IOException e) {
        if (e instanceof SocketTimeoutException) {
        /*
         * writing to client timed out.  This happens if the client reads
         * part of a block and then decides not to read the rest (but leaves
         * the socket open).
         * 
         * Reporting of this case is done in DataXceiver#run
         */
        } else {
            /* Exception while writing to the client. Connection closure from
         * the other end is mostly the case and we do not care much about
         * it. But other things can go wrong, especially in transferTo(),
         * which we do not want to ignore.
         *
         * The message parsing below should not be considered as a good
         * coding example. NEVER do it to drive a program logic. NEVER.
         * It was done here because the NIO throws an IOException for EPIPE.
         */
            String ioem = e.getMessage();
            // getMessage() may be null; guard before matching on it
            if (ioem == null || (!ioem.startsWith("Broken pipe") && !ioem.startsWith("Connection reset"))) {
                LOG.error("BlockSender.sendChunks() exception: ", e);
                datanode.getBlockScanner().markSuspectBlock(ris.getVolumeRef().getVolume().getStorageID(), block);
            }
        }
        throw ioeToSocketException(e);
    }
    if (throttler != null) {
        // throttler is set for bandwidth-limited transfers (e.g. rebalancing)
        throttler.throttle(packetLen);
    }
    return dataLen;
}
Also used: SocketOutputStream(org.apache.hadoop.net.SocketOutputStream) SocketTimeoutException(java.net.SocketTimeoutException) FileChannel(java.nio.channels.FileChannel) LongWritable(org.apache.hadoop.io.LongWritable) IOException(java.io.IOException) FileInputStream(java.io.FileInputStream)
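
The transferTo branch delegates to fileIoProvider, but the underlying mechanism is java.nio's FileChannel.transferTo, which hands bytes from the file to the socket without copying them through user space. A sketch of the core loop, assuming a blocking socket channel (the helper name transferFully is illustrative; the loop is needed because transferTo may send fewer bytes than requested):

static void transferFully(FileChannel fileCh, WritableByteChannel sock,
        long position, long count) throws IOException {
    long sent = 0;
    while (sent < count) {
        // zero-copy: the kernel moves the bytes directly to the socket
        long n = fileCh.transferTo(position + sent, count - sent, sock);
        if (n <= 0) {
            // on a blocking channel a non-positive return signals trouble
            throw new IOException("transferTo stalled after " + sent + " bytes");
        }
        sent += n;
    }
}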

Aggregations

FileInputStream (java.io.FileInputStream): 5931
File (java.io.File): 2701
IOException (java.io.IOException): 2699
InputStream (java.io.InputStream): 1514
FileOutputStream (java.io.FileOutputStream): 922
FileNotFoundException (java.io.FileNotFoundException): 876
BufferedInputStream (java.io.BufferedInputStream): 744
InputStreamReader (java.io.InputStreamReader): 680
BufferedReader (java.io.BufferedReader): 558
Properties (java.util.Properties): 552
Test (org.junit.Test): 543
ArrayList (java.util.ArrayList): 320
DataInputStream (java.io.DataInputStream): 288
OutputStream (java.io.OutputStream): 273
ByteArrayInputStream (java.io.ByteArrayInputStream): 238
ZipEntry (java.util.zip.ZipEntry): 221
XmlPullParserException (org.xmlpull.v1.XmlPullParserException): 200
HashMap (java.util.HashMap): 195
XmlPullParser (org.xmlpull.v1.XmlPullParser): 184
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 177