Use of java.io.FileInputStream in project hadoop by apache.
Class Journal, method getPersistedPaxosData.
/**
 * Retrieve the persisted data for recovering the given segment from disk.
 */
private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId)
    throws IOException {
  File f = storage.getPaxosFile(segmentTxId);
  if (!f.exists()) {
    // Default instance has no fields filled in (they're optional)
    return null;
  }
  InputStream in = new FileInputStream(f);
  try {
    PersistedRecoveryPaxosData ret =
        PersistedRecoveryPaxosData.parseDelimitedFrom(in);
    Preconditions.checkState(
        ret != null && ret.getSegmentState().getStartTxId() == segmentTxId,
        "Bad persisted data for segment %s: %s", segmentTxId, ret);
    return ret;
  } finally {
    IOUtils.closeStream(in);
  }
}
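On Java 7+ the explicit try/finally with IOUtils.closeStream can be replaced by try-with-resources. Below is a minimal sketch of the same shape (missing file returns null, then parse and validate); the single-long file format and the readStartTxId helper are hypothetical stand-ins for the protobuf-delimited PersistedRecoveryPaxosData:

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

class PaxosDataReader {
  /**
   * Reads a single long (the segment's start txid in this toy format) and
   * validates it, returning null when no file exists -- mirroring the
   * missing-file / parse / validate shape of getPersistedPaxosData.
   */
  static Long readStartTxId(File f, long expectedTxId) throws IOException {
    if (!f.exists()) {
      return null; // nothing persisted for this segment
    }
    // try-with-resources closes the stream even if parsing throws
    try (DataInputStream in = new DataInputStream(new FileInputStream(f))) {
      long startTxId = in.readLong();
      if (startTxId != expectedTxId) {
        throw new IllegalStateException("Bad persisted data for segment "
            + expectedTxId + ": " + startTxId);
      }
      return startTxId;
    }
  }
}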
Use of java.io.FileInputStream in project hadoop by apache.
Class TransferFsImage, method writeFileToPutRequest.
private static void writeFileToPutRequest(Configuration conf,
    HttpURLConnection connection, File imageFile, Canceler canceler)
    throws IOException {
  connection.setRequestProperty(Util.CONTENT_TYPE, "application/octet-stream");
  connection.setRequestProperty(Util.CONTENT_TRANSFER_ENCODING, "binary");
  OutputStream output = connection.getOutputStream();
  FileInputStream input = new FileInputStream(imageFile);
  try {
    copyFileToStream(output, imageFile, input,
        ImageServlet.getThrottler(conf), canceler);
  } finally {
    IOUtils.closeStream(input);
    IOUtils.closeStream(output);
  }
}
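The same upload shape can be reproduced with only JDK classes. A minimal sketch, assuming the server accepts a chunked PUT of raw bytes; putFile, the 64 KB buffer size, and the expected 200 response are illustrative, and it omits the throttling and cancellation that copyFileToStream provides:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;

class ImagePut {
  static void putFile(URL url, File file) throws IOException {
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setDoOutput(true);
    conn.setRequestMethod("PUT");
    conn.setRequestProperty("Content-Type", "application/octet-stream");
    // Stream the body instead of buffering the whole image in memory
    conn.setChunkedStreamingMode(64 * 1024);
    try (InputStream in = new FileInputStream(file);
         OutputStream out = conn.getOutputStream()) {
      byte[] buf = new byte[64 * 1024];
      int n;
      while ((n = in.read(buf)) != -1) {
        out.write(buf, 0, n);
      }
    }
    int code = conn.getResponseCode(); // forces the request to complete
    if (code != HttpURLConnection.HTTP_OK) {
      throw new IOException("PUT failed with HTTP " + code);
    }
  }
}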
Use of java.io.FileInputStream in project hadoop by apache.
Class TestFSEditLogLoader, method getNonTrailerLength.
/**
 * Return the number of bytes in the given file, excluding the trailer
 * of 0xFF (OP_INVALID) bytes.
 * This seeks to the end of the file and reads chunks backwards until
 * it finds a non-0xFF byte.
 * @throws IOException if the file cannot be read
 */
private static long getNonTrailerLength(File f) throws IOException {
  final int chunkSizeToRead = 256 * 1024;
  FileInputStream fis = new FileInputStream(f);
  try {
    byte[] buf = new byte[chunkSizeToRead];
    FileChannel fc = fis.getChannel();
    long size = fc.size();
    long pos = size - (size % chunkSizeToRead);
    while (pos >= 0) {
      fc.position(pos);
      int readLen = (int) Math.min(size - pos, chunkSizeToRead);
      IOUtils.readFully(fis, buf, 0, readLen);
      for (int i = readLen - 1; i >= 0; i--) {
        if (buf[i] != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
          // + 1 since we count this byte!
          return pos + i + 1;
        }
      }
      pos -= chunkSizeToRead;
    }
    return 0;
  } finally {
    fis.close();
  }
}
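The chunked reverse scan generalizes to trimming any trailing filler byte. A self-contained sketch under that assumption; lengthWithoutTrailer is a hypothetical helper and the 64 KB chunk size is illustrative, with the filler parameter playing the role of OP_INVALID's opcode:

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

class TrailerScan {
  /** Length of the file after stripping a trailing run of filler bytes. */
  static long lengthWithoutTrailer(File f, byte filler) throws IOException {
    final int chunk = 64 * 1024;
    try (FileInputStream fis = new FileInputStream(f)) {
      FileChannel fc = fis.getChannel(); // shares its position with fis
      long size = fc.size();
      byte[] buf = new byte[chunk];
      // Start at the last chunk-aligned position and walk backwards
      for (long pos = size - (size % chunk); pos >= 0; pos -= chunk) {
        fc.position(pos);
        int len = (int) Math.min(size - pos, chunk);
        int off = 0;
        while (off < len) { // read() may return short counts; read fully
          int n = fis.read(buf, off, len - off);
          if (n < 0) {
            throw new IOException("unexpected EOF at " + (pos + off));
          }
          off += n;
        }
        for (int i = len - 1; i >= 0; i--) {
          if (buf[i] != filler) {
            return pos + i + 1; // +1: this byte is part of the content
          }
        }
      }
      return 0; // file is entirely filler (or empty)
    }
  }
}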
Use of java.io.FileInputStream in project hadoop by apache.
Class TestCredentials, method testReadWriteStorage.
@SuppressWarnings("unchecked")
@Test
public <T extends TokenIdentifier> void testReadWriteStorage()
    throws IOException, NoSuchAlgorithmException {
  // create tokenStorage Object
  Credentials ts = new Credentials();
  Token<T> token1 = new Token();
  Token<T> token2 = new Token();
  Text service1 = new Text("service1");
  Text service2 = new Text("service2");
  Collection<Text> services = new ArrayList<Text>();
  services.add(service1);
  services.add(service2);
  token1.setService(service1);
  token2.setService(service2);
  ts.addToken(new Text("sometoken1"), token1);
  ts.addToken(new Text("sometoken2"), token2);
  // create keys and put them in
  final KeyGenerator kg = KeyGenerator.getInstance(DEFAULT_HMAC_ALGORITHM);
  String alias = "alias";
  Map<Text, byte[]> m = new HashMap<Text, byte[]>(10);
  for (int i = 0; i < 10; i++) {
    Key key = kg.generateKey();
    m.put(new Text(alias + i), key.getEncoded());
    ts.addSecretKey(new Text(alias + i), key.getEncoded());
  }
  // create a file to store the credentials
  File tmpFileName = new File(tmpDir, "tokenStorageTest");
  DataOutputStream dos =
      new DataOutputStream(new FileOutputStream(tmpFileName));
  ts.write(dos);
  dos.close();
  // open and read it back
  DataInputStream dis =
      new DataInputStream(new FileInputStream(tmpFileName));
  ts = new Credentials();
  ts.readFields(dis);
  dis.close();
  // get the tokens and compare the services
  Collection<Token<? extends TokenIdentifier>> list = ts.getAllTokens();
  // note: the expected value goes first in assertEquals
  assertEquals("getAllTokens should return collection of size 2",
      2, list.size());
  boolean foundFirst = false;
  boolean foundSecond = false;
  for (Token<? extends TokenIdentifier> token : list) {
    if (token.getService().equals(service1)) {
      foundFirst = true;
    }
    if (token.getService().equals(service2)) {
      foundSecond = true;
    }
  }
  assertTrue("Tokens for services service1 and service2 must be present",
      foundFirst && foundSecond);
  // compare secret keys
  int mapLen = m.size();
  assertEquals("wrong number of keys in the Storage",
      mapLen, ts.numberOfSecretKeys());
  for (Text a : m.keySet()) {
    byte[] kTS = ts.getSecretKey(a);
    byte[] kLocal = m.get(a);
    assertTrue("keys don't match for " + a,
        WritableComparator.compareBytes(kTS, 0, kTS.length,
            kLocal, 0, kLocal.length) == 0);
  }
  tmpFileName.delete();
}
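The write-then-read-back pattern in this test is not specific to Credentials. Below is a JDK-only sketch of the same round trip for a map of named secret keys; the record layout (count, then name/length/bytes per entry) is invented for illustration:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;
import javax.crypto.KeyGenerator;

class RoundTrip {
  public static void main(String[] args) throws Exception {
    // generate a few named secret keys, as the test does
    KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1");
    Map<String, byte[]> keys = new LinkedHashMap<>();
    for (int i = 0; i < 10; i++) {
      keys.put("alias" + i, kg.generateKey().getEncoded());
    }
    File tmp = File.createTempFile("tokenStorageTest", null);
    // write: count, then (name, length, bytes) per entry
    try (DataOutputStream dos =
        new DataOutputStream(new FileOutputStream(tmp))) {
      dos.writeInt(keys.size());
      for (Map.Entry<String, byte[]> e : keys.entrySet()) {
        dos.writeUTF(e.getKey());
        dos.writeInt(e.getValue().length);
        dos.write(e.getValue());
      }
    }
    // read it back and verify byte-for-byte equality
    try (DataInputStream dis =
        new DataInputStream(new FileInputStream(tmp))) {
      int n = dis.readInt();
      for (int i = 0; i < n; i++) {
        String name = dis.readUTF();
        byte[] data = new byte[dis.readInt()];
        dis.readFully(data);
        if (!Arrays.equals(data, keys.get(name))) {
          throw new AssertionError("keys don't match for " + name);
        }
      }
    }
    tmp.delete();
  }
}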
Use of java.io.FileInputStream in project hadoop by apache.
Class BlockSender, method sendPacket.
/**
 * Sends a packet with up to maxChunks chunks of data.
 *
 * @param pkt buffer used for writing packet data
 * @param maxChunks maximum number of chunks to send
 * @param out stream to send data to
 * @param transferTo use transferTo to send data
 * @param throttler used for throttling data transfer bandwidth
 * @return the number of data bytes sent in this packet
 */
private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out,
    boolean transferTo, DataTransferThrottler throttler) throws IOException {
  int dataLen = (int) Math.min(endOffset - offset,
      (chunkSize * (long) maxChunks));
  // Number of chunks to be sent in the packet
  int numChunks = numberOfChunks(dataLen);
  int checksumDataLen = numChunks * checksumSize;
  int packetLen = dataLen + checksumDataLen + 4;
  boolean lastDataPacket = offset + dataLen == endOffset && dataLen > 0;
  // The packet buffer is organized as follows:
  // _______HHHHCCCCD?D?D?D?
  //        ^   ^
  //        |   \ checksumOff
  //        \ headerOff
  // _ padding, since the header is variable-length
  // H = header and length prefixes
  // C = checksums
  // D? = data, if transferTo is false.
  int headerLen = writePacketHeader(pkt, dataLen, packetLen);
  // Per above, the header doesn't start at the beginning of the buffer
  int headerOff = pkt.position() - headerLen;
  int checksumOff = pkt.position();
  byte[] buf = pkt.array();
  if (checksumSize > 0 && ris.getChecksumIn() != null) {
    readChecksum(buf, checksumOff, checksumDataLen);
    // for a block with a write in progress, the last chunk's checksum is
    // kept in memory; use it instead of the possibly stale on-disk value
    if (lastDataPacket && lastChunkChecksum != null) {
      int start = checksumOff + checksumDataLen - checksumSize;
      byte[] updatedChecksum = lastChunkChecksum.getChecksum();
      if (updatedChecksum != null) {
        System.arraycopy(updatedChecksum, 0, buf, start, checksumSize);
      }
    }
  }
  int dataOff = checksumOff + checksumDataLen;
  if (!transferTo) {
    // normal transfer
    ris.readDataFully(buf, dataOff, dataLen);
    if (verifyChecksum) {
      verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff);
    }
  }
  try {
    if (transferTo) {
      SocketOutputStream sockOut = (SocketOutputStream) out;
      // First write header and checksums
      sockOut.write(buf, headerOff, dataOff - headerOff);
      // no need to flush since we know out is not a buffered stream
      FileChannel fileCh = ((FileInputStream) ris.getDataIn()).getChannel();
      LongWritable waitTime = new LongWritable();
      LongWritable transferTime = new LongWritable();
      fileIoProvider.transferToSocketFully(ris.getVolumeRef().getVolume(),
          sockOut, fileCh, blockInPosition, dataLen, waitTime, transferTime);
      datanode.metrics.addSendDataPacketBlockedOnNetworkNanos(waitTime.get());
      datanode.metrics.addSendDataPacketTransferNanos(transferTime.get());
      blockInPosition += dataLen;
    } else {
      // normal transfer
      out.write(buf, headerOff, dataOff + dataLen - headerOff);
    }
  } catch (IOException e) {
    if (e instanceof SocketTimeoutException) {
      /*
       * Writing to the client timed out. This happens if the client reads
       * part of a block and then decides not to read the rest (but leaves
       * the socket open).
       *
       * Reporting of this case is done in DataXceiver#run.
       */
    } else {
      /* Exception while writing to the client. Connection closure from
       * the other end is mostly the case and we do not care much about
       * it. But other things can go wrong, especially in transferTo(),
       * which we do not want to ignore.
       *
       * The message parsing below should not be considered as a good
       * coding example. NEVER do it to drive program logic. NEVER.
       * It was done here because NIO throws an IOException for EPIPE.
       */
      String ioem = e.getMessage();
      // getMessage() may return null; guard before matching on it
      if (ioem != null && !ioem.startsWith("Broken pipe")
          && !ioem.startsWith("Connection reset")) {
        LOG.error("BlockSender.sendChunks() exception: ", e);
        datanode.getBlockScanner().markSuspectBlock(
            ris.getVolumeRef().getVolume().getStorageID(), block);
      }
    }
    throw ioeToSocketException(e);
  }
  if (throttler != null) {
    // rebalancing so throttle
    throttler.throttle(packetLen);
  }
  return dataLen;
}
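The transferTo branch above ultimately relies on FileChannel.transferTo, which lets the kernel move file pages to the socket without copying them through user space. A minimal, self-contained sketch of that zero-copy pattern; the host, port, and retry loop are illustrative and omit Hadoop's throttling and metrics:

import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.nio.channels.SocketChannel;

class ZeroCopySend {
  static void send(String host, int port, String path) throws IOException {
    try (SocketChannel sock =
             SocketChannel.open(new InetSocketAddress(host, port));
         FileInputStream fis = new FileInputStream(path)) {
      FileChannel fileCh = fis.getChannel();
      long pos = 0;
      long size = fileCh.size();
      // transferTo may send fewer bytes than requested, so loop
      while (pos < size) {
        pos += fileCh.transferTo(pos, size - pos, sock);
      }
    }
  }
}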