Example usage of java.io.FileInputStream in the Apache Hadoop project: class SysInfoLinux, method readDiskBlockInformation.
/**
 * Reads {@code /sys/block/<diskName>/queue/hw_sector_size}, parses it and
 * returns the sector size for the given disk.
 *
 * @param diskName  name of the disk as it appears under {@code /sys/block}
 * @param defSector fallback sector size to use when the file is missing,
 *                  unreadable, or contains no parsable value
 * @return sector size of the specified disk, or {@code defSector} on failure
 */
int readDiskBlockInformation(String diskName, int defSector) {
  assert perDiskSectorSize != null && diskName != null;
  String procfsDiskSectorFile =
      "/sys/block/" + diskName + "/queue/hw_sector_size";
  // try-with-resources guarantees the reader is closed on every exit path,
  // replacing the previous manual close in a finally block.
  try (BufferedReader in = new BufferedReader(new InputStreamReader(
      new FileInputStream(procfsDiskSectorFile), Charset.forName("UTF-8")))) {
    String str = in.readLine();
    while (str != null) {
      Matcher mat = PROCFS_DISKSECTORFILE_FORMAT.matcher(str);
      if (mat.find()) {
        String secSize = mat.group(1);
        if (secSize != null) {
          // First matching line wins; the value is the sector size in bytes.
          return Integer.parseInt(secSize);
        }
      }
      str = in.readLine();
    }
    // File existed but contained no line matching the expected format.
    return defSector;
  } catch (FileNotFoundException f) {
    // Some disks do not expose hw_sector_size; fall back silently.
    return defSector;
  } catch (IOException | NumberFormatException e) {
    LOG.warn("Error reading the stream " + procfsDiskSectorFile, e);
    return defSector;
  }
}
Example usage of java.io.FileInputStream in the Apache Flink project: class BlobServerConnection, method get.
// --------------------------------------------------------------------------------------------
// Actions
// --------------------------------------------------------------------------------------------
/**
 * Handles an incoming GET request from a BLOB client.
 *
 * <p>The requested BLOB is resolved either by (job ID, key) for
 * name-addressable requests or by a BlobKey for content-addressable
 * requests, fetched into the local storage location if not already
 * present, and then streamed back to the client.
 *
 * @param inputStream
 *        the input stream to read incoming data from
 * @param outputStream
 *        the output stream to send data back to the client
 * @param buf
 *        an auxiliary buffer for data serialization/deserialization
 * @throws IOException
 *         thrown if an I/O error occurs while reading/writing data from/to the respective streams
 */
private void get(InputStream inputStream, OutputStream outputStream, byte[] buf) throws IOException {
    /**
     * Retrieve the file from the (distributed?) BLOB store and store it
     * locally, then send it to the service which requested it.
     *
     * Instead, we could send it from the distributed store directly but
     * chances are high that if there is one request, there will be more
     * so a local cache makes more sense.
     */
    File blobFile;
    try {
        // The first byte of the request selects the addressing mode.
        final int contentAddressable = inputStream.read();
        if (contentAddressable < 0) {
            throw new EOFException("Premature end of GET request");
        }
        if (contentAddressable == NAME_ADDRESSABLE) {
            // Receive the job ID and key
            byte[] jidBytes = new byte[JobID.SIZE];
            readFully(inputStream, jidBytes, 0, JobID.SIZE, "JobID");
            JobID jobID = JobID.fromByteArray(jidBytes);
            String key = readKey(buf, inputStream);
            blobFile = this.blobServer.getStorageLocation(jobID, key);
            // Pull the BLOB from the backing store into the local cache if absent.
            if (!blobFile.exists()) {
                blobStore.get(jobID, key, blobFile);
            }
        } else if (contentAddressable == CONTENT_ADDRESSABLE) {
            final BlobKey key = BlobKey.readFromInputStream(inputStream);
            blobFile = blobServer.getStorageLocation(key);
            if (!blobFile.exists()) {
                blobStore.get(key, blobFile);
            }
        } else {
            throw new IOException("Unknown type of BLOB addressing.");
        }
        // Check if BLOB exists
        if (!blobFile.exists()) {
            throw new IOException("Cannot find required BLOB at " + blobFile.getAbsolutePath());
        }
        // The length is transmitted as a signed 32-bit int below, so larger
        // files cannot be served over this protocol.
        if (blobFile.length() > Integer.MAX_VALUE) {
            throw new IOException("BLOB size exceeds the maximum size (2 GB).");
        }
        outputStream.write(RETURN_OKAY);
        // up to here, an error can give a good message
    } catch (Throwable t) {
        LOG.error("GET operation failed", t);
        try {
            writeErrorToStream(outputStream, t);
        } catch (IOException e) {
            // since we are in an exception case, it means not much that we could not send the error
            // ignore this
        }
        clientSocket.close();
        return;
    }
    // from here on, we started sending data, so all we can do is close the connection when something happens
    try {
        int blobLen = (int) blobFile.length();
        writeLength(blobLen, outputStream);
        // try-with-resources closes the file stream even if the socket write fails.
        try (FileInputStream fis = new FileInputStream(blobFile)) {
            int bytesRemaining = blobLen;
            while (bytesRemaining > 0) {
                int read = fis.read(buf);
                if (read < 0) {
                    // File shrank between the length check and the copy.
                    throw new IOException("Premature end of BLOB file stream for " + blobFile.getAbsolutePath());
                }
                outputStream.write(buf, 0, read);
                bytesRemaining -= read;
            }
        }
    } catch (SocketException e) {
        // happens when the other side disconnects
        LOG.debug("Socket connection closed", e);
    } catch (Throwable t) {
        LOG.error("GET operation failed", t);
        clientSocket.close();
    }
}
Example usage of java.io.FileInputStream in the Apache Flink project: class BlobClientSslTest, method testRegularStream.
/**
 * Tests the PUT/GET operations for regular (non-content-addressable) streams.
 */
@Test
public void testRegularStream() {
    final JobID jobId = JobID.generate();
    final String storageKey = "testkey3";
    try {
        final File dataFile = File.createTempFile("testfile", ".dat");
        dataFile.deleteOnExit();
        prepareTestFile(dataFile);
        BlobClient blobClient = null;
        InputStream stream = null;
        try {
            final InetSocketAddress serverAddress =
                new InetSocketAddress("localhost", BLOB_SSL_SERVER.getPort());
            blobClient = new BlobClient(serverAddress, sslClientConfig);
            // Upload the file contents under (jobId, storageKey).
            stream = new FileInputStream(dataFile);
            blobClient.put(jobId, storageKey, stream);
            stream.close();
            stream = null;
            // Download it again and compare against the original file.
            stream = blobClient.get(jobId, storageKey);
            validateGet(stream, dataFile);
        } finally {
            if (stream != null) {
                stream.close();
            }
            if (blobClient != null) {
                blobClient.close();
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example usage of java.io.FileInputStream in the Apache Flink project: class BlobClientTest, method validateGet.
/**
 * Validates the result of a GET operation by comparing the data from the retrieved input stream to the content of
 * the specified file.
 *
 * @param inputStream
 *        the input stream returned from the GET operation
 * @param file
 *        the file to compare the input stream's data to
 * @throws IOException
 *         thrown if an I/O error occurs while reading the input stream or the file
 */
private static void validateGet(final InputStream inputStream, final File file) throws IOException {
    // try-with-resources closes the file stream even when an assertion or a
    // read fails (previously done with a null-guarded finally block).
    try (InputStream expectedStream = new FileInputStream(file)) {
        while (true) {
            final int r1 = inputStream.read();
            final int r2 = expectedStream.read();
            // Expected value comes from the file, actual from the GET stream;
            // both must hit end-of-stream (-1) at the same position.
            assertEquals(r2, r1);
            if (r1 < 0) {
                break;
            }
        }
    }
}
Example usage of java.io.FileInputStream in the Apache Flink project: class BlobClientTest, method testContentAddressableStream.
/**
 * Tests the PUT/GET operations for content-addressable streams.
 */
@Test
public void testContentAddressableStream() {
    BlobClient blobClient = null;
    InputStream stream = null;
    try {
        File dataFile = File.createTempFile("testfile", ".dat");
        dataFile.deleteOnExit();
        BlobKey expectedKey = prepareTestFile(dataFile);
        InetSocketAddress serverAddress =
            new InetSocketAddress("localhost", BLOB_SERVER.getPort());
        blobClient = new BlobClient(serverAddress, blobServiceConfig);
        // Upload the file; the server derives the BLOB key from the content.
        stream = new FileInputStream(dataFile);
        BlobKey actualKey = blobClient.put(stream);
        assertEquals(expectedKey, actualKey);
        stream.close();
        stream = null;
        // Fetch the BLOB back by its key and compare with the original file.
        stream = blobClient.get(actualKey);
        validateGet(stream, dataFile);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    } finally {
        if (stream != null) {
            try {
                stream.close();
            } catch (Throwable ignored) {
                // best-effort cleanup; failure is irrelevant to the test outcome
            }
        }
        if (blobClient != null) {
            try {
                blobClient.close();
            } catch (Throwable ignored) {
                // best-effort cleanup; failure is irrelevant to the test outcome
            }
        }
    }
}
Aggregations