Use of org.apache.hadoop.security.AccessControlException in project incubator-crail by apache.
The class CrailHDFS, method getFileBlockLocations:
@Override
public BlockLocation[] getFileBlockLocations(Path path, long start, long len) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    try {
        CrailBlockLocation[] _locations = dfs.lookup(path.toUri().getRawPath()).get().asFile().getBlockLocations(start, len);
        BlockLocation[] locations = new BlockLocation[_locations.length];
        for (int i = 0; i < locations.length; i++) {
            locations[i] = new BlockLocation();
            locations[i].setOffset(_locations[i].getOffset());
            locations[i].setLength(_locations[i].getLength());
            locations[i].setNames(_locations[i].getNames());
            locations[i].setHosts(_locations[i].getHosts());
            locations[i].setTopologyPaths(_locations[i].getTopology());
        }
        return locations;
    } catch (Exception e) {
        throw new IOException(e);
    }
}
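For context, a minimal sketch of how this adapter is typically exercised from client code, assuming Crail is registered as the AbstractFileSystem implementation for the crail:// scheme; the URI, port, and file path below are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class BlockLocationClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical deployment: adjust the crail:// URI to the local setup.
        Path file = new Path("crail://namenode:9060/data/sample.bin");
        FileContext fc = FileContext.getFileContext(file.toUri(), conf);
        // FileContext routes this call to the bound AbstractFileSystem, i.e. the
        // getFileBlockLocations(...) override shown above when CrailHDFS is configured.
        BlockLocation[] locations = fc.getFileBlockLocations(file, 0, Long.MAX_VALUE);
        for (BlockLocation location : locations) {
            System.out.println(location.getOffset() + "+" + location.getLength()
                    + " on " + String.join(",", location.getHosts()));
        }
    }
}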
Use of org.apache.hadoop.security.AccessControlException in project testcases by coheigea.
The class HDFSAccessControlEnforcerTest, method customPermissionsTest:
@org.junit.Test
public void customPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();
    // Write a file - the AccessControlEnforcer won't be invoked as we are the "superuser"
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();
    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));
            fs.close();
            return null;
        }
    });
    // Now try to read the file as "eve" - this should not be allowed
    ugi = UserGroupInformation.createRemoteUser("eve");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Read the file
            try {
                fs.open(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }
            fs.close();
            return null;
        }
    });
    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();
    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }
            fs.close();
            return null;
        }
    });
}
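This test assumes the test class installs a custom access-control enforcer on the NameNode before hdfsCluster is started. A minimal sketch of that wiring, assuming a hypothetical provider class com.example.CustomINodeAttributeProvider that implements org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider and supplies the AccessControlEnforcer:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class EnforcerClusterSetupSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Point the NameNode at the custom provider; the class name here is a placeholder.
        conf.set("dfs.namenode.inode.attributes.provider.class",
                "com.example.CustomINodeAttributeProvider");
        MiniDFSCluster hdfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        String defaultFs = "hdfs://localhost:" + hdfsCluster.getNameNodePort();
        try {
            // ... run scenarios such as customPermissionsTest() against defaultFs ...
            System.out.println("Cluster up at " + defaultFs);
        } finally {
            hdfsCluster.shutdown();
        }
    }
}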
Use of org.apache.hadoop.security.AccessControlException in project testcases by coheigea.
The class HDFSTest, method testChangedPermissionsTest:
@org.junit.Test
public void testChangedPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();
    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file3");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();
    // Change permissions to read-only
    fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
    // Now try to read the file as "bob" - this should fail
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Read the file
            try {
                FSDataInputStream in = fs.open(file);
                ByteArrayOutputStream output = new ByteArrayOutputStream();
                IOUtils.copy(in, output);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }
            fs.close();
            return null;
        }
    });
}
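The FsPermission constructor used above takes owner, group, and other actions in that order, so (READ, NONE, NONE) is the equivalent of chmod 400: after the change only the file owner may read, which is why the read as "bob" is rejected. A small sketch of the same change expressed both ways, assuming an already-initialized FileSystem and an existing path:

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public final class ReadOnlyPermissionSketch {
    static void restrictToOwnerRead(FileSystem fileSystem, Path file) throws Exception {
        // Owner/group/other triple, equivalent to the octal form below.
        fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
        // The same change via an octal short: 0400 = r-- --- ---
        fileSystem.setPermission(file, new FsPermission((short) 0400));
        System.out.println("PERM: " + fileSystem.getFileStatus(file).getPermission());
    }
}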
Use of org.apache.hadoop.security.AccessControlException in project testcases by coheigea.
The class HDFSTest, method defaultPermissionsTest:
@org.junit.Test
public void defaultPermissionsTest() throws Exception {
    FileSystem fileSystem = hdfsCluster.getFileSystem();
    // Write a file
    final Path file = new Path("/tmp/tmpdir/data-file2");
    FSDataOutputStream out = fileSystem.create(file);
    for (int i = 0; i < 1024; ++i) {
        out.write(("data" + i + "\n").getBytes("UTF-8"));
        out.flush();
    }
    out.close();
    // Check status
    // FileStatus status = fileSystem.getFileStatus(file);
    // System.out.println("OWNER: " + status.getOwner());
    // System.out.println("GROUP: " + status.getGroup());
    // System.out.println("PERM: " + status.getPermission().toString());
    // fileSystem.setPermission(file, new FsPermission(FsAction.READ, FsAction.NONE, FsAction.NONE));
    // fileSystem.setOwner(file, "bob", null);
    // Now try to read the file as "bob" - this should be allowed
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("bob");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Read the file
            FSDataInputStream in = fs.open(file);
            ByteArrayOutputStream output = new ByteArrayOutputStream();
            IOUtils.copy(in, output);
            String content = new String(output.toByteArray());
            Assert.assertTrue(content.startsWith("data0"));
            fs.close();
            return null;
        }
    });
    // Write to the file as the owner, this should be allowed
    out = fileSystem.append(file);
    out.write(("new data\n").getBytes("UTF-8"));
    out.flush();
    out.close();
    // Now try to write to the file as "bob" - this should not be allowed
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", defaultFs);
            FileSystem fs = FileSystem.get(conf);
            // Write to the file
            try {
                fs.append(file);
                Assert.fail("Failure expected on an incorrect permission");
            } catch (AccessControlException ex) {
                // expected
            }
            fs.close();
            return null;
        }
    });
}
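This test leans only on HDFS's default permission model: with the stock umask of 022, newly created files end up as 644 (rw-r--r--), so a non-owner such as "bob" can open the file for reading but fails the write check on append. A minimal sketch of that derivation using the Hadoop permission classes:

import org.apache.hadoop.fs.permission.FsPermission;

public class DefaultPermissionSketch {
    public static void main(String[] args) {
        FsPermission fileDefault = FsPermission.getFileDefault(); // 666 (rw-rw-rw-)
        FsPermission umask = new FsPermission((short) 022);       // standard umask
        FsPermission effective = fileDefault.applyUMask(umask);   // 644 (rw-r--r--)
        System.out.println("New files are created as: " + effective);
    }
}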
Use of org.apache.hadoop.security.AccessControlException in project presto by prestodb.
The class DeltaPageSourceProvider, method createParquetPageSource:
private static ConnectorPageSource createParquetPageSource(
        HdfsEnvironment hdfsEnvironment,
        String user,
        Configuration configuration,
        Path path,
        long start,
        long length,
        long fileSize,
        List<DeltaColumnHandle> columns,
        SchemaTableName tableName,
        DataSize maxReadBlockSize,
        boolean batchReaderEnabled,
        boolean verificationEnabled,
        TypeManager typeManager,
        TupleDomain<DeltaColumnHandle> effectivePredicate,
        FileFormatDataSourceStats stats,
        boolean columnIndexFilterEnabled) {
    AggregatedMemoryContext systemMemoryContext = newSimpleAggregatedMemoryContext();
    ParquetDataSource dataSource = null;
    try {
        FSDataInputStream inputStream = hdfsEnvironment.getFileSystem(user, path, configuration).open(path);
        dataSource = buildHdfsParquetDataSource(inputStream, path, stats);
        ParquetMetadata parquetMetadata = MetadataReader.readFooter(dataSource, fileSize).getParquetMetadata();
        FileMetaData fileMetaData = parquetMetadata.getFileMetaData();
        MessageType fileSchema = fileMetaData.getSchema();
        Optional<MessageType> message = columns.stream()
                .filter(column -> column.getColumnType() == REGULAR || isPushedDownSubfield(column))
                .map(column -> getColumnType(typeManager.getType(column.getDataType()), fileSchema, column, tableName, path))
                .filter(Optional::isPresent)
                .map(Optional::get)
                .map(type -> new MessageType(fileSchema.getName(), type))
                .reduce(MessageType::union);
        MessageType requestedSchema = message.orElse(new MessageType(fileSchema.getName(), ImmutableList.of()));
        ImmutableList.Builder<BlockMetaData> footerBlocks = ImmutableList.builder();
        for (BlockMetaData block : parquetMetadata.getBlocks()) {
            long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
            if (firstDataPage >= start && firstDataPage < start + length) {
                footerBlocks.add(block);
            }
        }
        Map<List<String>, RichColumnDescriptor> descriptorsByPath = getDescriptors(fileSchema, requestedSchema);
        TupleDomain<ColumnDescriptor> parquetTupleDomain = getParquetTupleDomain(descriptorsByPath, effectivePredicate);
        Predicate parquetPredicate = buildPredicate(requestedSchema, parquetTupleDomain, descriptorsByPath);
        final ParquetDataSource finalDataSource = dataSource;
        ImmutableList.Builder<BlockMetaData> blocks = ImmutableList.builder();
        List<ColumnIndexStore> blockIndexStores = new ArrayList<>();
        for (BlockMetaData block : footerBlocks.build()) {
            Optional<ColumnIndexStore> columnIndexStore = ColumnIndexFilterUtils.getColumnIndexStore(parquetPredicate, finalDataSource, block, descriptorsByPath, columnIndexFilterEnabled);
            if (predicateMatches(parquetPredicate, block, finalDataSource, descriptorsByPath, parquetTupleDomain, columnIndexStore, columnIndexFilterEnabled)) {
                blocks.add(block);
                blockIndexStores.add(columnIndexStore.orElse(null));
            }
        }
        MessageColumnIO messageColumnIO = getColumnIO(fileSchema, requestedSchema);
        ParquetReader parquetReader = new ParquetReader(messageColumnIO, blocks.build(), dataSource, systemMemoryContext, maxReadBlockSize, batchReaderEnabled, verificationEnabled, parquetPredicate, blockIndexStores, columnIndexFilterEnabled);
        ImmutableList.Builder<String> namesBuilder = ImmutableList.builder();
        ImmutableList.Builder<Type> typesBuilder = ImmutableList.builder();
        ImmutableList.Builder<Optional<Field>> fieldsBuilder = ImmutableList.builder();
        for (DeltaColumnHandle column : columns) {
            checkArgument(column.getColumnType() == REGULAR || column.getColumnType() == SUBFIELD, "column type must be regular or subfield column");
            String name = column.getName();
            Type type = typeManager.getType(column.getDataType());
            namesBuilder.add(name);
            typesBuilder.add(type);
            if (isPushedDownSubfield(column)) {
                Subfield pushedDownSubfield = getPushedDownSubfield(column);
                List<String> nestedColumnPath = nestedColumnPath(pushedDownSubfield);
                Optional<ColumnIO> columnIO = findNestedColumnIO(lookupColumnByName(messageColumnIO, pushedDownSubfield.getRootName()), nestedColumnPath);
                if (columnIO.isPresent()) {
                    fieldsBuilder.add(constructField(type, columnIO.get()));
                } else {
                    fieldsBuilder.add(Optional.empty());
                }
            } else if (getParquetType(type, fileSchema, column, tableName, path).isPresent()) {
                fieldsBuilder.add(constructField(type, lookupColumnByName(messageColumnIO, name)));
            } else {
                fieldsBuilder.add(Optional.empty());
            }
        }
        return new ParquetPageSource(parquetReader, typesBuilder.build(), fieldsBuilder.build(), namesBuilder.build(), new RuntimeStats());
    } catch (Exception exception) {
        try {
            if (dataSource != null) {
                dataSource.close();
            }
        } catch (IOException ignored) {
        }
        if (exception instanceof PrestoException) {
            throw (PrestoException) exception;
        }
        if (exception instanceof ParquetCorruptionException) {
            throw new PrestoException(DELTA_BAD_DATA, exception);
        }
        if (exception instanceof AccessControlException) {
            throw new PrestoException(PERMISSION_DENIED, exception.getMessage(), exception);
        }
        if (nullToEmpty(exception.getMessage()).trim().equals("Filesystem closed") || exception instanceof FileNotFoundException) {
            throw new PrestoException(DELTA_CANNOT_OPEN_SPLIT, exception);
        }
        String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, exception.getMessage());
        if (exception.getClass().getSimpleName().equals("BlockMissingException")) {
            throw new PrestoException(DELTA_MISSING_DATA, message, exception);
        }
        throw new PrestoException(DELTA_CANNOT_OPEN_SPLIT, message, exception);
    }
}
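The catch block is where AccessControlException enters the picture: every failure is translated into a PrestoException with a connector-specific error code, and permission problems are surfaced as PERMISSION_DENIED. A condensed sketch of just that mapping, written as a helper intended to sit next to createParquetPageSource and reuse its static imports (DELTA_BAD_DATA, DELTA_CANNOT_OPEN_SPLIT, DELTA_MISSING_DATA, PERMISSION_DENIED, nullToEmpty, format); this refactoring is illustrative, not part of the Presto codebase:

// Assumes the same static imports as the original file; shown standalone for readability.
private static PrestoException toPrestoException(Exception exception, Path path, long start, long length) {
    if (exception instanceof PrestoException) {
        return (PrestoException) exception;
    }
    if (exception instanceof ParquetCorruptionException) {
        return new PrestoException(DELTA_BAD_DATA, exception);
    }
    if (exception instanceof AccessControlException) {
        // HDFS permission failures become PERMISSION_DENIED for the engine.
        return new PrestoException(PERMISSION_DENIED, exception.getMessage(), exception);
    }
    if (nullToEmpty(exception.getMessage()).trim().equals("Filesystem closed") || exception instanceof FileNotFoundException) {
        return new PrestoException(DELTA_CANNOT_OPEN_SPLIT, exception);
    }
    String message = format("Error opening Hive split %s (offset=%s, length=%s): %s", path, start, length, exception.getMessage());
    if (exception.getClass().getSimpleName().equals("BlockMissingException")) {
        return new PrestoException(DELTA_MISSING_DATA, message, exception);
    }
    return new PrestoException(DELTA_CANNOT_OPEN_SPLIT, message, exception);
}

With such a helper, the catch block above would reduce to closing the data source and rethrowing toPrestoException(exception, path, start, length).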