Hadoop custom permissions

Posted by weixin_34198881 on 2018-09-19

# 1. The test class
To run Hadoop's test classes, we first have to build Hadoop:

Building hadoop-2.7.4 on macOS

Then look at the test class:

org.apache.hadoop.hdfs.server.namenode.TestINodeAttributeProvider

Run the test method testDelegationToProvider directly. It runs, and in the checkPermission method below

@Override
public void checkPermission(String fsOwner, String supergroup,
    UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
    INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
    int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
    FsAction parentAccess, FsAction access, FsAction subAccess,
    boolean ignoreEmptyDir) throws AccessControlException {

    System.out.println("==========" + fsOwner);
    System.out.println("==========" + supergroup);
    System.out.println("==========" + ugi);
    System.out.println("==========" + inodeAttrs);
    System.out.println("==========" + inodes);
    System.out.println("==========" + pathByNameArr);
    System.out.println("==========" + snapshotId);
    System.out.println("==========" + path);
    System.out.println("==========" + ancestorIndex);
    System.out.println("==========" + doCheckOwner);
    System.out.println("==========" + ancestorAccess);
    System.out.println("==========" + parentAccess);
    System.out.println("==========" + access);
    System.out.println("==========" + subAccess);
    System.out.println("==========" + ignoreEmptyDir);
}


we can print out the values of interest:

/**
         fsOwner = lcc
         supergroup = supergroup
         ugi = u1 (auth:SIMPLE)
         inodeAttrs = [Lorg.apache.hadoop.hdfs.server.namenode.INodeAttributes;@6a93a149
         inodes = [Lorg.apache.hadoop.hdfs.server.namenode.INode;@31d3da26
         pathByNameArr = [[B@35308ff8
         snapshotId = 2147483646
         path = /tmp/foo
         ancestorIndex = 1
         doCheckOwner = false
         ancestorAccess = null
         parentAccess = null
         access = READ_EXECUTE
         subAccess = null
         ignoreEmptyDir = false
         */
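The array parameters show up above only as opaque object references. Here is a minimal sketch of rendering them readably inside checkPermission; note that trailing entries of inodeAttrs can be null for path components that do not exist yet (for example during mkdir), so the sketch guards against that:

// Sketch: print each path component together with its resolved attributes,
// instead of the raw array toString() output.
for (int i = 0; i < pathByNameArr.length; i++) {
    String element = pathByNameArr[i] == null
        ? "" : new String(pathByNameArr[i], java.nio.charset.StandardCharsets.UTF_8);
    INodeAttributes attrs = i < inodeAttrs.length ? inodeAttrs[i] : null;
    System.out.println(i + ": /" + element + " -> "
        + (attrs == null ? "(not resolved)"
            : attrs.getUserName() + ":" + attrs.getGroupName()
              + " " + attrs.getFsPermission()));
}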

But look carefully at the test's setUp method:

 @Before
  public void setUp() throws IOException {
    CALLED.clear();
    Configuration conf = new HdfsConfiguration();
    String name = MegrezHdfsAuthorizer.class.getName();
    conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
        MyAuthorizationProvider.class.getName());
//            name);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
    miniDFS = new MiniDFSCluster.Builder(conf).build();
  }

setUp sets two important properties:

public static final String DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = "dfs.namenode.inode.attributes.provider.class";
public static final String DFS_NAMENODE_ACLS_ENABLED_KEY = "dfs.namenode.acls.enabled";
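With the provider configured this way, a test only has to perform file-system operations as an ordinary user against the MiniDFSCluster for the NameNode to route permission checks through it. Below is a hedged sketch of such a test method, modeled loosely on testDelegationToProvider; the user "u1"/"g1" and the path /tmp/foo simply mirror the output shown earlier, and imports such as org.junit.Test, java.security.PrivilegedExceptionAction, org.apache.hadoop.fs.FileSystem and Path are assumed.

// Sketch: exercise the provider through the MiniDFSCluster started in setUp().
// Operations issued by a non-superuser go through checkPermission().
@Test
public void testProviderIsConsulted() throws Exception {
    FileSystem superFs = miniDFS.getFileSystem();
    superFs.mkdirs(new Path("/tmp/foo"));

    UserGroupInformation user =
        UserGroupInformation.createUserForTesting("u1", new String[]{"g1"});
    user.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
            fs.listStatus(new Path("/tmp/foo"));   // triggers a READ_EXECUTE check on /tmp/foo
            return null;
        }
    });
}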

# 2. Implementing custom permissions without modifying the source code

Create a new Maven project. Under /Users/lcc/IdeaProjects/hadoop-hdfs/src/main/java/, create the package org.apache.hadoop.hdfs.server.namenode (the package location matters: it must be the same package that org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider lives in), then create the class MyHdfsAuthorizer.java with the following code:

package org.apache.hadoop.hdfs.server.namenode;


import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.UserGroupInformation;

import org.apache.hadoop.security.AccessControlException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;


/**
 * Concrete implementation of HDFS authorization.
 *
 * @author hulb
 * @date 2018/7/11 3:17 PM
 * <p>
 * cp hdfs-auth/target/hdfs-auth-spark2.0.jar  /Users/hulb/opt/third/hadoop/share/hadoop/hdfs/lib/
 * <p>
 * Implementation note:
 * a cache of the permission list still needs to be added.
 */
public class MyHdfsAuthorizer extends INodeAttributeProvider {

    private static final Log LOG = LogFactory.getLog(MyHdfsAuthorizer.class);


    @Override
    public void start() {
        System.out.println("MegrezHdfsAuthorizer 啟動");
    }

    @Override
    public void stop() {
        System.out.println("MegrezHdfsAuthorizer 停止");
    }

    /**
     * Method that must be overridden.
     * @param defaultEnforcer the enforcer HDFS would use by default
     * @return the enforcer that will actually be consulted
     */
    @Override
    public AccessControlEnforcer getExternalAccessControlEnforcer(
            AccessControlEnforcer defaultEnforcer) {
        return new MegrezAccessControlEnforcer();
    }


    /**
     * lcc@lcc ~$ hdfs dfs -ls /user/hive /user/lcc
     *
     * @param pathElements the full path of the HDFS operation.
     *                 For example, for   hdfs dfs -ls /
     *                 the full paths are
     *                 /
     *                 /hadoop
     *                 /ranger
     *                 and for            hdfs dfs -ls /hadoop
     *                 the full paths are
     *                 /hadoop/
     *                 /hadoop/tempdata
     * @param inode    path: /hadoop/tempdata
     *                 getGroupName            : supergroup
     *                 getUserName             : lcc
     *                 getAccessTime           : 0
     *                 getAclFeature           : null
     *                 getFsPermission         : rwxr-xr-x
     *                 getFsPermissionShort    : 493
     *                 getLocalNameBytes       : [B@50af74e1
     *                 getModificationTime     : 1530179365668
     *                 getXAttrFeature         : null
     *                 isDirectory             : true
     *                 toString                : tempdata
     * @return the attributes the NameNode should use for this inode
     */

    @Override
    public INodeAttributes getAttributes(String[] pathElements,
                                          INodeAttributes inode) {
//        for(int i=0;i<pathElements.length;i++){
//            System.out.println(pathElements[i].toString());
//        }
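        // Note: returning `inode` unchanged keeps HDFS's own attributes. A custom
        // provider could instead return a wrapping INodeAttributes whose
        // getUserName()/getGroupName()/getFsPermission() come from an external
        // policy store; that substitution is what this hook exists for.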
        return inode;
    }



    public class MegrezAccessControlEnforcer implements AccessControlEnforcer {

        // Intended permission cache. UserAction would be a user-defined class
        // that is not shown here, so the field is left commented out.
        // private ConcurrentMap<String, UserAction> allPass =
        //         new ConcurrentHashMap<String, UserAction>();

        /**
         * Method that must be overridden.
         * It is invoked automatically by the system.
         */
        @Override
        public void checkPermission(String fsOwner, String supergroup,
                                    UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
                                    INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
                                    int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
                                    FsAction parentAccess, FsAction access, FsAction subAccess,
                                    boolean ignoreEmptyDir) throws AccessControlException {
            // Minimal body: just log the request. Not throwing an
            // AccessControlException here means the request is allowed.
            System.out.println("checkPermission called by " + ugi + " for " + path);
        }
    }
}
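An authorizer whose checkPermission never throws effectively allows every request that reaches it. A common pattern (Ranger's HDFS plugin, for example, works this way) is to keep the defaultEnforcer handed to getExternalAccessControlEnforcer and fall back to it, so HDFS's normal owner/group/other check still applies whenever the custom policy has nothing to say. A hedged sketch, with illustrative class and field names that are not part of the code above:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;

/**
 * Sketch only: logs every authorization request and delegates the actual
 * decision to HDFS's built-in permission checker.
 */
public class DelegatingAuthorizer extends INodeAttributeProvider {

    // HDFS's own permission checker, remembered so we can fall back to it.
    private AccessControlEnforcer defaultEnforcer;

    @Override
    public void start() { }

    @Override
    public void stop() { }

    @Override
    public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
        return inode;
    }

    @Override
    public AccessControlEnforcer getExternalAccessControlEnforcer(
            AccessControlEnforcer defaultEnforcer) {
        this.defaultEnforcer = defaultEnforcer;
        return new Enforcer();
    }

    private class Enforcer implements AccessControlEnforcer {
        @Override
        public void checkPermission(String fsOwner, String supergroup,
                UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
                INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
                int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
                FsAction parentAccess, FsAction access, FsAction subAccess,
                boolean ignoreEmptyDir) throws AccessControlException {
            // A custom policy decision would go here. Anything the policy does
            // not cover falls back to the standard HDFS check.
            defaultEnforcer.checkPermission(fsOwner, supergroup, ugi, inodeAttrs,
                inodes, pathByNameArr, snapshotId, path, ancestorIndex,
                doCheckOwner, ancestorAccess, parentAccess, access, subAccess,
                ignoreEmptyDir);
        }
    }
}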

Then modify $HADOOP_HOME/etc/hadoop/hdfs-site.xml:

<configuration>

        <property>
                 <name>dfs.replication</name>
                 <value>1</value>
        </property>
        <property>
                <name>dfs.permissions</name>
                <value>false</value>
        </property>

        <property>
                <name>dfs.permissions.umask-mode</name>
                <value>077</value>
         </property>
        <property>
                <name>dfs.permissions.enabled</name>
                <value>true</value>
         </property>
        <property>
                <name>dfs.namenode.acls.enabled</name>
                <value>true</value>
        </property>
        <property>
                <name>dfs.namenode.inode.attributes.provider.class</name>
                <value>org.apache.hadoop.hdfs.server.namenode.MyHdfsAuthorizer</value>
        </property>
</configuration>

Then package the project and copy the resulting jar into Hadoop's HDFS lib directory:

lcc@lcc ~$ cp ~/IdeaProjects/spark-authorizer/hadoop_hdfs/target/hdfs-auth-spark2.0.jar /Users/lcc/soft/hadoop/hadoop/share/hadoop/hdfs/lib/

Then restart the cluster:

lcc@lcc hadoop$ sbin/start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
18/07/16 15:44:59 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
Starting namenodes on [lcc]
lcc: starting namenode, logging to /Users/lcc/soft/hadoop/hadoop/logs/hadoop-lcc-namenode-lcc.out
localhost: starting datanode, logging to /Users/lcc/soft/hadoop/hadoop/logs/hadoop-lcc-datanode-lcc.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /Users/lcc/soft/hadoop/hadoop/logs/hadoop-lcc-secondarynamenode-lcc.out
18/07/16 15:45:17 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
starting yarn daemons
starting resourcemanager, logging to /Users/lcc/soft/hadoop/hadoop/logs/yarn-lcc-resourcemanager-lcc.out
localhost: starting nodemanager, logging to /Users/lcc/soft/hadoop/hadoop/logs/yarn-lcc-nodemanager-lcc.out
lcc@lcc hadoop$

Then test it:

lcc@lcc ~$ hdfs dfs -mkdir  /lcc6
18/07/16 14:27:36 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable

Then check the NameNode log:

lcc@lcc ~$ tail -100 /Users/lcc/soft/hadoop/hadoop/logs/hadoop-lcc-namenode-lcc.out
MyHdfsAuthorizer started
the locally overridden getAttributes method was called
the locally overridden getAttributes method was called
the locally overridden getAttributes method was called
the locally overridden getAttributes method was called
max memory size         (kbytes, -m) unlimited
open files                      (-n) 256
pipe size            (512 bytes, -p) 1
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 709
virtual memory          (kbytes, -v) unlimited

Then came the awkward part: the inner class's checkPermission method was never called here:

@Override
        public void checkPermission(String fsOwner, String supergroup,
                                    UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
                                    INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
                                    int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
                                    FsAction parentAccess, FsAction access, FsAction subAccess,
                                    boolean ignoreEmptyDir) throws AccessControlException {

The problem was eventually resolved: operations issued from the command line did not trigger the call, but operations issued from a program did. For example, the command

hdfs dfs -mkdir /lcc12

never reaches checkPermission, while the same operation issued from a program does. Here are the Maven dependencies and the test program:


    <dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>3.8.1</version>
            <scope>test</scope>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.7.4</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.4</version>
        </dependency>

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.4</version>
        </dependency>

    </dependencies>
package com.lcc.hadoop.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.net.URI;

public class MyMkdir {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new URI("hdfs://lcc:9000"), new Configuration(), "root");
        // Create a directory on HDFS; /leitao3 does not exist under the root directory beforehand
        boolean flag = fs.mkdirs(new Path("/leitao3"));
        System.out.println("mkdir /leitao3 -> " + flag);
        fs.close();
    }
}

This is a big pitfall. A likely explanation for the difference: the command line was run as lcc, the user the NameNode runs as and therefore the HDFS superuser, and HDFS skips permission checks (including the external enforcer) for the superuser; the program, on the other hand, connects as root, an ordinary user, so checkPermission is actually invoked.
