Hadoop 2.8.5 Fully Distributed Environment Setup
ZooKeeper cluster documentation:
NameNode HA documentation:
1. Download ZooKeeper
[root@node1 ~]# wget
[root@node1 ~]# tar xvf zookeeper-3.4.13.tar.gz -C /opt/
[root@node1 ~]# cd /opt/zookeeper-3.4.13/conf/
[root@node1 conf]# vim zoo.cfg
tickTime=2000
dataDir=/opt/zookeeper-3.4.13/data
clientPort=2181
initLimit=5
syncLimit=2
server.1=node1:2888:3888
server.2=node2:2888:3888
server.3=node3:2888:3888
[root@node1 conf]# mkdir /opt/zookeeper-3.4.13/data
[root@node1 conf]# cd /opt/zookeeper-3.4.13/data --the myid file must live in the data directory, otherwise ZooKeeper reports an error
[root@node1 data]# cat myid
1
[root@node1 zookeeper-3.4.13]# cd ..
[root@node1 opt]# scp -r zookeeper-3.4.13 node2:/opt/
[root@node1 opt]# scp -r zookeeper-3.4.13 node3:/opt/
2. Edit the myid file on node2
[root@node2 opt]# cat /opt/zookeeper-3.4.13/data/myid
2
[root@node2 opt]#
3. Edit the myid file on node3
[root@node3 ~]# cat /opt/zookeeper-3.4.13/data/myid
3
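The myid files themselves contain nothing but the matching server number, so they can be written with a one-liner on each node, for example:
[root@node1 ~]# echo 1 > /opt/zookeeper-3.4.13/data/myid
[root@node2 ~]# echo 2 > /opt/zookeeper-3.4.13/data/myid
[root@node3 ~]# echo 3 > /opt/zookeeper-3.4.13/data/myid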
[root@node3 ~]# zkServer.sh start --start the zookeeper service on every node
ZooKeeper JMX enabled by default
Using config: /opt/zookeeper-3.4.13/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@node3 opt]# zkCli.sh --connect with the client
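Before moving on it is worth confirming that the quorum actually formed; a quick check on each node (one should report leader, the other two follower):
[root@node1 ~]# zkServer.sh status
[root@node2 ~]# zkServer.sh status
[root@node3 ~]# zkServer.sh status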
4. Download and extract the Hadoop package (install the JDK and Hadoop on every node)
[root@node1 ~]# wget
[root@node1 ~]# wget
[root@node1 ~]# scp jdk-8u202-linux-x64.rpm node2:/root/ --copy the JDK package to each node
[root@node1 ~]# rpm -ivh jdk-8u202-linux-x64.rpm --install the JDK on each node
[root@node1 ~]# tar xvf hadoop-2.8.5.tar.gz -C /opt/
[root@node1 ~]# cd /opt/hadoop-2.8.5/etc/hadoop/
[root@node1 hadoop]# vim hadoop-env.sh
export JAVA_HOME=/usr/java/jdk1.8.0_202-amd64/
[root@node1 hadoop]# cat slaves --list the DataNode hosts (needed on every DataNode)
node2
node3
node4
[root@node1 hadoop]#
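Since the heading of this step notes that the JDK and Hadoop must be present on every node, the extracted directory can simply be pushed out with scp; a sketch, assuming /opt is used on every node as it is on node1:
[root@node1 ~]# scp -r /opt/hadoop-2.8.5 node2:/opt/
[root@node1 ~]# scp -r /opt/hadoop-2.8.5 node3:/opt/
[root@node1 ~]# scp -r /opt/hadoop-2.8.5 node4:/opt/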
5. Configure environment variables
[root@node1 opt]# vim /etc/profile --add the same entries on the other nodes
export JAVA_HOME=/usr/java/jdk1.8.0_202-amd64
export HADOOP_HOME=/opt/hadoop-2.8.5
export ZOOKEEPER_HOME=/opt/zookeeper-3.4.13
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin
[root@node1 opt]# source /etc/profile
[root@node1 opt]# scp /etc/profile node2:/etc/
profile 100% 2037 1.4MB/s 00:00
[root@node1 opt]# scp /etc/profile node3:/etc/
profile 100% 2037 961.7KB/s 00:00
[root@node1 opt]#
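A quick way to confirm the variables took effect (remember to run source /etc/profile on node2 and node3 as well after copying the file):
[root@node1 opt]# java -version
[root@node1 opt]# hadoop version
[root@node1 opt]# echo $ZOOKEEPER_HOME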
6. Configure NameNode high availability with ZooKeeper
[root@node1 ~]# cd /opt/hadoop-2.8.5/etc/hadoop/
[root@node1 hadoop]# vim hdfs-site.xml
<configuration>
  <property>
    <name>dfs.nameservices</name>                        <!-- the cluster (nameservice) name: mycluster -->
    <value>mycluster</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.mycluster</name>              <!-- the two namenode members: nn1, nn2 -->
    <value>nn1,nn2</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn1</name>  <!-- nn1 is node1 -->
    <value>node1:8020</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.mycluster.nn2</name>  <!-- nn2 is node4 -->
    <value>node4:8020</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn1</name>
    <value>node1:50070</value>
  </property>
  <property>
    <name>dfs.namenode.http-address.mycluster.nn2</name>
    <value>node4:50070</value>
  </property>
  <property>
    <name>dfs.namenode.shared.edits.dir</name>
    <value>qjournal://node2:8485;node3:8485;node4:8485/mycluster</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.mycluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.ha.fencing.methods</name>
    <value>sshfence</value>
  </property>
  <property>
    <name>dfs.ha.fencing.ssh.private-key-files</name>    <!-- the nodes trust each other via /root/.ssh/id_rsa -->
    <value>/root/.ssh/id_rsa</value>
  </property>
  <property>
    <name>dfs.ha.automatic-failover.enabled</name>       <!-- enable automatic failover -->
    <value>true</value>
  </property>
</configuration>
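The sshfence method above relies on /root/.ssh/id_rsa, so the two namenode hosts need passwordless root SSH to each other; a minimal sketch if the keys are not yet in place:
[root@node1 ~]# ssh-keygen -t rsa
[root@node1 ~]# ssh-copy-id root@node4
[root@node4 ~]# ssh-keygen -t rsa
[root@node4 ~]# ssh-copy-id root@node1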
[root@node1 hadoop]# vim core-site.xml
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://mycluster</value>
  </property>
  <property>
    <name>ha.zookeeper.quorum</name>                     <!-- define the ZooKeeper ensemble -->
    <value>node1:2181,node2:2181,node3:2181</value>
  </property>
</configuration>
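Note that nothing here sets hadoop.tmp.dir, so the namenode metadata lands under the default /tmp/hadoop-root, which is why the format step below copies /tmp/hadoop-root/dfs to node4. To keep the data somewhere more durable you could optionally add a property such as the following (the path is just an example):
<property>
  <name>hadoop.tmp.dir</name>
  <value>/opt/hadoop-2.8.5/data/tmp</value>
</property>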
[root@node1 hadoop]# scp hdfs-site.xml core-site.xml node2:/opt/hadoop-2.8.5/etc/hadoop/
[root@node1 hadoop]# scp hdfs-site.xml core-site.xml node3:/opt/hadoop-2.8.5/etc/hadoop/
[root@node1 hadoop]# scp hdfs-site.xml core-site.xml node4:/opt/hadoop-2.8.5/etc/hadoop/
7. Start the JournalNodes (here node1, node2, node3 and node4 each run one, though strictly only the hosts listed in dfs.namenode.shared.edits.dir need to)
[root@node2 hadoop]# hadoop-daemon.sh start journalnode
starting journalnode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-journalnode-node2.out
[root@node2 hadoop]#
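The same command is run on the remaining hosts named in the heading, e.g.:
[root@node1 hadoop]# hadoop-daemon.sh start journalnode
[root@node3 hadoop]# hadoop-daemon.sh start journalnode
[root@node4 hadoop]# hadoop-daemon.sh start journalnode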
8. Format the NameNode and start the services
[root@node1 hadoop]# hdfs namenode -format --only needs to be run once, on one of the two namenodes (node1 here)
[root@node1 hadoop]# scp -r /tmp/hadoop-root/dfs node4:/tmp/hadoop-root/ --copy the formatted metadata to the other namenode (node4)
[root@node1 hadoop]# hdfs zkfc -formatZK --initialize the HA state in ZooKeeper
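hdfs zkfc -formatZK creates the HA znode in ZooKeeper; a quick way to verify it is to open the client and run ls /hadoop-ha at the zkCli prompt, which should list mycluster:
[root@node1 hadoop]# zkCli.sh -server node1:2181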
[root@node1 hadoop]# start-dfs.sh --start the services
Starting namenodes on [node1 node4]
node1: starting namenode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-namenode-node1.out
node4: starting namenode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-namenode-node4.out
node4: starting datanode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-datanode-node4.out
node2: starting datanode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-datanode-node2.out
node3: starting datanode, logging to /opt/hadoop-2.8.5/logs/hadoop-root-datanode-node3.out
Starting journal nodes [node2 node3 node4]
node2: journalnode running as process 10429. Stop it first.
node4: journalnode running as process 9923. Stop it first.
node3: journalnode running as process 10198. Stop it first.
Starting ZK Failover Controllers on NN hosts [node1 node4]
node1: starting zkfc, logging to /opt/hadoop-2.8.5/logs/hadoop-root-zkfc-node1.out
node4: starting zkfc, logging to /opt/hadoop-2.8.5/logs/hadoop-root-zkfc-node4.out
[root@node1 hadoop]# jps
12728 QuorumPeerMain --ZooKeeper process
3929 DataNode --DataNode process
15707 JournalNode --JournalNode process
16907 DFSZKFailoverController
16556 NameNode
17471 Jps
[root@node1 hadoop]#
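To see which namenode is currently active and which is standby, hdfs haadmin can query the two members defined in hdfs-site.xml:
[root@node1 hadoop]# hdfs haadmin -getServiceState nn1
[root@node1 hadoop]# hdfs haadmin -getServiceState nn2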
9. Processes on a DataNode
[root@node2 hadoop]# jps
11282 DataNode
11867 Jps
10429 JournalNode
8590 QuorumPeerMain
[root@node2 hadoop]#
10. Access via the web UI
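The NameNode web UIs listen on the http addresses set in hdfs-site.xml, so the cluster can be checked from a browser at http://node1:50070 and http://node4:50070; one page should show the node as active and the other as standby. A simple failover test (a sketch, run on whichever node is currently active) is to stop its namenode and watch the standby take over:
[root@node1 ~]# hadoop-daemon.sh stop namenode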