【Hadoop】HBase Component Configuration

Posted by 王智剛 on 2022-04-16

HBase lab steps:

This builds on top of the fully distributed Hadoop configuration.

1. Configure time synchronization (all nodes)

[root@master ~]# yum -y install chrony
[root@master ~]# vi /etc/chrony.conf
# comment out the default server/pool entries and add Aliyun's NTP server
server time1.aliyun.com iburst
# save and exit
[root@master ~]# systemctl restart chronyd
[root@master ~]# systemctl enable chronyd
Created symlink from /etc/systemd/system/multi-user.target.wants/chronyd.service to /usr/lib/systemd/system/chronyd.service.
[root@master ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enabled)
   Active: active (running) since Fri 2022-04-15 15:39:55 CST; 23s ago
 Main PID: 1900 (chronyd)
   CGroup: /system.slice/chronyd.service
           └─1900 /usr/sbin/chronyd

# If the status shows active (running), chronyd started successfully
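
Repeat the same chrony installation and configuration on slave1 and slave2. As an optional check that a node is really syncing against the Aliyun server, query the chrony sources (exact output varies):

[root@master ~]# chronyc sources -v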

2. Deploy HBase (master node)

First upload the HBase package to /opt/software with Xftp.

# Extract the archive
[root@master ~]# tar xf /opt/software/hbase-1.2.1-bin.tar.gz -C /usr/local/src/
[root@master ~]# cd /usr/local/src/
[root@master src]# mv hbase-1.2.1 hbase
[root@master src]# ls
hadoop  hbase  hive  jdk

# Configure the HBase environment variables
[root@master src]# vi /etc/profile.d/hbase.sh
export HBASE_HOME=/usr/local/src/hbase
export PATH=${HBASE_HOME}/bin:$PATH
# save and exit
[root@master src]# source /etc/profile.d/hbase.sh
[root@master src]# echo $PATH
/usr/local/src/hbase/bin:/usr/local/src/jdk/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/usr/local/src/hive/bin:/root/bin

# If the HBase bin directory shows up in PATH, the variables took effect
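
As an extra check, the hbase launcher should now resolve from the updated PATH; which hbase should print /usr/local/src/hbase/bin/hbase:

[root@master src]# which hbase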

3. Configure HBase (master node)

# Edit the HBase configuration files
[root@master src]# cd /usr/local/src/hbase/conf/
[root@master conf]# vi hbase-env.sh
export JAVA_HOME=/usr/local/src/jdk
# true = let HBase manage its own ZooKeeper quorum (the HQuorumPeer processes seen later)
export HBASE_MANAGES_ZK=true
# put the Hadoop configuration directory on HBase's classpath
export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop/
# save and exit
[root@master conf]# vi hbase-site.xml
<configuration>
        <!-- HBase data directory in HDFS; the URI must match fs.defaultFS in core-site.xml -->
        <property>
                <name>hbase.rootdir</name>
                <value>hdfs://master:9000/hbase</value>
        </property>
        <!-- Port of the HMaster web UI (used in step 8) -->
        <property>
                <name>hbase.master.info.port</name>
                <value>60010</value>
        </property>
        <property>
                <name>hbase.zookeeper.property.clientPort</name>
                <value>2181</value>
        </property>
        <!-- ZooKeeper session timeout in milliseconds -->
        <property>
                <name>zookeeper.session.timeout</name>
                <value>10000</value>
        </property>
        <!-- Nodes that run the HBase-managed ZooKeeper quorum -->
        <property>
                <name>hbase.zookeeper.quorum</name>
                <value>master,slave1,slave2</value>
        </property>
        <property>
                <name>hbase.tmp.dir</name>
                <value>/usr/local/src/hbase/tmp</value>
        </property>
        <!-- Run in fully distributed mode -->
        <property>
                <name>hbase.cluster.distributed</name>
                <value>true</value>
        </property>
</configuration>
# save and exit
[root@master conf]# vi regionservers
192.168.100.20
192.168.100.30
# save and exit; these are the IPs of slave1 and slave2
[root@master conf]# mkdir -p /usr/local/src/hbase/tmp
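
Note that the hdfs://master:9000 prefix in hbase.rootdir must match the fs.defaultFS value in Hadoop's core-site.xml. A quick way to double-check, assuming the Hadoop configuration lives under /usr/local/src/hadoop/etc/hadoop as in this setup:

[root@master conf]# grep -A1 fs.defaultFS /usr/local/src/hadoop/etc/hadoop/core-site.xml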

4. Copy the files to the slave nodes

# On the master node
[root@master conf]# scp -r /usr/local/src/hbase slave1:/usr/local/src/
[root@master conf]# scp -r /usr/local/src/hbase slave2:/usr/local/src/
[root@master conf]# scp /etc/profile.d/hbase.sh slave1:/etc/profile.d/
[root@master conf]# scp /etc/profile.d/hbase.sh slave2:/etc/profile.d/
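
To confirm the copies landed, a quick remote listing works (this relies on the passwordless SSH already set up for Hadoop):

[root@master conf]# ssh slave1 ls /usr/local/src/hbase
[root@master conf]# ssh slave2 ls /usr/local/src/hbase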

5. Fix ownership and switch users (all nodes)

# On the master node
[root@master conf]# chown -R hadoop.hadoop /usr/local/src
[root@master conf]# ll /usr/local/src/
total 0
drwxr-xr-x. 12 hadoop hadoop 183 Apr  9 09:57 hadoop
drwxr-xr-x   8 hadoop hadoop 171 Apr 15 15:59 hbase
drwxr-xr-x. 11 hadoop hadoop 215 Apr  9 10:40 hive
drwxr-xr-x.  8 hadoop hadoop 255 Sep 14  2017 jdk
[root@master conf]# su - hadoop

# On the slave1 node
[root@slave1 ~]# chown -R hadoop.hadoop /usr/local/src
[root@slave1 ~]# ll /usr/local/src/
total 0
drwxr-xr-x. 12 hadoop hadoop 183 Apr  9 09:59 hadoop
drwxr-xr-x   8 hadoop hadoop 171 Apr 15 16:19 hbase
drwxr-xr-x.  8 hadoop hadoop 255 Apr  8 17:24 jdk
[root@slave1 ~]# su - hadoop

# On the slave2 node
[root@slave2 ~]# chown -R hadoop.hadoop /usr/local/src
[root@slave2 ~]# ll /usr/local/src/
total 0
drwxr-xr-x. 12 hadoop hadoop 183 Apr  9 09:59 hadoop
drwxr-xr-x   8 hadoop hadoop 171 Apr 15 16:19 hbase
drwxr-xr-x.  8 hadoop hadoop 255 Apr  8 17:24 jdk
[root@slave2 ~]# su - hadoop

6. Start Hadoop (master node)

# Start the distributed Hadoop cluster from master
[hadoop@master ~]$ start-all.sh
[hadoop@master ~]$ jps
3210 Jps
2571 NameNode
2780 SecondaryNameNode
2943 ResourceManager

# Check the slave1 node
[hadoop@slave1 ~]$ jps
2512 DataNode
2756 Jps
2623 NodeManager

# Check the slave2 node
[hadoop@slave2 ~]$ jps
3379 Jps
3239 NodeManager
3135 DataNode

# Make sure master runs NameNode, SecondaryNameNode, and ResourceManager, and that each slave runs DataNode and NodeManager
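
Besides jps, you can ask HDFS itself whether both DataNodes registered; the report should list the two slaves as live nodes:

[hadoop@master ~]$ hdfs dfsadmin -report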

7. Start HBase (master node)

[hadoop@master ~]$ start-hbase.sh
[hadoop@master ~]$ jps
3569 HMaster
2571 NameNode
2780 SecondaryNameNode
3692 Jps
2943 ResourceManager
3471 HQuorumPeer

# Check the slave1 node
[hadoop@slave1 ~]$ jps
2512 DataNode
2818 HQuorumPeer
2933 HRegionServer
3094 Jps
2623 NodeManager

# Check the slave2 node
[hadoop@slave2 ~]$ jps
3239 NodeManager
3705 Jps
3546 HRegionServer
3437 HQuorumPeer
3135 DataNode

# Make sure master runs HQuorumPeer and HMaster, and that each slave runs HQuorumPeer and HRegionServer
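
Once HBase is up, it initializes its root directory in HDFS, so listing /hbase is another quick health check (exact contents vary by version):

[hadoop@master ~]$ hdfs dfs -ls /hbase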

8. View the web UI

On the Windows host:
Copy the hosts file from C:\Windows\System32\drivers\etc\ to the desktop, edit it to add the hostname-to-IP mappings below, and move it back (editing it in place usually fails without administrator rights). Then open http://master:60010 in a browser to reach the HBase web UI.

192.168.100.10  master  master.example.com
192.168.100.20  slave1  slave1.example.com
192.168.100.30  slave2  slave2.example.com
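
If the page does not load, first confirm that the Windows host resolves the hostname; from a Windows command prompt, a ping should get replies from 192.168.100.10:

C:\> ping master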

9. HBase shell basics (master node)

[hadoop@master ~]$ hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help<RETURN>' for list of supported commands.
Type "exit<RETURN>" to leave the HBase Shell
Version 1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016

# Create a table named scores with two column families, grade and course
hbase(main):001:0> create 'scores','grade','course'
0 row(s) in 1.3950 seconds

=> Hbase::Table - scores

# Check the cluster status
hbase(main):002:0> status
1 active master, 0 backup masters, 2 servers, 0 dead, 1.5000 average load

# Check the HBase version
hbase(main):003:0> version
1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016

# List the tables
hbase(main):004:0> list
TABLE                                                                                            
scores                                                                                           
1 row(s) in 0.0150 seconds

=> ["scores"]

# Insert records; 'grade:' with nothing after the colon writes to the empty column qualifier of the grade family
hbase(main):005:0> put 'scores','jie','grade:','146cloud'
0 row(s) in 0.1000 seconds

hbase(main):006:0> put 'scores','jie','course:math','86'
0 row(s) in 0.0160 seconds

hbase(main):007:0> put 'scores','jie','course:cloud','92'
0 row(s) in 0.0120 seconds

hbase(main):008:0> put 'scores','shi','grade:','133soft'
0 row(s) in 0.0120 seconds

hbase(main):009:0> put 'scores','shi','course:math','87'
0 row(s) in 0.0080 seconds

hbase(main):010:0> put 'scores','shi','course:cloud','96'
0 row(s) in 0.0080 seconds

# Read a row back
hbase(main):011:0> get 'scores','jie'
COLUMN                    CELL                                                                   
 course:cloud             timestamp=1650090459825, value=92                                      
 course:math              timestamp=1650090453152, value=86                                      
 grade:                   timestamp=1650090446128, value=146cloud                                
3 row(s) in 0.0190 seconds

hbase(main):012:0> get 'scores','jie','grade'
COLUMN                    CELL                                                                   
 grade:                   timestamp=1650090446128, value=146cloud                                
1 row(s) in 0.0080 seconds

# Scan the whole table
hbase(main):013:0> scan 'scores'
ROW                       COLUMN+CELL                                                            
 jie                      column=course:cloud, timestamp=1650090459825, value=92                 
 jie                      column=course:math, timestamp=1650090453152, value=86                  
 jie                      column=grade:, timestamp=1650090446128, value=146cloud                 
 shi                      column=course:cloud, timestamp=1650090479946, value=96                 
 shi                      column=course:math, timestamp=1650090475684, value=87                  
 shi                      column=grade:, timestamp=1650090464698, value=133soft                  
2 row(s) in 0.0200 seconds

# Scan the table by column family
hbase(main):014:0> scan 'scores',{COLUMNS=>'course'}
ROW                       COLUMN+CELL                                                            
 jie                      column=course:cloud, timestamp=1650090459825, value=92                 
 jie                      column=course:math, timestamp=1650090453152, value=86                  
 shi                      column=course:cloud, timestamp=1650090479946, value=96                 
 shi                      column=course:math, timestamp=1650090475684, value=87                  
2 row(s) in 0.0140 seconds

# Delete a specific cell
hbase(main):015:0> delete 'scores','shi','grade'
0 row(s) in 0.0190 seconds
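
To verify the delete, read the row back; the grade cell should be gone and only the two course cells should remain (generic prompt shown, output omitted):

hbase> get 'scores','shi'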

# Add a new column family named age
hbase(main):016:0> alter 'scores',NAME=>'age'
Updating all regions with the new schema...
1/1 regions updated.
Done.
0 row(s) in 1.9080 seconds

# Describe the table schema
hbase(main):017:0> describe 'scores'
Table scores is ENABLED                                                                          
scores                                                                                           
COLUMN FAMILIES DESCRIPTION                                                                      
{NAME => 'age', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS =
> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS =
> '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                     
{NAME => 'course', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELL
S => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSION
S => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                  
{NAME => 'grade', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS
 => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS
 => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}                   
3 row(s) in 0.0230 seconds

# Delete the column family named age
hbase(main):018:0> alter 'scores',NAME=>'age',METHOD=>'delete'
Updating all regions with the new schema...
1/1 regions updated.
Done.
0 row(s) in 1.8940 seconds

# Drop the table: it must be disabled first
hbase(main):019:0> disable 'scores'
0 row(s) in 2.2400 seconds

# ...then drop it
hbase(main):020:0> drop 'scores'
0 row(s) in 1.2450 seconds

hbase(main):021:0> list
TABLE                                                                                            
0 row(s) in 0.0040 seconds

=> []

# Exit the HBase shell
hbase(main):022:0> quit

10. Stop HBase (master node)

# Stop HBase
[hadoop@master ~]$ stop-hbase.sh
stopping hbase...............

[hadoop@master ~]$ jps
44952 NameNode
45306 ResourceManager
46988 Jps
45150 SecondaryNameNode

# Stop Hadoop
[hadoop@master ~]$ stop-all.sh
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
…………
[hadoop@master ~]$ jps
47438 Jps

Notice: do not repost without permission.
Original post: https://www.cnblogs.com/wzgwzg/p/16152890.html
