Hadoop Automated Installation Shell Script

Posted by 五柳-先生 on 2015-04-12

I have written a few articles before on installing Cloudera Hadoop. In the course of installing hadoop, I began by installing the apache version of hadoop by hand, then installed IDH hadoop through Intel's IDH management console, then installed hadoop both manually and through cloudera manager, and also installed apache's hadoop from yum using bigtop-utils.

While installing I drew on many articles found online, unpacked cloudera's cloudera-manager-installer.bin, found and fixed what I consider to be a puppet-related bug in the IDH shell scripts, and finally distilled all of this into a shell script that installs hadoop automatically. The script is hosted on github: hadoop-install

Hadoop installation articles

All of the articles on this blog about installing hadoop are listed below:

  1. [Notes] Hadoop Installation and Deployment

  2. Manually Installing Cloudera Hive CDH

  3. Manually Installing Cloudera HBase CDH

  4. Manually Installing Cloudera Hadoop CDH

  5. The Impala Installation Process

  6. Installing a Cloudera CDH Cluster from yum

  7. Installing CDH via Cloudera Manager

hadoop-install

Of the scripts in hadoop-install, all-in-one-install.sh installs hdfs, hive, yarn, zookeeper, and hbase on a single node; I wrote it so that I could debug mapreduce, hive, and hbase on my own machine (running fedora19). cluster-install.sh installs a hadoop cluster across multiple nodes, and so far it likewise automates installation of hdfs, hive, yarn, zookeeper, and hbase.
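
As a rough sketch only (I am assuming here that the scripts take no arguments; check the scripts themselves in the repository before running anything), invoking them might look like:

# Hypothetical usage sketch; the argument-free invocation is an assumption.
sh all-in-one-install.sh    # single node: hdfs, hive, yarn, zookeeper, hbase
sh cluster-install.sh       # multi-node hadoop cluster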

Script snippets

The IDH installation scripts contain some well-written shell snippets; they are excerpted below for study.

Detecting the operating system version

( grep -i "CentOS" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=centos
( grep -i "Red[[:blank:]]*Hat[[:blank:]]*Enterprise[[:blank:]]*Linux" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=rhel
( grep -i "Oracle[[:blank:]]*Linux" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=oel
( grep -i "Asianux[[:blank:]]*Server" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=an
( grep -i "SUSE[[:blank:]]*Linux[[:blank:]]*Enterprise[[:blank:]]*Server" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=sles
( grep -i "Fedora" /etc/issue > /dev/null ) && OS_DISTRIBUTOR=fedora

# Pull the first two numbers out of /etc/issue as major and minor version.
major_revision=`grep -oP '\d+' /etc/issue | sed -n "1,1p"`
minor_revision=`grep -oP '\d+' /etc/issue | sed -n "2,2p"`
OS_RELEASE="$major_revision.$minor_revision"
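
With OS_DISTRIBUTOR and OS_RELEASE set, later steps can branch per distribution. A minimal sketch of how the variables might be consumed (the package-manager mapping is my own illustration, not from the IDH scripts):

# Minimal sketch: map the detected distribution to a package manager.
case "$OS_DISTRIBUTOR" in
    centos|rhel|oel|an|fedora) PKG_INSTALL="yum install -y" ;;
    sles)                      PKG_INSTALL="zypper --non-interactive install" ;;
    *) echo "Unsupported distribution: $OS_DISTRIBUTOR" >&2 ; exit 1 ;;
esac
echo "Detected $OS_DISTRIBUTOR $OS_RELEASE; will install with: $PKG_INSTALL"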

Changing the root password

echo 'redhat'|passwd root --stdin

Modifying DNS

# Set up nameservers.
# http://ithelpblog.com/os/linux/redhat/centos-redhat/howto-fix-couldnt-resolve-host-on-centos-redhat-rhel-fedora/
# http://stackoverflow.com/a/850731/1486325
echo "nameserver 8.8.8.8" | tee -a /etc/resolv.conf
echo "nameserver 8.8.4.4" | tee -a /etc/resolv.conf

Changing the operating system time zone

cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime

Modifying the hosts file

cat > /etc/hosts <<EOF
127.0.0.1       localhost

192.168.56.121 cdh1
192.168.56.122 cdh2
192.168.56.123 cdh3
EOF
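
A quick check that the new entries resolve (host names taken from the example above):

# Both should resolve via /etc/hosts without touching DNS.
getent hosts cdh1
ping -c 1 cdh2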

Removing lines from file b that appear in file a

grep -vf a b >result.log
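
A small worked example of what that does:

# grep -vf: drop from b every line matching a pattern (line) in a.
printf 'cdh1\ncdh2\n' > a
printf 'cdh1\ncdh2\ncdh3\n' > b
grep -vf a b > result.log
cat result.log    # prints: cdh3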

Modifying file-max

echo -e "Global file limit ..."
rst=`grep "^fs.file-max" /etc/sysctl.conf`
if [ "x$rst" = "x" ] ; then
    echo "fs.file-max = 727680" >> /etc/sysctl.conf || exit $?
else
    sed -i "s:^fs.file-max.*:fs.file-max = 727680:g" /etc/sysctl.conf
fi
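
The edit to /etc/sysctl.conf does not take effect by itself; reload the settings and verify with:

# Apply the new limit without rebooting, then read it back.
sysctl -p
sysctl fs.file-max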

Generating an SSH key pair

# ~/.ssh must be mode 700: a directory needs the execute bit to be traversable.
[ ! -d ~/.ssh ] && ( mkdir ~/.ssh ) && ( chmod 700 ~/.ssh )
yes|ssh-keygen -f ~/.ssh/id_rsa -t rsa -N "" && ( chmod 600 ~/.ssh/id_rsa.pub )

Setting up passwordless SSH login

#!/usr/bin/expect -f
set timeout 20

set host [lindex $argv 0]
set password [lindex $argv 1]
set pubkey [exec cat /root/.ssh/id_rsa.pub]
set localsh [exec cat ./config_ssh_local.sh]

#spawn ssh-copy-id -i /root/.ssh/id_rsa.pub root@$host
spawn ssh root@$host "
umask 022
mkdir -p  /root/.ssh
echo \'$pubkey\' > /root/.ssh/authorized_keys
echo \'$localsh\' >  /root/.ssh/config_ssh_local.sh
cd /root/.ssh/; sh config_ssh_local.sh
"
expect {
    timeout exit
    yes/no  {send "yes\r";exp_continue}
    assword {send "$password\r"}
}
expect eof
#interact
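
Saved to a file (ssh_nopassword.expect is a name I am assuming; note the script also reads a config_ssh_local.sh from the current directory, as seen above), it is run once per host:

# Hypothetical invocation; the file name is assumed. argv: host, password.
expect ssh_nopassword.expect cdh2 redhat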

Configuring JAVA_HOME

### JAVA_HOME ###
if [ -f ~/.bashrc ] ; then
    sed -i '/^export[[:space:]]\{1,\}JAVA_HOME[[:space:]]\{0,\}=/d' ~/.bashrc
    sed -i '/^export[[:space:]]\{1,\}CLASSPATH[[:space:]]\{0,\}=/d' ~/.bashrc
    sed -i '/^export[[:space:]]\{1,\}PATH[[:space:]]\{0,\}=/d' ~/.bashrc
fi
echo "" >>~/.bashrc
echo "export JAVA_HOME=/usr/java/latest" >>~/.bashrc
echo "export CLASSPATH=.:\$JAVA_HOME/lib/tools.jar:\$JAVA_HOME/lib/dt.jar">>~/.bashrc
echo "export PATH=\$JAVA_HOME/bin:\$PATH" >> ~/.bashrc

# The alternatives path should point at the java binary itself.
alternatives --install /usr/bin/java java /usr/java/latest/bin/java 5
alternatives --set java /usr/java/latest/bin/java
source ~/.bashrc
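
After sourcing ~/.bashrc, a quick sanity check:

# Confirm the shell and alternatives both resolve to the expected JDK.
echo $JAVA_HOME     # should print /usr/java/latest
java -version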

Formatting the cluster

su -s /bin/bash hdfs -c 'yes Y | hadoop namenode -format >> /tmp/format.log 2>&1'
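
Once the format succeeds, the HDFS daemons can be brought up. On a CDH-style package install the init scripts are named as below (a sketch; service names vary with how hadoop was installed):

# Sketch for CDH packages: start HDFS after formatting the namenode.
service hadoop-hdfs-namenode start
service hadoop-hdfs-datanode start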

Creating hadoop directories

su -s /bin/bash hdfs -c "hadoop fs -chmod a+rw /"
while read dir user group perm
do
     su -s /bin/bash hdfs -c "hadoop fs -mkdir -p $dir && hadoop fs -chmod -R $perm $dir && hadoop fs -chown -R $user:$group $dir"
     echo "."
done << EOF
/tmp hdfs hadoop 1777 
/tmp/hadoop-yarn mapred mapred 777
/var hdfs hadoop 755 
/var/log yarn mapred 1775 
/var/log/hadoop-yarn/apps yarn mapred 1777
/hbase hbase hadoop 755
/user hdfs hadoop 777
/user/history mapred hadoop 1777
/user/root root hadoop 777
/user/hive hive hadoop 777
EOF
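
After the loop completes, the resulting layout can be verified as the hdfs superuser:

# List the created directories with their owners and permission bits.
su -s /bin/bash hdfs -c "hadoop fs -ls /"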

Installing and initializing PostgreSQL for hive

yum install postgresql-server postgresql-jdbc -y >/dev/null
chkconfig postgresql on
rm -rf /var/lib/pgsql/data
rm -rf /var/run/postgresql/.s.PGSQL.5432
service postgresql initdb

sed -i "s/max_connections = 100/max_connections = 600/" /var/lib/pgsql/data/postgresql.conf
sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/" /var/lib/pgsql/data/postgresql.conf
sed -i "s/shared_buffers = 32MB/shared_buffers = 256MB/" /var/lib/pgsql/data/postgresql.conf
sed -i "s/127.0.0.1\/32/0.0.0.0\/0/" /var/lib/pgsql/data/pg_hba.conf

grep -e listen -e standard_conforming_strings /var/lib/pgsql/data/postgresql.conf

rm -rf /usr/lib/hive/lib/postgresql-jdbc.jar
ln -s /usr/share/java/postgresql-jdbc.jar /usr/lib/hive/lib/postgresql-jdbc.jar

su -c "cd ; /usr/bin/pg_ctl start -w -m fast -D /var/lib/pgsql/data" postgres
su -c "cd ; /usr/bin/psql --command \"create user hiveuser with password 'redhat'; \" " postgres
su -c "cd ; /usr/bin/psql --command \"CREATE DATABASE metastore owner=hiveuser;\" " postgres
su -c "cd ; /usr/bin/psql --command \"GRANT ALL privileges ON DATABASE metastore TO hiveuser;\" " postgres
su -c "cd ; /usr/bin/psql -U hiveuser -d metastore -f /usr/lib/hive/scripts/metastore/upgrade/postgres/hive-schema-0.10.0.postgres.sql" postgres
su -c "cd ; /usr/bin/pg_ctl restart -w -m fast -D /var/lib/pgsql/data" postgres

Summary

For more scripts, watch the github repo hadoop-install; you are welcome to download, use, and modify the code!

----EOF-----

Reposted from: http://blog.javachen.com/2013/08/02/hadoop-install-script/
