Automated Spark/Hadoop cluster setup script

#!/bin/bash
#############################################################
#############  Usage notes  #################################
# 1. Prepare the servers' base environment before running
#    this script (JDK installed, hostnames resolvable, SSH
#    access between nodes).
# 2. Manually create the following directory on every hadoop
#    node: /data/hdfs/tmp
# 3. Adjust the configuration variables below.
# 4. After the script finishes, format the NameNode manually.
# 5. Format command: hdfs namenode -format
#############################################################
#Configuration variables
#master node hostname
master_dns='slave-3'
#first slave hostname; also used as the SecondaryNameNode (2NN) address
slave_1='slave-4'
#all slave nodes
slaves=(slave-4 slave-5)
#java install path
java_home='/opt/java/jdk1.8.0_144'
#hadoop version
hadoop_version='hadoop-2.8.1'
#hadoop data directory
hadoop_data_path='/data/hdfs/tmp'
#hadoop tarball location
hadoop_install_package='/opt/package/'$hadoop_version'.tar.gz'
#hdfs replication factor
dfs_replication='2'
#spark version
spark_version='spark-3.0.2-bin-hadoop2.7'
#spark tarball location
spark_install_package='/opt/package/'$spark_version'.tgz'
###########################################
##############Install Hadoop##############
###########################################
echo $(date) 'info: starting big data base environment setup...'
echo $(date) 'info: checking java version...'
java -version
echo $(date) 'info: creating hadoop data directory...'
mkdir -p $hadoop_data_path
echo $(date) 'info: extracting hadoop tarball...'
tar -zxvf $hadoop_install_package -C /opt
#Append hadoop environment variables to /etc/profile
echo $(date) 'info: configuring hadoop environment variables...'
echo 'export HADOOP_HOME=/opt/'$hadoop_version>>/etc/profile
echo 'export PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin'>>/etc/profile
source /etc/profile
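#Note: sourcing /etc/profile here only updates this script's own shell;
#other sessions on this node pick up HADOOP_HOME and the new PATH at
#their next login.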
#Set JAVA_HOME in hadoop-env.sh
echo $(date) 'info: configuring hadoop...'
echo 'export JAVA_HOME='$java_home>>/opt/$hadoop_version/etc/hadoop/hadoop-env.sh
#Configure core-site.xml: point clients at the NameNode (fs.defaultFS).
#Each `sed '19a'` inserts right after line 19 of the stock Hadoop 2.8.1
#template (the <configuration> line), so the four lines below are written
#in reverse of their final order in the file.
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<value>hdfs://'$master_dns':9000</value>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<name>fs.defaultFS</name>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
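#Under the line-19 assumption, the block above renders inside
#<configuration> as:
#  <property>
#    <name>fs.defaultFS</name>
#    <value>hdfs://slave-3:9000</value>
#  </property>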
#Set the directory Hadoop uses for its runtime data
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<value>'$hadoop_data_path'</value>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<name>hadoop.tmp.dir</name>' /opt/$hadoop_version/etc/hadoop/core-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/core-site.xml
#Configure hdfs-site.xml: set the HDFS replication factor
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<value>'$dfs_replication'</value>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<name>dfs.replication</name>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
#Set the SecondaryNameNode (2NN) HTTP address
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<value>'$slave_1':9001</value>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<name>dfs.namenode.secondary.http-address</name>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/hdfs-site.xml
#Configure mapred-site.xml: run MapReduce on the YARN framework
cp /opt/$hadoop_version/etc/hadoop/mapred-site.xml.template /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t</property>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<value>yarn</value>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<name>mapreduce.framework.name</name>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
sed -i '19a\\t<property>' /opt/$hadoop_version/etc/hadoop/mapred-site.xml
#Configure yarn-site.xml: set the ResourceManager (RM) host; the stock
#yarn-site.xml template opens <configuration> at line 15
sed -i '15a\\t</property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<value>'$master_dns'</value>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<name>yarn.resourcemanager.hostname</name>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
#Enable the shuffle auxiliary service so MapReduce jobs can run on yarn
sed -i '15a\\t</property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<value>mapreduce_shuffle</value>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<name>yarn.nodemanager.aux-services</name>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
sed -i '15a\\t<property>' /opt/$hadoop_version/etc/hadoop/yarn-site.xml
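#Optional hardening (a sketch, not part of the original flow): rather than
#relying on fixed template line numbers, append each property just before
#the closing </configuration> tag. The helper name add_property below is
#hypothetical:
#  add_property() {   # usage: add_property <file> <name> <value>
#    sed -i "s|</configuration>|\t<property>\n\t\t<name>$2</name>\n\t\t<value>$3</value>\n\t</property>\n</configuration>|" "$1"
#  }
#  add_property /opt/$hadoop_version/etc/hadoop/core-site.xml fs.defaultFS "hdfs://$master_dns:9000"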
#Configure the hadoop slaves file (delete the default "localhost" first line)
sed -i '1d' /opt/$hadoop_version/etc/hadoop/slaves
for slave in ${slaves[@]};
do
  echo $slave>>/opt/$hadoop_version/etc/hadoop/slaves
done
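#After the loop, /opt/$hadoop_version/etc/hadoop/slaves holds one worker
#hostname per line:
#  slave-4
#  slave-5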
#Ship the configured hadoop tree and the updated /etc/profile to every
#other node (assumes the script runs as root with key-based SSH to each
#slave; the check is -gt 0 so a single-slave cluster is covered too)
if [[ ${#slaves[@]} -gt 0 ]]; then
  for slave in "${slaves[@]}"; do
    scp -r /opt/$hadoop_version/ root@$slave:/opt/
    scp -r /etc/profile root@$slave:/etc/
  done
fi
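#The copied /etc/profile takes effect on each slave at its next login
#shell; the daemons are later launched from the master by start-dfs.sh
#and start-yarn.sh, which ssh into every slave themselves.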
#Reload the environment variables in this shell
source /etc/profile
echo $(date) 'info: hadoop configuration complete!!!'
###########################################
############Install Spark#################
###########################################
echo $(date) 'info: starting spark install...'
tar -zxvf $spark_install_package -C /opt
echo $(date) 'info: configuring spark...'
#Append spark environment variables to /etc/profile
echo 'export SPARK_HOME=/opt/'$spark_version>>/etc/profile
echo 'export PATH=$PATH:$SPARK_HOME/bin:$SPARK_HOME/sbin'>>/etc/profile
#Configure spark-env.sh
cp /opt/$spark_version/conf/spark-env.sh.template /opt/$spark_version/conf/spark-env.sh
echo 'export HADOOP_CONF_DIR=/opt/'$hadoop_version'/etc/hadoop'>>/opt/$spark_version/conf/spark-env.sh
echo 'export SPARK_MASTER_PORT=7077'>>/opt/$spark_version/conf/spark-env.sh
#SPARK_MASTER_HOST is the current name for SPARK_MASTER_IP, deprecated since Spark 2.0
echo 'export SPARK_MASTER_HOST='$master_dns>>/opt/$spark_version/conf/spark-env.sh
echo 'export JAVA_HOME='$java_home>>/opt/$spark_version/conf/spark-env.sh
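#spark-env.sh now ends with these lines (values substituted from the
#variables at the top of the script):
#  export HADOOP_CONF_DIR=/opt/hadoop-2.8.1/etc/hadoop
#  export SPARK_MASTER_PORT=7077
#  export SPARK_MASTER_HOST=slave-3
#  export JAVA_HOME=/opt/java/jdk1.8.0_144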
#Configure the spark slaves file (the template's last line is the default
#"localhost" entry, which the sed below removes)
cp /opt/$spark_version/conf/slaves.template /opt/$spark_version/conf/slaves
sed -i '$d' /opt/$spark_version/conf/slaves
for slave in ${slaves[@]};
do
  echo $slave>>/opt/$spark_version/conf/slaves
done
#Ship spark to the other nodes (same root/SSH assumptions as above)
if [[ ${#slaves[@]} -gt 0 ]]; then
  for slave in "${slaves[@]}"; do
    scp -r /opt/$spark_version/ root@$slave:/opt/
    scp -r /etc/profile root@$slave:/etc/
  done
fi
echo $(date) 'info: spark configuration complete!!!'
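
As the usage notes say, the NameNode still has to be formatted by hand once the script finishes. A typical first start from the master then looks like the following (a sketch using the stock Hadoop sbin scripts and Spark's standalone launcher; check each daemon with jps afterwards):

hdfs namenode -format
start-dfs.sh
start-yarn.sh
/opt/spark-3.0.2-bin-hadoop2.7/sbin/start-all.sh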


Reposted from blog.csdn.net/weixin_52201738/article/details/130614387