一、解压
[root@master app]# tar -zxvf hadoop-2.7.7.tar.gz -C /usr/local/src/
[root@master src]# mv hadoop-2.7.7/ hadoop
[root@master src]# ls
hadoop jdk
[root@master src]#
进入解压目录(若上一步尚未重命名,可先执行下面的 mv;已重命名则直接 ls 确认):
[root@master src]# mv hadoop-2.7.7 hadoop
[root@master src]# ls
hadoop jdk
[root@master src]# cd hadoop/
[root@master hadoop]# ls
bin etc include lib libexec LICENSE.txt NOTICE.txt README.txt sbin share
[root@master hadoop]#
二、修改配置
1、hadoop-env.sh(位于 $HADOOP_HOME/etc/hadoop/ 目录下)
[root@master hadoop]# vi hadoop-env.sh
# The java implementation to use.
export JAVA_HOME=/usr/local/src/jdk
export HADOOP_CONF_DIR=/usr/local/src/hadoop/etc/hadoop
[root@master hadoop]# source hadoop-env.sh
2、vi core-site.xml
[root@master hadoop]# vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/usr/local/src/hadoop/tmp</value>
</property>
</configuration>
3、vi hdfs-site.xml
[root@master hadoop]# vi hdfs-site.xml
<property>
<name>dfs.namenode.name.dir</name>
<value>/usr/local/src/hadoop/tmp/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/usr/local/src/hadoop/tmp/disk1,/usr/local/src/hadoop/tmp/disk2</value>
</property>
4、vi yarn-site.xml
[root@master hadoop]# vi yarn-site.xml
<property>
<name>yarn.resourcemanager.hostname</name>
<value>master</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
5、mv mapred-site.xml.template mapred-site.xml
[root@master hadoop]# mv mapred-site.xml.template mapred-site.xml
[root@master hadoop]# vi mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
6、vi slaves
[root@master hadoop]# vi slaves
master
slave1
slave2
三、修改环境变量
[root@master hadoop]# vi /etc/profile
JAVA_HOME=/usr/local/src/jdk
HADOOP_HOME=/usr/local/src/hadoop
PATH=$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
export PATH JAVA_HOME HADOOP_HOME
[root@master hadoop]# source /etc/profile
四、格式化及启动
[root@master namenode]# hdfs namenode -format
[root@master namenode]# start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-root-namenode-master.out
master: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-root-datanode-master.out
slave1: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-root-datanode-slave1.out
slave2: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-root-datanode-slave2.out
Starting secondary namenodes [0.0.0.0]
The authenticity of host '0.0.0.0 (0.0.0.0)' can't be established.
ECDSA key fingerprint is SHA256:jPI2KsJoefk2Mg7GiPu/gjOpdSzPjieI9FPlmir8uHM.
ECDSA key fingerprint is MD5:81:ad:2e:74:f9:c0:de:8b:a6:d9:f4:79:cd:78:8f:6f.
Are you sure you want to continue connecting (yes/no)? yes
0.0.0.0: Warning: Permanently added '0.0.0.0' (ECDSA) to the list of known hosts.
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-root-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-root-resourcemanager-master.out
slave2: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-root-nodemanager-slave2.out
slave1: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-root-nodemanager-slave1.out
master: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-root-nodemanager-master.out
[root@master namenode]# jps
3330 DataNode
3794 NodeManager
3509 SecondaryNameNode
3685 ResourceManager
4103 Jps
3197 NameNode
[root@master namenode]#
[root@slave1 hadoop]# jps
3243 Jps
3005 DataNode
3119 NodeManager
[root@slave1 hadoop]#
[root@slave2 hadoop]# jps
3256 Jps
3017 DataNode
3131 NodeManager
[root@slave2 hadoop]#