1、HDFS NameNode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh start namenode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh stop namenode
bin/hdfs haadmin -getServiceState n1
2、HDFS DataNode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh start datanode
/usr/local/fqlhadoop/hadoop/sbin/hadoop-daemon.sh stop datanode
bin/yarn rmadmin -getServiceState rm1  # NOTE: this checks Yarn ResourceManager HA state (see section 3), not DataNode
3、Yarn ResourceManager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start resourcemanager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop resourcemanager
4、Yarn NodeManager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start nodemanager
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop nodemanager
5、Yarn WebAppProxyServer
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh stop proxyserver
/usr/local/fqlhadoop/hadoop/sbin/yarn-daemon.sh start proxyserver
X、Yarn 运维命令
yarn application -list
yarn application -list -queue root.lx_fdw
yarn application -list -queue lx_fdw -appStates ACCEPTED
yarn application -list -queue lx_fdw -appStates ACCEPTED | grep thrift_XXX.hadoop.com_10017
yarn application -kill application_1652347896268_18628697
6、Hive
服务端启动
6.1 hive metastore
cd /usr/local/fqlhadoop/hive/
bin/hive --service metastore -p 3316 &
ps -ef | grep 'org.apache.hadoop.hive.metastore.HiveMetaStore' | grep -v grep | awk '{print $2}' | xargs kill -9
6.2 hive service
bin/hive --service hiveserver2 &
关联的配置,vim conf/hive-site.xml
hive.server2.thrift.port、hive.server2.thrift.bind.host
客户端连接
连接指定 Hive metastore 服务端
hive --hiveconf hive.metastore.uris=thrift://XXX.hadoop.com:3316
7、Spark
spark-sql --master=yarn --queue lx_etl --driver-memory 1g --driver-java-options '-XX:MetaspaceSize=1g -XX:MaxMetaspaceSize=1g' --num-executors 1 --executor-memory 1g --executor-cores 1 --conf spark.yarn.am.memory=2048m --hiveconf hive.cli.print.header=false
## Spark HistoryServer
/usr/local/fqlhadoop/spark/sbin/start-history-server.sh
/usr/local/fqlhadoop/spark/sbin/start-history-server.sh hdfs://hacluster/sparklog
/usr/local/fqlhadoop/spark/sbin/stop-history-server.sh
进程名称:
org.apache.spark.deploy.history.HistoryServer
## Yarn JobHistoryServer
/usr/local/fqlhadoop/hadoop/sbin/mr-jobhistory-daemon.sh start historyserver
8、HBase
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh stop master
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh start master
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh stop regionserver
/usr/local/fqlhadoop/hbase/bin/hbase-daemon.sh start regionserver