Important note: everything below is installed on CentOS 7
1. Minimal Linux installation
Finish a minimal CentOS install as the base VM, shut the base down, clone it into a complete system, and name the clone hadoop40
2. Configure the hostname, IP, hosts file, etc.
Start hadoop40 and log in as root
==================================================
#Configure a static IP
[root@base ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33
#Default properties
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=60a2315f-9632-42ad-9fe0-e74d628c79ad
DEVICE=ens33
# Modified properties
ONBOOT=yes
BOOTPROTO=static
#New properties
# Static IP
IPADDR=192.168.1.40
# Default gateway, already configured in the VM's network settings
GATEWAY=192.168.1.2
DNS1=114.114.114.114
DNS2=8.8.8.8
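To apply the new settings, the legacy network service can be restarted (a sketch, assuming CentOS 7's network service and the ens33 interface above; a reboot works too):
# Restart networking so the static IP takes effect
sudo systemctl restart network
# Verify the address, route, and DNS
ip addr show ens33
ping -c 3 114.114.114.114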
2.2 Configure the hostname
#Edit the hostname configuration file
[root@base ~]# vi /etc/hostname
[root@base ~]# cat /etc/hostname
hadoop40
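Alternatively (not in the original steps), CentOS 7 can set the hostname immediately with hostnamectl, avoiding an edit plus reboot:
sudo hostnamectl set-hostname hadoop40
hostname    # confirm the change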
2.3 Configure hosts (map hostnames to IPs)
Configure hadoop40-43 now, ready for the Hadoop cluster
[root@base ~]# vi /etc/hosts
[root@base ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.40 hadoop40
192.168.1.41 hadoop41
192.168.1.42 hadoop42
192.168.1.43 hadoop43
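Once the other nodes actually exist, a quick loop can confirm every mapping resolves (a sanity check, not part of the original):
for host in hadoop40 hadoop41 hadoop42 hadoop43; do ping -c 1 $host; done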
=========================================================
Configure the hosts file on Windows as well
Path: C:\Windows\System32\drivers\etc
3. Install the required software tools
yum install -y epel-release
yum install -y psmisc nc net-tools rsync vim lrzsz ntp libzstd openssl-static tree iotop git
epel-release: EPEL (Extra Packages for Enterprise Linux) is a Fedora community project that provides high-quality packages for RHEL and derivatives such as CentOS and Scientific Linux. Installing it adds a third-party repository; the official repositories are fairly limited, and compiling everything yourself is painful, so EPEL solves both problems.
psmisc: provides the pstree, fuser, and killall commands
nc: netcat, a versatile networking tool
net-tools: provides netstat and ifconfig
rsync: remote file synchronization tool
vim: text editor
lrzsz: upload/download small files through the terminal (rz/sz)
ntp: Network Time Protocol, used to keep machine clocks synchronized
libzstd: Zstandard compression library
openssl-static: static OpenSSL libraries (certificate/TLS tooling)
tree: displays directory trees
iotop: a top-like tool for monitoring disk I/O, with a UI similar to top showing PID, user, and per-process I/O. Tools such as iostat and nmon mostly report per-device reads and writes; iotop makes it easy to see how each individual process uses I/O.
git: distributed version control
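A quick way to confirm the tools landed on the PATH (a sketch; package names and command names differ, so checking commands is simpler than querying RPMs):
for cmd in pstree nc netstat rsync vim rz tree iotop git; do command -v $cmd || echo "$cmd missing"; done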
4. Install the Hadoop environment on Linux
    4.0 Create a user, create the upload and install directories, then reboot and log in as the new user
# Create the user and set its password
[root@base ~]# useradd myhadoop
[root@base ~]# passwd myhadoop
Changing password for user myhadoop.
New password:
BAD PASSWORD: The password contains the user name in some form
Retype new password:
passwd: all authentication tokens updated successfully.
=====================================================
#Grant the user root privileges via sudo
[root@hadoop40 myhadoop]# visudo
[root@hadoop40 myhadoop]# cat /etc/sudoers
........
root      ALL=(ALL)     ALL
myhadoop  ALL=(ALL)     NOPASSWD:ALL
....
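visudo validates the file on save; if /etc/sudoers is ever edited another way, its syntax can be checked explicitly (a small aside, not in the original):
sudo visudo -c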
===================================================
#Create the directories and hand ownership (user and group) over to myhadoop
[root@base ~]# cd /opt
[root@base opt]# ll
total 0
[root@base opt]# mkdir software
[root@base opt]# mkdir module
[root@base opt]# ll
total 0
drwxr-xr-x. 2 root root 6 Nov 11 10:21 module
drwxr-xr-x. 2 root root 6 Nov 11 10:21 software
[root@base opt]# chown myhadoop:myhadoop -R module/ software/
[root@base opt]# ll
total 0
drwxr-xr-x. 2 myhadoop myhadoop 6 Nov 11 10:21 module
drwxr-xr-x. 2 myhadoop myhadoop 6 Nov 11 10:21 software
[root@base opt]#
#Stop the firewall and disable it at boot
[myhadoop@hadoop40 ~]$ sudo systemctl stop firewalld
[myhadoop@hadoop40 ~]$ sudo systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[myhadoop@hadoop40 ~]$ sudo systemctl is-enabled firewalld
disabled
[myhadoop@hadoop40 ~]$
# Reboot the system, then log in as myhadoop
[root@base opt]#reboot
#Log back in as the myhadoop user
[myhadoop@hadoop40 ~]$ who
myhadoop pts/0        2021-11-11 10:27 (192.168.1.9)
[myhadoop@hadoop40 ~]$
    4.1 Install the JDK
#As myhadoop, upload the JDK archive with Xftp (or any SFTP client)
[myhadoop@hadoop40 software]$ pwd
/opt/software
[myhadoop@hadoop40 software]$ ll
total 520600
-rw-rw-r--. 1 myhadoop myhadoop 338075860 Nov 11 10:29 hadoop-3.1.3.tar.gz
-rw-rw-r--. 1 myhadoop myhadoop 195013152 Nov 11 10:29 jdk-8u212-linux-x64.tar.gz
[myhadoop@hadoop40 software]$
===================================================================
# Extract the JDK into /opt/module
[myhadoop@hadoop40 software]$ tar -zxvf jdk-8u212-linux-x64.tar.gz -C /opt/module/
[myhadoop@hadoop40 jdk1.8.0_212]$ pwd
/opt/module/jdk1.8.0_212
==============================================================================
#Configure the JDK environment variables
[myhadoop@hadoop40 jdk1.8.0_212]$ sudo vim /etc/profile.d/my_evn.sh
[myhadoop@hadoop40 jdk1.8.0_212]$ sudo cat /etc/profile.d/my_evn.sh
export JAVA_HOME=/opt/module/jdk1.8.0_212
export PATH=$PATH:$JAVA_HOME/bin
# Reload the profile
[myhadoop@hadoop40 jdk1.8.0_212]$ source /etc/profile.d/my_evn.sh
-----------------------------------------------------
# Verify the configuration took effect
[myhadoop@hadoop40 jdk1.8.0_212]$ echo $PATH
/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/myhadoop/.local/bin:/home/myhadoop/bin:/opt/module/jdk1.8.0_212/bin
[myhadoop@hadoop40 jdk1.8.0_212]$
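With JAVA_HOME on the PATH, the JDK itself can also be checked (it should report version 1.8.0_212):
java -version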
    4.2 Install Hadoop
# Upload the Hadoop archive (done together with the JDK above)
# Extract Hadoop into /opt/module
tar -zxvf hadoop-3.1.3.tar.gz -C /opt/module/
[myhadoop@hadoop40 hadoop-3.1.3]$ pwd
/opt/module/hadoop-3.1.3
==========================================
# Configure the environment variables
[myhadoop@hadoop40 hadoop-3.1.3]$ sudo vim /etc/profile.d/my_evn.sh
[myhadoop@hadoop40 hadoop-3.1.3]$ sudo cat /etc/profile.d/my_evn.sh
#java
export JAVA_HOME=/opt/module/jdk1.8.0_212
export PATH=$PATH:$JAVA_HOME/bin
#hadoop
export HADOOP_HOME=/opt/module/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
[myhadoop@hadoop40 hadoop-3.1.3]$
============================================
# Reload the profile
[myhadoop@hadoop40 hadoop-3.1.3]$ source /etc/profile.d/my_evn.sh
# Verify the result
[myhadoop@hadoop40 hadoop-3.1.3]$ hadoop version
Hadoop 3.1.3
Source code repository https://gitbox.apache.org/repos/asf/hadoop.git -r ba631c436b806728f8ec2f54ab1e289526c90579
Compiled by ztang on 2019-09-12T02:47Z
Compiled with protoc 2.5.0
From source with checksum ec785077c385118ac91aadde5ec9799
This command was run using /opt/module/hadoop-3.1.3/share/hadoop/common/hadoop-common-3.1.3.jar
[myhadoop@hadoop40 hadoop-3.1.3]$
5. Run Hadoop in local (standalone) mode
#Word-count mini example
# cd to user myhadoop's home directory, create an input directory and aa.txt, and write some content into it
[myhadoop@hadoop40 ~]$ cd /home/myhadoop
[myhadoop@hadoop40 ~]$ pwd
/home/myhadoop
[myhadoop@hadoop40 ~]$ mkdir input
[myhadoop@hadoop40 ~]$ vim input/aa.txt
[myhadoop@hadoop40 ~]$ cat input/aa.txt
aa bb cc dd
ss
dd
ff ff
gg
aa
bb cc
================================
# Run the word-count example
[myhadoop@hadoop40 ~]$ hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount input out
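Note: MapReduce refuses to start if the output directory already exists. To rerun the example (an aside, not in the original), delete it first:
# avoids FileAlreadyExistsException on the 'out' directory
rm -rf out
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount input out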
===============
#Result
[myhadoop@hadoop40 ~]$ ll
total 0
drwxrwxr-x. 2 myhadoop myhadoop 20 Nov 11 10:58 input
drwxr-xr-x. 2 myhadoop myhadoop 88 Nov 11 10:59 out
[myhadoop@hadoop40 ~]$ cd out/
[myhadoop@hadoop40 out]$ ll
total 4
-rw-r--r--. 1 myhadoop myhadoop 35 Nov 11 10:59 part-r-00000
-rw-r--r--. 1 myhadoop myhadoop  0 Nov 11 10:59 _SUCCESS
[myhadoop@hadoop40 out]$ cat part-r-00000
aa	2
bb	2
cc	2
dd	2
ff	2
gg	1
ss	1
[myhadoop@hadoop40 out]$
# Starting individual Hadoop daemons one by one (not demonstrated here)
6. Hadoop fully distributed mode
Cluster plan:

       | hadoop41 | hadoop42 | hadoop43
-------+----------+----------+----------
HDFS   | NN, DN   | DN       | 2NN, DN
YARN   | NM       | NM, RM   | NM

(NN = NameNode, DN = DataNode, 2NN = SecondaryNameNode, NM = NodeManager, RM = ResourceManager)
    6.1 Edit the five required Hadoop configuration files
        Configure core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!--指定HDFS中NameNode的地址 -->
????<property>
????????<name>fs.defaultFS</name>
????????<value>hdfs://hadoop41:8020</value>
</property>
<!-- 指定Hadoop运行时产生文件的存储目录 -->
????<property>
????????<name>hadoop.tmp.dir</name>
????????<value>/opt/module/hadoop-3.1.3/data</value>
</property>
<!--??通过web界面操作hdfs的权限 -->
<property>
????????<name>hadoop.http.staticuser.user</name>
????????<value>myhadoop</value>
</property>
<!-- 后面hive的兼容性配置??-->
????<property>
????????<name>hadoop.proxyuser.myhadoop.hosts</name>
????????<value>*</value>
????</property>
????<property>
????????<name>hadoop.proxyuser.myhadoop.groups</name>
????????<value>*</value>
</property>
</configduration>
# The actual commands
[myhadoop@hadoop40 hadoop]$ pwd
/opt/module/hadoop-3.1.3/etc/hadoop
--------------------------------------
[myhadoop@hadoop40 hadoop]$ vim core-site.xml
[myhadoop@hadoop40 hadoop]$ cat core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <!-- Address of the HDFS NameNode -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop41:8020</value>
    </property>
    <!-- Base directory for the files Hadoop generates at runtime -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/module/hadoop-3.1.3/data</value>
    </property>
    <!-- Static user for HDFS operations done through the web UI -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>myhadoop</value>
    </property>
    <!-- Compatibility settings for Hive later on -->
    <property>
        <name>hadoop.proxyuser.myhadoop.hosts</name>
        <value>*</value>
    </property>
    <property>
        <name>hadoop.proxyuser.myhadoop.groups</name>
        <value>*</value>
    </property>
</configuration>
[myhadoop@hadoop40 hadoop]$
Configure hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop43:9868</value>
    </property>
</configuration>
# The actual commands
[myhadoop@hadoop40 hadoop]$ vim hdfs-site.xml
[myhadoop@hadoop40 hadoop]$ cat hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop43:9868</value>
    </property>
</configuration>
[myhadoop@hadoop40 hadoop]$
Configure mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
# The actual commands
[myhadoop@hadoop40 hadoop]$ vim mapred-site.xml
[myhadoop@hadoop40 hadoop]$ cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
</configuration>
[myhadoop@hadoop40 hadoop]$
Configure yarn-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
    <!-- How reducers fetch data -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- Address of the YARN ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop42</value>
    </property>
    <!-- Environment variables containers may inherit from the NodeManager;
         for MapReduce applications, HADOOP_MAPRED_HOME must be added to the defaults -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Keep YARN from killing containers that exceed the physical/virtual memory checks -->
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- Compatibility settings for Hive later on -->
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>4096</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>4096</value>
    </property>
</configuration>
# The actual commands
[myhadoop@hadoop40 hadoop]$ vim yarn-site.xml
[myhadoop@hadoop40 hadoop]$ cat yarn-site.xml
<?xml version="1.0"?>
<configuration>
    <!-- How reducers fetch data -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- Address of the YARN ResourceManager -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop42</value>
    </property>
    <!-- Environment variables containers may inherit from the NodeManager;
         for MapReduce applications, HADOOP_MAPRED_HOME must be added to the defaults -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Keep YARN from killing containers that exceed the physical/virtual memory checks -->
    <property>
        <name>yarn.nodemanager.pmem-check-enabled</name>
        <value>false</value>
    </property>
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
    <!-- Compatibility settings for Hive later on -->
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>512</value>
    </property>
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>4096</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>4096</value>
    </property>
</configuration>
[myhadoop@hadoop40 hadoop]$
Configure workers
hadoop41
hadoop42
hadoop43
# The actual commands
#Type these hostnames in by hand; copy-pasting can drag in trailing spaces or other invisible characters (see the whitespace check below)
[myhadoop@hadoop40 hadoop]$ vim workers
[myhadoop@hadoop40 hadoop]$ cat workers
hadoop41
hadoop42
hadoop43
[myhadoop@hadoop40 hadoop]$
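One way to spot invisible characters that copy-pasting may have introduced (assumes GNU cat):
# Tabs show as ^I and line ends as $; each line should be exactly the hostname followed by $
cat -A workers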
    6.2 Configure passwordless SSH
# Shut down hadoop40, then clone it into hadoop41, hadoop42, and hadoop43
# Start hadoop41, hadoop42, and hadoop43
# On each clone, change the hostname and the static IP
==================================
vim /etc/hostname
[myhadoop@hadoop41 ~]$ hostname
hadoop41
[myhadoop@hadoop41 ~]$
*****************************************************
vim /etc/sysconfig/network-scripts/ifcfg-ens33
[myhadoop@hadoop41 ~]$ sudo cat /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
PROXY_METHOD=none
BROWSER_ONLY=no
BOOTPROTO=static
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=60a2315f-9632-42ad-9fe0-e74d628c79ad
DEVICE=ens33
ONBOOT=yes
IPADDR=192.168.1.41
GATEWAY=192.168.1.2
DNS1=114.114.114.114
DNS2=8.8.8.8
[myhadoop@hadoop41 ~]$
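Since hadoop41-43 are clones, they share the original NIC's UUID. If NetworkManager complains about duplicate connections, one common workaround (an assumption, not part of the original walkthrough) is to drop the UUID line and restart networking:
# Hypothetical cleanup for a cloned NIC; back the file up first
sudo sed -i '/^UUID=/d' /etc/sysconfig/network-scripts/ifcfg-ens33
sudo systemctl restart network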
#Reboot the host, then configure SSH
# Generate a key pair (press Enter three times, entering nothing)
========================
[myhadoop@hadoop41 ~]$ ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/myhadoop/.ssh/id_rsa):
Created directory '/home/myhadoop/.ssh'.
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/myhadoop/.ssh/id_rsa.
Your public key has been saved in /home/myhadoop/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:h+TNs1/44FPIcmswi1gCbb1uSKMc2KQHFb1BVepIm78 myhadoop@hadoop41
The key's randomart image is:
+---[RSA 2048]----+
|    .+.....      |
|    . o  .       |
|   . ..+o        |
|  . o.+B.+       |
|   * o+ S.=. .   |
|  o + +.o.+o+..  |
|   o + B...*oo.  |
|    o o +..o++   |
|       .E  .o..  |
+----[SHA256]-----+
******************************************************** Copy the public key to each host
[myhadoop@hadoop41 ~]$ ssh-copy-id hadoop41
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/home/myhadoop/.ssh/id_rsa.pub"
The authenticity of host 'hadoop41 (192.168.1.41)' can't be established.
ECDSA key fingerprint is SHA256:qXwGkWeUQTyCeB/11yxU7S1O97QjnNQYIuOsZIrRREI.
ECDSA key fingerprint is MD5:cb:37:5a:78:4e:b3:dd:53:1d:fb:46:5f:28:97:8a:0b.
Are you sure you want to continue connecting (yes/no)? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
myhadoop@hadoop41's password:
Number of key(s) added: 1
Now try logging into the machine, with:   "ssh 'hadoop41'"
and check to make sure that only the key(s) you wanted were added.
[myhadoop@hadoop41 ~]$
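The keygen-and-copy step has to be repeated on every node so each host can reach all the others; a loop keeps it short (a sketch):
# Run on each of hadoop41, hadoop42, hadoop43 after ssh-keygen
for host in hadoop41 hadoop42 hadoop43; do ssh-copy-id $host; done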
=================================================
#Check the result (note: the first command below mistypes the hostname as hadoop2)
[myhadoop@hadoop43 ~]$ ssh hadoop2
ssh: Could not resolve hostname hadoop2: Name or service not known
[myhadoop@hadoop43 ~]$ ssh hadoop42
Last login: Thu Nov 11 14:06:24 2021 from hadoop41
[myhadoop@hadoop42 ~]$ ssh hadoop41
Last failed login: Thu Nov 11 14:08:10 CST 2021 from hadoop42 on ssh:notty
There was 1 failed login attempt since the last successful login.
Last login: Thu Nov 11 13:57:01 2021 from 192.168.1.9
[myhadoop@hadoop41 ~]$ ssh hadoop43
Last login: Thu Nov 11 14:08:47 2021 from hadoop42
[myhadoop@hadoop43 ~]$ ssh hadoop41
Last login: Thu Nov 11 14:10:32 2021 from hadoop42
[myhadoop@hadoop41 ~]$
    6.3 Write small helper scripts: cluster rsync and jps
    #Script to wipe the data Hadoop generates
[myhadoop@hadoop41 mybin]$ cat clear.sh
#!/bin/bash
#Wipe the data and logs Hadoop generated on every node
set -u
set -e
for host in hadoop41 hadoop42 hadoop43
do
    echo "========================== $host ======================"
    ssh $host rm -rf $HADOOP_HOME/data $HADOOP_HOME/logs
    # Quote the glob so /tmp/* expands on the remote host, not locally
    ssh $host "sudo rm -rf /tmp/*"
done
[myhadoop@hadoop41 mybin]$
#Script to sync and distribute files across the cluster
[myhadoop@hadoop41 mybin]$ cat myrsync
#!/bin/bash
set -u
set -e
# Check the argument count
if [ $# -lt 1 ];then
    echo "Not Enough Arguments!"
    exit 1
fi
# Loop over the cluster hosts
for host in hadoop41 hadoop42 hadoop43
do
    echo "======================= $host ========================"
    for file in "$@"
    do
        if [ -e "$file" ];then
            pdir=$(cd -P "$(dirname "$file")"; pwd)
            fname=$(basename "$file")
            ssh $host "mkdir -p $pdir"
            rsync -av "$pdir/$fname" $host:"$pdir"
        else
            echo "$file does not exist!"
        fi
    done
done
[myhadoop@hadoop41 mybin]$
#Script to check the daemons running on each node
[myhadoop@hadoop41 mybin]$ cat jpsall
#!/bin/bash
set -e
set -u
# List the Java processes running on each host
for host in hadoop41 hadoop42 hadoop43
do
    echo "======================= $host ==============================="
    ssh $host jps
done
[myhadoop@hadoop41 mybin]$
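For these scripts to be callable by name, they need execute permission and ~/mybin must be on the PATH (a sketch, assuming the mybin directory used above):
chmod +x ~/mybin/clear.sh ~/mybin/myrsync ~/mybin/jpsall
# Append mybin to PATH if it is not already there
echo 'export PATH=$PATH:$HOME/mybin' >> ~/.bashrc
source ~/.bashrc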
    6.4 Start the cluster

# Format the NameNode (required only the first time the cluster is started)
[myhadoop@hadoop41 mybin]$ hdfs namenode -format
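If the NameNode ever has to be reformatted (for example after a failed first start), wipe the old data and logs on every node first; otherwise the DataNodes keep the old clusterID and refuse to join. That is exactly what the clear.sh script above is for:
# Only when reformatting, never on a healthy cluster
clear.sh
hdfs namenode -format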
****************************************************************
#Start HDFS (NameNode, DataNodes, and SecondaryNameNode)
[myhadoop@hadoop41 mybin]$ start-dfs.sh
Starting namenodes on [hadoop41]
Starting datanodes
hadoop42: WARNING: /opt/module/hadoop-3.1.3/logs does not exist. Creating.
hadoop43: WARNING: /opt/module/hadoop-3.1.3/logs does not exist. Creating.
Starting secondary namenodes [hadoop43]
[myhadoop@hadoop41 mybin]$
****************************************************************
#Start YARN (ResourceManager and NodeManagers)
[myhadoop@hadoop41 mybin]$ start-yarn.sh
Starting resourcemanager
Starting nodemanagers
[myhadoop@hadoop41 mybin]$
#Verify the result
[myhadoop@hadoop41 mybin]$ jpsall
======================= hadoop41 ===============================
4551 DataNode
4392 NameNode
5000 Jps
4857 NodeManager
======================= hadoop42 ===============================
3361 NodeManager
3555 Jps
2853 DataNode
3045 ResourceManager
======================= hadoop43 ===============================
3104 Jps
2963 NodeManager
2756 DataNode
2873 SecondaryNameNode
[myhadoop@hadoop41 mybin]$
6.5 Verify in the browser; line 61 of the dfs-dust.js script has a bug that needs a small fix
#Browser verification addresses
#HDFS storage (NameNode web UI)
http://hadoop41:9870/dfshealth.html#tab-overview
#Running applications (YARN)
http://hadoop42:8088/cluster
#SecondaryNameNode status
http://hadoop43:9868/status.html
# Job history server
http://hadoop41:19888/jobhistory
#Hadoop 3.1.3 has a small bug in this file
/opt/module/hadoop-3.1.3/share/hadoop/hdfs/webapps/static/dfs-dust.js
vim dfs-dust.js
Line 61 of dfs-dust.js currently reads:
   'date_tostring' : function (v) {
      return moment(Number(v)).format('ddd MMM DD HH:mm:ss ZZ YYYY');
    },
Change the function's return value as follows:
'date_tostring' : function (v) {
  return new Date(Number(v)).toLocaleString();
},
**************************************************************
#After the fix, sync it out to the other nodes
[myhadoop@hadoop41 share]$ myrsync hadoop/
======================= hadoop41 ========================
sending incremental file list
sent 16,133 bytes  received 88 bytes  10,814.00 bytes/sec
total size is 372,262,087  speedup is 22,949.39
======================= hadoop42 ========================
sending incremental file list
hadoop/hdfs/webapps/static/
hadoop/hdfs/webapps/static/dfs-dust.js
sent 16,882 bytes  received 146 bytes  34,056.00 bytes/sec
total size is 372,262,087  speedup is 21,861.76
======================= hadoop43 ========================
sending incremental file list
hadoop/hdfs/webapps/static/
hadoop/hdfs/webapps/static/dfs-dust.js
sent 16,882 bytes  received 146 bytes  34,056.00 bytes/sec
total size is 372,262,087  speedup is 21,861.76
[myhadoop@hadoop41 share]$
    #Run the mini example on the cluster
In the browser UI on hadoop41, create an /input directory and upload aa.txt (a shell alternative follows the file contents below)
========================================= aa.txt contents
aa
dd
ds
ff ff
dd
gg asf
aa
aa dd
========================================
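The same upload can be done from the shell instead of the web UI (assuming aa.txt with the contents above is saved in the current directory):
hadoop fs -mkdir /input
hadoop fs -put aa.txt /input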
#Run the command from the hadoop41 shell
[myhadoop@hadoop41 share]$ hadoop jar hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount /input /out
Result ==================================
aa	3
asf	1
dd	3
ds	1
ff	2
gg	1
    6.6 Configure the history server and log aggregation
# Job history server
# Add the following to mapred-site.xml
<!-- History server address -->
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop41:10020</value>
</property>
<!-- History server web UI address -->
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop41:19888</value>
</property>
#Check the configured contents
[myhadoop@hadoop41 hadoop]$ vim mapred-site.xml
[myhadoop@hadoop41 hadoop]$ cat mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<!-- Run MapReduce on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
<!-- History server address -->
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>hadoop41:10020</value>
</property>
<!-- History server web UI address -->
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>hadoop41:19888</value>
</property>
</configuration>
[myhadoop@hadoop41 hadoop]$
-----------------------------------------------
#Distribute to the other cluster hosts
[myhadoop@hadoop41 hadoop]$ myrsync ./
======================= hadoop41 ========================
sending incremental file list
sent 865 bytes  received 13 bytes  1,756.00 bytes/sec
total size is 108,315  speedup is 123.37
======================= hadoop42 ========================
sending incremental file list
./
mapred-site.xml
sent 1,400 bytes  received 51 bytes  2,902.00 bytes/sec
total size is 108,315  speedup is 74.65
======================= hadoop43 ========================
sending incremental file list
./
mapred-site.xml
sent 1,400 bytes  received 51 bytes  2,902.00 bytes/sec
total size is 108,315  speedup is 74.65
[myhadoop@hadoop41 hadoop]$
#Start the history server
[myhadoop@hadoop41 hadoop]$ mapred --daemon start historyserver
[myhadoop@hadoop41 hadoop]$ jpsall
======================= hadoop41 ===============================
5504 JobHistoryServer
4551 DataNode
4392 NameNode
4857 NodeManager
5577 Jps
======================= hadoop42 ===============================
3361 NodeManager
3985 Jps
2853 DataNode
3045 ResourceManager
======================= hadoop43 ===============================
3442 Jps
2963 NodeManager
2756 DataNode
2873 SecondaryNameNode
[myhadoop@hadoop41 hadoop]$
# Enable log aggregation
# Add the following to yarn-site.xml
<!-- Enable log aggregation -->
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<!-- URL of the log server -->
<property>
    <name>yarn.log.server.url</name>
    <value>http://hadoop41:19888/jobhistory/logs</value>
</property>
<!-- Keep logs for 7 days -->
<property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
</property>
# Distribute to the other hosts
[myhadoop@hadoop41 hadoop]$ myrsync ./
======================= hadoop41 ========================
sending incremental file list
sent 866 bytes  received 13 bytes  1,758.00 bytes/sec
total size is 108,653  speedup is 123.61
======================= hadoop42 ========================
sending incremental file list
./
yarn-site.xml
sent 1,921 bytes  received 57 bytes  1,318.67 bytes/sec
total size is 108,653  speedup is 54.93
======================= hadoop43 ========================
sending incremental file list
./
yarn-site.xml
sent 1,921 bytes  received 57 bytes  3,956.00 bytes/sec
total size is 108,653  speedup is 54.93
[myhadoop@hadoop41 hadoop]$
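yarn-site.xml is only read when the daemons start, so after distributing the change, restart YARN and the history server for it to take effect:
stop-yarn.sh
start-yarn.sh
mapred --daemon stop historyserver
mapred --daemon start historyserver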
-----------------------------------------------------------------
#Verify the result:
#Restart the cluster, rerun the WordCount example, then open the job's entry in the history UI and click through to its logs in the browser
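A possible restart-and-rerun sequence, sketched with the paths used above (the old /out must be removed first, or a new output directory chosen):
stop-yarn.sh
stop-dfs.sh
start-dfs.sh
start-yarn.sh
mapred --daemon start historyserver
# ignore the error if /out does not exist yet
hadoop fs -rm -r /out
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount /input /out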