Hadoop install

ENV

USER
groupadd hadoop -g 1001
useradd hdfs -g hadoop -u 1001

Java
/usr/bin/java -> /usr/java/jdk1.8.0_361/jre/bin/java
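
A minimal sketch of putting that link in place (my addition), assuming the JDK 8 tarball was unpacked under /usr/java:
ln -s /usr/java/jdk1.8.0_361/jre/bin/java /usr/bin/java
java -version    # expect 1.8.0_361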

hadoop
ln -s /opt/hadoop-3.3.0 /opt/hadoop

profile
# Java, 20201010, Adam
export JAVA_HOME=/usr/java/jdk1.8.0_361
export PATH=$PATH:$JAVA_HOME/bin

# hadoop, 20201010, Adam
export HADOOP_HOME=/opt/hadoop
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
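
A quick way to confirm the profile entries take effect (my addition, not in the original notes):
source /etc/profile
java -version     # expect 1.8.0_361
hadoop version    # expect Hadoop 3.3.0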

Hadoop Configuration

Set the JAVA_HOME parameter in the Hadoop environment scripts.

# Hadoop runs as daemon processes and does not pick up the JAVA_HOME set in /etc/profile
# /opt/hadoop/etc/hadoop/
# hadoop-env.sh, mapred-env.sh, yarn-env.sh
cp hadoop-env.sh hadoop-env.sh.20210409
cp mapred-env.sh mapred-env.sh.20210409
cp yarn-env.sh yarn-env.sh.20210409
for f in hadoop-env.sh mapred-env.sh yarn-env.sh; do
  echo '
# hdfs, 20210409, Adam
export JAVA_HOME=/usr/java/jdk1.8.0_361' >> $f
done
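
To confirm the append landed in each script (my addition):
tail -3 hadoop-env.sh mapred-env.sh yarn-env.sh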

Setup

core-site.xml (Common component)
<configuration>
  <property>
    <name>fs.defaultFS</name>
    <value>hdfs://g2-hdfs-01:9000</value>
  </property>
  <property>
    <name>io.file.buffer.size</name>
    <value>131072</value>
  </property>
  <property>
    <!-- directory for temporary files -->
    <name>hadoop.tmp.dir</name>
    <value>/u01/hdfs/tmp</value>
  </property>
</configuration>
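
Once the file is in place, the effective value can be read back with hdfs getconf (a quick check, not in the original notes):
hdfs getconf -confKey fs.defaultFS    # expect hdfs://g2-hdfs-01:9000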
hdfs-site.xml (HDFS component)
<configuration>
  <property>
    <name>dfs.namenode.http-address</name>
    <value>g2-hdfs-01:9870</value>
  </property>
  <property>
    <!-- secondary namenode address -->
    <name>dfs.namenode.secondary.http-address</name>
    <value>g2-hdfs-02:9870</value>
  </property>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/u01/hdfs/dfs/nn</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/u01/hdfs/dfs/dn</value>
  </property>
  <property> 
    <name>dfs.webhdfs.enabled</name> 
    <value>true</value> 
  </property>
  <property>
    <!-- false: files can be created on HDFS without permission checks -->
    <name>dfs.permissions</name>
    <value>false</value>
  </property>
</configuration>
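
The name/data directories (and the tmp dir from core-site.xml) must exist and be writable by the hdfs user; a sketch assuming the /u01/hdfs layout above:
mkdir -p /u01/hdfs/tmp /u01/hdfs/dfs/nn /u01/hdfs/dfs/dn
chown -R hdfs:hadoop /u01/hdfs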

-- del
  <property>
    <!-- replication factor, left at the default of 3 -->
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.blocksize</name>
    <value>268435456</value>
  </property>
  <property>
    <name>dfs.namenode.handler.count</name>
    <value>100</value>
  </property>
mapred-site.xml
<configuration>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value> 
  </property>
</configuration>

-- del
  <property>
     <name>mapreduce.jobhistory.address</name>
     <value>g2-hdfs-01:10020</value>
  </property>
  <property>
     <name>mapreduce.jobhistory.webapp.address</name>
     <value>g2-hdfs-01:19888</value>
  </property>
  <property>
     <name>mapreduce.application.classpath</name>
     <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
  </property>
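
If the jobhistory properties above are kept rather than deleted, the history server runs as its own daemon; in Hadoop 3 it is started with:
mapred --daemon start historyserver    # web UI then at g2-hdfs-01:19888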
yarn-site.xml
<configuration>
  <property>
    <name>yarn.resourcemanager.hostname</name>
    <value>g2-hdfs-01</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services</name>  
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>g2-hdfs-01:8088</value>
  </property>
  <property>
    <name>yarn.scheduler.maximum-allocation-mb</name>
    <value>32768</value>
  </property>
  <property>
    <name>yarn.nodemanager.vmem-check-enabled</name>
    <value>false</value>
  </property>
  <property>
    <name>yarn.nodemanager.env-whitelist</name>
    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
  </property>
</configuration>
A related note on exposing the ResourceManager web UI externally:
  <property>
    <name>yarn.resourcemanager.webapp.address</name>
    <value>hadoop01/192.168.44.5:8088</value>
    <description>To make the web UI reachable from outside, set this to the real external IP; otherwise it defaults to localhost:8088</description>
  </property>

yarn.resourcemanager.hostname
Hostname of the YARN ResourceManager; if unset, the web UI always shows 0 Active Nodes
yarn.scheduler.maximum-allocation-mb
Maximum memory that can be allocated to a single container request, in MB; the default is 8192 MB
yarn.nodemanager.aux-services
How reducers fetch map output (the mapreduce_shuffle service)
yarn.nodemanager.vmem-check-enabled
false = skip the virtual-memory check
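
Whether the addresses are right and the NodeManagers have registered can be probed through the RM REST API (my sketch, using the g2-hdfs-01:8088 address configured above):
curl -s http://g2-hdfs-01:8088/ws/v1/cluster/metrics    # "activeNodes" should be > 0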


# workers
g2-hdfs-01
g2-hdfs-02
..
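
Every host listed in workers needs the same configuration; one way to push it out (a sketch, assuming passwordless ssh between the nodes):
for h in g2-hdfs-01 g2-hdfs-02; do
  rsync -a /opt/hadoop/etc/hadoop/ $h:/opt/hadoop/etc/hadoop/
done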

INIT

# chown
chown -R hdfs:hadoop /opt/hadoop*
# nn
/opt/hadoop/bin/hdfs namenode -format
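
A successful format writes a VERSION file under the NameNode directory from hdfs-site.xml; a quick check (my addition):
cat /u01/hdfs/dfs/nn/current/VERSION    # note the clusterID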

Start

## Start: as root (found in practice that the services must be stopped as the hdfs user; root cannot stop them)
/opt/hadoop/sbin/start-dfs.sh
/opt/hadoop/sbin/start-yarn.sh

/opt/hadoop/sbin/stop-dfs.sh
/opt/hadoop/sbin/stop-yarn.sh

http://mc:9870   # hdfs
http://mc:8088   # yarn
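
Once started, verify the daemons are up (standard checks, my addition):
jps                      # on the master, expect NameNode, DataNode, ResourceManager, NodeManager
hdfs dfsadmin -report    # live DataNodes and capacity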

## Start/stop a single daemon
# /opt/hadoop/bin
hdfs --daemon start datanode
hdfs --daemon start namenode

yarn --daemon start nodemanager
yarn --daemon stop resourcemanager
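
A minimal smoke test once everything is up (my addition; the examples jar ships with the Hadoop 3.3.0 tarball):
hdfs dfs -mkdir -p /tmp/test
hdfs dfs -put /etc/hosts /tmp/test/
hdfs dfs -ls /tmp/test
yarn jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.3.0.jar pi 2 10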