Hadoop 3.1 changed its configuration file layout compared with the old Hadoop 2.8. I set this cluster up a while back. In Hadoop 2.8, the Java path and the Hadoop paths were configured together in /path/etc/hadoop/yarn-env.sh; in Hadoop 3.1 that configuration moves into hadoop-env.sh. Also, the old slaves file listing the cluster nodes has been renamed to workers.
I stumbled right into this pit last night, so here are my configs.
Java version: 1.8.0_221
User: hadoop
Hadoop installed under /opt/
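Since slaves is gone in 3.1, the node list belongs in etc/hadoop/workers, one hostname per line. A minimal sketch, assuming two extra worker hosts (worker1 and worker2 are placeholders; only ec1069fcab62 appears in my actual setup):
ec1069fcab62
worker1
worker2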
core-site.xml configuration
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://ec1069fcab62</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>file:/home/hadoop/tmp</value>
</property>
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.hadoop.groups</name>
<value>*</value>
</property>
</configuration>
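After editing core-site.xml, a quick sanity check is to ask Hadoop what it actually parsed; getconf is a stock HDFS subcommand, nothing specific to my setup:
hdfs getconf -confKey fs.defaultFS
# should print hdfs://ec1069fcab62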
hadoop-env.sh configuration
export JAVA_HOME=/usr/local/java/jdk1.8.0_221
export HADOOP_HOME=/opt/hadoop
export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
export HADOOP_HEAPSIZE_MAX=1024m
export HADOOP_HEAPSIZE_MIN=512m
# export HADOOP_JAAS_DEBUG=true
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
# export HADOOP_CLIENT_OPTS=""
# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
# export HADOOP_USER_CLASSPATH_FIRST="yes"
# export HADOOP_USE_CLIENT_CLASSLOADER=true
# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
# export HADOOP_OPTIONAL_TOOLS="hadoop-aliyun,hadoop-aws,hadoop-azure-datalake,hadoop-azure,hadoop-kafka,hadoop-openstack"
# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o ConnectTimeout=10s"
# export HADOOP_SSH_PARALLEL=10
# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
export HADOOP_IDENT_STRING=$USER
# export HADOOP_STOP_TIMEOUT=5
export HADOOP_PID_DIR=/home/hadoop/tmp
export HADOOP_ROOT_LOGGER=INFO,console
export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
# export HADOOP_NICENESS=0
# export HADOOP_POLICYFILE="hadoop-policy.xml"
# GC settings referenced by HDFS_NAMENODE_OPTS below; must be set for the GC log options to take effect
export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
# export JSVC_HOME=/usr/bin
#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
# export HADOOP_SECURE_IDENT_PRESERVE="true"
export HDFS_AUDIT_LOGGER=INFO,NullAppender
# a) Set JMX options
# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
#
# b) Set garbage collection logs
export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
#
# c) ... or set them directly
# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
# this is the default:
# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
# export HDFS_DATANODE_SECURE_USER=hdfs
# Supplemental options for secure datanodes
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
# export HDFS_NFS3_OPTS=""
# export HDFS_PORTMAP_OPTS="-Xmx512m"
# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
# On privileged gateways, user to run the gateway as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.
# export HDFS_NFS3_SECURE_USER=nfsserver
###
# export HDFS_ZKFC_OPTS=""
# export HDFS_JOURNALNODE_OPTS=""
# export HDFS_BALANCER_OPTS=""
# export HDFS_MOVER_OPTS=""
# export HDFS_DFSROUTER_OPTS=""
###
# export HADOOP_ENABLE_BUILD_PATHS="true"
export HDFS_NAMENODE_USER=hadoop
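Once hadoop-env.sh is in place, Hadoop 3 can print the environment it computed, which catches a bad JAVA_HOME or HADOOP_CONF_DIR early; envvars is a built-in Hadoop 3 subcommand:
hadoop envvars
# prints JAVA_HOME, HADOOP_CONF_DIR and related variables as the scripts resolve them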
hdfs-site.xml configuration
<configuration>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop/hdfs/name</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop/hdfs/data</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>dfs.namenode.http-address</name>
<value>ec1069fcab62:9070</value>
</property>
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>ec1069fcab62:9001</value>
</property>
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
</configuration>
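The name, data, and tmp directories above have to exist and be writable by the hadoop user before the first start, and the NameNode must be formatted exactly once. A minimal sketch matching the paths in this config:
mkdir -p /home/hadoop/hdfs/name /home/hadoop/hdfs/data /home/hadoop/tmp
hdfs namenode -format
# format only once, on the NameNode host; re-running it wipes HDFS metadata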
mapred-site.xml configuration
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>ec1069fcab62:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>ec1069fcab62:19888</value>
</property>
</configuration>
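The JobHistory server bound to ports 10020/19888 above is not launched by the start scripts; in Hadoop 3 the old daemon helper scripts were folded into the main commands, so it is started like this:
mapred --daemon start historyserver
# web UI afterwards at http://ec1069fcab62:19888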
yarn-site.xml configuration
<configuration>
<!-- Site specific YARN configuration properties -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.resourcemanager.hostname</name>
<value>ec1069fcab62</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>ec1069fcab62:8032</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>ec1069fcab62:8030</value>
</property>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>ec1069fcab62:8031</value>
</property>
<property>
<name>yarn.resourcemanager.admin.address</name>
<value>ec1069fcab62:8033</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>ec1069fcab62:8088</value>
</property>
<property>
<name>yarn.nodemanager.resource.memory-mb</name>
<value>6078</value>
</property>
</configuration>
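Once YARN is running, you can confirm that every NodeManager registered with the ResourceManager on port 8031; node -list is a stock YARN subcommand:
yarn node -list
# each worker should appear as RUNNING; the web UI at http://ec1069fcab62:8088 shows the same view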
yarn-env.sh configuration
export YARN_RESOURCEMANAGER_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
export YARN_REGISTRYDNS_SECURE_USER=yarn
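With all the files above in place, the cluster can be brought up with the stock Hadoop 3 scripts from sbin:
start-dfs.sh
start-yarn.sh
jps
# jps should list NameNode, SecondaryNameNode, DataNode, ResourceManager and NodeManager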
Author: 艾瑞可erik
Link to this post: https://erik.xyz/2019/09/10/hadoop2-8-and-3-1-change-version/
Copyright: this work is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. Please credit the source when reposting!