Professional Documents
Culture Documents
/etc/hosts
192.168.17.128 master
192.168.17.129 client1
192.168.17.130 client2
client1
-----------------------------------------------
# ssh-keygen -t rsa
# ssh-copy-id -i ~/.ssh/id_rsa.pub hduser@master
# ssh-copy-id -i ~/.ssh/id_rsa.pub hduser@client2
client2
-----------------------------------------------
# ssh-keygen -t rsa
# ssh-copy-id -i ~/.ssh/id_rsa.pub hduser@master
# ssh-copy-id -i ~/.ssh/id_rsa.pub hduser@client1
$ su
# vi .bashrc
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
export HADOOP_HOME=$HOME/hadoop-2.7.3
export HADOOP_CONF_DIR=$HOME/hadoop-2.7.3/etc/hadoop
export HADOOP_MAPRED_HOME=$HOME/hadoop-2.7.3
export HADOOP_COMMON_HOME=$HOME/hadoop-2.7.3
export HADOOP_HDFS_HOME=$HOME/hadoop-2.7.3
export YARN_HOME=$HOME/hadoop-2.7.3
export PATH=$PATH:$HOME/hadoop-2.7.3/bin:$HOME/hadoop-2.7.3/sbin
export JAVA_HOME=/usr/java/jdk1.8.0_101
export PATH=/usr/java/jdk1.8.0_101/bin:$PATH
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# exit
$ cd ~
$ source .bashrc
$ hadoop version
$ java -version
$ cd /home/hduser/hadoop-2.7.3/etc/hadoop
vi slaves
~~~~~~~~~~~
client1
client2
~~~~~~~~~~~
vi core-site.xml
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://master:8020</value>
</property>
</configuration>
vi hdfs-site.xml          (on master)
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/home/hduser/hadoop-data/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hduser/hadoop-data/datanode</value>
</property>
</configuration>
vi hdfs-site.xml          (on client1 & client2 -- datanode only, no namenode directory)
<configuration>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/home/hduser/hadoop-data/datanode</value>
</property>
</configuration>
cp mapred-site.xml.template mapred-site.xml
vi mapred-site.xml
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
vi yarn-site.xml
<configuration>
<property>
<name>yarn.resourcemanager.resource-tracker.address</name>
<value>master:8025</value>
</property>
<property>
<name>yarn.resourcemanager.scheduler.address</name>
<value>master:8030</value>
</property>
<property>
<name>yarn.resourcemanager.address</name>
<value>master:8050</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
<property>
<name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
<value>0</value>
</property>
</configuration>
jps
############## client1 & client2 ##############
mkdir -p /home/hduser/hadoop-data/datanode
chmod 755 /home/hduser/hadoop-data/datanode
jps
~~~~~~~~~~Manual start datanode if not started with namenode
cd /home/hduser/hadoop-2.7.3/sbin