注意HBase与Hadoop兼容性问题
安装jdk
# Install OpenJDK 7 on Debian/Ubuntu; required by both Hadoop 1.2.1 and HBase 0.98.
aptitude install openjdk-7-jdk
创建hbase用户
# Create the 'hbase' group and user on every node.
# NOTE: 'passwd hbase' prompts interactively once per host.
for i in AP-HB1 AP-HB2 AP-HB3; do
  echo "=====$i====="   # progress banner so prompts are attributable to a host
  ssh "$i" "groupadd hbase; useradd -m hbase -g hbase -s /bin/bash -d /home/hbase; passwd hbase"
done
创建HDFS存储目录
# Create the HDFS storage directory on every node and hand it to the hbase user.
for i in AP-HB1 AP-HB2 AP-HB3; do
  echo "=====$i====="   # progress banner, consistent with the user-creation loop
  ssh "$i" "mkdir -p /data/HadoopData/ && chown -R hbase:hbase /data/HadoopData"
done
安装HDFS
解压hadoop
# Unpack the Hadoop 1.2.1 binary tarball.
# NOTE(review): later steps assume the tree lives at /opt/hadoop-1.2.1 — run this in /opt (confirm).
tar zxf ~/hadoop-1.2.1-bin.tar.gz
配置hadoop-env.sh(指定JAVA_HOME)
# Line to set in conf/hadoop-env.sh: point Hadoop at the OpenJDK 7 installed above.
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
配置core-site.xml
<!-- Base directory for Hadoop temporary files; kept on the dedicated data volume. -->
<property>
<name>hadoop.tmp.dir</name>
<value>/data/HadoopData/tmp</value>
</property>
<!-- NameNode RPC address (Hadoop 1.x property name; superseded by fs.defaultFS in 2.x+). -->
<property>
<name>fs.default.name</name>
<value>hdfs://AP-HB1:9000</value>
</property>
配置conf/hdfs-site.xml
<!-- Where the NameNode persists the filesystem image and edit log. -->
<property>
<name>dfs.name.dir</name>
<value>/data/HadoopData/dfs/name/</value>
</property>
<!-- Where each DataNode stores HDFS block data. -->
<property>
<name>dfs.data.dir</name>
<value>/data/HadoopData/dfs/data/</value>
</property>
<!-- Replicas per block: 3 = one copy on each node of this 3-node cluster. -->
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<!-- Disables HDFS permission checking — convenient for a test cluster, unsafe for production. -->
<property>
<name>dfs.permissions</name>
<value>false</value>
</property>
配置masters(指定SecondaryNameNode机器)
配置slaves(指定DataNode机器)
安装HBase
解压HBase
# Unpack the HBase 0.98.12 (hadoop1-compatible) binary tarball.
# NOTE(review): later steps assume the tree lives at /opt/hbase-0.98.12-hadoop1 — run this in /opt (confirm).
tar zxf ~/hbase-0.98.12-hadoop1-bin.tar.gz
配置hbase-env.sh(指定JAVA_HOME)
# Line to set in conf/hbase-env.sh: same JDK as Hadoop.
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
配置hbase-site.xml(下列均为HBase属性,应写入conf/hbase-site.xml,而非hdfs-site.xml)
<!-- HDFS directory where HBase stores all its data; host:port must match fs.default.name in core-site.xml. -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://AP-HB1:9000/hbase</value>
</property>
<!-- Run fully distributed (separate JVMs / multiple hosts) instead of standalone mode. -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- NOTE(review): hbase.master is a legacy property; in 0.98 the master is discovered
     via ZooKeeper, so this is likely ignored — confirm before relying on it. -->
<property>
<name>hbase.master</name>
<value>AP-HB1:60000</value>
</property>
<!-- Data directory for the ZooKeeper instances that HBase manages itself. -->
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/data/HadoopData/zookeeper</value>
</property>
<!-- Hosts forming the ZooKeeper ensemble (one per cluster node here). -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>AP-HB1,AP-HB2,AP-HB3</value>
</property>
<!-- NOTE(review): hbase.regionserver.export.thrift is not a standard 0.98 property
     name — presumably meant to start a Thrift server on each regionserver; verify. -->
<property>
<name>hbase.regionserver.export.thrift</name>
<value>true</value>
</property>
<!-- Listen port for the regionserver Thrift service (default would be 9090). -->
<property>
<name>hbase.regionserver.thrift.port</name>
<value>9091</value>
</property>
配置regionservers
AP-HB1
AP-HB2
AP-HB3
复制程序到所有机器
# Fix ownership of both trees, then push them to the other two nodes.
cd /opt || exit 1          # don't chown/rsync from the wrong directory if /opt is missing
chown -R hbase:hbase hadoop-1.2.1 hbase-0.98.12-hadoop1   # ':' separator; the '.' form is deprecated
for i in AP-HB2 AP-HB3; do
  rsync -av /opt/hadoop-1.2.1 /opt/hbase-0.98.12-hadoop1 "$i":/opt/
done
设置ssh免密登录
# Passwordless ssh for the hbase user between all three nodes.
# 1) Generate a key pair on every node (run as root, hence the chown),
#    with the 700 mode sshd requires on ~/.ssh.
for i in AP-HB1 AP-HB2 AP-HB3; do
  ssh "$i" "mkdir -p /home/hbase/.ssh && chmod 700 /home/hbase/.ssh; ssh-keygen -t rsa -P '' -f /home/hbase/.ssh/id_rsa; chown -R hbase:hbase /home/hbase/.ssh"
done
# 2) Collect every node's PUBLIC key into one authorized_keys.
#    (Original read authorized_keys, which does not exist yet, and skipped AP-HB1's own key.)
for i in AP-HB1 AP-HB2 AP-HB3; do
  ssh "$i" "cat /home/hbase/.ssh/id_rsa.pub"
done >> /home/hbase/.ssh/authorized_keys
chmod 600 /home/hbase/.ssh/authorized_keys
chown hbase:hbase /home/hbase/.ssh/authorized_keys
# 3) Distribute the combined file to the other nodes.
for i in AP-HB2 AP-HB3; do
  scp /home/hbase/.ssh/authorized_keys "$i":/home/hbase/.ssh/
done
开启HDFS服务
# Start HDFS (as the hbase user). '&&' so the relative path is never run from the wrong dir.
cd /opt/hadoop-1.2.1 && bin/start-dfs.sh
开启HBase服务
# Start HBase after HDFS is up. '&&' so the relative path is never run from the wrong dir.
cd /opt/hbase-0.98.12-hadoop1 && bin/start-hbase.sh
Troubleshooting
2015-05-19 16:48:34,815 ERROR org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Error getting localhost name. Using 'localhost'...
hadoop要使用hostname,需要修改hostname与配置文件一致,修改hostname
# Set the running hostname and persist it across reboots.
# $hostname must hold the desired name, matching the masters/slaves/regionservers files.
hostname "$hostname"
echo "$hostname" > /etc/hostname   # original used 'cat', which would try to READ a file named "$hostname"
2015-05-19 17:40:00,771 FATAL [AP-HB1:16020.activeMasterManager] master.HMaster: Unhandled exception. Starting shutdown.
java.io.IOException: Failed on local exception: java.io.IOException: Broken pipe; Host Details : local host is: "AP-HB1/10.162.50.249"; destination host is: "AP-HB1":9000;
注意HBase与Hadoop版本兼容性问题
时间: 2024-09-16 05:24:46