#此工具为Hadoop集群文件(常用于jar包更新或版本升级)分发工具
#把本地(namenode)文件分发到指定机器(datanode)
#前提是指定机器(datanode)的目录结构必须与本地(namenode)目录一致
# Run a shell command on all slave hosts.
#
# Environment Variables
#
# HADOOP_SLAVES File naming remote hosts.
# Default is ${HADOOP_CONF_DIR}/slaves.
# HADOOP_CONF_DIR Alternate conf dir. Default is ${HADOOP_HOME}/conf.
# HADOOP_SLAVE_SLEEP Seconds to sleep between spawning remote commands.
# HADOOP_SSH_OPTS Options passed to ssh when running remote commands.
# FILE_PATH 分发文件列表文件 默认为 ${HADOOP_CONF_DIR}/configs,配置的文件名必须为文件的绝对路径
#
##
usage="Usage: file-dispatcher.sh [--files <filelist>]"

# Resolve this script's directory and load the shared Hadoop shell
# environment (defines HADOOP_CONF_DIR, HADOOP_SLAVES, etc.).
bin=$(dirname "$0")
bin=$(cd "$bin" && pwd)

. "$bin"/hadoop-config.sh

# Parse arguments.
#   --files <filelist>  explicit list file of absolute paths to dispatch
#   (no args)           default to ${HADOOP_CONF_DIR}/configs
# Anything else is a usage error. $1 is quoted so an argument containing
# whitespace (or an empty string) cannot break the test expression.
FILE_PATH=""
if [ $# -eq 2 ] && [ "$1" = "--files" ]; then
  echo "$2"
  FILE_PATH=$2
elif [ $# -eq 0 ]; then
  FILE_PATH="${HADOOP_CONF_DIR}/configs"
else
  # Diagnostics belong on stderr so stdout stays clean for callers.
  echo "$usage" >&2
  exit 1
fi
# If the slaves file is specified in the command line,
# then it takes precedence over the definition in
# hadoop-env.sh. Save it here before sourcing that file.
HOSTLIST=$HADOOP_SLAVES

if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi

echo "加载待分发的机器列表..."
# Precedence: pre-set HOSTLIST > HADOOP_SLAVES from hadoop-env.sh >
# default ${HADOOP_CONF_DIR}/slaves.
if [ "$HOSTLIST" = "" ]; then
  if [ "$HADOOP_SLAVES" = "" ]; then
    export HOSTLIST="${HADOOP_CONF_DIR}/slaves"
  else
    export HOSTLIST="${HADOOP_SLAVES}"
  fi
fi

# NOTE(review): the original tested [ "$HOSTLIST" = "" ] here, which is
# dead code — the block above always assigns a non-empty value. What can
# actually go wrong is the list FILE being absent, so test that instead.
if [ ! -f "$HOSTLIST" ]; then
  echo "请配置待分发的机器..."
  exit 0
else
  echo "待分发的机器如下:"
  # Strip comments and blank lines; word-splitting of the unquoted
  # $(...) is intentional — slaves files are whitespace-delimited.
  for hostname in $(sed 's/#.*$//;/^$/d' "$HOSTLIST"); do
    echo "$hostname"
  done
fi
echo "从$FILE_PATH文件加载待分发的配置文件名..."
# CONFIGLIST names a file whose lines are absolute paths of the
# files/directories to dispatch; '#' comments and blank lines are ignored.
export CONFIGLIST="$FILE_PATH"
echo "待分发配置文件夹如下:"
# sed reads the file directly (no useless cat); word-splitting of the
# unquoted $(...) is intentional, matching the original for-loop.
for configname in $(sed 's/#.*$//;/^$/d' "$CONFIGLIST"); do
  echo "$configname"
done
# Dispatch every listed file/directory to every slave host via scp.
# The remote destination is the local path's parent directory, so the
# remote directory layout must mirror the local one (see file header).
for configname in $(sed 's/#.*$//;/^$/d' "$CONFIGLIST"); do
  # dirname is loop-invariant w.r.t. the host loop — compute it once.
  TO_PATH=$(dirname "$configname")
  for hostname in $(sed 's/#.*$//;/^$/d' "$HOSTLIST"); do
    if [ -f "$configname" ]; then
      echo "分发文件 $configname 至 $hostname:$TO_PATH "
      scp "$configname" "$hostname:$TO_PATH"
    elif [ -d "$configname" ]; then
      echo "分发文件夹 $configname 至 $hostname:$TO_PATH "
      scp -r "$configname" "$hostname:$TO_PATH"
    else
      echo "文件或文件夹 $configname 不存在"
    fi
    # Optional throttle documented in the file header (HADOOP_SLAVE_SLEEP);
    # no-op when unset, so existing behavior is unchanged.
    if [ "${HADOOP_SLAVE_SLEEP:-}" != "" ]; then
      sleep "$HADOOP_SLAVE_SLEEP"
    fi
  done
done