Init package
This commit is contained in:
parent
d7ec8735dd
commit
f71cf6e338
3
context.xml
Normal file
3
context.xml
Normal file
@ -0,0 +1,3 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Context allowLinking="true">
|
||||
</Context>
|
||||
BIN
hadoop-3.2.1-src.tar.gz
Normal file
BIN
hadoop-3.2.1-src.tar.gz
Normal file
Binary file not shown.
37
hadoop-hdfs.service.template
Normal file
37
hadoop-hdfs.service.template
Normal file
@ -0,0 +1,37 @@
|
||||
[Unit]
|
||||
Description=The Hadoop DAEMON daemon
|
||||
After=network.target
|
||||
After=NetworkManager.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-hdfs
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
|
||||
ExecStart=/usr/sbin/hadoop-daemon.sh start DAEMON
|
||||
ExecStop=/usr/sbin/hadoop-daemon.sh stop DAEMON
|
||||
User=hdfs
|
||||
Group=hadoop
|
||||
PIDFile=/var/run/hadoop-hdfs/hadoop-hdfs-DAEMON.pid
|
||||
LimitNOFILE=32768
|
||||
LimitNPROC=65536
|
||||
|
||||
#######################################
|
||||
# Note: Below are cgroup options
|
||||
#######################################
|
||||
#Slice=
|
||||
#CPUAccounting=true
|
||||
#CPUShares=1024
|
||||
|
||||
#MemoryAccounting=true
|
||||
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
|
||||
|
||||
#BlockIOAccounting=true
|
||||
#BlockIOWeight=??
|
||||
#BlockIODeviceWeight=??
|
||||
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
|
||||
|
||||
#DeviceAllow=
|
||||
#DevicePolicy=auto|closed|strict
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
5
hadoop-httpfs.sysconfig
Normal file
5
hadoop-httpfs.sysconfig
Normal file
@ -0,0 +1,5 @@
|
||||
CATALINA_BASE=/usr/share/hadoop/httpfs/tomcat
|
||||
CATALINA_HOME=/usr/share/hadoop/httpfs/tomcat
|
||||
CATALINA_TMPDIR=/var/cache/hadoop-httpfs
|
||||
|
||||
CATALINA_OPTS="-Dhttpfs.home.dir=/usr -Dhttpfs.config.dir=/etc/hadoop -Dhttpfs.log.dir=/var/log/hadoop-httpfs -Dhttpfs.temp.dir=/var/cache/hadoop-httpfs -Dhttpfs.admin.port=14001 -Dhttpfs.http.port=14000"
|
||||
29
hadoop-layout.sh
Normal file
29
hadoop-layout.sh
Normal file
@ -0,0 +1,29 @@
|
||||
# hadoop-layout.sh — filesystem layout used by the Hadoop daemon scripts.
# Sourced (not executed) by the Hadoop start-up scripts; every value is
# exported. *_DIR values are relative to the corresponding *_HOME prefix.

export HADOOP_PREFIX=/usr
export HADOOP_COMMON_HOME=/usr
export HADOOP_COMMON_DIR=share/hadoop/common
export HADOOP_COMMON_LIB_JARS_DIR=share/hadoop/common/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=lib/hadoop
export HADOOP_CONF_DIR=/etc/hadoop
export HADOOP_LIBEXEC_DIR=/usr/libexec

# HDFS layout, runtime dirs and identity.
export HADOOP_HDFS_HOME=$HADOOP_PREFIX
export HDFS_DIR=share/hadoop/hdfs
# Fixed: was share/hadoop/hadoop/lib — inconsistent with HDFS_DIR above and
# with the common/yarn/mapred convention of "<component dir>/lib".
export HDFS_LIB_JARS_DIR=share/hadoop/hdfs/lib
export HADOOP_PID_DIR=/var/run/hadoop-hdfs
export HADOOP_LOG_DIR=/var/log/hadoop-hdfs
export HADOOP_IDENT_STRING=hdfs

# YARN layout, runtime dirs and identity.
export HADOOP_YARN_HOME=$HADOOP_PREFIX
export YARN_DIR=share/hadoop/yarn
export YARN_LIB_JARS_DIR=share/hadoop/yarn/lib
export YARN_PID_DIR=/var/run/hadoop-yarn
export YARN_LOG_DIR=/var/log/hadoop-yarn
export YARN_CONF_DIR=/etc/hadoop
export YARN_IDENT_STRING=yarn

# MapReduce layout, runtime dirs and identity.
export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
export MAPRED_DIR=share/hadoop/mapreduce
export MAPRED_LIB_JARS_DIR=share/hadoop/mapreduce/lib
export HADOOP_MAPRED_PID_DIR=/var/run/hadoop-mapreduce
export HADOOP_MAPRED_LOG_DIR=/var/log/hadoop-mapreduce
export HADOOP_MAPRED_IDENT_STRING=mapred
|
||||
37
hadoop-mapreduce.service.template
Normal file
37
hadoop-mapreduce.service.template
Normal file
@ -0,0 +1,37 @@
|
||||
[Unit]
|
||||
Description=The Hadoop DAEMON daemon
|
||||
After=network.target
|
||||
After=NetworkManager.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-mapreduce
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
|
||||
ExecStart=/usr/sbin/mr-jobhistory-daemon.sh start DAEMON
|
||||
ExecStop=/usr/sbin/mr-jobhistory-daemon.sh stop DAEMON
|
||||
User=mapred
|
||||
Group=hadoop
|
||||
PIDFile=/var/run/hadoop-mapreduce/mapred-mapred-DAEMON.pid
|
||||
LimitNOFILE=32768
|
||||
LimitNPROC=65536
|
||||
|
||||
#######################################
|
||||
# Note: Below are cgroup options
|
||||
#######################################
|
||||
#Slice=
|
||||
#CPUAccounting=true
|
||||
#CPUShares=1024
|
||||
|
||||
#MemoryAccounting=true
|
||||
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
|
||||
|
||||
#BlockIOAccounting=true
|
||||
#BlockIOWeight=??
|
||||
#BlockIODeviceWeight=??
|
||||
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
|
||||
|
||||
#DeviceAllow=
|
||||
#DevicePolicy=auto|closed|strict
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
49
hadoop-tomcat-users.xml
Normal file
49
hadoop-tomcat-users.xml
Normal file
@ -0,0 +1,49 @@
|
||||
<?xml version='1.0' encoding='utf-8'?>
|
||||
<!--
|
||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
contributor license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright ownership.
|
||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
(the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
<tomcat-users xmlns="http://tomcat.apache.org/xml"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://tomcat.apache.org/xml tomcat-users.xsd"
|
||||
version="1.0">
|
||||
<!--
|
||||
NOTE: By default, no user is included in the "manager-gui" role required
|
||||
to operate the "/manager/html" web application. If you wish to use this app,
|
||||
you must define such a user - the username and password are arbitrary.
|
||||
-->
|
||||
<!--
|
||||
NOTE: The sample user and role entries below are wrapped in a comment
|
||||
and thus are ignored when reading this file. Do not forget to remove
|
||||
<!.. ..> that surrounds them.
|
||||
-->
|
||||
<!--
|
||||
<role rolename="tomcat"/>
|
||||
<role rolename="role1"/>
|
||||
<user username="tomcat" password="tomcat" roles="tomcat"/>
|
||||
<user username="both" password="tomcat" roles="tomcat,role1"/>
|
||||
<user username="role1" password="tomcat" roles="role1"/>
|
||||
-->
|
||||
|
||||
<!-- <role rolename="admin"/> -->
|
||||
<!-- <role rolename="admin-gui"/> -->
|
||||
<!-- <role rolename="admin-script"/> -->
|
||||
<!-- <role rolename="manager"/> -->
|
||||
<!-- <role rolename="manager-gui"/> -->
|
||||
<!-- <role rolename="manager-script"/> -->
|
||||
<!-- <role rolename="manager-jmx"/> -->
|
||||
<!-- <role rolename="manager-status"/> -->
|
||||
<!-- <user name="admin" password="adminadmin" roles="admin,manager,admin-gui,admin-script,manager-gui,manager-script,manager-jmx,manager-status" /> -->
|
||||
</tomcat-users>
|
||||
37
hadoop-yarn.service.template
Normal file
37
hadoop-yarn.service.template
Normal file
@ -0,0 +1,37 @@
|
||||
[Unit]
|
||||
Description=The Hadoop DAEMON daemon
|
||||
After=network.target
|
||||
After=NetworkManager.target
|
||||
|
||||
[Service]
|
||||
Type=forking
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-yarn
|
||||
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
|
||||
ExecStart=/usr/sbin/yarn-daemon.sh start DAEMON
|
||||
ExecStop=/usr/sbin/yarn-daemon.sh stop DAEMON
|
||||
User=yarn
|
||||
Group=hadoop
|
||||
PIDFile=/var/run/hadoop-yarn/yarn-yarn-DAEMON.pid
|
||||
LimitNOFILE=32768
|
||||
LimitNPROC=65536
|
||||
|
||||
#######################################
|
||||
# Note: Below are cgroup options
|
||||
#######################################
|
||||
#Slice=
|
||||
#CPUAccounting=true
|
||||
#CPUShares=1024
|
||||
|
||||
#MemoryAccounting=true
|
||||
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
|
||||
|
||||
#BlockIOAccounting=true
|
||||
#BlockIOWeight=??
|
||||
#BlockIODeviceWeight=??
|
||||
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
|
||||
|
||||
#DeviceAllow=
|
||||
#DevicePolicy=auto|closed|strict
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
8
hadoop.logrotate
Normal file
8
hadoop.logrotate
Normal file
@ -0,0 +1,8 @@
|
||||
/var/log/hadoop-NAME/*.log
|
||||
{
|
||||
missingok
|
||||
copytruncate
|
||||
compress
|
||||
weekly
|
||||
rotate 52
|
||||
}
|
||||
1017
hadoop.spec
Normal file
1017
hadoop.spec
Normal file
File diff suppressed because it is too large
Load Diff
4
hadoop.yaml
Normal file
4
hadoop.yaml
Normal file
@ -0,0 +1,4 @@
|
||||
version_control: NA
|
||||
src_repo: NA
|
||||
tag_prefix: NA
|
||||
separator: NA
|
||||
66
hdfs-create-dirs
Normal file
66
hdfs-create-dirs
Normal file
@ -0,0 +1,66 @@
|
||||
#!/bin/bash
# hdfs-create-dirs — create and configure the standard HDFS directory tree
# (/user, /tmp, YARN and MapReduce staging/log dirs) on a fresh cluster.
# Must be run as root. If the namenode is not running it is formatted
# (no-op when already formatted), started for the duration of this script,
# and stopped again at the end.

hdfs_dirs="/user /var/log /tmp"
mapred_dirs="/tmp/hadoop-yarn/staging /tmp/hadoop-yarn/staging/history /tmp/hadoop-yarn/staging/history/done /tmp/hadoop-yarn/staging/history/done_intermediate"
yarn_dirs="/tmp/hadoop-yarn /var/log/hadoop-yarn"

#######################################
# Run a command as the hdfs user, discarding its output.
# Arguments: the command string to execute
# Returns:   exit status of the command
#######################################
run_as_hdfs() {
  runuser hdfs -s /bin/bash /bin/bash -c "$*" > /dev/null 2>&1
}

# Must be run as root
if [[ $EUID -ne 0 ]]; then
  echo "This must be run as root" 1>&2
  exit 1
fi

# Start the namenode if it isn't running
started=0
systemctl status hadoop-namenode > /dev/null 2>&1
rc=$?
if [[ $rc -gt 0 ]]; then
  # Format the namenode if it hasn't been formatted yet;
  # -nonInteractive makes this a no-op on an already-formatted filesystem.
  if run_as_hdfs "hdfs namenode -format -nonInteractive"; then
    echo "Formatted the Hadoop namenode"
  fi

  echo "Starting the Hadoop namenode"
  systemctl start hadoop-namenode > /dev/null 2>&1
  rc=$?
  started=1
fi

if [[ $rc -ne 0 ]]; then
  echo "The Hadoop namenode failed to start" 1>&2
  exit 1
fi

# Create every directory up front; ownership/permissions are fixed below.
for dir in $hdfs_dirs $yarn_dirs $mapred_dirs; do
  echo "Creating directory $dir"
  run_as_hdfs "hadoop fs -mkdir -p $dir"
done

# /tmp needs the sticky bit, like a local /tmp.
echo "Setting permissions on /tmp"
run_as_hdfs "hadoop fs -chmod 1777 /tmp"

for dir in $mapred_dirs; do
  echo "Setting permissions and ownership for $dir"
  run_as_hdfs "hadoop fs -chown mapred:mapred $dir"
  run_as_hdfs "hadoop fs -chmod 1777 $dir"
done

for dir in $yarn_dirs; do
  echo "Setting permissions and ownership for $dir"
  # NOTE(review): group is mapred (yarn:mapred), matching the original
  # script — confirm this is intentional for the YARN dirs.
  run_as_hdfs "hadoop fs -chown yarn:mapred $dir"
done

# Stop the namenode if we started it
if [[ $started -gt 0 ]]; then
  echo "Stopping the Hadoop namenode"
  systemctl stop hadoop-namenode > /dev/null 2>&1
fi
|
||||
Loading…
x
Reference in New Issue
Block a user