misc changes

This commit is contained in:
Shane Peters
2019-01-15 13:59:01 -05:00
parent 41a9162c9c
commit 8b580286ba
7 changed files with 44 additions and 14 deletions

View File

@@ -28,12 +28,8 @@ pip install elasticsearch-curator
sed -i 's/#cluster.name: my-application/cluster.name: odin/g' /etc/elasticsearch/elasticsearch.yml sed -i 's/#cluster.name: my-application/cluster.name: odin/g' /etc/elasticsearch/elasticsearch.yml
sed -i 's/#node.name: node-1/node.name: node-1/g' /etc/elasticsearch/elasticsearch.yml sed -i 's/#node.name: node-1/node.name: node-1/g' /etc/elasticsearch/elasticsearch.yml
sed -i 's/#bootstrap.memory_lock: true/bootstrap.memory_lock: true/g' /etc/elasticsearch/elasticsearch.yml
sed -i "s/#network.host: 192.168.0.1/network.host: ${IP}/g" /etc/elasticsearch/elasticsearch.yml sed -i "s/#network.host: 192.168.0.1/network.host: ${IP}/g" /etc/elasticsearch/elasticsearch.yml
sed -i 's/-Xms2g/-Xms8g/g' /etc/elasticsearch/jvm.options
sed -i 's/-Xmx2g/-Xmx8g/g' /etc/elasticsearch/jvm.options
mkdir /etc/curator/ mkdir /etc/curator/
cat >/etc/curator/delete_indices.yml <<EOF cat >/etc/curator/delete_indices.yml <<EOF
--- ---

View File

@@ -9,7 +9,7 @@ log() {
} }
export CONF_411=https://gist.githubusercontent.com/scoutsec/4a4841ad4ea019190bfcc7d87b663600/raw/4424e66e50033c2e72559310a7bd25d8e959f023/411.conf export CONF_411=https://gist.githubusercontent.com/scoutsec/4a4841ad4ea019190bfcc7d87b663600/raw/4424e66e50033c2e72559310a7bd25d8e959f023/411.conf
export FOUR11_URL=https://github.com/etsy/411/releases/download/v1.4.0/release-es5x.tgz export FOUR11_URL=https://github.com/etsy/411/releases/download/v1.5.0/release.tgz
export IP=$(ip route | awk '/src/{print $9}') export IP=$(ip route | awk '/src/{print $9}')
echo "fouroneone" >/etc/hostname echo "fouroneone" >/etc/hostname
echo -e "${IP}\tfouroneone" >> /etc/hosts echo -e "${IP}\tfouroneone" >> /etc/hosts

View File

@@ -224,8 +224,7 @@ else
critical-stack-intel pull critical-stack-intel pull
fi fi
add-apt-repository -y -u ppa:oisf/suricata-stable apt-get install -y prometheus-node-exporter
apt-get install -y prometheus-node-exporter suricata
systemctl enable bro systemctl enable bro
systemctl start bro systemctl start bro

View File

@@ -8,7 +8,7 @@ log() {
echo -e "\t\e[96m[*]${1}\e[93m" echo -e "\t\e[96m[*]${1}\e[93m"
} }
export KAFKA_URL='http://apache.claz.org/kafka/0.11.0.0/kafka_2.11-0.11.0.0.tgz' export KAFKA_URL='http://apache.claz.org/kafka/2.1.0/kafka_2.12-2.1.0.tgz'
export IP=$(ip route | awk '/src/{print $9}') export IP=$(ip route | awk '/src/{print $9}')
echo "kafka" >/etc/hostname echo "kafka" >/etc/hostname
echo "${IP}\tkafka" >> /etc/hosts echo "${IP}\tkafka" >> /etc/hosts
@@ -22,7 +22,7 @@ apt-get install -y htop wget default-jre zookeeperd prometheus-node-exporter
useradd -r -d /opt/kafka -s /bin/true kafka useradd -r -d /opt/kafka -s /bin/true kafka
mkdir /var/lib/kafka && chown kafka /var/lib/kafka mkdir /var/lib/kafka && chown kafka /var/lib/kafka
wget -O /opt/kafka.tgz ${KAFKA_URL} wget -O /opt/kafka.tgz ${KAFKA_URL} || (echo "COULDN'T DOWNLOAD KAFKA" && exit 1)
tar -xzf /opt/kafka.tgz -C /opt tar -xzf /opt/kafka.tgz -C /opt
rm /opt/kafka.tgz rm /opt/kafka.tgz
mv /opt/kafka_* /opt/kafka mv /opt/kafka_* /opt/kafka

15
deploy
View File

@@ -9,16 +9,23 @@ log() {
} }
if [ "$#" -lt 2 ]; then if [ "$#" -lt 2 ]; then
log "usage: sudo ${0} ZFS_DATASET TAP_INTERFACE MGMT_INTERFACE" log "usage: sudo ${0} <zfs_dataset> <tap_interface> <mgmt_interface> <prod|dev>"
exit 1 exit 1
fi fi
set -x
export ZPOOL=${1} export ZPOOL=${1}
export TAP=${2} export TAP=${2}
export MGMT=${3} export MGMT=${3}
export PROD=${4}
export MGMT_IP=$(ip -o -4 a show ${MGMT} | awk '{print $4}' |cut -d '/' -f 1) export MGMT_IP=$(ip -o -4 a show ${MGMT} | awk '{print $4}' |cut -d '/' -f 1)
export LXC='/snap/bin/lxc' export LXC='/snap/bin/lxc'
source limits if [ ! -z $PROD ]; then
source limits.prod
else
source limits.dev
fi
need_zfs() { need_zfs() {
log "ZFS dataset \"${1}\" wasn't found. I suggest you create it and restart the deploy." log "ZFS dataset \"${1}\" wasn't found. I suggest you create it and restart the deploy."
@@ -54,15 +61,13 @@ setup_lxd() {
ZPOOL=${1} ZPOOL=${1}
log "Deploying lxd on ${ZPOOL}." log "Deploying lxd on ${ZPOOL}."
lxd init --auto --storage-backend=zfs --storage-pool="${ZPOOL}" lxd init --auto --storage-backend=zfs --storage-pool="${ZPOOL}"
${LXC} network create odinbr0 dns.domain="odin" ipv4.address="10.13.37.1/24" ipv4.nat=true ipv6.address=none
${LXC} network attach-profile odinbr0 default eth0
chown -R ${SUDO_USER}:${SUDO_USER} ${HOME}/.config/lxc chown -R ${SUDO_USER}:${SUDO_USER} ${HOME}/.config/lxc
} }
setup_containers() { setup_containers() {
export BROFACE=${1} export BROFACE=${1}
# Order is important - start the pipeline (kafka) first, fsf is before bro because bro submits files to it, etc... # Order is important - start the pipeline (kafka) first, fsf is before ids because bro submits files to it, etc...
export CONTAINERS="kafka elasticsearch logstash kibana fsf ids rita prometheus fouroneone" export CONTAINERS="kafka elasticsearch logstash kibana fsf ids rita prometheus fouroneone"
for CON in ${CONTAINERS}; do for CON in ${CONTAINERS}; do

View File

30
limits.prod Normal file
View File

@@ -0,0 +1,30 @@
# Production per-container resource limits, sourced by the deploy script.
# For each container NAME the deploy reads CPU_<NAME> (CPU count),
# MEM_<NAME> (memory cap) and DISK_<NAME> (disk quota).

# ids — the IDS container is the heaviest consumer of CPU
export CPU_ids="6"
export MEM_ids="8192MB"
export DISK_ids="20GB"

# kafka
export CPU_kafka="2"
export MEM_kafka="1024MB"
export DISK_kafka="20GB"

# elasticsearch — largest memory and disk footprint (index storage)
export CPU_elasticsearch="4"
export MEM_elasticsearch="16384MB"
export DISK_elasticsearch="250GB"

# logstash
export CPU_logstash="1"
export MEM_logstash="1024MB"
export DISK_logstash="10GB"

# kibana
export CPU_kibana="2"
export MEM_kibana="1024MB"
export DISK_kibana="10GB"

# fouroneone (411 alerting)
export CPU_fouroneone="2"
export MEM_fouroneone="2048MB"
export DISK_fouroneone="10GB"

# rita
export CPU_rita="4"
export MEM_rita="8192MB"
export DISK_rita="120GB"

# fsf
export CPU_fsf="4"
export MEM_fsf="4096MB"
export DISK_fsf="80GB"

# prometheus
export CPU_prometheus="2"
export MEM_prometheus="2048MB"
export DISK_prometheus="60GB"