# The ELK stack
https://www.digitalocean.com/community/tutorials/how-to-install-elasticsearch-logstash-and-kibana-elastic-stack-on-ubuntu-18-04
digital ocean droplet with ubuntu 18.04
monitoring, ipv6, private network and user-data enabled
user-data
#!/bin/bash
#
# install script (user-data) for ubuntu 18.04 droplet on digital ocean
#
apt-get -y update
ufw allow ssh
ufw allow http
ufw allow https
ufw --force enable
adduser --disabled-password --gecos "" production
apt-get install -y bindfs
#
# installing certbot
#
apt-get update
apt-get install -y software-properties-common
add-apt-repository -y ppa:certbot/certbot
apt-get update
apt-get install -y python-certbot-nginx
install java
apt-get install openjdk-8-jdk
java -version
update-alternatives --config java
- add the jdk folder without /bin/java from the previous command as JAVA_HOME="" in the next command
nano /etc/environment
source /etc/environment
echo $JAVA_HOME
install elasticsearch (https://tecadmin.net/setup-elasticsearch-on-ubuntu/)
apt-get install apt-transport-https
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
add-apt-repository "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
apt-get update
apt-get install elasticsearch
nano /etc/elasticsearch/elasticsearch.yml
back to the digital ocean tutorial
change network.host to localhost
systemctl start elasticsearch
systemctl enable elasticsearch
curl -X GET "localhost:9200"
kibana
apt-get install kibana
systemctl enable kibana
systemctl start kibana
nginx
apt-get install nginx
echo "kibanaadmin:$(openssl passwd -apr1)" | tee -a /etc/nginx/htpasswd.users
- enter a password for kibanaadmin when prompted
nano /etc/nginx/sites-available/elk.matise.nl
# Reverse proxy: expose the local Kibana instance (port 5601) on port 80
# for elk.matise.nl, protected by HTTP basic auth.
server {
# plain HTTP; certbot --nginx later adds the TLS listener
listen 80;
server_name elk.matise.nl;
# require a login from /etc/nginx/htpasswd.users (kibanaadmin) before proxying
auth_basic "Restricted Access";
auth_basic_user_file /etc/nginx/htpasswd.users;
location / {
# forward all traffic to Kibana
proxy_pass http://localhost:5601;
# HTTP/1.1 plus Upgrade/Connection headers let WebSocket connections pass through
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
# preserve the original Host header for Kibana
proxy_set_header Host $host;
# don't serve cached responses for upgrade (WebSocket) requests
proxy_cache_bypass $http_upgrade;
}
}
ln -s /etc/nginx/sites-available/elk.matise.nl /etc/nginx/sites-enabled/elk.matise.nl
- for some reason apache2 was running on my droplet
systemctl stop apache2
systemctl disable apache2
systemctl restart nginx
- go to http://elk.matise.nl/status and see if it works
certbot --nginx
logstash
apt-get install logstash
- logstash could not start because I was using a 2gb memory server; resizing to 4gb fixed this.
nano /etc/logstash/conf.d/02-beats-input.conf
# Listen for events shipped by Beats agents (filebeat/metricbeat) on port 5044.
input {
beats {
port => 5044
}
}
nano /etc/logstash/conf.d/10-syslog-filter.conf
# Parse events from Filebeat's "system" module (auth.log and syslog filesets)
# into structured [system][auth][...] / [system][syslog][...] fields.
filter {
  if [fileset][module] == "system" {
    if [fileset][name] == "auth" {
      # Match the common auth.log line shapes: sshd login events, sshd dropped
      # connections, sudo invocations, groupadd/useradd events, and a generic
      # catch-all (last pattern) for anything else.
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} %{DATA:[system][auth][ssh][method]} for (invalid user )?%{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]} port %{NUMBER:[system][auth][ssh][port]} ssh2(: %{GREEDYDATA:[system][auth][ssh][signature]})?",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: %{DATA:[system][auth][ssh][event]} user %{DATA:[system][auth][user]} from %{IPORHOST:[system][auth][ssh][ip]}",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sshd(?:\[%{POSINT:[system][auth][pid]}\])?: Did not receive identification string from %{IPORHOST:[system][auth][ssh][dropped_ip]}",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} sudo(?:\[%{POSINT:[system][auth][pid]}\])?: \s*%{DATA:[system][auth][user]} :( %{DATA:[system][auth][sudo][error]} ;)? TTY=%{DATA:[system][auth][sudo][tty]} ; PWD=%{DATA:[system][auth][sudo][pwd]} ; USER=%{DATA:[system][auth][sudo][user]} ; COMMAND=%{GREEDYDATA:[system][auth][sudo][command]}",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} groupadd(?:\[%{POSINT:[system][auth][pid]}\])?: new group: name=%{DATA:[system][auth][groupadd][name]}, GID=%{NUMBER:[system][auth][groupadd][gid]}",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} useradd(?:\[%{POSINT:[system][auth][pid]}\])?: new user: name=%{DATA:[system][auth][user][add][name]}, UID=%{NUMBER:[system][auth][user][add][uid]}, GID=%{NUMBER:[system][auth][user][add][gid]}, home=%{DATA:[system][auth][user][add][home]}, shell=%{DATA:[system][auth][user][add][shell]}$",
        "%{SYSLOGTIMESTAMP:[system][auth][timestamp]} %{SYSLOGHOST:[system][auth][hostname]} %{DATA:[system][auth][program]}(?:\[%{POSINT:[system][auth][pid]}\])?: %{GREEDYMULTILINE:[system][auth][message]}"] }
        # GREEDYDATA stops at a newline; this variant also spans multi-line messages.
        pattern_definitions => {
          "GREEDYMULTILINE"=> "(.|\n)*"
        }
        # Drop the raw line once it has been parsed into fields.
        remove_field => "message"
      }
      # Use the syslog timestamp from the log line as the event @timestamp
      # (syslog pads single-digit days, hence the two day formats).
      date {
        match => [ "[system][auth][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
      # Enrich ssh source IPs with GeoIP location data.
      geoip {
        source => "[system][auth][ssh][ip]"
        target => "[system][auth][ssh][geoip]"
      }
    }
    else if [fileset][name] == "syslog" {
      # Generic syslog line: timestamp, host, program[pid]: message.
      grok {
        match => { "message" => ["%{SYSLOGTIMESTAMP:[system][syslog][timestamp]} %{SYSLOGHOST:[system][syslog][hostname]} %{DATA:[system][syslog][program]}(?:\[%{POSINT:[system][syslog][pid]}\])?: %{GREEDYMULTILINE:[system][syslog][message]}"] }
        pattern_definitions => { "GREEDYMULTILINE" => "(.|\n)*" }
        remove_field => "message"
      }
      date {
        match => [ "[system][syslog][timestamp]", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ]
      }
    }
  }
}
nano /etc/logstash/conf.d/30-elasticsearch-output.conf
# Send parsed events to the local Elasticsearch node.
output {
elasticsearch {
hosts => ["localhost:9200"]
# index templates are installed by `filebeat setup`, not managed by Logstash
manage_template => false
# one index per beat, version and day, e.g. filebeat-7.4.0-2019.10.01
index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
}
}
test config:
sudo -u logstash /usr/share/logstash/bin/logstash --path.settings /etc/logstash -t
systemctl start logstash
systemctl enable logstash
filebeat
apt-get install filebeat
nano /etc/filebeat/filebeat.yml
- comment out elasticsearch:
...
#output.elasticsearch:
# Array of hosts to connect to.
#hosts: ["localhost:9200"]
...
- enable logstash
# Ship events to Logstash (beats input on port 5044) instead of directly to Elasticsearch.
output.logstash:
# The Logstash hosts
hosts: ["localhost:5044"]
filebeat modules enable system
filebeat modules list
filebeat setup --template -E output.logstash.enabled=false -E 'output.elasticsearch.hosts=["localhost:9200"]'
filebeat setup -e -E output.logstash.enabled=false -E output.elasticsearch.hosts=['localhost:9200'] -E setup.kibana.host=localhost:5601
systemctl start filebeat
systemctl enable filebeat
configure external metricbeats
nano /etc/elasticsearch/elasticsearch.yml
- set network host to: network.host: 0.0.0.0
- enable discovery.seed_hosts and set to: []
systemctl restart elasticsearch
ufw allow from 206.189.106.24/32 to any port 9200
ufw allow from 157.245.64.166/32 to any port 9200
apt-get install metricbeat
metricbeat setup --template -E 'output.elasticsearch.hosts=["localhost:9200"]'
metricbeat setup -e -E output.elasticsearch.hosts=['localhost:9200'] -E setup.kibana.host=localhost:5601
systemctl start metricbeat
systemctl enable metricbeat
https://www.digitalocean.com/community/tutorials/how-to-gather-infrastructure-metrics-with-metricbeat-on-ubuntu-18-04
on another host
apt-get install apt-transport-https
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
add-apt-repository "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
apt-get update
apt-get install metricbeat
- change host output:
nano /etc/metricbeat/metricbeat.yml
- I also set fields: env: to [client (matise/lenouveauchef)]-[server type (node/wordpress/varnish)]
systemctl start metricbeat
systemctl enable metricbeat
on another host filebeat setup
apt-get install apt-transport-https
wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
add-apt-repository "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
apt-get update
apt-get install filebeat
filebeat modules enable apache
filebeat modules list
- change host output:
nano /etc/filebeat/filebeat.yml
systemctl start filebeat
systemctl enable filebeat
# curator
- on the elk host machine
nano /etc/apt/sources.list.d/curator.list
- add the following line to that file:
deb [arch=amd64] https://packages.elastic.co/curator/5/debian stable main
apt-get update
apt-get install elasticsearch-curator
curator_cli delete_indices --filter_list '{"filtertype":"age", "source":"creation_date", "direction":"older", "unit":"days", "unit_count":45}'