Adding filebeat role

Unsure about logstash currently. That one is on hold for now.
James Tombleson 2019-05-01 15:21:32 -07:00
parent 10c1d50483
commit d2d1e2734f
16 changed files with 582 additions and 2 deletions

@@ -0,0 +1,38 @@
Role Name
=========

A brief description of the role goes here.

Requirements
------------

Any prerequisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.

Role Variables
--------------

A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (i.e. hostvars, group vars, etc.) should be mentioned here as well.
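As a sketch only (these variable names are hypothetical and are not read by the role yet), a defaults/main.yml entry could look like:

    # defaults/main.yml (hypothetical example)
    filebeat_version: 7.0.0
    filebeat_config_path: /etc/filebeat/filebeat.yml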
Dependencies
------------

A list of other roles hosted on Galaxy should go here, plus any details regarding parameters that may need to be set for other roles, or variables that are used from other roles.

Example Playbook
----------------

Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:

    - hosts: servers
      roles:
         - { role: username.rolename, x: 42 }

License
-------

BSD

Author Information
------------------

An optional section for the role authors to include contact information, or a website (HTML is not allowed).

@@ -0,0 +1,2 @@
---
# defaults file for luther38.filebeat

@@ -0,0 +1,2 @@
---
# handlers file for luther38.filebeat
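The handlers file is just the generated stub. A minimal sketch of a handler the tasks could eventually notify (assuming the installed service is named filebeat) would be:

# hypothetical handler, not part of this commit
- name: restart filebeat
  become: true
  systemd:
    name: filebeat
    state: restarted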

@@ -0,0 +1,60 @@
galaxy_info:
  author: your name
  description: your description
  company: your company (optional)

  # If the issue tracker for your role is not on github, uncomment the
  # next line and provide a value
  # issue_tracker_url: http://example.com/issue/tracker

  # Some suggested licenses:
  # - BSD (default)
  # - MIT
  # - GPLv2
  # - GPLv3
  # - Apache
  # - CC-BY
  license: license (GPLv2, CC-BY, etc)

  min_ansible_version: 2.4

  # If this is a Container Enabled role, provide the minimum Ansible Container version.
  # min_ansible_container_version:

  # Optionally specify the branch Galaxy will use when accessing the GitHub
  # repo for this role. During role install, if no tags are available,
  # Galaxy will use this branch. During import Galaxy will access files on
  # this branch. If Travis integration is configured, only notifications for this
  # branch will be accepted. Otherwise, in all cases, the repo's default branch
  # (usually master) will be used.
  # github_branch:

  #
  # Provide a list of supported platforms, and for each platform a list of versions.
  # If you don't wish to enumerate all versions for a particular platform, use 'all'.
  # To view available platforms and versions (or releases), visit:
  # https://galaxy.ansible.com/api/v1/platforms/
  #
  # platforms:
  # - name: Fedora
  #   versions:
  #   - all
  #   - 25
  # - name: SomePlatform
  #   versions:
  #   - all
  #   - 1.0
  #   - 7
  #   - 99.99

  galaxy_tags: []
  # List tags for your role here, one per line. A tag is a keyword that describes
  # and categorizes the role. Users find roles by searching for tags. Be sure to
  # remove the '[]' above, if you add tags to this list.
  #
  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
  #       Maximum 20 tags per role.

dependencies: []
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.

@@ -0,0 +1,29 @@
---
- name: Install Elastic GPG Key
  become: true
  apt_key:
    url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
    state: present
    id: 46095ACC8548582C1A2699A9D27D666CD88E42B4

- name: Install apt-transport-https
  become: true
  apt:
    name: apt-transport-https

- name: Add Elastic Repo
  become: true
  copy:
    dest: '/etc/apt/sources.list.d/elastic-7.x.list'
    content: "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
  # copy:
  #   src: elastic-7.x.list
  #   dest: /etc/apt/sources.list.d/
  #   backup: yes

- name: Update Packages
  become: true
  apt:
    update_cache: true
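The commented-out copy block above hints at an earlier approach. Another option, sketched here under the assumption that python-apt is available on the target, is the apt_repository module, which can also refresh the cache in the same step:

# alternative sketch, not part of this commit
- name: Add Elastic Repo (apt_repository variant)
  become: true
  apt_repository:
    repo: "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
    filename: elastic-7.x
    state: present
    update_cache: true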

@@ -0,0 +1,7 @@
---
# tasks file for luther38.filebeat

- name: Install Filebeat on Ubuntu
  when: ansible_distribution == 'Ubuntu'
  include: ubuntu.yml
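The main tasks file dispatches on ansible_distribution, so extending the role to another distribution follows the same shape. A hypothetical CentOS branch (no centos.yml exists in this commit) might read:

# hypothetical; centos.yml is not part of this commit
- name: Install Filebeat on CentOS
  when: ansible_distribution == 'CentOS'
  include: centos.yml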

@@ -0,0 +1,44 @@
- name: Add Elastic repo
  include: install-repo.yml

- name: Install ElasticSearch from apt
  become: true
  apt:
    name: elasticsearch

- name: Update config
  become: true
  template:
    src: elasticsearch.j2
    dest: /etc/elasticsearch/elasticsearch.yml
    backup: yes

- name: UFW allow http_port
  become: true
  when: ufw_http_port == true
  ufw:
    rule: allow
    port: "{{ http_port }}"

- name: UFW allow transport_port
  become: true
  when: ufw_transport_port == true
  ufw:
    rule: allow
    port: "{{ transport_port }}"

- name: systemd enable elasticsearch
  become: true
  when: systemd_enabled == true
  systemd:
    name: elasticsearch
    enabled: true

- name: systemd restart elasticsearch
  become: true
  when: systemd_restart == true
  systemd:
    name: elasticsearch
    state: restarted
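Note that these tasks condition on ufw_http_port, ufw_transport_port, systemd_enabled, and systemd_restart, and interpolate http_port and transport_port, none of which are defined in the role's empty defaults file. A hypothetical defaults/main.yml that satisfies them (port numbers assumed from stock Elasticsearch) could be:

# hypothetical defaults, not part of this commit
ufw_http_port: false
ufw_transport_port: false
http_port: 9200
transport_port: 9300
systemd_enabled: true
systemd_restart: true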

@@ -0,0 +1,62 @@
---
# This will install winlogbeat on a windows host.
# https://artifacts.elastic.co/downloads/beats/winlogbeat/winlogbeat-7.0.0-windows-x86_64.zip

- name: make tmp folder
  win_file:
    path: c:\tmp\
    state: directory

- name: download winlogbeat
  win_get_url:
    url: 'https://artifacts.elastic.co/downloads/beats/winlogbeat/winlogbeat-{{ version }}-windows-x86_64.zip'
    dest: 'C:\tmp\winlogbeat-{{ version }}.zip'
    force: no

- name: unzip winlogbeat
  win_unzip:
    src: c:\tmp\winlogbeat-{{ version }}.zip
    dest: C:\tmp\winlogbeat-{{ version }}\
    creates: C:\tmp\winlogbeat-{{ version }}\

- name: Copy winlogbeat-{{ version }} folder
  win_command: powershell.exe copy-item -Path 'c:\tmp\winlogbeat-{{ version }}\winlogbeat-{{ version }}-windows-x86_64\' -Filter * -Recurse -Destination 'C:\Program Files\Winlogbeat\'
  args:
    creates: C:\Program Files\Winlogbeat\

- name: Update template
  win_template:
    src: winlogbeat.j2
    dest: C:\Program Files\Winlogbeat\winlogbeat.yml

- name: Check if winlogbeat service is installed
  register: service
  win_service:
    name: winlogbeat
# - debug: var=service

- name: Install service
  when: not service.exists
  win_command: powershell.exe -ExecutionPolicy ByPass -File install-service-winlogbeat.ps1
  args:
    chdir: C:\program files\winlogbeat\

- name: check status service
  register: service
  win_service:
    name: winlogbeat
# - debug: var=service

- name: restart service
  when: service.state == 'running'
  win_service:
    name: winlogbeat
    state: restarted

- name: start service
  when: service.state == 'stopped'
  win_service:
    name: winlogbeat
    state: started
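These Windows tasks interpolate a version variable that is never set anywhere in the commit. A minimal play supplying it (the 7.0.0 value is assumed from the URL comment at the top of the file, and the included filename is assumed) might look like:

# hypothetical play, not part of this commit
- hosts: windows
  vars:
    version: '7.0.0'
  tasks:
    - include: windows.yml  # assumed filename for the tasks above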

@@ -0,0 +1,2 @@
localhost

@@ -0,0 +1,5 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - luther38.filebeat

@@ -0,0 +1,2 @@
---
# vars file for luther38.filebeat

@@ -1,2 +1,5 @@
---
# defaults file for luther38.logstash

http_host: '127.0.0.1'

@@ -0,0 +1,29 @@
---
- name: Install Elastic GPG Key
  become: true
  apt_key:
    url: "https://artifacts.elastic.co/GPG-KEY-elasticsearch"
    state: present
    id: 46095ACC8548582C1A2699A9D27D666CD88E42B4

- name: Install apt-transport-https
  become: true
  apt:
    name: apt-transport-https

- name: Add Elastic Repo
  become: true
  copy:
    dest: '/etc/apt/sources.list.d/elastic-7.x.list'
    content: "deb https://artifacts.elastic.co/packages/7.x/apt stable main"
  # copy:
  #   src: elastic-7.x.list
  #   dest: /etc/apt/sources.list.d/
  #   backup: yes

- name: Update Packages
  become: true
  apt:
    update_cache: true

@@ -1,2 +1,6 @@
---
# tasks file for luther38.logstash

- name: Install Logstash on Ubuntu
  when: ansible_distribution == 'Ubuntu'
  include: ubuntu.yml

@@ -0,0 +1,44 @@
- name: Add Elastic repo
  include: install-repo.yml

- name: Install logstash from apt
  become: true
  apt:
    name: logstash

- name: Update config
  become: true
  template:
    src: logstash.j2
    dest: /tmp/logstash/logstash.yml
    backup: yes

- name: UFW allow http_port
  become: true
  when: ufw_http_port == true
  ufw:
    rule: allow
    port: "{{ http_port }}"

- name: UFW allow transport_port
  become: true
  when: ufw_transport_port == true
  ufw:
    rule: allow
    port: "{{ transport_port }}"

- name: systemd enable logstash
  become: true
  when: systemd_enabled == true
  systemd:
    name: logstash
    enabled: true

- name: systemd restart logstash
  become: true
  when: systemd_restart == true
  systemd:
    name: logstash
    state: restarted

@@ -0,0 +1,247 @@
# Settings file in YAML
#
# Settings can be specified either in hierarchical form, e.g.:
#
#   pipeline:
#     batch:
#       size: 125
#       delay: 5
#
# Or as flat keys:
#
#   pipeline.batch.size: 125
#   pipeline.batch.delay: 5
#
# ------------ Node identity ------------
#
# Use a descriptive name for the node:
#
# node.name: test
#
# If omitted the node name will default to the machine's host name
#
# ------------ Data path ------------------
#
# Which directory should be used by logstash and its plugins
# for any persistent needs. Defaults to LOGSTASH_HOME/data
#
path.data: /var/lib/logstash
#
# ------------ Pipeline Settings --------------
#
# The ID of the pipeline.
#
# pipeline.id: main
#
# Set the number of workers that will, in parallel, execute the filters+outputs
# stage of the pipeline.
#
# This defaults to the number of the host's CPU cores.
#
# pipeline.workers: 2
#
# How many events to retrieve from inputs before sending to filters+workers
#
# pipeline.batch.size: 125
#
# How long to wait in milliseconds while polling for the next event
# before dispatching an undersized batch to filters+outputs
#
# pipeline.batch.delay: 50
#
# Force Logstash to exit during shutdown even if there are still inflight
# events in memory. By default, logstash will refuse to quit until all
# received events have been pushed to the outputs.
#
# WARNING: enabling this can lead to data loss during shutdown
#
# pipeline.unsafe_shutdown: false
#
# ------------ Pipeline Configuration Settings --------------
#
# Where to fetch the pipeline configuration for the main pipeline
#
# path.config:
#
# Pipeline configuration string for the main pipeline
#
# config.string:
#
# At startup, test if the configuration is valid and exit (dry run)
#
# config.test_and_exit: false
#
# Periodically check if the configuration has changed and reload the pipeline
# This can also be triggered manually through the SIGHUP signal
#
# config.reload.automatic: false
#
# How often to check if the pipeline configuration has changed (in seconds)
#
# config.reload.interval: 3s
#
# Show fully compiled configuration as debug log message
# NOTE: --log.level must be 'debug'
#
# config.debug: false
#
# When enabled, process escaped characters such as \n and \" in strings in the
# pipeline configuration files.
#
# config.support_escapes: false
#
# ------------ Module Settings ---------------
# Define modules here. Modules definitions must be defined as an array.
# The simple way to see this is to prepend each `name` with a `-`, and keep
# all associated variables under the `name` they are associated with, and
# above the next, like this:
#
# modules:
#   - name: MODULE_NAME
#     var.PLUGINTYPE1.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE1.PLUGINNAME1.KEY2: VALUE
#     var.PLUGINTYPE2.PLUGINNAME1.KEY1: VALUE
#     var.PLUGINTYPE3.PLUGINNAME3.KEY1: VALUE
#
# Module variable names must be in the format of
#
# var.PLUGIN_TYPE.PLUGIN_NAME.KEY
#
# modules:
#
# ------------ Cloud Settings ---------------
# Define Elastic Cloud settings here.
# Format of cloud.id is a base64 value e.g. dXMtZWFzdC0xLmF3cy5mb3VuZC5pbyRub3RhcmVhbCRpZGVudGlmaWVy
# and it may have a label prefix e.g. staging:dXMtZ...
# This will overwrite 'var.elasticsearch.hosts' and 'var.kibana.host'
# cloud.id: <identifier>
#
# Format of cloud.auth is: <user>:<pass>
# This is optional
# If supplied this will overwrite 'var.elasticsearch.username' and 'var.elasticsearch.password'
# If supplied this will overwrite 'var.kibana.username' and 'var.kibana.password'
# cloud.auth: elastic:<password>
#
# ------------ Queuing Settings --------------
#
# Internal queuing model, "memory" for legacy in-memory based queuing and
# "persisted" for disk-based acked queueing. Defaults is memory
#
# queue.type: memory
#
# If using queue.type: persisted, the directory path where the data files will be stored.
# Default is path.data/queue
#
# path.queue:
#
# If using queue.type: persisted, the page data files size. The queue data consists of
# append-only data files separated into pages. Default is 64mb
#
# queue.page_capacity: 64mb
#
# If using queue.type: persisted, the maximum number of unread events in the queue.
# Default is 0 (unlimited)
#
# queue.max_events: 0
#
# If using queue.type: persisted, the total capacity of the queue in number of bytes.
# If you would like more unacked events to be buffered in Logstash, you can increase the
# capacity using this setting. Please make sure your disk drive has capacity greater than
# the size specified here. If both max_bytes and max_events are specified, Logstash will pick
# whichever criteria is reached first
# Default is 1024mb or 1gb
#
# queue.max_bytes: 1024mb
#
# If using queue.type: persisted, the maximum number of acked events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.acks: 1024
#
# If using queue.type: persisted, the maximum number of written events before forcing a checkpoint
# Default is 1024, 0 for unlimited
#
# queue.checkpoint.writes: 1024
#
# If using queue.type: persisted, the interval in milliseconds when a checkpoint is forced on the head page
# Default is 1000, 0 for no periodic checkpoint.
#
# queue.checkpoint.interval: 1000
#
# ------------ Dead-Letter Queue Settings --------------
# Flag to turn on dead-letter queue.
#
# dead_letter_queue.enable: false
# If using dead_letter_queue.enable: true, the maximum size of each dead letter queue. Entries
# will be dropped if they would increase the size of the dead letter queue beyond this setting.
# Default is 1024mb
# dead_letter_queue.max_bytes: 1024mb
# If using dead_letter_queue.enable: true, the directory path where the data files will be stored.
# Default is path.data/dead_letter_queue
#
# path.dead_letter_queue:
#
# ------------ Metrics Settings --------------
#
# Bind address for the metrics REST endpoint
#
# http.host: "127.0.0.1"
#
# Bind port for the metrics REST endpoint; this option also accepts a range
# (9600-9700), and logstash will pick up the first available port.
#
# http.port: 9600-9700
#
# ------------ Debugging Settings --------------
#
# Options for log.level:
# * fatal
# * error
# * warn
# * info (default)
# * debug
# * trace
#
# log.level: info
path.logs: /var/log/logstash
#
# ------------ Other Settings --------------
#
# Where to find custom plugins
# path.plugins: []
#
# ------------ X-Pack Settings (not applicable for OSS build)--------------
#
# X-Pack Monitoring
# https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html
#xpack.monitoring.enabled: false
#xpack.monitoring.elasticsearch.username: logstash_system
#xpack.monitoring.elasticsearch.password: password
#xpack.monitoring.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
#xpack.monitoring.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
#xpack.monitoring.elasticsearch.ssl.truststore.path: path/to/file
#xpack.monitoring.elasticsearch.ssl.truststore.password: password
#xpack.monitoring.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.monitoring.elasticsearch.ssl.keystore.password: password
#xpack.monitoring.elasticsearch.ssl.verification_mode: certificate
#xpack.monitoring.elasticsearch.sniffing: false
#xpack.monitoring.collection.interval: 10s
#xpack.monitoring.collection.pipeline.details.enabled: true
#
# X-Pack Management
# https://www.elastic.co/guide/en/logstash/current/logstash-centralized-pipeline-management.html
#xpack.management.enabled: false
#xpack.management.pipeline.id: ["main", "apache_logs"]
#xpack.management.elasticsearch.username: logstash_admin_user
#xpack.management.elasticsearch.password: password
#xpack.management.elasticsearch.hosts: ["https://es1:9200", "https://es2:9200"]
#xpack.management.elasticsearch.ssl.certificate_authority: [ "/path/to/ca.crt" ]
#xpack.management.elasticsearch.ssl.truststore.path: /path/to/file
#xpack.management.elasticsearch.ssl.truststore.password: password
#xpack.management.elasticsearch.ssl.keystore.path: /path/to/file
#xpack.management.elasticsearch.ssl.keystore.password: password
#xpack.management.elasticsearch.ssl.verification_mode: certificate
#xpack.management.elasticsearch.sniffing: false
#xpack.management.logstash.poll_interval: 5s
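
The logstash defaults earlier in this commit set http_host, yet http.host remains commented out in this template, so presumably the value would eventually be interpolated here. A one-line sketch of that wiring (not part of this commit):

# hypothetical wiring of the role default into logstash.j2
http.host: "{{ http_host }}"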