Compare commits
20 commits: terraform-...master

| Author | SHA1 | Date |
| --- | --- | --- |
| | d14c2aaa2c | |
| | 23ee532279 | |
| | 7cd173afcb | |
| | 2aa633d884 | |
| | c95f1099a0 | |
| | 2081ea949b | |
| | 23417a9a01 | |
| | e4589dc169 | |
| | 757bc655f6 | |
| | 55698826e4 | |
| | e9d2d3e52c | |
| | 76444888a8 | |
| | 0597719a6c | |
| | a26ed9ca32 | |
| | 9a21fd3da7 | |
| | b227938ad9 | |
| | c094921e17 | |
| | fad12ff06d | |
| | d369f84902 | |
| | c82c6eb42d | |

.devcontainer/Dockerfile (new file, 57 lines)
@@ -0,0 +1,57 @@

FROM ubuntu:focal

ARG USER_NAME=ansible
ARG USER_UID=110
ARG USER_GID=110

ENV DEBIAN_FRONTEND=noninteractive

RUN apt update -y && \
    apt install -y python3 \
    python3-pip \
    sshpass \
    git \
    libssl-dev \
    curl \
    unzip \
    apt-utils \
    software-properties-common \
    sudo

#RUN useradd -s /bin/bash --uid ${USER_UID} --gid ${USER_GID} -m ${USER_NAME}

RUN curl https://releases.hashicorp.com/terraform/0.13.3/terraform_0.13.3_linux_amd64.zip > /tmp/terraform.zip && \
    unzip -q /tmp/terraform.zip -d /bin/ && \
    /bin/terraform --version

# Install Docker CE CLI.
RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \
    && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" \
    && apt-get update \
    && apt-get install -y docker-ce-cli

RUN pip3 install \
    setuptools \
    molecule \
    #ansible==2.9.11 \
    ansible==2.10.2 \
    # Docker Support
    docker \
    # VMWare support
    PyVmomi \
    # Azure Support
    azure-mgmt-compute \
    azure-mgmt-storage \
    azure-mgmt-resource \
    azure-keyvault-secrets \
    azure-storage-blob \
    # AWS Support
    boto \
    boto3 \
    botocore

VOLUME [ "/var/run/docker.sock", "/workspace/" ]

CMD [ "sleep", "infinity" ]

.devcontainer/devcontainer.json (new file, 33 lines)
@@ -0,0 +1,33 @@

// For format details, see https://aka.ms/vscode-remote/devcontainer.json or this file's README at:
// https://github.com/microsoft/vscode-dev-containers/tree/v0.128.0/containers/azure-ansible
{
    "name": "Ansible Workspace",
    "dockerFile": "Dockerfile",
    //"image": "docker.pkg.github.com/jtom38/docker-ansible/ansible:2.10.3",
    //"image":"jtom38/ansible:2.10.3",
    "mounts": [
        // [Optional] Ansible Collections: Uncomment if you want to mount your local .ansible/collections folder.
        // "source=${localEnv:HOME}${localEnv:USERPROFILE}/.ansible/collections,target=/root/.ansible/collections,type=bind,consistency=cached",
        "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind"
    ],
    // Set *default* container specific settings.json values on container create.
    "settings": {
        "terminal.integrated.shell.linux": "/bin/bash"
    },
    // Add the IDs of extensions you want installed when the container is created.
    "extensions": [
        "vscoss.vscode-ansible",
        "redhat.vscode-yaml",
        "ms-vscode.azurecli",
        "ms-azuretools.vscode-docker",
        "samuelcolvin.jinjahtml"
    ],
    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],
    // Use 'postCreateCommand' to run commands after the container is created.
    "postCreateCommand": "ansible --version && ansible-galaxy install -r requirements.yml",
    // Uncomment when using a ptrace-based debugger like C++, Go, and Rust
    // "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ],
    // Uncomment to connect as a non-root user. See https://aka.ms/vscode-remote/containers/non-root.
    // "remoteUser": "vscode"
}

.gitignore (vendored, 25 changes)
@@ -1,7 +1,30 @@

# Ansible Files
*.retry

*.swp

*.ignore.*

# Ignore the roles path as production roles are handled with requirements.yml
roles/*

# Ignore collections
collections/

# OSX files
.DS_Store

# Ansible Inventory
hosts
win_hosts
hosts.d

# Ansible vault secret
# Used to decrypt the vault files
.ansible_vault

# Vagrant
.vagrant

# Debug files
ansible-vars.all
res.json

@@ -1 +0,0 @@
1556550033
@@ -1 +0,0 @@
1001
@@ -1 +0,0 @@
96b95408-ffc7-44c3-919f-402645785ccd
@@ -1 +0,0 @@
3d922cc5783b466ab28a7ae61cc46639
@@ -1 +0,0 @@
/home/jamestombleson/Documents/github/ansible
@@ -1,9 +0,0 @@
# This file loads the proper rgloader/loader.rb file that comes packaged
# with Vagrant so that encoded files can properly run with Vagrant.

if ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"]
  require File.expand_path(
    "rgloader/loader", ENV["VAGRANT_INSTALLER_EMBEDDED_DIR"])
else
  raise "Encoded files can't be read outside of the Vagrant installer."
end

.vscode/settings.json (vendored, new file, 9 lines)
@@ -0,0 +1,9 @@

{
    "cSpell.ignoreWords": [
        "ansible",
        "okta",
        "specialcounsel",
        "vault"
    ],
    "python.pythonPath": "/System/Library/Frameworks/Python.framework/Versions/2.7/Resources/Python.app/Contents/MacOS/Python"
}

.vscode/tasks.json (vendored, new file, 12 lines)
@@ -0,0 +1,12 @@

{
    // See https://go.microsoft.com/fwlink/?LinkId=733558
    // for the documentation about the tasks.json format
    "version": "2.0.0",
    "tasks": [
        {
            "label": "echo",
            "type": "shell",
            "command": "echo Hello"
        }
    ]
}

README.md (34 changes)
@@ -2,6 +2,13 @@

This repo contains my configuration and setup for my ansible use. Use at your own risk.

1. Install Ansible on the development device
2. Clone this repository to work in
3. Configure the devices to remote into
4. Configure Ansible's inventory files
5. Set the vault secret
6. Run connection tests

## Installers

### New hosts

@@ -25,11 +32,34 @@ powershell.exe -ExecutionPolicy ByPass -File $file

## Configuration

I have a basic configuration file in place at the root of this folder for ansible to find. If you work out of this directory, that configuration file is the one that gets loaded and takes effect.

## Inventory

Inventory files have been moved over to the .yml format. The ansible.cfg is looking for ./dev.yml as its default inventory file. For prod use, make another file that contains all servers that will be managed.

I have a template file in place that should only be used as a reference. Make a copy of that file, name it hosts, and then update that file.

The active configuration file points at a directory that contains all of the inventory files. This way the inventory can be parted out into multiple files rather than one big file.

## Vault Secret

The configuration file expects ./.ansible_vault to contain the secret for vault entries. Git is already configured to ignore this file. You will need to make this file and place your key in it so ansible can decrypt vaults as needed.

Run the following command and replace secret with your password. Once that is done, move on to generating the encrypted strings.

```shell
echo 'secret' > ./.ansible_vault
```

To generate secure strings for the inventory file, run the following command.

```shell
ansible-vault encrypt_string 'secret'
```

This will output the value that needs to be placed in the inventory file.
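
For illustration, a minimal sketch of where that encrypted value lands in an inventory file. The group and variable names here are placeholders, not files from this repo, and the ciphertext line stands in for the real encrypt_string output:

```yaml
# hypothetical inventory snippet (group/variable names are placeholders)
linux:
  vars:
    # paste the full output of `ansible-vault encrypt_string` below
    ansible_become_pass: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      3036...placeholder-ciphertext-from-encrypt_string...3431
```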

### Testing Linux devices

ansible.cfg (20 changes)
@@ -11,8 +11,11 @@

# some basic default values...

inventory = ./inventory/
#library = /usr/share/my_modules/:./modules/

# Looks like modules can only be pointed to a single directory
library = ./modules/
module_utils = /usr/share/my_module_utils/
remote_tmp = ~/.ansible/tmp
local_tmp = ~/.ansible/tmp

@@ -64,8 +67,11 @@ local_tmp = ~/.ansible/tmp

# ansible_facts.
# inject_facts_as_vars = True

# Paths to search for collections, colon separated
collections_paths = ./collections/:~/.ansible/collections:/usr/share/ansible/collections

# additional paths to search for roles in, colon separated
roles_path = ./roles/:~/Documents/Github/ansible-project/:/etc/ansible/roles

# uncomment this to disable SSH key host checking
#host_key_checking = False

@@ -108,7 +114,7 @@ roles_path = ./roles/:/etc/ansible/roles

# logging is off by default unless this path is defined
# if so defined, consider logrotate
#log_path = ./ansible.log

# default module name for /usr/bin/ansible
#module_name = command

@@ -137,7 +143,7 @@ roles_path = ./roles/:/etc/ansible/roles

# If set, configures the path to the Vault password file as an alternative to
# specifying --vault-password-file on the command line.
vault_password_file = ./.ansible_vault

# format of string {{ ansible_managed }} available within Jinja2
# templates indicates to users editing templates files will be replaced.

@@ -195,7 +201,7 @@ roles_path = ./roles/:/etc/ansible/roles

#callback_plugins = /usr/share/ansible/plugins/callback
#connection_plugins = /usr/share/ansible/plugins/connection
#lookup_plugins = /usr/share/ansible/plugins/lookup
#inventory_plugins = ./plugins/inventory:/usr/share/ansible/plugins/inventory
#vars_plugins = /usr/share/ansible/plugins/vars
#filter_plugins = /usr/share/ansible/plugins/filter
#test_plugins = /usr/share/ansible/plugins/test

@@ -255,7 +261,7 @@ roles_path = ./roles/:/etc/ansible/roles

# You can enable this feature by setting retry_files_enabled to True
# and you can change the location of the files by setting retry_files_save_path

retry_files_enabled = False
#retry_files_save_path = ~/.ansible-retry

# squash actions

ci/ansible-job-common/docker/jenkinsfile (new file, 92 lines)
@@ -0,0 +1,92 @@

pipeline {
    //agent any
    agent {
        docker {
            image 'jtom38/ansible:2.10.3'
            args '-u 0:0'
        }
    }
    triggers {
        cron('H 2 * * 7')
    }
    environment {
        GIT_BRANCH='dev'
        GIT_URL='https://github.com/jtom38/ansible.git'
        ANSIBLE_VAULT_FILE='./.ansible_vault'

        // This will allow us to skip SSH host key checking prompts
        ANSIBLE_HOST_KEY_CHECKING='False'

        DISCORD_HOOK=credentials('discord-mm-hook')
        DISCORD_JOB_NAME='ansible-job-common'
        DISCORD_FOOTER='Jenkins Automation'
    }
    stages {
        stage('Checkout-Code') {
            steps {
                sh ''' #!/bin/bash
                #echo "Cleaning up old builds"
                #rm ./gitAnsible -f -r

                git clone ${GIT_URL} gitAnsible || echo "skip checkout"
                cd ./gitAnsible
                git checkout ${GIT_BRANCH}
                git pull
                '''
            }
        }
        stage('Set-Secret') {
            steps {
                withCredentials([file(credentialsId: 'ansible-vault-file', variable: 'FILE')]) {
                    sh '''#!/bin/bash
                    echo "Set Vault File"
                    cp $FILE ./gitAnsible/.ansible_vault
                    '''
                }
            }
        }
        stage('Get-Galaxy-Requirements'){
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-galaxy install -r requirements.yml
                '''
            }
        }
        stage('Run-Linux-Common') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/linux/common.yml -i ./inventory
                '''
            }
        }
        stage('Run-Docker-Common') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/common.yml -i ./inventory
                '''
            }
        }
        stage('Run-MediaServer-Common') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/mediaserver/common.yml -i ./inventory
                '''
            }
        }
    }

    post {
        always {
            sh 'rm -f ./gitAnsible/.ansible_vault'
            discordSend description: "Job Status is "+currentBuild.currentResult+"!", footer: env.DISCORD_FOOTER, link: env.BUILD_URL, result: currentBuild.currentResult, title: env.DISCORD_JOB_NAME, webhookURL: env.DISCORD_HOOK
        }
        /*
        success {}
        failure {}
        */
    }
}

ci/ansible-job-common/k8s/jenkinsfile (new file, 71 lines)
@@ -0,0 +1,71 @@

pipeline {
    environment {
        GIT_URL='https://github.com/jtom38/docker-ansible.git'
        GIT_PROJECT='docker-ansible'
        GIT_BRANCH='main'
        DOCKER_USER='jtom38'
        DOCKER_IMAGE='ansible'
        DOCKER_TAG_1='2.9.11'
        DOCKER_TAG_2='2.10.3'
        DOCKER_REPO_LOCAL='192.168.1.221:30002'
        GITHUB_SITE='docker.pkg.github.com'
        TF_VER='0.13.5'
    }
    agent {
        kubernetes{
            //defaultContainer 'docker'
            yaml """
apiVersion: v1
kind: Pod
spec:
  containers:
  - name: docker
    image: docker
    command: ['cat']
    tty: true
    volumeMounts:
    - name: dockersock
      mountPath: /var/run/docker.sock
  volumes:
  - name: dockersock
    hostPath:
      path: /var/run/docker.sock
"""
        }
    }

    stages {
        stage('Pull-Source') {
            steps {
                sh "git clone ${GIT_URL}"
                //sh 'git checkout ${GIT_BRANCH}'
            }
        }
        stage('Build-Image-2.9') {
            steps {
                container('docker') {
                    sh 'docker build -t ${DOCKER_USER}/${DOCKER_IMAGE}:${DOCKER_TAG_1} ${GIT_PROJECT}/ --build-arg ANSIBLE_VER=${DOCKER_TAG_1} --build-arg TF_VER=${TF_VER}'
                }
            }
        }
        stage('Build-Image-2.10') {
            steps {
                container('docker'){
                    sh 'docker build -t ${DOCKER_USER}/${DOCKER_IMAGE}:${DOCKER_TAG_2} ${GIT_PROJECT}/ --build-arg ANSIBLE_VER=${DOCKER_TAG_2} --build-arg TF_VER=${TF_VER}'
                }
            }
        }
    }
    post {
        always {
            container('docker') {
                sh 'docker rmi ${DOCKER_USER}/${DOCKER_IMAGE}:${DOCKER_TAG_1}'
                sh 'docker rmi ${DOCKER_USER}/${DOCKER_IMAGE}:${DOCKER_TAG_2}'
                //catch ( echo 'Did not find ${DOCKER_USER}/${DOCKER_IMAGE}:${DOCKER_TAG_1} to remove.' )
                //sh 'docker rmi ${DOCKER_REPO_LOCAL}/${DOCKER_IMAGE}:${DOCKER_TAG_1}'
                //catch ( )
            }
        }
    }
}

ci/ansible-job-mediaback-backup/jenkinsfile (new file, 78 lines)
@@ -0,0 +1,78 @@

pipeline {
    //agent any
    agent {
        docker {
            image 'jtom38/ansible:2.10.3'
            args '-u 0:0'
        }
    }
    triggers {
        cron('H 2 * * 6')
    }
    environment {
        GIT_BRANCH='dev'
        GIT_URL='https://github.com/jtom38/ansible.git'
        ANSIBLE_VAULT_FILE='./.ansible_vault'
        ANSIBLE_HOST_KEY_CHECKING='False'
        DISCORD_HOOK=credentials('discord-mm-hook')
        DISCORD_JOB_NAME='ansible-job-mediaback-backup'
        DISCORD_FOOTER='Jenkins Automation'
    }
    stages {
        stage('Checkout-Code') {
            steps {
                sh ''' #!/bin/bash
                git clone ${GIT_URL} gitAnsible || echo "skip checkout"
                cd ./gitAnsible
                git checkout ${GIT_BRANCH}
                git pull
                '''
            }
        }
        stage('Set-Secret') {
            steps {
                withCredentials([file(credentialsId: 'ansible-vault-file', variable: 'FILE')]) {
                    sh '''#!/bin/bash
                    echo "Set Vault File"
                    cp $FILE ./gitAnsible/.ansible_vault
                    '''
                }
            }
        }
        stage('Get-Galaxy-Requirements'){
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-galaxy install -r requirements.yml
                '''
            }
        }
        stage('Run-MediaBack-Backup') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/mediaserver/back/backup.yml -i ./inventory
                '''
            }
        }
        stage('Run-MediaBack-Deploy') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/mediaserver/back/deploy.yml -i ./inventory
                '''
            }
        }
    }

    post {
        always {
            sh 'rm -f ./gitAnsible/.ansible_vault'
            discordSend description: "Job Status is "+currentBuild.currentResult+"!", footer: env.DISCORD_FOOTER, link: env.BUILD_URL, result: currentBuild.currentResult, title: env.DISCORD_JOB_NAME, webhookURL: env.DISCORD_HOOK
        }
        /*
        success {}
        failure {}
        */
    }
}

ci/ansible-job-newsbot-backup/jenkinsfile (new file, 78 lines)
@@ -0,0 +1,78 @@

pipeline {
    //agent any
    agent {
        docker {
            image 'jtom38/ansible:2.10.3'
            args '-u 0:0'
        }
    }
    triggers {
        cron('H 2 * * 6')
    }
    environment {
        GIT_BRANCH='dev'
        GIT_URL='https://github.com/jtom38/ansible.git'
        ANSIBLE_VAULT_FILE='./.ansible_vault'
        ANSIBLE_HOST_KEY_CHECKING='False'
        DISCORD_HOOK=credentials('discord-mm-hook')
        DISCORD_JOB_NAME='ansible-job-newsbot-backup'
        DISCORD_FOOTER='Jenkins Automation'
    }
    stages {
        stage('Checkout-Code') {
            steps {
                sh ''' #!/bin/bash
                git clone ${GIT_URL} gitAnsible || echo "skip checkout"
                cd ./gitAnsible
                git checkout ${GIT_BRANCH}
                git pull
                '''
            }
        }
        stage('Set-Secret') {
            steps {
                withCredentials([file(credentialsId: 'ansible-vault-file', variable: 'FILE')]) {
                    sh '''#!/bin/bash
                    echo "Set Vault File"
                    cp $FILE ./gitAnsible/.ansible_vault
                    '''
                }
            }
        }
        stage('Get-Galaxy-Requirements'){
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-galaxy install -r requirements.yml
                '''
            }
        }
        stage('Run-Newsbot-Backup') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/newsbot/backup.yml -i ./inventory
                '''
            }
        }
        stage('Run-Newsbot-Deploy') {
            steps {
                sh '''#!/bin/bash
                cd ./gitAnsible
                ansible-playbook ./playbook/docker/newsbot/deploy.yml -i ./inventory
                '''
            }
        }
    }

    post {
        always {
            sh 'rm -f ./gitAnsible/.ansible_vault'
            discordSend description: "Job Status is "+currentBuild.currentResult+"!", footer: env.DISCORD_FOOTER, link: env.BUILD_URL, result: currentBuild.currentResult, title: env.DISCORD_JOB_NAME, webhookURL: env.DISCORD_HOOK
        }
        /*
        success {}
        failure {}
        */
    }
}

docs/playbook.md (new file, 2 lines)
@@ -0,0 +1,2 @@

# Playbooks

docs/readme.md (new file, 40 lines)
@@ -0,0 +1,40 @@

# Ansible Docs

This is the location for my notes on ansible. Hopefully the things I work through will help someone else with learning how ansible functions.

## Playbooks

Playbooks should be viewed as the configuration file for your process. This is where all of your variables and basic tests should be defined. Do not treat roles like playbooks! More on that later.
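
As a minimal sketch of that view (the host group, variable, and package here are placeholders, not files from this repo):

```yaml
---
# hypothetical playbook: holds the variables and simple tasks
- hosts: linux
  become: true
  vars:
    example_package: htop   # placeholder variable consumed by a task
  tasks:
    - name: Ensure the example package is installed
      package:
        name: "{{ example_package }}"
        state: present
```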

### Documentation

YAML allows us to use `#` as a comment value, so make use of it. You can write notes for your team about a task with comments. Use them, it won't hurt.

When you are writing a playbook and need a quick refresher on the syntax a module supports, use the `ansible-doc moduleName` command. Example: `ansible-doc pip`

This gives you a quick way to see what flags are supported without having to go to the browser to find out.

## Vault

Vaults are a great way to store secrets in your source code. Never store unencrypted secrets in a file other than for quick testing. Even then, don't put un-encrypted secrets in public locations.

### Config changes

Before you use ansible-vault you will want to update your ansible.cfg file. Uncomment ```#vault_password_file``` and update it to point at where you will store your secret file. This file should be added to ```.gitignore``` so that the password is stored safely. For reference, I use .ansible_vault as my file; see my .gitignore file for how I ignore it.

### How to use Vault

Make sure you adjusted your ansible.cfg before doing this. That password is how vault decrypts values.

```bash
echo 'secret' > .ansible_vault
ansible-vault encrypt_string 'sshPassword'
```

With the value that was exported, you would add it to the playbook that needs to be able to decrypt the secret to use it.
Something to note: when the password stored in .ansible_vault (the file ansible.cfg points at) changes, vault may start failing to decrypt strings. I have not made it far enough with vault to confirm how true this is.
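
For illustration, a sketch of how that exported value sits in a playbook's vars. The variable name is a placeholder, and the ciphertext line stands in for the real encrypt_string output:

```yaml
# hypothetical playbook vars block
- hosts: linux
  vars:
    ssh_password: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      3036...placeholder-ciphertext-from-encrypt_string...3431
```

Ansible decrypts the variable at run time using the password file configured in ansible.cfg.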

## Roles

Roles are very important when it comes to Ansible. If you need to define how, say, pip handles actions, you would build a role for it, and within that role you define how pip should work. Do not treat roles as your playbook. They are meant to be used as a guide, and the playbook passes variables to the role to tell it how something should be configured.
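
A sketch of that split, with placeholder role and variable names: the playbook decides *what* should happen, the role carries the generic logic for *how*.

```yaml
# hypothetical playbook: passes configuration into a reusable role
- hosts: linux
  become: true
  roles:
    - role: pip_packages            # placeholder role name
      vars:
        pip_packages_to_install:    # placeholder variable the role would consume
          - requests
          - docker
```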

examples/hosts.yml (new file, 28 lines)
@@ -0,0 +1,28 @@

---
all:
  hosts:
    dmi-dev01:
  children:
    linux:
      hosts:
        172.20.0.142:
      vars:
        ansible_user:
        ansible_connection: ssh
        ansible_password:
        ansible_become_method: sudo
        ansible_become_pass:
        ansible_python_interpreter: /usr/bin/python3
    windows:
      hosts:
        dev01:
      vars:
        ansible_user:
        ansible_password:
        ansible_connection: winrm
        ansible_port: 5985
        ansible_winrm_scheme: http
        ansible_winrm_transport: ntlm
        ansible_winrm_server_cert_validation: ignore

@@ -1,21 +0,0 @@

[linux]
192.168.0.60

[linux:vars]
ansible_user=ansible
ansible_connection=ssh
ansible_password=
ansible_become_method=sudo
ansible_become_pass=
#ansible_python_interpreter=/usr/bin/python3

[windows]
192.168.0.2

[windows:vars]
ansible_user=ansible
ansible_password=
ansible_connection=winrm
ansible_port=5986
ansible_winrm_scheme=https
ansible_winrm_server_cert_validation=ignore

inventory/group_vars/ceph-primary.yml (new file, 3 lines)
@@ -0,0 +1,3 @@

ceph_primary: true

inventory/group_vars/duckdns.yml (new file, 8 lines)
@@ -0,0 +1,8 @@

duckdns_token: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  37306161633536613837613762313935343430386136363432363662656232353630616533383961
  3062333265616463336631313938343731326133306263330a313261376266373663633432393238
  65336430396335643135373233376336303162316335333361643264653761346238396534313162
  3262386535373266300a373662383064363364393461643739306631613430366264386335336261
  32376332376537363637316630313065333763616637616162383764613165336363316231613566
  3466303038646163633631633632323537393536666131356162

inventory/group_vars/kube-fs.yml (new file, 22 lines)
@@ -0,0 +1,22 @@

---
# Inventory vars for the 'kube-fs' host
monit_filesystems:
  - name: root
    path: /
    when:
      - usage: '> 80%'
        tries: 1
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

  - name: 'mnt/data'
    path: '/mnt/data'
    when:
      - usage: '> 80%'
        tries: 1
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

inventory/group_vars/kube-master.yml (new file, 142 lines)
@@ -0,0 +1,142 @@

---
# Inventory vars for the 'kube-master' host
kubernetes_role: master

monit_hosts:
  - name: jenkins
    group: kube
    address: 192.168.1.247
    when:
      - http:
          enabled: true
          username: ''
          password: ''
          port: 80
          protocol: http
          request: '/login'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: pihole
    group: kube
    address: 192.168.1.248
    when:
      - http:
          enabled: true
          username: ''
          password: ''
          port: 80
          protocol: http
          request: '/'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: nextcloud
    group: kube
    address: 192.168.1.249
    when:
      - http:
          enabled: true
          username: ''
          password: ''
          port: 80
          protocol: http
          request: '/'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: search
    group: kube
    address: 192.168.1.251
    when:
      - http:
          enabled: true
          protocol: http
          username: ''
          password: ''
          port: 80
          request: '/'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: get
    group: kube
    address: 192.168.1.252
    when:
      - http:
          enabled: true
          username: !vault |
            $ANSIBLE_VAULT;1.1;AES256
            63653338356435333664323436633063663132623530356162653130313435363761613633623266
            3237623031353935626131346461303034373433366136640a323436613831646432356566626564
            31653733346164383363373238343534613662613636346334646539636134386365656334333638
            3037626533363965630a373537363563373566613237663635363132353563656262363939316635
            3565
          password: !vault |
            $ANSIBLE_VAULT;1.1;AES256
            32383461323230323435386635316166353461316237356138666335363734333338353131303536
            3032383231323461336565303231316338666436313361630a343332383163333932363734653734
            62653266623764333335663335623162616235323232653936663166393436633734303363373662
            6330363538616166320a353063653863613862373834303331666138333836313530313132613962
            3034
          port: 80
          protocol: http
          request: '/'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: son
    group: kube
    address: 192.168.1.253
    when:
      - http:
          enabled: true
          username: ''
          password: ''
          port: 80
          protocol: http
          request: '/'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

  - name: registry
    group: kube
    address: 192.168.1.250
    when:
      - http:
          enabled: true
          username: !vault |
            $ANSIBLE_VAULT;1.1;AES256
            63653338356435333664323436633063663132623530356162653130313435363761613633623266
            3237623031353935626131346461303034373433366136640a323436613831646432356566626564
            31653733346164383363373238343534613662613636346334646539636134386365656334333638
            3037626533363965630a373537363563373566613237663635363132353563656262363939316635
            3565
          password: !vault |
            $ANSIBLE_VAULT;1.1;AES256
            32383461323230323435386635316166353461316237356138666335363734333338353131303536
            3032383231323461336565303231316338666436313361630a343332383163333932363734653734
            62653266623764333335663335623162616235323232653936663166393436633734303363373662
            6330363538616166320a353063653863613862373834303331666138333836313530313132613962
            3034
          port: 443
          protocol: https
          request: '/v2'
        then:
          alert: false
          exec: "{{ monit_discord_alert_script }}"
          restart: false

inventory/group_vars/kube-node.yml (new file, 1 line)
@@ -0,0 +1 @@

kubernetes_role: "node"

inventory/group_vars/kube.yml (new file, 56 lines)
@@ -0,0 +1,56 @@

kube_fs_ip: 192.168.1.222
kube_fs_mount: /mnt/data

monit_processes:
  - name: ssh
    pidfile: '/var/run/sshd.pid'
    matching: ''
    start: '/bin/systemctl start ssh'
    stop: '/bin/systemctl stop ssh'
    timeout: '30 seconds'
    when:
      - type: 'totalmem'
        usage: '> 80%'
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

  - name: 'kubelet'
    pidfile: ''
    matching: 'kubelet'
    start: '/bin/systemctl start kubelet'
    stop: '/bin/systemctl stop kubelet'
    timeout: '30 seconds'
    when:
      - type: 'totalmem'
        usage: '> 80%'
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

  - name: docker
    pidfile: '/var/run/docker.pid'
    matching: ''
    start: '/bin/systemctl start docker'
    stop: '/bin/systemctl stop docker'
    timeout: '30 seconds'
    when:
      - type: 'totalmem'
        usage: '> 80%'
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

  - name: containerd
    pidfile: ''
    matching: containerd
    start: '/bin/systemctl start containerd'
    stop: '/bin/systemctl stop containerd'
    timeout: '30 seconds'
    when:
      - type: 'totalmem'
        usage: '> 80%'
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

inventory/group_vars/linux-all.yml (new file, 199 lines)
@@ -0,0 +1,199 @@

---

# Vars file for linux group
ansible_user: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  32323438633437386234366165646365303038656639396632313933396431376136343837393738
  6131653037623836383032613766653233656338303566330a653938333062363432643365316133
  61626164383063636362343362663133653964646139386635626365373564306238306566633930
  3139363666373864620a656336653633376539616337303361333936313462623963643861646166
  3364
ansible_password: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  63363131623134643365366432393962613032383931613663353233356334316536326334333739
  3130663431363561373437353262313430623131363864350a393064636161613232633036303139
  65643166363565343562663937343866623035356639333635636432333363653463666433303035
  6134646432353330630a343839643163323733623265356261306661396332326465656561633734
  6231
ansible_connection: ssh
ansible_become_method: sudo
ansible_become_pass: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  63363131623134643365366432393962613032383931613663353233356334316536326334333739
  3130663431363561373437353262313430623131363864350a393064636161613232633036303139
  65643166363565343562663937343866623035356639333635636432333363653463666433303035
  6134646432353330630a343839643163323733623265356261306661396332326465656561633734
  6231
ansible_python_interpreter: /usr/bin/python3

ansible_ssh_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  34393539356664633665366432336234313864616432636133613432393565343436326338613161
  6435306431303866383037383434333138333534623262320a383539363161313532626362666434
  65396432373635646666323834303530616439376565663235643664336665343133336230396334
  6164303235383565370a363961366162346464633132353061346538373034343835613561333533
  63343933303936326533386563613430383832656366653239636463663032333762306461363238
  37623734363130356235373330643763333635346136613331643030666434356131346566343064
  39306534646361376333643834393162323839343762333564343465343136643464356531353732
  30336662656665626561306633343533346465353065333937663039363666323130613936656332
  66336566653239323235363336376564366365363735663136366635396338336432656634633866
  33393936373539626661353461313238646430633139346434343936373137653836616438396261
  62643739666463653832373065373064333939366636663735363930613436356631313531303232
  37343832663166663733306331303034333530633362363433303062643362333532626638663464
  32393661376238376562376232643862363733343865313330616538363166343062303332616538
  64646538646538356561323131666233633737393438633937623237323562356232303431313865
  34396531633835323965643664623830663039356438373563616463626430333430626132313531
  37336536306638356532663436363730663662363064366332383534383866363532383633336663
  34303566303761616630383537373566393834306164616134626135393434626637633666396234
  66326233633061393162343638373130356562656432343734633539336131613934643830633837
  35346238316131333537343066343033613565373762363463366539383234326332333735353330
  65373462633262326331363563356537353038306132636132366530646430363864656333656362
  64643562623630316135316232336434666237643834326630303439336132636131383039353161
  62306637303866333436613539393664353835393637353061356633613263633335623138663361
  33366462303735623666663232393165316235313064663830663732643834346135343938666463
  66353037353031613063353263376162353562663062653733636635306465633965626439613965
  32666536626133376166636330323366393064646137333134343530316565626366623137633766
  64396138316436646639393336643535626162343930633036663838613438353761373132626531
  30633833343831346538383039376661313866643065363762353238326335393934353839656132
  34646434353465653837636335373930373763363764623161333930366431633333306434393539
  3463

# Webhook info for sending messages to servers
discord_corgi_hook: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61336238643261633430303664386565333131313636663235663733656538633165623438333030
  3739316137393937646162313266636563656132653866620a653465643138323363376263383931
  62666461333464393165383466333764303235613562313631353239306664373761393663656533
  3166663432313933320a333332383737616337336562623435623131653935353165316266356133
  33343166616161343734376465666563313932333864343230623339326562653435323862623031
  38376666326536393034306161636563633061383239323330326232326165383538323266323736
  65313736656530653261646562646161643062616533363633633935653566373362366539623030
  30313331323661616438303031633666343231353837383561613666653937616265613533306436
  65343435363937393732316333633961376635343332316132396238383435653364616138386438
  6634346462383838636264656436326361316535393139636436
discord_test_hook: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61333433653661666235306562643161626638313734383966326536313539376362643636636135
  6631396661666430326365393136363161366661386261350a653566663034303065336433623433
  30326637343438646531313461323435316164623264363330326538613431366633346238343561
  3931613531373935390a323961386639623033393233363839383366303963363333623732383432
  34353334373264643136396633393133323764656233393037366132303866383537316666636336
  35636530643532633930393262336266346561336662633765303861363763313866376238616631
  30646137303933653932613834353337386338666535313966343963346363323534633262363064
  37363833653835623730346638393136343039343730653231626438376638666139386635323538
  65303666333566323538373335363565646134643034373039326164633736373036333634363135
  3235656231373565656335646461663838613738326362663535

discord_mm_hook: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33636534323837333466613165383738343332636530643864623464353461666164376231393839
  6431363038633835383330303230363034363562626439300a376563373061636562353339346165
  36303364616564306361663731373062653432613732616538323361626431343965663536383332
  6337326631366239330a373232663265306530303166393634643430373438656236366262353962
  35323739336561313434333066393731326639636239373935383663386230373662376564663630
  36373239386335643061306564343838306663306362326631393765623335316438363762393931
  63383965363634626662663238383965643036303438326230623635366363643661393039316430
  62356465356433643639326265646237653231376466346664633862353563376266303238663766
  32613665626238363338343131623666306431313961653937333436343136633232346332393566
  3537666436643536373361393932353430636337386162623735

discord_mmt_hook: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31366161353962386638363763353665353934346338343539323465643131333263333463643137
  3334353838303063393133666539376431646330663339380a353139343630396237643463653761
  66613232633238366466323464363866346261643730643634396665323036373864363239386563
  3536333261626662330a326666306235623230613334356563373535353431626538633937366530
  38356533616265353432653034666537323638643664636630626534653065333564613337326531
  32343530376465386630323366383165306334643136363637313538663165393435623363633564
  35343061366332353035643338363161306534306264633931356265353362343536656333366365
  62656330653136663434313364643331333164383063306566663030363439386431633531303764
  35633437326538353230646366323163366663663364646464626336636433386664393937366639
  3762303365626430653532653633633663653265666139313638

# Used with geerlingguy.ntp
ntp_enabled: true
ntp_timezone: 'America/Los_Angeles'

zsh_backup: false
zsh_ohmyzsh_install: /home/miharu
zsh_plugins: git ansible dotenv

# Used with
unattended_automatic_reboot: true

# Monit

monit_alert_discord:
  deploy: true
  webhook: !vault |
    $ANSIBLE_VAULT;1.1;AES256
    36326161303166663837313931653436636531373434353964663766363566336231653063303831
    6239666434326439333362313961663638303866653735640a626363363031646236613039353263
    37343331353138303562653237636638623965656133353731383265343164393037323363643666
    3863643462376538630a613430323030656530386638643537643430343339666561373863656539
    35663934623331613332343538326334633361333566623466646235396134386237306536646238
    65653634343537616534303237663763653065333333663266306237363561626132343638613363
    31636133626635663666386663363332653465336265656433353332643638396235343934646432
    34333839666637613234666562633130343536663534396433393164306135376435363434356565
    39386439613861383433656666613231653636363864646564656564613866623934653539313036
    6664326337363335343236383362663134383464396539356263
  username: 'Monit'

monit_processes:
  - name: ssh
    pidfile: '/var/run/sshd.pid'
    matching: ''
    start: '/bin/systemctl start ssh'
    stop: '/bin/systemctl stop ssh'
    timeout: '30 seconds'
    when:
      - type: 'totalmem'
        usage: '> 80%'
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

monit_system:
  hostname: "{{ ansible_hostname }}"
  when:
    - type: cpu
      usage: "usage (user) > 80%"
      cycles: 5
      alert: false
      exec: "{{ monit_discord_alert_script }}"
    - type: cpu
      usage: "usage (system) > 30%"
      cycles: 5
      alert: false
      exec: "{{ monit_discord_alert_script }}"
    - type: cpu
      usage: "usage (wait) > 20%"
      cycles: 5
      alert: false
      exec: "{{ monit_discord_alert_script }}"

    - type: memory
      usage: "usage > 90%"
      cycles: 5
      alert: false
      exec: "{{ monit_discord_alert_script }}"

    - type: swap
      usage: "usage > 50%"
      cycles: 5
      alert: false
      exec: "{{ monit_discord_alert_script }}"

monit_filesystems:
  - name: root
    path: /
    when:
      - usage: '> 80%'
        tries: 1
        cycles: 1
        alert: false
        exec: "{{ monit_discord_alert_script }}"

inventory/group_vars/localhost.yml (new file, 12 lines)
@@ -0,0 +1,12 @@

discord_mm_hook: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33636534323837333466613165383738343332636530643864623464353461666164376231393839
  6431363038633835383330303230363034363562626439300a376563373061636562353339346165
  36303364616564306361663731373062653432613732616538323361626431343965663536383332
  6337326631366239330a373232663265306530303166393634643430373438656236366262353962
  35323739336561313434333066393731326639636239373935383663386230373662376564663630
  36373239386335643061306564343838306663306362326631393765623335316438363762393931
  63383965363634626662663238383965643036303438326230623635366363643661393039316430
  62356465356433643639326265646237653231376466346664633862353563376266303238663766
  32613665626238363338343131623666306431313961653937333436343136633232346332393566
  3537666436643536373361393932353430636337386162623735

inventory/group_vars/mediaserver-back.yml (new file, 8 lines)
@@ -0,0 +1,8 @@

---

pathDockerRoot: "/docker"
pathConfig: "/docker/cfg"
pathDownloads: "/docker/downloads"
pathNfsBackup: "/docker/nfs/backup"
pathNfsMedia: "/docker/nfs/media"
pathNfsSync: "/docker/nfs/sync"

inventory/group_vars/mediaserver.yml (new file, 19 lines)
@@ -0,0 +1,19 @@

---

# local dir
pathDockerRoot: "/docker"
pathConfig: "{{ pathDockerRoot }}/cfg"
pathNfs: "{{ pathDockerRoot }}/nfs"
pathBackups: "/tmp/backups"

# nfs dir
pathNfsBackup: "{{ pathNfs }}/backup"
pathNfsMedia: "{{ pathNfs }}/media"
pathNfsSync: "{{ pathNfs }}/sync"
#pathNfsTmp: "{{ pathNfs }}/tmp"
pathNfsTmp: "/docker/tmp/"
pathMedia: "{{ pathDockerRoot }}/media"
nfsAddress: 192.168.1.85
nfsDockerConfig: "docker"
nfsMedia: "plex"
dockerNetwork: mediaserver

inventory/group_vars/newsbot.yml (new file, 153 lines)
@@ -0,0 +1,153 @@

---

twitter_api_key: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61313764363239636663663835393231396638623262366231383530323634383031633935626663
  6232633432356134356334616238643865616663316631370a346536666166376533663338393037
  39386632363636343131316363303564623232623766613863396261666230643765623836313030
  3262373162363837660a376232363736643439313564636565383132323033643562363031386633
  38373561376338363062326131326265373931663633643434646531363934333430

twitter_api_key_secret: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35383761623635613065653933303063616166666165353838613865383434343831633636363339
  3064373731653633653963363932353232633030663133610a643634363830336666336266316133
  30343864643330323064316465313265353065633637333537626461613235356366313966623866
  3031326432333534630a396566643032376638306232303631356164663662326634313234653861
  31396435633936393461316539316431383131656566656539353463633833353465633337613737
  36376165366136636164653261633161663733623962643532316337613637623837626137656664
  643731343233353966393236393661346335

twitch_client_id: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  39346539366562656137366431643765316334313037336265326635306266643030303335316339
  3336633332386634393063373961623465653439376166370a303764336436323062343537366464
  34313164663661303739333039346461663035376338303331326465393639356335663863323239
  6464303266613533340a643162333939313462653763313862383662616436373563343838653330
  37383166383535383639323632376135623363613737353235313237303965613437

twitch_client_secret: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35626166373436633664313230396131626335363539666431623538333733393334306363633236
  6461313061343463633065353139333330396466616639640a656433633936323436346433656337
  66623433653836356261643030623730386563633332626335373865333231636437376532313535
  6330616464636235340a363965373461616161616530323035613762376363616364663634303936
  65343432616264383263366566646238316335663134323664663065373366333935

mm_pogo_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  36373534636139616466663237373165386134353466343961303064313731316531343733623065
  3866366430376362633332353061386531626461353465320a323639636538376238383037643530
  32613532363465346630306561346432613565636263613832623136396462616663303431633932
  3938623565646631640a643466643563393333653066313230373862613839326366613066643761
  30303731386634323339623234623933333936646262383963373865633366633965633536393764
  38663032386138303862363537386365623439666532326432336565663766613066366539346561
  62316132353637373434626639353164616637646438333239616634623936303632306634343465
  63326134633636613335633436653062313566323066343736356635653934386633396466643461
  65666137323066323938663938333266366564646636623130326631363763336637326430313230
  65393939323830396232343532626431383835653034616435373961346465656531636530623635
  37373164656332653133336464653738383830306561326639373063636635663365363235343437
  64616163643763343732353930623537656663303136376565333639313362336665346633306430
  61383864323033363332343663663139636263623637326236626336356163356231643835666463
  33666165396438313837393534383331386630666130653663353533386238613336306233306537
  64623538666532623933616566623966346439383465656639386266376130323764316162306639
  65323163643333373332333065636363343761316632656363333463626137353531666532346439
  6164

mm_pso2_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35393664303332376363616663366438613866636166643037653439663061646361386337313462
  6636356464373361363435613933313263613630363566350a363166623933616364386566363366
  38336634396536643865666235353263623662653330323438303063616232643934386666303439
  3739356265313164300a306234346635626539303331613338353936636565383139333433636231
  32633537353630306163646664613562343264306537353934666631613964303336646232386263
  65366364333631663864306536663438626532353764336139636464376662316231323432313538
  35646538626237306233616339643733303738353433303531396166633563333162376439333264
  38343038626165303561623834313964363165646235626561623137393963363834383430386331
  33626663346466383864303037386565386638653439373262323637306562376335306637366435
  6663386531313636663932356162303962306434306136323865

mm_ffxiv_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  65393564633762333836353430656232346566323132346335623633633637633262313534316438
  6133333037383733636261383332653864313430383539370a626163373536303832326562653666
  38353164653966633264373237636130613333383563356164303665653939333966356564643534
  3838366237383161380a663239663332653565333833373563396430613138633763633062353132
  61643630343138653135356230383932383738373734373961336636353465343364623665623031
  64316534663161623231303132393266666264396230353730373134636461656664343837383037
  34646538643432323166643532393864363266336432346462353364633432393265376433656639
  38653562653261613864313130373830396336623034333862613939336132383639653439363435
  37626561613739386263643666613964356338656438323930373564346331613138323534303434
  3235343836616230336132323661656337396238343231316463

mm_programmer_humor_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  39646538326463373964303866356139383565316563653334623662343730373266343264656436
  6663313664326536656562383330313733363066356338630a353833306464373133313930303933
  34636564333033333264636461326630623564643632663565623734376266613062383362323032
  3562653435626130610a393337646136666434373630313565356665613534323133363735613038
  38633033366238613833636364303033663166363138666531626439386261656566646432356636
  61376664333439363436343861346564393966343335313033373765393538393438353831633834
  65373333383465626634383832623739323538616565666665626634383962306534623035306666
  30653038393730306162356266656635326163653564663037353238313039373164616165356265
  32393262356537646633646431326465636564653639643863633561623966396534343461656533
  38653937626662633661646136346333316263653137353430363638306663343362303334393138
  32313963383937393632343037656461303961313561323636333336653339313038626338366637
  35356535643834643336303534306237643261663638333830643163353430363933616663383139
  63366630646563313737376535336566386334393732653936623534356363643933643133316561
  33383431636630643362303137633136303437616538656431383134396136636635333139333664
  31613439373263653663383361383463346663363330616337376535316331326332306330633133
  32353039343832386662666436313465626137303730383934656231383464373464626533313139
  3362

mm_lrr_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  65383633363536373265663466363134306663613065613061633363316465383236303634643465
  3364323031353630663338333635613435656466386264610a373162333135643335306439393365
  66383937636336316136613330343232363531656262373663663039356432316662333839326162
  6339353538383464380a643464346363656262663030643362313536613466343635323064393661
  31626264663532333738356235336539623531653063333537343065663335346261333165666265
  34303732313566613238326563653137636537333631653132643637623832393832623666616535
  62333363343930313131323663396464653665356133383737356130323630643161343265316234
  61616335343266333439323138306635383965626162643466376339386236653463623333393966
  30343739313661313638613338353639376536356564633836323937313034343735383732363863
  3231633864663530636366326130356262326335323633636432

mm_genshin_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35663738356138653465316333653838376638613166313237666537383162366434333535333437
  6565653834313534303863653738386265383831633561390a323865373661643530336434333235
  33373161626362643338313732643633336530643837366330626362353436353635383065303461
  3533366263633736650a363438643531653637393436623466626639306162333932303466383062
  33663038316434613635363239366336323736666563343130373261346666306334366164663138
  30333936393432373462663639633366323530323262383463333334353463633834643730396361
  30656439303466363536366136356534643936333962306333303037336435396465613562316662
  66396432373364376664346531373564333362636461303062363435616439653939363230656233
  64643438373330386335333837666163613738386538383063663165663636393234306430323431
  3666366162613137386662303961306564363264616662633136

mm_happyfeed_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35356264383331666662333834313637643936653765613637613163663939636130663732653566
  6265616462306533326639633437363337613565353865300a343633343238663731373162363162
  32616238313834633661353138396562386338363433363332616165373031326132653561353531
  3130636139373264660a336333303962636436663336396339326233363332663061663964323637
  32376434356339313666396133373238373138353237656666613966613739363136386639373735
  65663462383536613437353262383566323661643530316234393139303734383234653431616362
  61356533383539333435376163666232326265366537336262376234633465663738643662353563
  35316632376165376466333666663761346638303935313531303062646336353134396334303464
  30386537346565343332663963326337333965313436316363303033643034643131343537616463
  6431623162383762353230373534663737643938656636626239

sin_newsbot_hooks: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  66386134653336623134663031343166663365396664636563623664636365323462356562306462
  6137626235306431336439343966636365663632396562650a643134646334633663653339373730
  62383235353136613234623032663037313934313363643232333463323439633038303532333165
  3261353335653434330a303637313966613862613537656135636137663934653530323532313136
  36656465393835333264313037353766346432633535303735636461346161316433383363663438
  35373735623234396662626237663766393437333931643738393165656431636166666338633565
  32636636656264306266613539326566653930383336386431646462336632613563626531616662
  62306331303331323134323738646165613433303430323937663764336135386438313937316461
  65626234343566646435663834366633323038613332323232636235383933623432333366646137
  3133366634306166376464393738613231343166393266636237
94
inventory/inv.yaml
Normal file
94
inventory/inv.yaml
Normal file
@ -0,0 +1,94 @@
all:
  children:
    linux-all:
      hosts:

      children:
        kube:
        kube-fs:
        docker:
        jenkins:
        ceph:

    docker:
      hosts:
        192.168.1.243:
        192.168.1.244:

        192.168.1.226:

    mediaserver:
      children:
        #192.168.1.243:
        #192.168.1.244:
        mediaserver-front:
        #mediaserver-back:

    mediaserver-back:
      hosts:
        192.168.1.244:
    mediaserver-front:
      hosts:
        192.168.1.226:

    newsbot:
      hosts:
        192.168.1.244:
    duckdns:
      hosts:
        192.168.1.244:
    pihole:
      hosts:
        192.168.1.223:

    jenkins:
      hosts:
        192.168.1.246:

    ceph:
      children:
        ceph-primary:
        ceph-node:

    ceph-primary:
      hosts:
        #fs01.k8s.home.local:
        192.168.1.222:
      vars:
        ceph_primary: true

    ceph-node:
      hosts:
        #fs02.k8s.home.local:
        192.168.1.225:
      vars:
        ceph_primary: false

    kube:
      children:
        kube-master:
        kube-node:

    kube-master:
      hosts:
        # master.k8s.home.local:
        192.168.1.221: # master

    kube-node:
      hosts:
        #node01.k8s.home.local:
        #node02.k8s.home.local:
        #node03.k8s.home.local:
        192.168.1.223: # node01
        # 192.168.1.224: # node02
        # 192.168.1.226: # node03
        # 192.168.1.225: # node04

    kube_media_node:
      hosts:
        192.168.1.223:

    kube-fs:
      hosts:
        fs01.k8s.home.local:
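Any of the groups above can be targeted straight from a play header. A minimal sketch, assuming this file is passed with -i inventory/inv.yaml:

- name: Ping all kube masters
  hosts: kube-master
  tasks:
    - ping: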
56
jenkinsfile
Normal file
@@ -0,0 +1,56 @@
pipeline {
    //agent any
    agent {
        docker {
            image 'ansible:2.9.11'
            args '-u 0:0'
        }
    }
    environment {
        GIT_BRANCH='dev'
        GIT_URL='https://github.com/jtom38/ansible.git'
        ANSIBLE_VAULT_FILE='./.ansible_vault'
        ANSIBLE_HOST_KEY_CHECKING='False'
    }
    stages {
        stage('Checkout-Code') {
            steps {
                sh ''' #!/bin/bash
                echo "Checking where we start"
                pwd

                echo "Checking current dir"
                ls

                echo "Checking active user"
                whoami

                echo "Cleaning up old builds"
                rm ./gitAnsible -f -r

                git clone ${GIT_URL} gitAnsible
                cd ./gitAnsible
                git checkout ${GIT_BRANCH}
                '''
            }
        }
        stage('Get-Galaxy-Requirements'){
            steps {
                sh '''#!/bin/bash
                pwd
                cd ./gitAnsible
                ansible-galaxy install -r requirements.yml
                '''
            }
        }
        stage('Run-Linux-Common') {
            steps {
                withCredentials([file(credentialsId: 'ansible-vault-file', variable: 'FILE')]) {
                    sh '''#!/bin/bash
                    ansible-playbook ./gitAnsible/playbook/linux/common.yml -i ./gitAnsible/inventory --vault-password-file $FILE
                    '''
                }
            }
        }
    }
}
12
makefile
Normal file
@@ -0,0 +1,12 @@
.PHONY: help
help: ## Shows this help command
	@egrep -h '\s##\s' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'

build-image-2.9:
	docker build -t ansible:2.9.11 ./.devcontainer/

build-image-2.10:
	docker build -t ansible:2.10 -f ./.devcontainer/Dockerfile_210 ./.devcontainer/

install-requirements: ## Install Ansible Galaxy Requirements
	ansible-galaxy install -r requirements.yml
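The install-requirements target assumes a requirements.yml at the repo root, which this diff does not include. A hypothetical sketch, inferred only from the geerlingguy roles and community.general modules the playbooks below rely on:

# Hypothetical requirements.yml; the exact role list is an assumption.
roles:
  - name: geerlingguy.docker
  - name: geerlingguy.pip
collections:
  - name: community.general  # provides the archive module used by backup.yml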
178
modules/discord.py
Normal file
@@ -0,0 +1,178 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, James Tombleson <luther38@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)


from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
import json

ANSIBLE_METADATA = {
    'metadata_version': '1.0',
    'status': ['preview'],
    'supported_by': 'community'
}

DOCUMENTATION = '''
---
module: discord_webhook

short_description: This module sends messages to a discord webhook.

version_added: "2.4"

description:
    - "This is my longer description explaining my test module"

options:
    webhook_url:
        description:
            - This defines where ansible will send the json payload for discord to intake.
        required: true
    content:
        description:
            - This defines the message that will be presented within the payload.
        required: true

    username:
        description:
            - This will allow you to overwrite the default webhook name.
            - Useful for when different services use the same webhook.
        required: false

    avatar_url:
        description:
            - Add a URL here if you want to overwrite the default avatar image configured on the webhook.
        required: false


author:
    - James Tombleson (github.com/luther38)
'''

EXAMPLES = '''
# Pass in a message
- name: Test with a message
  my_test:
    name: hello world

# pass in a message and have changed true
- name: Test with a message and changed output
  my_test:
    name: hello world
    new: true

# fail the module
- name: Test failure of the module
  my_test:
    name: fail me
'''

RETURN = '''
original_message:
    description: The original name param that was passed in
    type: str
    returned: always
message:
    description: The output message that the test module generates
    type: str
    returned: always
'''


def run_module():
    # define available arguments/parameters a user can pass to the module
    # (this dict was missing in the original file; restored from the standard
    # Ansible module skeleton that the rest of this function follows)
    module_args = dict(
        name=dict(type='str', required=True),
        new=dict(type='bool', required=False, default=False)
    )

    # seed the result dict in the object
    # we primarily care about changed and state
    # change is if this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    result = dict(
        changed=False,
        original_message='',
        message=''
    )

    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    if module.check_mode:
        module.exit_json(**result)

    # manipulate or modify the state as needed (this is going to be the
    # part where your module will do what it needs to do)
    result['original_message'] = module.params['name']
    result['message'] = 'goodbye'

    # use whatever logic you need to determine whether or not this module
    # made any modifications to your target
    if module.params['new']:
        result['changed'] = True

    # during the execution of the module, if there is an exception or a
    # conditional state that effectively causes a failure, run
    # AnsibleModule.fail_json() to pass in the message and the result
    if module.params['name'] == 'fail me':
        module.fail_json(msg='You requested this to fail', **result)

    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    module.exit_json(**result)


def basic(ansibleModule):

    headers = '{ "Content-Type": "application/json" }'
    payload = {
        # params (not argument_spec) holds the values the task passed in
        'content': ansibleModule.params['content']
    }
    # fetch_url expects the AnsibleModule itself; Discord webhooks take a
    # POST and answer 204 No Content on success
    resp, info = fetch_url(
        module=ansibleModule,
        url=ansibleModule.params['webhook_url'],
        data=ansibleModule.jsonify(payload),
        headers=json.loads(headers),
        method='POST')

    if info['status'] != 204:
        ansibleModule.fail_json(msg="Fail: ")


def main():
    module = AnsibleModule(
        argument_spec=dict(
            webhook_url=dict(type='str', required=True),
            content=dict(type='str', required=True),
            username=dict(type='str', required=False),
            avatar_url=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )

    result = dict(
        changed=False,
        original_message='',
        message=''
    )

    # in check mode, report the unchanged result and exit cleanly
    if module.check_mode:
        module.exit_json(**result)

    basic(module)

    #run_module()


if __name__ == '__main__':
    main()
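Taken together with the DOCUMENTATION block above, a task using this module would look roughly like the following (a sketch, assuming modules/ is on the configured library path; the values are placeholders):

- name: Send a Discord notification
  discord_webhook:
    webhook_url: "{{ sin_newsbot_hooks }}"  # e.g. one of the vaulted hook URLs defined earlier
    content: "Backup complete."
    username: "ansible"  # optional override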
317
modules/okta_groups.py
Normal file
@@ -0,0 +1,317 @@
#!/usr/bin/python
# (c) 2019, Whitney Champion <whitney.ellis.champion@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
module: okta_groups
short_description: Communicate with the Okta API to manage groups
description:
    - The Okta groups module manages Okta groups
version_added: "1.0"
author: "Whitney Champion (@shortstack)"
options:
  organization:
    description:
      - Okta subdomain for your organization. (i.e. mycompany.okta.com).
    required: false
    default: None
  api_key:
    description:
      - Okta API key.
    required: false
    default: None
  action:
    description:
      - Action to take against groups API.
    required: false
    default: list
    choices: [ create, update, delete, list, add_user, remove_user ]
  id:
    description:
      - ID of the group.
    required: false
    default: None
  name:
    description:
      - Group name.
    required: false
    default: None
  description:
    description:
      - Group description.
    required: false
    default: None
  limit:
    description:
      - List limit.
    required: false
    default: 200
  user_id:
    description:
      - ID of user to add to group.
    required: false
    default: None
"""

EXAMPLES = '''
# List groups
- okta_groups:
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    limit: 200

# Create group
- okta_groups:
    action: create
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    name: "Imaginary Creatures"
    description: "They are so majestic"

# Update group
- okta_groups:
    action: update
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM457"
    name: "Imaginary Creatures"
    description: "They are so majestic and beautiful"

# Add user to group
- okta_groups:
    action: add_user
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM457"
    user_id: "01c5pEucucMPWXjFM456"

# Remove user from group
- okta_groups:
    action: remove_user
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM457"
    user_id: "01c5pEucucMPWXjFM456"

# Delete group
- okta_groups:
    action: delete
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM457"
'''

RETURN = r'''
json:
    description: The JSON response from the Okta API
    returned: always
    type: complex
msg:
    description: The HTTP message from the request
    returned: always
    type: str
    sample: OK (unknown bytes)
status:
    description: The HTTP status code from the request
    returned: always
    type: int
    sample: 200
url:
    description: The actual URL used for the request
    returned: always
    type: str
    sample: https://www.ansible.com/
'''


def create(module, base_url, api_key, name, description):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    payload = {}
    profile = {}

    if name is not None:
        profile['name'] = name
    if description is not None:
        profile['description'] = description

    payload['profile'] = profile

    url = base_url

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='POST', data=module.jsonify(payload))

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def update(module, base_url, api_key, id, name, description):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    payload = {}
    profile = {}

    if name is not None:
        profile['name'] = name
    if description is not None:
        profile['description'] = description

    payload['profile'] = profile

    url = base_url + "/%s" % (id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='PUT', data=module.jsonify(payload))

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def delete(module, base_url, api_key, id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s" % (id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='DELETE')  # delete

    if info['status'] != 204:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def list(module, base_url, api_key, limit):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/?limit=%s" % (limit)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='GET')

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def add_user(module, base_url, api_key, id, user_id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s/users/%s" % (id, user_id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='PUT')

    if info['status'] != 204:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def remove_user(module, base_url, api_key, id, user_id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s/users/%s" % (id, user_id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='DELETE')

    if info['status'] != 204:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))
    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def main():
    module = AnsibleModule(
        argument_spec=dict(
            organization=dict(type='str', default=None),
            api_key=dict(type='str', no_log=True),
            action=dict(type='str', default='list', choices=['create', 'update', 'delete', 'list', 'add_user', 'remove_user']),
            id=dict(type='str', default=None),
            user_id=dict(type='str', default=None),
            name=dict(type='str', default=None),
            description=dict(type='str', default=None),
            limit=dict(type='int', default=200)
        )
    )

    organization = module.params['organization']
    api_key = module.params['api_key']
    action = module.params['action']
    id = module.params['id']
    user_id = module.params['user_id']
    name = module.params['name']
    description = module.params['description']
    limit = module.params['limit']

    base_url = "https://%s-admin.okta.com/api/v1/groups" % (organization)

    if action == "create":
        status, message, content, url = create(module, base_url, api_key, name, description)
    elif action == "update":
        status, message, content, url = update(module, base_url, api_key, id, name, description)
    elif action == "delete":
        status, message, content, url = delete(module, base_url, api_key, id)
    elif action == "list":
        status, message, content, url = list(module, base_url, api_key, limit)
    elif action == "add_user":
        status, message, content, url = add_user(module, base_url, api_key, id, user_id)
    elif action == "remove_user":
        status, message, content, url = remove_user(module, base_url, api_key, id, user_id)

    uresp = {}
    content = to_text(content, encoding='UTF-8')

    try:
        js = json.loads(content)
    except ValueError:
        js = ""

    uresp['json'] = js
    uresp['status'] = status
    uresp['msg'] = message
    uresp['url'] = url

    module.exit_json(**uresp)


# import module snippets
import json
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
455
modules/okta_users.py
Normal file
@@ -0,0 +1,455 @@
#!/usr/bin/python
# (c) 2019, Whitney Champion <whitney.ellis.champion@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = """
module: okta_users
short_description: Communicate with the Okta API to manage users
description:
    - The Okta user module manages Okta users
version_added: "1.0"
author: "Whitney Champion (@shortstack)"
options:
  organization:
    description:
      - Okta subdomain for your organization. (i.e. mycompany.okta.com).
    required: false
    default: None
  api_key:
    description:
      - Okta API key.
    required: false
    default: None
  action:
    description:
      - Action to take against user API.
    required: false
    default: list
    choices: [ create, update, delete, list, activate, deactivate ]
  id:
    description:
      - ID of the user.
    required: false
    default: None
  login:
    description:
      - Username.
    required: false
    default: None
  activate:
    description:
      - Whether or not the new user is activated.
    required: false
    default: yes
  password:
    description:
      - Password.
    required: false
    default: None
  first_name:
    description:
      - First name.
    required: false
    default: None
  last_name:
    description:
      - Last name.
    required: false
    default: None
  email:
    description:
      - Email.
    required: false
    default: None
  group_ids:
    description:
      - List of Group IDs to add the user to.
    required: false
    default: None
  limit:
    description:
      - List limit.
    required: false
    default: 25
"""

EXAMPLES = '''
# List users
- okta_users:
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    limit: 25

# Create user
- okta_users:
    action: create
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    login: "whitney@unicorns.lol"
    first_name: "Whitney"
    last_name: "Champion"
    email: "whitney@unicorns.lol"
    password: "cookiesaredelicious"
    activate: yes

# Create user in group(s)
- okta_users:
    action: create
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    login: "whitney@unicorns.lol"
    first_name: "Whitney"
    last_name: "Champion"
    email: "whitney@unicorns.lol"
    password: "cookiesaredelicious"
    group_ids:
      - "00f5b3gqiLpE114tV2M7"
    activate: yes

# Create multiple users in group
- okta_users:
    action: create
    organization: "crypto"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    login: "{{ item.login }}"
    first_name: "{{ item.first_name }}"
    last_name: "{{ item.last_name }}"
    email: "{{ item.email }}"
    password: "{{ item.password }}"
    group_ids:
      - "00f5b3gqiLpE324tV2M7"
    activate: "{{ item.activate }}"
  with_items:
    - { login: "alice@aol.com", first_name: "Alice", last_name: "A", email: "alice@aol.com", password: "ilovebob111", activate: yes }
    - { login: "bob@aol.com", first_name: "Bob", last_name: "B", email: "bob@aol.com", password: "ilovealice111", activate: yes }

# Update user's email address
- okta_users:
    action: update
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM456"
    email: "whitney@ihateunicorns.lol"

# Activate user
- okta_users:
    action: activate
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM456"

# Deactivate user
- okta_users:
    action: deactivate
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM456"

# Delete user
- okta_users:
    action: delete
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM456"

# Get a list of groups a user is in
- okta_users:
    action: usergroups
    organization: "unicorns"
    api_key: "TmHvH4LY9HH9MDRDiLChLGwhRjHsarTCBzpwbua3ntnQ"
    id: "01c5pEucucMPWXjFM456"
'''

RETURN = r'''
json:
    description: The JSON response from the Okta API
    returned: always
    type: complex
msg:
    description: The HTTP message from the request
    returned: always
    type: str
    sample: OK (unknown bytes)
status:
    description: The HTTP status code from the request
    returned: always
    type: int
    sample: 200
url:
    description: The actual URL used for the request
    returned: always
    type: str
    sample: https://www.ansible.com/
'''


def create(module, base_url, api_key, login, password_input, email, first_name, last_name, group_ids, activate):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    payload = {}
    profile = {}
    credentials = {}
    password = {}
    groupIds = []

    if first_name is not None:
        profile['firstName'] = first_name
    if last_name is not None:
        profile['lastName'] = last_name
    if email is not None:
        profile['email'] = email
    if login is not None:
        profile['login'] = login
    if password_input is not None:
        password['value'] = password_input
    if group_ids is not None:
        groupIds = group_ids

    credentials['password'] = password
    payload['credentials'] = credentials
    payload['groupIds'] = groupIds
    payload['profile'] = profile

    url = base_url + "?activate=%s" % (activate)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='POST', data=module.jsonify(payload))

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def update(module, base_url, api_key, id, login, email, first_name, last_name):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s" % (id)

    payload = {}
    profile = {}

    if first_name is not None:
        profile['firstName'] = first_name
    if last_name is not None:
        profile['lastName'] = last_name
    if email is not None:
        profile['email'] = email
    if login is not None:
        profile['login'] = login

    payload['profile'] = profile

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='POST', data=module.jsonify(payload))

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def delete(module, base_url, api_key, id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s" % (id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='DELETE')

    if info['status'] != 204:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def activate(module, base_url, api_key, id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s/lifecycle/activate" % (id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='POST')

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def deactivate(module, base_url, api_key, id):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/%s/lifecycle/deactivate" % (id)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='POST')

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def list(module, base_url, api_key, limit):

    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)

    url = base_url + "/?limit=%s" % (limit)

    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='GET')

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def findByLogin(module, base_url, api_key, login):
    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)
    #url = base_url+"?q=%s&limit=1" % (login)
    url = base_url + "?filter=profile.login+eq+\"%s\"" % (login)
    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='GET')
    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))
    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')
    return info['status'], info['msg'], content, url


def findByName(module, base_url, api_key, Name):
    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)
    url = base_url + "?q=%s&limit=1" % (Name)
    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='GET')
    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))
    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')
    return info['status'], info['msg'], content, url


def getusergroups(module, base_url, api_key, id):
    headers = '{ "Content-Type": "application/json", "Authorization": "SSWS %s", "Accept": "application/json" }' % (api_key)
    url = base_url + "/%s/groups" % (id)
    response, info = fetch_url(module=module, url=url, headers=json.loads(headers), method='GET')

    if info['status'] != 200:
        module.fail_json(msg="Fail: %s" % ("Status: " + str(info['msg']) + ", Message: " + str(info['body'])))

    try:
        content = response.read()
    except AttributeError:
        content = info.pop('body', '')

    return info['status'], info['msg'], content, url


def main():
    module = AnsibleModule(
        argument_spec=dict(
            organization=dict(type='str', default=None),
            api_key=dict(type='str', no_log=True),
            action=dict(type='str', default='list', choices=['create', 'update', 'delete', 'list', 'activate', 'deactivate', 'usergroups']),
            id=dict(type='str', default=None),
            login=dict(type='str', default=None),
            password=dict(type='str', default=None, no_log=True),
            first_name=dict(type='str', default=None),
            last_name=dict(type='str', default=None),
            email=dict(type='str', default=None),
            group_ids=dict(type='list', default=None),
            limit=dict(type='int', default=25),
            activate=dict(type='bool', default=True)
        )
    )

    organization = module.params['organization']
    api_key = module.params['api_key']
    action = module.params['action']
    id = module.params['id']
    login = module.params['login']
    password = module.params['password']
    first_name = module.params['first_name']
    last_name = module.params['last_name']
    email = module.params['email']
    group_ids = module.params['group_ids']
    limit = module.params['limit']
    activate = module.params['activate']

    base_url = "https://%s-admin.okta.com/api/v1/users" % (organization)

    if action == "create":
        status, message, content, url = create(module, base_url, api_key, login, password, email, first_name, last_name, group_ids, activate)
    elif action == "update":
        status, message, content, url = update(module, base_url, api_key, id, login, email, first_name, last_name)
    elif action == "delete":
        # Okta requires a user to be deactivated before it can be deleted
        status, message, content, url = deactivate(module, base_url, api_key, id)
        status, message, content, url = delete(module, base_url, api_key, id)
    elif action == "activate":
        status, message, content, url = activate(module, base_url, api_key, id)
    elif action == "deactivate":
        status, message, content, url = deactivate(module, base_url, api_key, id)
    elif action == "list":
        if login is not None:
            status, message, content, url = findByLogin(module, base_url, api_key, login)
        elif first_name is not None:
            status, message, content, url = findByName(module, base_url, api_key, first_name)
        elif last_name is not None:
            status, message, content, url = findByName(module, base_url, api_key, last_name)
        else:
            status, message, content, url = list(module, base_url, api_key, limit)
    elif action == "usergroups":
        status, message, content, url = getusergroups(module, base_url, api_key, id)

    uresp = {}
    content = to_text(content, encoding='UTF-8')

    try:
        js = json.loads(content)
    except ValueError:
        js = ""

    uresp['json'] = js
    uresp['status'] = status
    uresp['msg'] = message
    uresp['url'] = url

    module.exit_json(**uresp)


# import module snippets
import json
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
16
playbook/debug/debugDumpAnsibleVars.yml
Normal file
@@ -0,0 +1,16 @@
- name: Export all vars
  hosts: osx

  tasks:
    - name: Dump all vars
      template:
        src: dumpall.j2
        dest: /tmp/ansible-vars.all

    - name: Copy new file back to the controller
      fetch:
        src: /tmp/ansible-vars.all
        dest: ansible-vars.all
        flat: yes
19
playbook/debug/dumpall.j2
Normal file
@@ -0,0 +1,19 @@
Module Variables ("vars"):
--------------------------------
{{ vars | to_nice_json }}

Environment Variables ("environment"):
--------------------------------
{{ environment | to_nice_json }}

GROUP NAMES Variables ("group_names"):
--------------------------------
{{ group_names | to_nice_json }}

GROUPS Variables ("groups"):
--------------------------------
{{ groups | to_nice_json }}

HOST Variables ("hostvars"):
--------------------------------
{{ hostvars | to_nice_json }}
39
playbook/debug/roleTest.yml
Normal file
@@ -0,0 +1,39 @@
---

- name: testing role
  hosts: localhost
  vars:

  tasks:
    # - include_role:
    #     name: ansible_discord_webhook
    #   vars:
    #     discord_message: "HI! I am nothing more than a test playbook\nPlease save me."

    - name: Test if the container is alive
      uri:
        url: http://192.168.0.242:32401/web/index.html
        method: GET
      ignore_errors: true
      register: PlexStatus

    - debug:
        msg: "{{ PlexStatus }}"

    - name: Send Service Alive
      include_role:
        name: ansible_discord_webhook
      vars:
        discord_webhook: "{{ corgi_ansible }}"
        discord_message: "Plex Status: OK\nDebug: {{ PlexStatus.msg }}"
      when: PlexStatus.status == 200

    - name: Service Offline
      include_role:
        name: ansible_discord_webhook
      vars:
        discord_webhook: "{{ corgi_ansible }}"
        discord_message: "Plex Status: Offline\nDebug: {{ PlexStatus.msg }}"
      when: PlexStatus.status == -1
30
playbook/docker/archive/collins.yml
Normal file
@@ -0,0 +1,30 @@
---

- name: Maintain Collins
  hosts: localhost

  tasks:
    - name: Determine OSX Vars
      set_fact:
        path_root: ~/docker
      when: ansible_distribution == "MacOSX"

    - name: stop collins
      docker_container:
        name: collins
        state: stopped
      ignore_errors: yes

    - name: update collins
      docker_image:
        name: 'tumblr/collins'
        tag: latest

    - name: Deploy Collins
      docker_container:
        name: collins
        image: 'tumblr/collins'
        state: started
        restart_policy: unless-stopped
        ports:
          - 9001:9000
18
playbook/docker/archive/deploy-awx.yml
Normal file
@@ -0,0 +1,18 @@
---

- name: install awx
  hosts: awx
  become: true
  vars:
    #nodejs_version: "6.x"
    pip_install_packages:
      - name: docker

  roles:
    #- geerlingguy.repo-epel
    - geerlingguy.git
    - geerlingguy.ansible
    - geerlingguy.docker
    - geerlingguy.pip
    - geerlingguy.nodejs
    - geerlingguy.awx
83
playbook/docker/archive/gitea_stack.yml
Normal file
@@ -0,0 +1,83 @@
---
- name: Deploy Gitea
  hosts: swarm-host
  become: true
  vars:
    containers:
      - "gitea_app_1"
      - "gitea_db_1"
    images:
      - "postgres"
      - "gitea/gitea:latest"
    vols:
      - "gitea_data"
      - "gitea_sql"

  tasks:

    - name: stop containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"
      ignore_errors: true

    - name: Pull images
      docker_image:
        name: "{{ item }}"
        source: pull
      loop: "{{ images }}"

    - name: deploy containers
      docker_stack:
        state: present
        name: gitea
        compose:
          #project_name: gitea
          #definition:
          - version: "3"

            networks:
              gitea:
                external: false

            volumes:
              gitea_data:
              gitea_sql:

            services:
              app:
                image: gitea/gitea:latest
                environment:
                  - USER_UID=1000
                  - USER_GID=1000
                  - DB_TYPE=postgres
                  - DB_HOST=db:5432
                  - DB_NAME=gitea
                  - DB_USER=gitea
                  - DB_PASSWD=gitea
                restart: always
                networks:
                  - gitea
                volumes:
                  - gitea_data:/data
                  - /etc/timezone:/etc/timezone:ro
                  - /etc/localtime:/etc/localtime:ro
                ports:
                  - "3000:3000"
                  - "222:22"
                depends_on:
                  - db

              db:
                image: postgres
                restart: always
                environment:
                  - POSTGRES_USER=gitea
                  - POSTGRES_PASSWORD=gitea
                  - POSTGRES_DB=gitea
                networks:
                  - gitea
                volumes:
                  - gitea_sql:/var/lib/postgresql/data
17
playbook/docker/archive/nagios.yml
Normal file
@@ -0,0 +1,17 @@
---

- name: deploy nagios core
  hosts: awx
  become: true

  tasks:
    - include_role:
        name: nagioscore
      vars:
        nagios_action: install
        pathEtc: '/docker/nagios/etc'
        pathVar: '/docker/nagios/var'
        pathPlugins: '/docker/nagios/plugins'
        pathNagiosGraphVar: '/docker/nagios/graph/var'
        pathNagiosGraphEtc: '/docker/nagios/graph/etc'
        port: 8080
48
playbook/docker/archive/portainer.yml
Normal file
@@ -0,0 +1,48 @@
---

# This maintains the portainer host

- name: maintain portainer host
  hosts: portainer
  become: true
  vars:
    d_name: portainer
    d_image: "{{ d_name }}/{{ d_name }}"
    d_data: "/docker/{{ d_name }}"

  tasks:
    #- include_role:
    #    name: common
    #  vars:
    #    docker: true

    - name: Confirm portainer folder
      file:
        path: "{{ d_data }}"
        state: directory

    - name: Check if portainer exists
      docker_container:
        name: "{{ d_name }}"
        image: "{{ d_image }}"
        state: stopped
      register: cfg_portainer
      ignore_errors: true

    - name: Update portainer image if we can
      docker_image:
        name: "{{ d_image }}"
        tag: latest

    - name: deploy portainer container
      docker_container:
        name: "{{ d_name }}"
        image: "{{ d_image }}"
        restart_policy: unless-stopped
        ports:
          - 8000:8000
          - 9000:9000
        volumes:
          - /var/run/docker.sock:/var/run/docker.sock
          - "{{ d_data }}/data:/data"
0
playbook/docker/archive/portainer_agents.yml
Normal file
42
playbook/docker/archive/rancher.yml
Normal file
@@ -0,0 +1,42 @@
---

- name: DSC Rancher
  hosts: rancher
  become: true

  tasks:
    - include_role:
        name: common
      vars:
        linux: true
        docker: true

    - name: Stop rancher if found
      docker_container:
        name: rancher
        state: stopped
      ignore_errors: yes

    - name: download/update rancher
      docker_image:
        name: rancher/rancher

    - name: Start Rancher
      docker_container:
        name: rancher
        image: 'rancher/rancher'
        state: started
        network_mode: host
        #env:

        #ports:
        #  - 80:80
        #  - 443:443
        #volumes:
        #  - "{{ hydra_config }}:/config"
        #  - "{{ pathConfig }}/hydra:/config"
        #  - "{{ pathDownloads }}:/downloads"
        #networks:
        #  - name: "{{ dockerNetwork }}"
57
playbook/docker/archive/rancher_workers.yml
Normal file
@@ -0,0 +1,57 @@
---

- name: Manage Rancher Workers
  hosts: rancher-worker
  become: true
  vars:
    token: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      30613833333861303239396233323731343562623565303962393536393462306336643534383235
      6637613737633931653532613463353838366261303765320a616464653364613737396265313739
      62363131353535386434616431343432393439636662363130616363616334656534326134623932
      6466613036363633360a343033373765646334643639383530343834656661643265363463303434
      37653032383161396265633433356433623463386165386538626366366665333361363939613364
      33343964623037356162643661666165666562366535656638663537653034626161636239306332
      316239663536613064353830333936326465
    ca: !vault |
      $ANSIBLE_VAULT;1.1;AES256
      66303462636433643737393864633234346333386139653762383330333661373337626462393063
      6433333266303337343937346231303661323039373135620a316263303734393537393232623932
      66396534613032666430613139636533616130353131653263646532326537343066383662366261
      3262306262393932390a646132323834363033363934376639396466396661346530323539326236
      61313263626134653963653433653234353061626135373738366361343134323331323737623632
      63386463306437306661363734666561366166326330646434626338323065373731616137616564
      62613563306666376664333564316435313431643336386466303164663363383032343431356263
      31623761653032636235

  tasks:
    - include_role:
        name: common
      vars:
        #linux: true
        #docker: true

    - name: stop agent if found
      docker_container:
        name: rancherworker
        state: stopped
      ignore_errors: true

    - name: start agent
      docker_container:
        name: rancherworker
        image: rancher/rancher-agent:v2.3.2
        state: started
        network_mode: host
        privileged: true
        restart_policy: unless-stopped
        command: --worker --etcd --controlplane
        env:
          # docker_container expects env as a mapping, not raw KEY=value lines
          server: "https://192.168.0.241"
          #token: krgdcfchvhprzstmwgbsmzz2qj8kmcrgc8q26wpdklr9kfpdqgg5sg
          token: "{{ token }}"
          #ca-checksum: a7077c8e0381f72a7091eda6e617a16b2259227113f66d042a453767174b2dbb
        volumes:
          - "/etc/kubernetes:/etc/kubernetes"
          - "/var/run:/var/run"
        # --worker
52
playbook/docker/common.yml
Normal file
@@ -0,0 +1,52 @@
---

- name: Configure defaults for docker servers
  hosts: docker
  become: true
  vars:
    install_podman: false

  tasks:
    - name: install pip packages
      pip:
        name: "{{ pipPacks }}"
        state: present
      vars:
        pipPacks:
          - docker
          - docker-compose
          - jsondiff

    # Does not work yet
    - name: Install Podman
      block:
        - name: Add Repo
          apt_repository:
            repo: deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/
            state: absent
            filename: devel:kubic:libcontainers:stable.list

        - name: update apt
          apt:
            update_cache: true
            name: podman
      when: install_podman == true


    - name: install docker
      include_role:
        name: geerlingguy.docker

    - name: make /docker folder
      file:
        path: "/docker"
        state: directory

    - name: make 'docker' group
      shell: groupadd docker
      ignore_errors: true

    - name: add users to 'docker' group
      shell: gpasswd -a miharu docker
      ignore_errors: true
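The last two tasks shell out to groupadd and gpasswd and swallow failures; the builtin group and user modules cover the same ground idempotently. A sketch:

    - name: make 'docker' group
      group:
        name: docker
        state: present

    - name: add users to 'docker' group
      user:
        name: miharu
        groups: docker
        append: yes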
38
playbook/docker/duckdns.yml
Normal file
@@ -0,0 +1,38 @@
---
- name: Deploy DuckDNS
  hosts: duckdns
  become: true
  tasks:
    - name: stop containers
      docker_container:
        name: duckdns_app_1
        state: absent
      ignore_errors: true

    - name: Pull images
      docker_image:
        name: linuxserver/duckdns:latest
        source: pull

    - name: deploy containers
      docker_compose:
        project_name: duckdns
        definition:
          version: "2"

          networks:
            duckdns:
              external: false

          services:
            app:
              image: linuxserver/duckdns:latest
              environment:
                SUBDOMAINS: luther38
                TOKEN: "{{ duckdns_token }}"
              restart: always
              networks:
                - duckdns
26
playbook/docker/foldingathome.yml
Normal file
@@ -0,0 +1,26 @@
- name: Deploy Folding@home
  hosts: mediaserver-back
  become: true

  tasks:
    - name: deploy containers
      docker_compose:
        project_name: gitea
        definition:
          version: "2"
          services:
            app:
              image: johnktims/folding-at-home:latest
              restart: always
              volumes:
                - /docker/cfg/gitea/data:/data
                - /etc/timezone:/etc/timezone:ro
                - /etc/localtime:/etc/localtime:ro
              ports:
                - "3000:3000"
                - "222:22"
playbook/docker/gitea.yml
Normal file
72
playbook/docker/gitea.yml
Normal file
@ -0,0 +1,72 @@
|
---
- name: Deploy Gitea
  hosts: mediaserver-back
  become: true
  vars:
    containers:
      - "gitea_app_1"
      - "gitea_db_1"
    images:
      - "postgres"
      - "gitea/gitea:latest"

  tasks:

    - name: stop containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"
      ignore_errors: true


    - name: Pull images
      docker_image:
        name: "{{ item }}"
        source: pull
      loop: "{{ images }}"

    - name: deploy containers
      docker_compose:
        project_name: gitea
        definition:
          version: "2"
          networks:
            gitea:
              external: false
          services:
            app:
              image: gitea/gitea:latest
              environment:
                - USER_UID=1000
                - USER_GID=1000
                - DB_TYPE=postgres
                - DB_HOST=db:5432
                - DB_NAME=gitea
                - DB_USER=gitea
                - DB_PASSWD=gitea
              restart: always
              networks:
                - gitea
              volumes:
                - /docker/cfg/gitea/data:/data
                - /etc/timezone:/etc/timezone:ro
                - /etc/localtime:/etc/localtime:ro
              ports:
                - "3000:3000"
                - "222:22"
              depends_on:
                - db

            db:
              image: postgres
              restart: always
              environment:
                - POSTGRES_USER=gitea
                - POSTGRES_PASSWORD=gitea
                - POSTGRES_DB=gitea
              networks:
                - gitea
              volumes:
                - /docker/cfg/gitea/sql:/var/lib/postgresql/data
168
playbook/docker/mediaserver/back/backup.yml
Normal file
@@ -0,0 +1,168 @@
---
- name: testing backup plan
  hosts: mediaserver-back
  become: true
  vars:
    backup: false
  tasks:
    - name: Ensure backup location is present
      file:
        path: /tmp/docker/backup
        state: directory

    - name: Backup Search
      block:
        - set_fact:
            pathLocal: /docker/cfg/hydra
            container: mediaback_search_1

        - name: ensure backup dir is present
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: Check on old backups
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: Remove old backups
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: stop search
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: generate archive
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"

        - name: start search
          docker_container:
            name: "{{ container }}"
            state: started
      #when: backup == true

    - name: Backup son
      block:
        - set_fact:
            pathLocal: /docker/cfg/sonarr
            container: mediaback_son_1

        - name: ensure backup dir is present
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: Check on old backups
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: Remove old backups
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: stop son
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: generate archive
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"

        - name: start son
          docker_container:
            name: "{{ container }}"
            state: started
      #when: backup == true

    - name: Backup rad
      block:
        - set_fact:
            pathLocal: /docker/cfg/radarr
            container: mediaback_rad_1

        - name: ensure backup dir is present
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: Check on old backups
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: Remove old backups
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: stop rad
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: generate archive
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"

        - name: start rad
          docker_container:
            name: "{{ container }}"
            state: started
      #when: backup == true

    - name: Backup get
      block:
        - set_fact:
            pathLocal: /docker/cfg/nzbget
            container: mediaback_get_1

        - name: ensure backup dir is present
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: Check on old backups
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: Remove old backups
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: stop get
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: generate archive
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"

        - name: start get
          docker_container:
            name: "{{ container }}"
            state: started
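The four backup blocks above differ only in pathLocal and container, so they could collapse into a single loop; a sketch, where backup-one.yml is a hypothetical task file holding one block's tasks:

    - include_tasks: backup-one.yml
      vars:
        pathLocal: "{{ item.path }}"
        container: "{{ item.name }}"
      loop:
        - { path: /docker/cfg/hydra,  name: mediaback_search_1 }
        - { path: /docker/cfg/sonarr, name: mediaback_son_1 }
        - { path: /docker/cfg/radarr, name: mediaback_rad_1 }
        - { path: /docker/cfg/nzbget, name: mediaback_get_1 }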
113
playbook/docker/mediaserver/back/deploy.yml
Normal file
@@ -0,0 +1,113 @@
- name: Configure Media Server
  hosts: mediaserver-back
  #hosts: swarm-host
  become: yes
  vars:
    update: false
    containers:
      - mediaback_search_1
      - mediaback_son_1
      - mediaback_get_1
      - mediaback_rad_1
    images:
      - 'linuxserver/nzbhydra2:version-v3.9.0'
      - 'linuxserver/sonarr:version-2.0.0.5344'
      - 'linuxserver/nzbget:version-v21.0'
      - 'linuxserver/radarr:version-3.0.1.4259'

  tasks:
    - name: stop and remove containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"
      ignore_errors: yes

    - name: Pull images
      docker_image:
        name: "{{ item }}"
        source: pull
      loop: "{{ images }}"

    - name: deploy docker-compose
      docker_compose:
        project_name: mediaback
        definition:
          version: "3"

          networks:
            mediaback:
              ipam:
                driver: default
                config:
                  - subnet: 172.16.10.0/16

          services:
            search:
              image: "{{ images[0] }}"
              restart: always
              environment:
                - PUID=0
                - PGID=0
                - TZ=Europe/London
              ports:
                - 5076:5076
              volumes:
                - "{{ pathConfig }}/hydra:/config"
                - "{{ pathNfsTmp }}:/downloads"
              networks:
                mediaback:
                  ipv4_address: 172.16.10.10

            get:
              image: "{{ images[2] }}"
              restart: always
              environment:
                - PUID=0
                - PGID=0
                - TZ=Europe/London
              ports:
                - 6789:6789
              volumes:
                - "{{ pathConfig }}/nzbget:/config"
                - "{{ pathNfsTmp }}:/downloads"
              networks:
                mediaback:
                  ipv4_address: 172.16.10.11
            son:
              image: "{{ images[1] }}"
              restart: always
              environment:
                - PUID=0
                - PGID=0
                - TZ=Europe/London
                - UMASK_SET=022 #optional
              ports:
                - 8989:8989 #http
                #- 9898:9898 #https
              volumes:
                - "{{ pathConfig }}/sonarr:/config"
                - "{{ pathNfsMedia }}:/tv"
                - "{{ pathNfsTmp }}:/downloads"
              networks:
                mediaback:
                  ipv4_address: 172.16.10.12

            rad:
              image: "{{ images[3] }}"
              restart: always
              environment:
                - PUID=0
                - PGID=0
                - TZ=Europe/London
                - UMASK_SET=022 #optional
              ports:
                - 7878:7878
              volumes:
                - "{{ pathConfig }}/radarr:/config"
                - "{{ pathNfsMedia }}:/movies"
                - "{{ pathNfsTmp }}:/downloads"
              networks:
                mediaback:
                  ipv4_address: 172.16.10.13
19
playbook/docker/mediaserver/back/organizr.yml
Normal file
@@ -0,0 +1,19 @@
---

- name: deploy Organizr
  hosts: mediaserver-back
  become: true

  tasks:
    - name: Deploy Organizr
      docker_container:
        name: mediaback_organizr_1
        state: started
        image: organizrtools/organizr-v2
        restart_policy: unless-stopped
        ports:
          - 8080:80
        volumes:
          - "{{ pathConfig }}/organizr:/config"
        networks:
          - name: "{{ dockerNetwork }}"
55
playbook/docker/mediaserver/back/restarts.yml
Normal file
@@ -0,0 +1,55 @@
---

- name: restart all containers
  hosts: mediaserver-back
  become: true
  vars:
    host_ip: '192.168.0.76'
    containers:
      - hydra:
          service_port: 5076
      - nzbget:
      - sonarr:
      - radarr:

  tasks:
    # - name: stop containers
    #   docker_container:
    #     name: "{{ item }}"
    #     state: stopped
    #   loop: "{{ containers }}"

    # - name: start containers
    #   docker_container:
    #     name: "{{ item }}"
    #     state: started
    #   loop: "{{ containers }}"

    # - name: Wait 3 Minutes before checking services
    #   wait_for:
    #     timeout: 180

    - name: Test Hydra
      uri:
        url: "http://{{ host_ip }}:{{ containers[0].hydra.service_port }}"
        method: GET
      ignore_errors: true
      register: HydraStatus

    - debug:
        msg: "{{ HydraStatus }}"

    # - include_role:
    #     name: luther38.discord_webhook
    #   vars:
    #     discord_webhook: "{{ discord_test_hook }}"
    #     discord_message: "Hydra Status: {{ HydraStatus.status }}\nDebug: {{ HydraStatus.msg }}"
    #   when: HydraStatus.status == 200

    # - include_role:
    #     name: luther38.discord_webhook
    #   vars:
    #     discord_webhook: "{{ discord_test_hook }}"
    #     discord_message: "Hydra Status: Offline\nDebug: {{ HydraStatus.msg }}"
    #   when: HydraStatus.status == -1
25
playbook/docker/mediaserver/back/restore.yml
Normal file
@@ -0,0 +1,25 @@
---

- name: restore container data
  hosts: mediaserver-back
  become: true
  vars:
    container: "nzbget"
    mount: 'config'

  tasks:
    - name: stop container
      docker_container:
        name: "{{ container }}"
        state: stopped

    - name: ensure restore point is present
      file:
        path: "/docker/cfg/{{ container }}"
        state: directory

    - name: unarchive old backup
      unarchive:
        remote_src: true
        src: "/docker/nfs/backup/{{ container }}/{{ mount }}.gz"
        dest: "/docker/cfg/"
30
playbook/docker/mediaserver/back/status.yml
Normal file
@@ -0,0 +1,30 @@
---

- name: Check on services
  hosts: mediaserver-back
  become: true

  tasks:
    - include_tasks: task-status-checkup.yml
      vars:
        container_url: 'http://192.168.0.76:5076'
        container_name: Hydra
        http_code: 200

    - include_tasks: task-status-checkup.yml
      vars:
        container_url: 'http://192.168.0.76:6789'
        container_name: Nzbget
        http_code: 401

    - include_tasks: task-status-checkup.yml
      vars:
        container_url: 'http://192.168.0.76:8989'
        container_name: Sonarr
        http_code: 200

    - include_tasks: task-status-checkup.yml
      vars:
        container_url: 'http://192.168.0.76:7878'
        container_name: Radarr
        http_code: 200
20
playbook/docker/mediaserver/back/task-status-checkup.yml
Normal file
@@ -0,0 +1,20 @@
---

- name: Test if the container is alive
  uri:
    url: "{{ container_url }}"
    method: GET
    status_code: "{{ http_code }}"
  ignore_errors: true
  register: status

#- debug:
#    msg: "{{ status }}"

- include_role:
    name: luther38.discord_webhook
  vars:
    discord_webhook: "{{ discord_test_hook }}"
    discord_message: "{{ container_name }}\n - Status: {{ status.status }}\n - Message: {{ status.msg }}"
  #when: HydraStatus.status == -1
68
playbook/docker/mediaserver/common.yml
Normal file
@@ -0,0 +1,68 @@
---

- name: Ensure Mediaserver defaults
  hosts: mediaserver
  become: true
  # vars are stored in inventory

  tasks:
    - name: "Ensure {{ pathNfsSync }} exists"
      file:
        path: "{{ item }}"
        state: directory
      vars:
        folders:
          - "{{ pathDockerRoot }}"
          - "{{ pathConfig }}"
          - "{{ pathNfs }}"
          - "{{ pathNfsBackup }}"
          - "{{ pathNfsMedia }}"
          - "{{ pathNfsSync }}"
      loop: "{{ folders }}"

    - name: Ensure {{ pathNfsBackup }} is mounted
      mount:
        src: "{{ nfsAddress }}:/{{ nfsDockerConfig }}"
        path: "{{ pathNfsBackup }}"
        fstype: nfs
        boot: yes
        state: mounted

    - name: Ensure {{ pathNfsMedia }} is mounted
      mount:
        src: "{{ nfsAddress }}:/{{ nfsMedia }}"
        path: "{{ pathNfsMedia }}"
        fstype: nfs
        boot: yes
        state: mounted

    - name: "Ensure {{ pathNfsSync }} is mounted"
      mount:
        src: "{{ nfsAddress }}:/sync"
        path: "{{ pathNfsSync }}"
        fstype: nfs
        boot: yes
        state: mounted
      # Going to ignore errors because some nodes are unable to touch this
      ignore_errors: true

    - name: "Ensure {{ pathNfsTmp }} is mounted"
      mount:
        src: "{{ nfsAddress }}:/tmp"
        path: "{{ pathNfsTmp }}"
        fstype: nfs
        boot: yes
        state: mounted
      # Going to ignore errors because some nodes are unable to touch this
      ignore_errors: true

    - name: install docker pip package
      pip:
        name: docker
        state: present
        executable: pip3

    - name: make docker network
      docker_network:
        name: "{{ dockerNetwork }}"
55
playbook/docker/mediaserver/front/backup.yml
Normal file
@@ -0,0 +1,55 @@
---
- name: backup frontend services
  hosts: mediaserver-front
  become: true

  tasks:
    - block:
        - name: stop container
          docker_container:
            name: plex
            state: stopped

        - name: Copy db files
          copy:
            remote_src: true
            src: '/docker/cfg/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/'
            dest: '/tmp/docker/backup/plex/'

        - name: Copy logs
          copy:
            remote_src: true
            src: '/docker/cfg/plex/Library/Application Support/Plex Media Server/Plug-in Support/Logs/'
            dest: '/tmp/docker/backup/plex/logs/' # copy requires a dest; staging path assumed here

        - name: start container
          docker_container:
            name: plex
            state: started

        - name: Archive db backups
          archive:
            path: '/tmp/docker/backup/plex/'
            dest: '/tmp/docker/backup/plex/databases.gz'

        - name: Ensure nfs has a backup location
          file:
            path: '/docker/nfs/backup/plex'
            state: directory

        - name: Copy archive to a safe place
          copy:
            src: '/tmp/docker/backup/plex/databases.gz'
            dest: '/docker/nfs/backup/plex/databases.gz'
            remote_src: true
            backup: true

        - name: remove temp files
          file:
            path: '/tmp/docker/backup/plex'
            state: absent

        - include_role:
            name: ansible_discord_webhook
          vars:
            discord_message: "Backup Job:\nJob has finished and services should be back online."
77
playbook/docker/mediaserver/front/deploy.yml
Normal file
@@ -0,0 +1,77 @@
---
- name: deploy plex
  hosts: mediaserver-front
  become: true
  vars:
    pathDockerRoot: "/docker"
    pathConfig: "{{ pathDockerRoot }}/cfg"
    pathMedia: "/docker/nfs/media"
    update: false
    containers:
      - plex
      - plex_app_1
      - plex_logs_1
    images:
      - linuxserver/plex:latest
      - tautulli/tautulli

  tasks:
    - name: Stop and remove Containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"
      ignore_errors: yes

    #- name: Start Plex
    #  docker_container:
    #    name: plex
    #    image: linuxserver/plex:1.18.2.2058-e67a4e892-ls70
    #    state: started
    #    network_mode: host
    #    restart_policy: unless-stopped
    #    env:
    #      PUID=0
    #      PGID=0
    #      TZ="{{ ntp_timezone }}"
    #      UMASK_SET=022
    #    volumes:
    #      - "{{ pathConfig }}/plex:/config"
    #      - "{{ pathMedia }}:/tv"

    - name: Ensure containers are running
      docker_compose:
        project_name: plex
        definition:
          version: "3.4"
          services:
            app:
              image: linuxserver/plex:version-1.20.3.3483-211702a9f
              environment:
                - PUID=0
                - PGID=0
                - TZ="{{ ntp_timezone }}"
                - UMASK_SET=022
              restart: always
              network_mode: host
              volumes:
                - "{{ pathConfig }}/plex:/config"
                - "{{ pathMedia }}:/tv"

            logs:
              image: tautulli/tautulli:v2.6.0
              restart: always
              volumes:
                - "{{ pathConfig }}/tatulli:/config"
                - "{{ pathConfig }}/plex/Library/Application Support/Plex Media Server/Logs:/plex_logs:ro"
              environment:
                - PUID=0
                - PGID=0
                - TZ="{{ ntp_timezone }}"
                - ADVANCED_GIT_BRANCH=master
              ports:
                - "8181:8181"
22
playbook/docker/mediaserver/front/restarts.yml
Normal file
@@ -0,0 +1,22 @@
---

- name: restart frontend containers
  hosts: mediaserver-front
  become: true
  vars:
    containers:
      - plex

  tasks:
    - name: stop containers
      docker_container:
        name: "{{ item }}"
        state: stopped
      loop: "{{ containers }}"

    - name: start containers
      docker_container:
        name: "{{ item }}"
        state: started
      loop: "{{ containers }}"
25
playbook/docker/mediaserver/front/restore.yml
Normal file
@@ -0,0 +1,25 @@
---

- name: restore frontend
  hosts: mediaserver-front
  become: true

  tasks:
    - name: stop container if active
      docker_container:
        name: plex
        state: stopped
      ignore_errors: true

    - name: ensure restore point is ready
      file:
        path: '/docker/cfg/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/'
        state: directory

    - name: restore from backup
      unarchive:
        remote_src: true
        src: '/docker/nfs/backup/plex/databases.gz'
        dest: '/docker/cfg/plex/Library/Application Support/Plex Media Server/Plug-in Support/Databases/'
42
playbook/docker/mediaserver/update.yml
Normal file
@@ -0,0 +1,42 @@
---

# This job runs on a weekly rotation to update images.

- name: update containers
  hosts: mediaserver
  become: true

  tasks:
    - name: download latest sonarr
      docker_image:
        name: linuxserver/sonarr
        tag: latest
        state: present
        source: pull

    - name: download latest hydra
      docker_image:
        name: linuxserver/hydra2
        tag: latest
        state: present
        source: pull

    - name: download latest radarr
      docker_image:
        name: linuxserver/radarr
        tag: latest
        state: present
        source: pull

    - name: download latest nzbget
      docker_image:
        name: linuxserver/nzbget
        tag: latest
        state: present
        source: pull

    #- name: remove old images
    #  docker_prune:
    #    images: true
    #    images_filters:
    #      dangling: true
55
playbook/docker/minecraft/deploy.yml
Normal file
@@ -0,0 +1,55 @@
---
# https://hub.docker.com/r/itzg/minecraft-server
# https://www.curseforge.com/minecraft/modpacks/ftb-ultimate-reloaded/files

- name: Deploy minecraft
  hosts: mediaserver-front
  become: true
  vars:
    volData: "~/docker/minecraft/data"

    #zip: FTBUltimateReloadedServer_1.9.0.zip
    zip: "SkyFactory-4_Server_4.2.2.zip"

  tasks:
    - name: stop container
      docker_container:
        name: minecraft
        state: absent
      ignore_errors: true

    - name: Ensure tmp is present
      file:
        path: /docker/mc/
        state: directory

    - name: Copy zip to the host
      copy:
        src: "~/Downloads/{{ zip }}"
        dest: "/docker/mc/{{ zip }}"

    # CurseForge does not expose a stable direct download URL, so curl and wget can't fetch it.
    #- name: Download ModPack
    #  get_url:
    #    url: https://www.curseforge.com/minecraft/modpacks/ftb-ultimate-reloaded/download/2778970/file?client=n
    #    dest: "{{ zip }}"

    - name: compose deploy
      docker_compose:
        project_name: minecraft
        definition:
          version: '2'

          services:
            server:
              image: itzg/minecraft-server
              environment:
                - EULA=TRUE
                - TYPE=CURSEFORGE
                - CF_SERVER_MOD={{ zip }}
              restart: always
              volumes:
                - /docker/mc/:/data
              ports:
                - 25565:25565
0
playbook/docker/monitoring/files/influxdb.conf
Normal file
0
playbook/docker/monitoring/files/telegraf.h1.conf
Normal file
0
playbook/docker/monitoring/files/telegraf.h2.conf
Normal file
30
playbook/docker/monitoring/grafana.yml
Normal file
@@ -0,0 +1,30 @@
---

- name: Deploy Grafana in a container
  hosts: localhost

  tasks:
    - name: Stop container
      docker_container:
        name: grafana
        state: stopped

    - name: destroy container
      docker_container:
        name: grafana
        state: absent

    - name: Deploy container
      docker_container:
        name: grafana
        image: grafana/grafana
        state: started
        env:
          GF_INSTALL_PLUGINS: andig-darksky-datasource
          #GF_SECURITY_ADMIN_PASSWORD: secret
        ports:
          - 3000:3000
        volumes:
          - "~/docker/grafana/data:/var/lib/grafana"
          #- "~/docker/grafana/config:/etc/grafana/"
96
playbook/docker/monitoring/influxdb.yml
Normal file
@@ -0,0 +1,96 @@
---

- name: Deploy InfluxDB
  hosts: d1
  become: true
  vars:
    containers:
      - "monitor_db_1"
      - "monitor_web_1"
      - "monitor_alert_1"

    images:
      - 'influxdb:1.8-alpine'
      - 'chronograf:1.8-alpine'
      - 'kapacitor:1.5-alpine'
      - 'grafana/grafana:6.7.2'

  tasks:
    - name: stop and remove containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"

    - name: pull images
      docker_image:
        name: "{{ item }}"
        source: pull
      loop: "{{ images }}"

    - name: Generate Telegraf vmware Config
      include_role:
        name: telegraf_cfg
      vars:
        telegraf_config_dir: /docker/influx/vmware/
        telegraf_target: vmware
        telegraf_vmware_hosts: "http://192.168.0.75/sdk, http://192.168.0.230/sdk"
        telegraf_vmware_username: root
        telegraf_vmware_password: Lm38iq

    - name: Deploy Influx Stack
      docker_compose:
        project_name: monitor
        definition:
          version: "3"
          networks:
            influx:
          services:
            db:
              image: "{{ images[0] }}"
              restart: always
              ports:
                - 8086:8086
              volumes:
                - /docker/influx/db:/var/lib/influxdb
              networks:
                influx:

            web:
              image: "{{ images[1] }}"
              restart: always
              ports:
                - 8888:8888
              volumes:
                - /docker/influx/web:/var/lib/chronograf
              networks:
                influx:

            alert:
              image: "{{ images[2] }}"
              restart: always
              ports:
                - 9092:9092
              volumes:
                - /docker/influx/alert:/var/lib/kapacitor
              networks:
                influx:

            #vmware:
            #  image:

            #dash:
            #  image: "{{ images[3] }}"
            #  restart: always
            #  ports:
            #    - 3000:3000
            #  volumes:
107
playbook/docker/newsbot/backup.yml
Normal file
@@ -0,0 +1,107 @@
---
- name: testing backup plan
  hosts: newsbot
  become: true
  vars:
    backup: false
  tasks:
    - block:
        - set_fact:
            pathLocal: /docker/cfg/newsbot/database
            container: newsbot_app_1

        - name: "{{ container }} - Ensure backup dir is present"
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: "{{ container }} - Check on old backups"
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: "{{ container }} - Remove old backups"
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: "{{ container }} - Stop container"
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: "{{ container }} - Generate backup"
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"

        - name: "{{ container }} - Copy backup"
          copy:
            src: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"
            backup: true
            remote_src: true

        - name: "{{ container }} - Remove temp file"
          #shell: "rm {{ PathNfsBackup }}/{{ container }}/temp.tgz"
          file:
            path: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"
            state: absent

        - name: "{{ container }} - Start container"
          docker_container:
            name: "{{ container }}"
            state: started

    - block:
        - set_fact:
            pathLocal: /docker/cfg/newsbot_sin/database
            container: newsbot_sin_1

        - name: "{{ container }} - Ensure backup dir is present"
          file:
            path: "{{ pathNfsBackup }}/{{ container }}"
            state: directory

        - name: "{{ container }} - Check on old backups"
          find:
            path: "{{ pathNfsBackup }}/{{ container }}"
            age: 4w
          register: searchRes

        - name: "{{ container }} - Remove old backups"
          file:
            path: "{{ item.path }}"
            state: absent
          loop: "{{ searchRes.files }}"

        - name: "{{ container }} - Stop container"
          docker_container:
            name: "{{ container }}"
            state: stopped

        - name: "{{ container }} - Generate backup"
          community.general.archive:
            path: "{{ pathLocal }}"
            dest: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"

        - name: "{{ container }} - Copy backup"
          copy:
            src: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"
            dest: "{{ pathNfsBackup }}/{{ container }}/backup.tgz"
            backup: true
            remote_src: true

        - name: "{{ container }} - Remove temp file"
          file:
            path: "{{ pathNfsBackup }}/{{ container }}/temp.tgz"
            state: absent

        - name: "{{ container }} - Start container"
          docker_container:
            name: "{{ container }}"
            state: started
103
playbook/docker/newsbot/deploy.yml
Normal file
@@ -0,0 +1,103 @@
---

- name: Deploy Newsbot
  hosts: newsbot
  become: true
  vars:
    image: jtom38/newsbot:0.6.0

  tasks:
    - debug:
        msg: "Deploying image: {{ image }}"

    - name: stop containers
      docker_container:
        name: "{{ item }}"
        state: absent
      ignore_errors: true
      loop:
        - "newsbot_app_1"
        - "newsbot_sin_1"

    - name: Pull Images
      docker_image:
        name: "{{ image }}"
        source: pull
        state: present
        force_source: true

    - name: Deploy Newsbot
      docker_compose:
        project_name: newsbot
        definition:
          version: "2"

          networks:
            newsbot:
              external: false

          services:
            app:
              image: "{{ image }}"
              environment:
                NEWSBOT_POGO_ENABLED: 'true'
                NEWSBOT_POGO_HOOK: "{{ mm_pogo_hooks }}"

                NEWSBOT_PSO2_ENABLED: 'true'
                NEWSBOT_PSO2_HOOK: "{{ mm_pso2_hooks }}"

                NEWSBOT_FFXIV_ALL: 'false'
                NEWSBOT_FFXIV_TOPICS: 'True'
                NEWSBOT_FFXIV_NOTICES: 'false'
                NEWSBOT_FFXIV_MAINTENANCE: 'false'
                NEWSBOT_FFXIV_UPDATES: 'false'
                NEWSBOT_FFXIV_STATUS: 'false'
                NEWSBOT_FFXIV_HOOK: "{{ mm_ffxiv_hooks }}"

                NEWSBOT_REDDIT_SUB_0: "ProgrammerHumor"
                NEWSBOT_REDDIT_HOOK_0: "{{ mm_programmer_humor_hooks }}"

                NEWSBOT_YOUTUBE_URL_0: 'https://www.youtube.com/user/loadingreadyrun/'
                NEWSBOT_YOUTUBE_NAME_0: "LoadingReadyRun"
                NEWSBOT_YOUTUBE_HOOK_0: "{{ mm_lrr_hooks }}"

                #NEWSBOT_TWITTER_API_KEY: "{{ twitter_api_key }}"
                #NEWSBOT_TWITTER_API_KEY_SECRET: "{{ twitter_api_key_secret }}"
                #NEWSBOT_TWITTER_USER_NAME_0: "GenshinImpact"
                #NEWSBOT_TWITTER_USER_HOOK_0: "{{ mm_genshin_hooks }}"

                NEWSBOT_INSTAGRAM_USER_NAME_1: madmax_fluffyroad
                NEWSBOT_INSTAGRAM_USER_HOOK_1: "{{ mm_happyfeed_hooks }}"
                NEWSBOT_INSTAGRAM_TAG_NAME_1: corgi
                NEWSBOT_INSTAGRAM_TAG_HOOK_1: "{{ mm_happyfeed_hooks }}"

                NEWSBOT_TWITCH_CLIENT_ID: "{{ twitch_client_id }}"
                NEWSBOT_TWITCH_CLIENT_SECRET: "{{ twitch_client_secret }}"

              restart: always

              networks:
                - newsbot

              volumes:
                - /docker/cfg/newsbot/database:/app/mounts/database
                - /docker/logs/newsbot/logs:/app/mounts/logs

            sin:
              image: "{{ image }}"
              environment:
                NEWSBOT_REDDIT_SUB_0: "Cringetopia"
                NEWSBOT_REDDIT_HOOK_0: "{{ sin_newsbot_hooks }}"

                NEWSBOT_REDDIT_SUB_1: "cursedfood"
                NEWSBOT_REDDIT_HOOK_1: "{{ sin_newsbot_hooks }}"
              restart: always

              networks:
                - newsbot

              volumes:
                - /docker/cfg/newsbot_sin/database:/app/mounts/database
                - /docker/logs/newsbot_sin/logs:/app/mounts/logs
93
playbook/docker/nextcloud/deploy.yml
Normal file
@@ -0,0 +1,93 @@
---

- name: Deploy NextCloud
  hosts: nextcloud
  become: true
  vars:
    removeLocalData: false
    localData:
      - /docker/cfg/nextcloud_app/
      - /docker/cfg/nextcloud_db/
    containers:
      - nextcloud_db_1
      - nextcloud_cache_1
      - nextcloud_app_1

  tasks:
    - name: Remove Existing containers
      docker_container:
        name: "{{ item }}"
        state: absent
      loop: "{{ containers }}"

    - name: Remove local data
      file:
        path: "{{ item }}"
        state: absent
      loop: "{{ localData }}"
      when: removeLocalData == True

    - name: Deploy containers
      docker_compose:
        project_name: "nextcloud"
        definition:
          version: "3"

          networks:
            nextcloudBack:
              #ipam:
              #  driver: default
              #  config:
              #    - subnet: 172.16.30.0/16

            #nextcloudFront:
            #  external: false

          services:
            cache:
              image: redis:6.0.9-alpine
              ports:
                - 6379:6379
              networks:
                nextcloudBack:
                  #ipv4_address: 172.16.30.10

            db:
              image: postgres:13.0-alpine
              volumes:
                - /docker/cfg/nextcloud_db/:/var/lib/postgresql/data
              environment:
                POSTGRES_USER: nextcloud
                POSTGRES_PASSWORD: "pgcloud"
                POSTGRES_DB: nextcloud
              ports:
                - 5432:5432
              networks:
                nextcloudBack:
                  #ipv4_address: 172.16.30.20

            app:
              image: nextcloud:20.0.1
              volumes:
                - /docker/cfg/nextcloud_app/html:/var/www/html/
                #- /docker/cfg/nextcloud_app/data:/var/www/html/data
                #- /docker/cfg/nextcloud_app/custom_apps:/var/www/html/custom_apps
                #- /docker/cfg/nextcloud_app/config:/var/www/html/config
              environment:
                #REDIS_HOST: nextcloud_cache_1
                #REDIS_HOST_PORT: 6379

                POSTGRES_DB: nextcloud
                POSTGRES_USER: nextcloud
                POSTGRES_PASSWORD: "pgcloud"
                POSTGRES_HOST: nextcloud_db_1
              ports:
                - 8090:80
                #- 8091:443
              networks:
                nextcloudBack:
                  #ipv4_address: 172.16.30.30
39
playbook/docker/nginx.yml
Normal file
@@ -0,0 +1,39 @@
---

- name: Ensure nginx is deployed
  hosts: mediaserver-back
  become: true
  vars:
    discord_webhook: "{{ discord_mmt_hook }}"
    discord_name: "Ansible Monitor - Backend"
    discord_type: "fancy"
    discord_title: "nginx deployment"
    discord_color: "12255487"

  tasks:
    - include_role:
        name: luther38.discord_webhook
      vars:
        discord_message: "Nginx is getting rebooted... please wait..."

    - name: Stop and destroy Container
      docker_container:
        name: nginx
        state: absent
      ignore_errors: true

    - name: Pull image
      docker_image:
        name: nginx
        source: pull

    - name: Deploy nginx
      docker_container:
        name: nginx
        image: nginx
        state: started
        restart_policy: unless-stopped
        ports:
          - 80:80
        volumes:
          - '/docker/cfg/nginx:/etc/nginx/'
55
playbook/docker/pihole.yml
Normal file
@@ -0,0 +1,55 @@
---
- name: Deploy PiHole
  hosts: pihole
  become: true
  vars:
    image: pihole/pihole:v5.2.1
  tasks:
    - name: stop containers
      docker_container:
        name: pihole_app_1
        state: absent
      ignore_errors: true

    - name: Pull images
      docker_image:
        name: "{{ image }}"
        source: pull

    - name: Disable resolved for pihole
      shell: sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf

    - name: Update local resolved config
      shell: sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'

    - name: restart resolved
      systemd:
        name: systemd-resolved
        state: restarted

    - name: deploy containers
      docker_compose:
        project_name: pihole
        definition:
          version: "2"

          services:
            app:
              image: "{{ image }}"
              ports:
                - "53:53/tcp"
                - "53:53/udp"
                - "67:67/udp"
                - "80:80/tcp"
                - "443:443/tcp"
              environment:
                TZ: 'America/Los_Angeles'
                WEBPASSWORD: 'pihole'
              volumes:
                - /docker/pihole/etc/pihole/:/etc/pihole/
                - /docker/pihole/etc/dnsmasq:/etc/dnsmasq.d/
              restart: always
              #network_mode: host
3
playbook/docker/readme.md
Normal file
@@ -0,0 +1,3 @@
# docker

The folders here contain the tasks that are run against each host group. These files are not really for interfacing with the applications themselves; they focus on the health and maintenance of the containers.
48
playbook/docker/syncthing/syncthing.yml
Normal file
@@ -0,0 +1,48 @@
---
- name: Deploy Syncthing
  hosts: mediaserver-back
  become: true
  vars:
    #pathConfig: ~/docker/syncthing/config
    # This will place the config in the common location
    #pathConfig: '/docker/cfg/syncthing/'

    # No data should be stored on the host device
    #pathData: ~/docker/syncthing/data
    #pathData: '/docker/nfs/sync'

  tasks:
    - name: stop container
      docker_container:
        name: synct
        state: stopped
      ignore_errors: true

    - name: Pull container
      docker_image:
        name: linuxserver/syncthing
        source: pull

    - name: Destroy old Container
      docker_container:
        name: synct
        state: absent
      ignore_errors: true

    - name: Deploy new container
      docker_container:
        name: synct
        image: linuxserver/syncthing
        state: started
        restart_policy: unless-stopped
        env:
          PUID: "0"
          PGID: "0"
        ports:
          - 8384:8384
          - 22000:22000
          - 21027:21027/udp
        volumes:
          - "{{ pathConfig }}/syncthing:/config"
          - "{{ pathNfsSync }}:/data"
2
playbook/infrastructure/containers/nextcloud.yml
Normal file
@@ -0,0 +1,2 @@
---
157
playbook/infrastructure/containers/nextcloud/main.tf
Normal file
@@ -0,0 +1,157 @@
provider "docker" {
  host = "http://192.168.0.241:2375"
}

resource "docker_image" "nextcloud" {
  name = "nextcloud:19.0.1-apache"
}

resource "docker_image" "postgres" {
  name = "postgres:12.3"
}

resource "docker_image" "redis" {
  name = "redis:6.0.6-alpine"
}

resource "docker_image" "proxy" {
  name = "nginx:1.19.1-alpine"
}

resource "docker_volume" "nextcloud_web_data" {
  name = "nextcloud_web_data"
}

resource "docker_volume" "nextcloud_db_data" {
  name = "nextcloud_db_data"
}

resource "docker_network" "nextcloud" {
  name   = "nextcloud"
  driver = "bridge"
  ipam_config {
    subnet  = "172.200.0.0/16"
    gateway = "172.200.0.1"
  }
}

resource "docker_container" "nextcloud_proxy" {
  count = 1
  name  = "nextcloud_proxy_${count.index}"
  image = docker_image.proxy.latest

  ports {
    internal = 80
    external = 80
  }

  upload {
    file = "/etc/nginx/nginx.conf"
    #content = file("nextcloud.conf")
    content = <<EOF
events { }
http {

  upstream nextcloud {
    server ${docker_container.nextcloud_web.network_data.ip_address}:80;
  }

  server {
    server_name example.local;
    location / {
      proxy_pass http://nextcloud_web_0:80;
    }

    location /nextcloud {
      proxy_pass http://nextcloud;
    }
  }
}
EOF
  }

  networks_advanced {
    name = docker_network.nextcloud.name
  }
}

resource "docker_container" "nextcloud_cache" {
  count = 1
  name  = "nextcloud_cache_${count.index}"
  image = docker_image.redis.latest

  ports {
    internal = 6379
    external = 6379
  }

  #env = ["value"]

  networks_advanced {
    name = docker_network.nextcloud.name
  }
}

resource "docker_container" "nextcloud_db" {
  count = 1
  name  = "nextcloud_db_${count.index}"
  image = docker_image.postgres.latest

  ports {
    internal = 5432
    external = 5432
  }

  volumes {
    volume_name    = docker_volume.nextcloud_db_data.name
    container_path = "/var/lib/postgresql/data"
  }

  env = [
    "POSTGRES_PASSWORD=password",
    "POSTGRES_DB=nextcloud",
    "POSTGRES_USER=nextcloudAdmin"
  ]

  networks_advanced {
    name = docker_network.nextcloud.name
    #ipv4_address = "172.200.0.11"
  }
}

resource "docker_container" "nextcloud_web" {
  #count = 2
  #name  = "nextcloud_web_${count.index}"
  name = "nextcloud_web_0"

  image = docker_image.nextcloud.latest

  ports {
    internal = 80
    #external = 8080
  }

  volumes {
    volume_name    = docker_volume.nextcloud_web_data.name
    container_path = "/var/www/html"
  }

  env = [
    "POSTGRES_DB=nextcloud",
    "POSTGRES_USER=nextcloudAdmin",
    "POSTGRES_PASSWORD=password",
    "POSTGRES_HOST=nextcloud_db_0",
    "REDIS_HOST=nextcloud_cache_1",
    "REDIS_HOST_PORT=6379"
  ]

  networks_advanced {
    name = docker_network.nextcloud.name
    #ipv4_address = "172.200.0.10"
  }
}
10
playbook/kube/extract-kube-config.yaml
Normal file
@@ -0,0 +1,10 @@
- name: Install Kube admin file into workstation
  hosts: kube-master
  become: true

  tasks:
    - name: Copy config file from master
      ansible.builtin.fetch:
        src: /etc/kubernetes/admin.conf
        dest: ~/.kube/config
        flat: yes
43
playbook/kube/install-node.yml
Normal file
@@ -0,0 +1,43 @@
# Notes.
# Disable swap for kube; kubelet refuses to start with swap on by default.
# https://askubuntu.com/questions/912623/how-to-permanently-disable-swap-file

# The difference between the master and node is defined in the inventory.

- name: Install Kubernetes
  hosts: kube
  become: true
  vars:
    kubernetes_allow_pods_on_master: false
    kubernetes_kubelet_extra_args: "--fail-swap-on=false"
    kubernetes_enable_web_ui: true
    kubernetes_web_ui_manifest_file: https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml

  tasks:
    #- include_role:
    #    name: geerlingguy.docker

    - name: Disable swap
      shell: swapoff -a

    - include_role:
        name: geerlingguy.kubernetes

    - name: Mount kube nfs share
      mount:
        src: "{{ kube_fs_ip }}:{{ kube_fs_mount }}"
        path: "{{ kube_fs_mount }}"
        fstype: nfs
        boot: yes
        state: mounted
      ignore_errors: true

    - name: Mount kube backups nfs share
      mount:
        src: "192.168.1.85:/kube/"
        path: "/mnt/kube/backup"
        fstype: nfs
        boot: yes
        state: mounted
      ignore_errors: true
22
playbook/kube/minecraftServer.yml
Normal file
@@ -0,0 +1,22 @@
- name: Kube - Test
  hosts: kube-master
  become: true

  tasks:
    - name: copy configs to disk
      copy:
        dest: /tmp/kube/configs/
        src: ./

    # https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#creating-a-deployment
    - name: run deployment
      shell: kubectl apply -f /tmp/kube/configs/deployments/minecraft.yml

    - name: Run Services
      shell: kubectl apply -f /tmp/kube/configs/services/minecraft.yml

    #- name: Remove configs from disk
    #  file:
    #    src: /tmp/kube/configs/
    #    state: absent
110
playbook/kube/nextcloud.yml
Normal file
@@ -0,0 +1,110 @@
---

- name: Deploy NextCloud on Kube
  hosts: kube-master

  tasks:
    - name: define nextcloud storage
      community.kubernetes.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolume

          metadata:
            name: nextcloud-pv

          spec:
            capacity:
              storage: 10Gi

            accessModes:
              - ReadWriteOnce
            persistentVolumeReclaimPolicy: Retain
            storageClassName: local-storage
            local:
              path: /kube/volumes/nextcloud
            nodeAffinity:
              required:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/hostname
                        operator: In
                        values:
                          - k8s-worker-01

    - name: define storage claim
      community.kubernetes.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: PersistentVolumeClaim
          metadata:
            name: nextcloud-pvc
          spec:
            accessModes:
              - ReadWriteOnce
            storageClassName: local-storage
            resources:
              requests:
                storage: 10Gi

    - name: Define NextCloud Service
      community.kubernetes.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Service

          metadata:
            name: nextcloud-service

          spec:
            selector:
              app: nextcloud

            type: NodePort
            ports:
              - protocol: TCP
                port: 8083
                targetPort: 80

    - name: Create NextCloud Deployment
      community.kubernetes.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: nextcloud-deployment
            labels:
              app: nextcloud

          spec:
            replicas: 2

            selector:
              matchLabels:
                app: nextcloud

            template:
              metadata:
                labels:
                  app: nextcloud

              spec:
                containers:
                  - name: nextcloud
                    image: nextcloud:latest
                    #env:
                    #  - name:
                    ports:
                      - containerPort: 80
                    volumeMounts:
                      - name: nextcloud-storage
                        mountPath: /var/www/html # Nextcloud keeps its data under /var/www/html
                volumes:
                  - name: nextcloud-storage
                    persistentVolumeClaim:
                      claimName: nextcloud-pvc
28
playbook/kube/restore-app-backup.yaml
Normal file
@@ -0,0 +1,28 @@
# Notes.
# Disable swap for kube; kubelet refuses to start with swap on by default.
# https://askubuntu.com/questions/912623/how-to-permanently-disable-swap-file

# The difference between the master and node is defined in the inventory.

- name: Install Kube commons
  hosts: kube_node01
  become: true
  vars:
    backup_path: "/mnt/kube/backup/backup/media-son-config/media-son-config-12282021.011010.tar.gz"

  tasks:
    - name: Copy backup to node
      ansible.builtin.copy:
        src: "{{ backup_path }}"
        dest: /tmp
        mode: "0644"
        remote_src: true

    - name: Unzip
      ansible.builtin.unarchive:
        src: "{{ backup_path }}"
        dest: "/tmp"
        remote_src: true

    #- name: Move Backups
58
playbook/kube/setup-media-requirements.yaml
Normal file
@@ -0,0 +1,58 @@
---
- name: Install Kubernetes media requirements
  hosts: kube_media_node
  become: true

  tasks:
    - name: Generate directory for /kube/son/config
      ansible.builtin.file:
        path: /kube/son/config
        state: directory
        mode: '0755'

    - name: Generate directory for /kube/son/download
      ansible.builtin.file:
        path: /kube/son/download
        state: absent
        mode: '0755'

    - name: Generate directory for /kube/search/config
      ansible.builtin.file:
        path: /kube/search/config
        state: directory
        mode: '0755'

    - name: Generate directory for /kube/rad/config
      ansible.builtin.file:
        path: /kube/rad/config
        state: directory
        mode: '0755'

    - name: Add Cron job to rsync the data
      ansible.builtin.cron:
        name: "sync /kube/son/config"
        cron_file: "son_config_backup"
        job: rsync -r /kube/son/config/* /mnt/kube/backup/backup/media-son-config
        user: root
        hour: 0
        minute: 0

    - name: Add Cron job to rsync the search config
      ansible.builtin.cron:
        name: "sync /kube/search/config"
        cron_file: "search_config_backup"
        job: rsync -r /kube/search/config/* /mnt/kube/backup/backup/media-search-config
        user: root
        hour: 0
        minute: 0

    - name: Add Cron job to rsync the rad config
      ansible.builtin.cron:
        name: "sync /kube/rad/config"
        cron_file: "rad_config_backup"
        job: rsync -r /kube/rad/config/* /mnt/kube/backup/backup/media-rad-config
        user: root
        hour: 0
        minute: 0
9
playbook/linux/ansible_config.yml
Normal file
@@ -0,0 +1,9 @@
- name: Make Ansible user
  ansible.builtin.user:
    name: ansible
    state: present
@@ -1,16 +0,0 @@
---

- name: enable
  hosts: linux

  tasks:
    - name: unattended-upgrades
      become: true
      include_role:
        name: jnv.unattended-upgrades
      vars:
        #unattended_package_blacklist: []
        unattended_automatic_reboot: true
17
playbook/linux/ceph/common.yml
Normal file
@@ -0,0 +1,17 @@
- name: Install Ceph
  hosts: ceph
  become: true
  vars:
    ceph_hosts:
      - hostname: k8sfs
        address: 192.168.1.222
      - hostname: k8sfs02
        address: 192.168.1.225
    ceph_network: 192.168.1.1/24

  roles:
    #- geerlingguy.docker
    - jtom38/ceph
27
playbook/linux/ceph/test.yml
Normal file
@@ -0,0 +1,27 @@
---

- name: debug
  hosts: ceph-primary
  become: true
  vars:
    ceph_network: 192.168.1.1/24
    ceph_monitors:
      - hostname: k8sfs
        address: 192.168.1.222
      - hostname: k8sfs02
        address: 192.168.1.225
  tasks:
    - debug:
        msg: "{{ item }}"
      with_items: "{{ ceph_monitors }}"

    - file:
        path: /tmp/test
        state: touch

    - name: write file
      lineinfile:
        path: /tmp/test
        line: "{{ item.address }} {{ item.hostname }}"
      with_items: "{{ ceph_monitors }}"
11
playbook/linux/certbot.yml
Normal file
@@ -0,0 +1,11 @@
---

- name: deploy certbot
  hosts: mediaserver
  become: true
  vars:
    certbot_auto_renew_user: miharu
    certbot_auto_renew_minute: "20"
    certbot_auto_renew_hour: "5"
  roles:
    - geerlingguy.certbot
19
playbook/linux/common.yml
Normal file
@@ -0,0 +1,19 @@
---
# This is a common playbook for all Linux servers.
# It contains the basics needed to get servers started.
# Once this is installed and maintained, look for server-based roles such as docker servers.

- name: Install Linux Requirement
  hosts: linux-all
  become: yes

  vars:
    pip_package: python3-pip

  roles:
    #- geerlingguy.git
    #- geerlingguy.pip
    #- geerlingguy.ntp
    #- jnv.unattended-upgrades
    #- jtom38.monit
    - jtom38.linux_common
@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC3TCCAcWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADASMRAwDgYDVQQDDAdTZW5z
dUNBMB4XDTE5MDQyODE3NTMwMloXDTI0MDQyNjE3NTMwMlowITEOMAwGA1UEAwwF
c2Vuc3UxDzANBgNVBAoMBmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBAMBFLZ/mgAOdKJ2YUkqzjZHKsRyvNxixX9I3LWXJCMfFnWuUOLau5UaE
rS6ZbtO1N4djsi6xSyBhPSu2hjPt9KgniTesaKZDwlLO2HLrOpUpmKPPpLxnBym9
m/nXWaeuTLAnnNtP/wU4Jwvp1u9qMu5tIYdy+hTd5LJSQcfjgrt5ydHzLbwn9UyE
2pcMawEgOaoywY9i6Ofhfsr5hwLkR3/3VS5PfJ2sVsO0Ks2vBW091BaQSwQAarpR
ExMHmTrcHoHtWFI0RiFxZ+MoakL5380VSmzhAs8QPxYWYc3PLndhYt4pH6TLcCOF
LpY8qk6S/acHuWHgdl+GIgyk5jKqnkECAwEAAaMvMC0wCQYDVR0TBAIwADALBgNV
HQ8EBAMCB4AwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDQYJKoZIhvcNAQEFBQADggEB
AG/MiB8QHvJlGrF1Xa5UHs/ykFJj1n+JzeniC5p3nApnRmpgi9KNlDZqRXjotuww
uvaDlsRpFp+X4NukUUR8aUUZpwYbIm/wgXJ376Su0nUmpFmCU2TrGkk/cMeqbAen
OYe5WZxsmJnmmkwhHLybrvha/vsCTNV6GY2JcHNhI8R7Uvwna48ueg7/WBQ5oXqZ
zdYXMaFD2ioBFaYZqVifWv+5d1av2VBveX1V5p7ZZ3LHsvNS8/eVWufu5I4mwJI9
GRPakzY0emL9ZBbtsZtsNA7IA6w4l4WeQtu1DHPc2iYO+JwfpeUNVX65ANSicqjC
ibyhYEZs3qI/rb3WPXy6l0I=
-----END CERTIFICATE-----
@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAwEUtn+aAA50onZhSSrONkcqxHK83GLFf0jctZckIx8Wda5Q4
tq7lRoStLplu07U3h2OyLrFLIGE9K7aGM+30qCeJN6xopkPCUs7Ycus6lSmYo8+k
vGcHKb2b+ddZp65MsCec20//BTgnC+nW72oy7m0hh3L6FN3kslJBx+OCu3nJ0fMt
vCf1TITalwxrASA5qjLBj2Lo5+F+yvmHAuRHf/dVLk98naxWw7Qqza8FbT3UFpBL
BABqulETEweZOtwege1YUjRGIXFn4yhqQvnfzRVKbOECzxA/FhZhzc8ud2Fi3ikf
pMtwI4UuljyqTpL9pwe5YeB2X4YiDKTmMqqeQQIDAQABAoIBAFxnovLLa9DQ0jlT
gJFIVAyydoaLqxYiASRdwmK9yIuCbRLL7KnXyncmwri3ouz6lhJqlrMcIDgSo7yD
f2Irxb6fKbJpGO53eEgmAx7P8JrJoANygwDNH0MvTmw31G3jNhYfI6K/gpf2kcWG
//aWep3eMxQO7SPkNMqC//xaWnVQ0FLigNQjyFlgQrIZ3L4x7qFxcrkvTUIODGio
R6hs7fECwXZkvLB28//tiwLEuOHnWGkG64fDebXUBDHsFhY/ObtA9vJITGY2GlUi
1KFt9ZJd1JdMoV7EH5IwnA5YUN1NOtb5bwRaCddCMFH2lWsjzV1hNTZ9MzNyFqIF
eolkKKUCgYEA6xR0LR3/stMPOWvgdaiXACHsH2hLx7Yh1vOf97eBbdUgiqjeL7DW
mUmXIBLOQwrKMWNX0+DAqeuY80ESBmQ5KhRR/Sws2FMXGcqgyNPdJYAruif8y4z9
0fGdvES1Fe12lOzyfPJclJi6doglyTjoJS5KGXUz8womJH4eiWZd+98CgYEA0WFx
SPttK8Oi9zKxh/6YzpvOaABm6pCUslg79smhPGdhj4M0sO1sS4KzOBBolcplT9e6
T1awh7ML44dowIFuQ0FgySnz5ogZt6xnqGv6bbfSVbMNpU4B9O4tJ2z16uFOXDeM
f0tS55fcbspJ1Dylc+ndyAurd5E/8z/2BnU6qd8CgYADs6bAryA/qKMsvE4kjCsU
jXQyamoHEw8lW2DBfdpD6H9Cr7YP+jDm6QnAL4uf8qOMc4wGghuGkXcvHW8zOpDL
4NYJrpBmN6i9dztg7jUlSgdmPwr0CZxVmgBp3osbdUnQvopy/T4H+P+2rh4qNQMy
0q/IBthyk05WdMX2U+5W8QKBgFSBwqpVKBvYyyaAZFziKiSBiA47003q6skMia8y
dAwgIaU9rH+YY/QaHWGMZdnHJZrTFBQ/heJPJoY/ucywsKMeeQTYFOO/nLmgMPou
EpZD8fW63dARKwMDOmBGPv78zpazqNYbvatRhJuGs8OgcprVEjlSVHNewXPZJeA3
YmT7AoGAJuMaSA6oZqn0uKJD0FDwIl4j0RfVhPJHe9Um1G1K2FpZ3DV705kcwx1t
IUu9pHLFJubwpkQFiERX/6BRbjbp4oZhpPLcLRec5nXTT8LHoiCBMaQW2RtnDMeW
XKt2xyhGFp0Drw4vWV0Nr8fJbuBbAqviZTQnBtj7ZJ41KRV1mU4=
-----END RSA PRIVATE KEY-----
@@ -1,17 +0,0 @@
-----BEGIN CERTIFICATE-----
MIICxDCCAaygAwIBAgIJAPX7448uFrdyMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
BAMMB1NlbnN1Q0EwHhcNMTkwNDI4MTc1MjU3WhcNMjQwNDI2MTc1MjU3WjASMRAw
DgYDVQQDDAdTZW5zdUNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
sI4ptnAIEJISxDYMVZIi6vF6GcnzXDyXl4Et9m86QF9+Zyfe4zomGDnfp7wfhddS
6asPHMxcgXi9itY6qr33lzdDL4SaMysS/VwWLBwhmdl2hEELPvUKHBF96iyfuq4A
lsQ3lAXr/3uqXdODNo38hGaxrK2n1ocKFEKZrGlmrFDvfYKJz1cYlDh5u0ghjJGQ
E/MCDeQzGNOjcbSbNUo5nMR8P6nzPcMDHjtA0OS4DXSijvjibHPhZ/NU9KgoTz9W
oL8FoePlL6Zq6cwiEKCOUsqivIPbM3nGGNkPBHmSE0dnYXn0le+LK3rkNX60ZdwE
fqisAIaHSVQWVlTw4J8xlQIDAQABox0wGzAMBgNVHRMEBTADAQH/MAsGA1UdDwQE
AwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAp1MPCS8tKdUGrT07yHosw7+Gxc++/ylM
cmS9GLiwAfU4VU4QEy97ipL4K8VLWbrGVvJSpgxAApLA0jX7R2UcYTYeTk9ikuto
BeQRxcj6QdR8BKD4N7Qtje6jBVMJ6Ssky3Kj1XXcEQu4iZx9uZCX2yeCeozXaLtS
+Tw3r9NjgIXGvhLCp64JTC+rL74S7cMwAIW5YBRy/K4uBdLKBcjYIi7VQnivsfGu
J2+28+kfNw7nNWBdVWtBf6MoJQNEDvpx+HGRBCJoSlgw+GTRgbgCqEPJrXBdbamU
SDJtCEdYonQqUCqqCI083ckx8c31YBg1COTZBQnWQiYVpcIfXG7j/A==
-----END CERTIFICATE-----
@@ -1,18 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIC3TCCAcWgAwIBAgIBATANBgkqhkiG9w0BAQUFADASMRAwDgYDVQQDDAdTZW5z
dUNBMB4XDTE5MDQyODE3NTI1OVoXDTI0MDQyNjE3NTI1OVowITEOMAwGA1UEAwwF
c2Vuc3UxDzANBgNVBAoMBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
AQoCggEBALEltlZMg7u1rqFbnljmD+IeiVeRt0zzRiCEpjvQ4t+bBjT5onPAOxYI
Q1d3MdPJqA+lyCRP/sXcEKa1l14UDj50WEruK0VqXKL+e2ETeJi4kJb8k8ansCAI
Ask5Ok2d8bTSQLzJBCkjwvR5kfG49R5wfJFDSA3WLfTHq1myRibJIMgbFGB2UP3Q
yyljZWn04IO72yWhK413CxwnwXKsIFT5/z0hVGZMr5wDWpfhBhtBi6uxqeKG3Zyy
CV/f3yUcOL+A9yoxPu155TNYfvmz1rqarTeuOJJJU7TtAiHmue8OhkfRFanBBYj9
hSOGPdLB9eKzoWsS8vLKLUTwaQwZ9IsCAwEAAaMvMC0wCQYDVR0TBAIwADALBgNV
HQ8EBAMCBSAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDQYJKoZIhvcNAQEFBQADggEB
ABPZUxDIGJ6C8hu1aOj5sY/r8yphotSnPVkghBTGVJbjmGHSci+IGbHX6yemVYvH
mQWKI8qBdroiIpCOpMVvmG6oUR4s+h/vdKuDoy/x3lRZjJDQiReAGKwwyeiG++wJ
x6eSCDGqcIWvk72Zgd+OGym3JGrDpU7ofat+ncqtIunAOh7rhQlyRJ42wYZpWDIi
Aass4yn16aYhF/PppUIsBYrWk1UUlKbXOF/Z7WOG4Hg6h5HwwtJZq/PGsSzJqd/O
s6XI8Am1pU9PwLwWm9Vad44OhTNWGxsidboUCxNa7Yc7p5CkAqT+Z2Lf7RfvgmcX
SUCwSN9REpYGV3k9l47eljY=
-----END CERTIFICATE-----
@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAsSW2VkyDu7WuoVueWOYP4h6JV5G3TPNGIISmO9Di35sGNPmi
c8A7FghDV3cx08moD6XIJE/+xdwQprWXXhQOPnRYSu4rRWpcov57YRN4mLiQlvyT
xqewIAgCyTk6TZ3xtNJAvMkEKSPC9HmR8bj1HnB8kUNIDdYt9MerWbJGJskgyBsU
YHZQ/dDLKWNlafTgg7vbJaErjXcLHCfBcqwgVPn/PSFUZkyvnANal+EGG0GLq7Gp
4obdnLIJX9/fJRw4v4D3KjE+7XnlM1h++bPWupqtN644kklTtO0CIea57w6GR9EV
qcEFiP2FI4Y90sH14rOhaxLy8sotRPBpDBn0iwIDAQABAoIBAFtnsiXlZTO+E1V/
CL2mOBvc1dExhvtVq6Gr0Hqc1fO68gDzrjc7wUElElpXToaRTv6D9DmIbVV6r7zV
hj0s7Aydy9EeA4XV0+bmmJMGkPt8gF7oBPhEHkTo3UcnGEZkcQt0UaMXteXkZfvv
nrazUQdb02rA5LT/Bsd/H5MwwbHQyipMXKQXpYyzALhoBUrXItc+aHfINHOELs0h
UPSoFnNSsQo1VGSd/TCZJYYw2cpmeTqWO4sM6z8vYXJnNQTCb2saW+vywfQoYTJ7
V6mSmX7EgYh512jNpNdzhQx8qN1hmWF/r5G9DC4QSnzVoN23fi4H+szB9CEfVlPy
pGj6qUECgYEA1zwPaLjz9XgeZiHrLYDCFeNRYE4Noa9mFuuplYxmiIJGsBiUNHNJ
bbMn8VpuBBptEUnSTCGJhAF39AGKfUHx+49hTKTUISmnTDOSHLeE1mKvZJWB3x4r
3ezfsUVwV4BvidYQEv0FWuE+lniDmx2BVQk7vIiF5VjUxMmyqnB8cEUCgYEA0rLw
LtSYod0VzFLs8NlMH9nhfQk7oSfyxqLVwpiAQVAtrI3xfQUaYP04BrV/XOI+YBcF
Svg4Ou4tqcuGFFYtqNPAaGYfih7UzEY8Z6wH2rkyznCq7VQZexKKtTbPQCNSkJ5h
fpNxfh4sXZSpYg/aIEr6OC8REuhcjRjhJBWJJo8CgYAsPN316j3KMBwfZc1Olu5N
TWGGZ8SJfOGAyIMch7TzTcN1ojej6CYpc+87vhhqo3vTV9bvat020o5zCnYKdKll
yPx4olAvWL5X/SmE2XtmDPZ7t/bvguYFQRBhASKr+Wvzapn3LSYSncUdbDuwgAn7
DmDGyVCr6OwiXkpomaIZ+QKBgCZIpSOdNW6TwVYy6yKIGTDgYfxaJR+PJqm5BKYr
F4LGksX7tJlGyBg/amKtr8qswTCsfiW1HGJ4zItBk8c2MW2vrBJMHAb4uymyyV78
/yBa7kRcbHJbCZY3NEThBJ9ey63DWWuqVsDXsq/+RxiuUK/1b6mtw6hv2AE7OA1a
bGU5AoGBANL+ssYI1JH1TFRwI8iTc/no2Loy2jZ2NGyZbU/gc3NhhVERNgtK8nmM
dcYrgmewKKS20+AqqbM7zITYdJea6RTKU6ELJul2iKMDSwA65cEwueqAT6WY7x57
z0fBzoaLRQp11SSuuPz9p0a096XGygQP1o2SabZCwY4b3+vtkbJM
-----END RSA PRIVATE KEY-----
@@ -1,56 +0,0 @@
- name: Install Docker CE
  hosts: linux

  tasks:
    - name: Check if Docker is installed
      #failed_when: "'Failed' Docker was already installed."
      apt:
        name: docker
        state: absent

    - name: Install dependencies
      become: true
      become_method: sudo
      apt:
        name: "{{ packages }}"
        # state: absent
      vars:
        packages:
          - apt-transport-https
          - ca-certificates
          - curl
          - gnupg-agent
          - software-properties-common

    - name: Install Docker GPG key
      become: true
      become_method: sudo
      apt_key:
        url: "https://download.docker.com/linux/ubuntu/gpg"
        state: present
        id: 9DC858229FC7DD38854AE2D88D81803C0EBFCD88

    - name: Install Docker Repo
      become: true
      become_method: sudo
      apt_repository:
        repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu/ {{ ansible_distribution_release }} stable"
        state: present

    - name: Update Repos
      become: true
      become_method: sudo
      apt:
        update_cache: yes

    - name: Install Docker
      become: true
      become_method: sudo
      apt:
        name: "{{ packages }}"
      vars:
        packages:
          - docker-ce
          - docker-ce-cli
          - containerd.io
@@ -1,12 +0,0 @@
- name: Deploy OwnCloud
  hosts: linux
  become_method: sudo

  tasks:
    - name: Check if docker is installed.
      apt:
        name: docker
        state: present
Some files were not shown because too many files have changed in this diff