remove playbook examples from main repo. Fear not, these are now in the ansible/ansible-examples repo :)
parent 293314f777
commit 5aad416ffe

50 changed files with 0 additions and 1773 deletions

@@ -1,56 +0,0 @@
# ansible-pull setup
#
# on remote hosts, set up ansible to run periodically using the latest code
# from a particular checkout, in pull based fashion, inverting Ansible's
# usual push-based operating mode.
#
# This particular pull based mode is ideal for:
#
# (A) massive scale out
# (B) continual system remediation
#
# DO NOT RUN THIS AGAINST YOUR HOSTS WITHOUT CHANGING THE repo_url
# TO SOMETHING YOU HAVE PERSONALLY VERIFIED
#
#
---

- hosts: pull_mode_hosts
  user: root

  vars:

    # schedule is fed directly to cron
    schedule: '*/15 * * * *'

    # User to run ansible-pull as from cron
    cron_user: root

    # File that ansible will use for logs
    logfile: /var/log/ansible-pull.log

    # Directory to where repository will be cloned
    workdir: /var/lib/ansible/local

    # Repository to check out -- YOU MUST CHANGE THIS
    # repo must contain a local.yml file at top level
    #repo_url: git://github.com/sfromm/ansible-playbooks.git
    repo_url: SUPPLY_YOUR_OWN_GIT_URL_HERE

  tasks:

    - name: Install ansible
      action: yum pkg=ansible state=installed

    - name: Create local directory to work from
      action: file path={{workdir}} state=directory owner=root group=root mode=0751

    - name: Copy ansible inventory file to client
      action: copy src=/etc/ansible/hosts dest=/etc/ansible/hosts
              owner=root group=root mode=0644

    - name: Create crontab entry to clone/pull git repository
      action: template src=templates/etc_cron.d_ansible-pull.j2 dest=/etc/cron.d/ansible-pull owner=root group=root mode=0644

    - name: Create logrotate entry for ansible-pull.log
      action: template src=templates/etc_logrotate.d_ansible-pull.j2 dest=/etc/logrotate.d/ansible-pull owner=root group=root mode=0644

@@ -1,19 +0,0 @@
# ordinarily, without the 'serial' keyword set, ansible will control all of your machines in a play at once, in parallel.
# if you want to perform a rolling update, so that each play completes all the way through on a certain number of hosts
# before moving on to the remaining hosts, use the 'serial' keyword like so:

---
- hosts: all
  serial: 3

  # now each of the tasks below will complete on 3 hosts before moving on to the next 3, regardless of how many
  # hosts are selected by the "hosts:" line

  tasks:

    - name: ping
      action: ping
    - name: ping2
      action: ping

@@ -1,45 +0,0 @@
---
# This playbook demonstrates how to use the ansible cloudformation module to launch an AWS CloudFormation stack.
#
# This module requires that the boto python library is installed, and that you have your AWS credentials
# in $HOME/.boto

# The thought here is to bring up a bare infrastructure with CloudFormation, but use ansible to configure it.
# I generally do this in 2 different playbook runs so as to allow the ec2.py inventory to be updated.

# This module also uses "complex arguments" which were introduced in ansible 1.1, allowing you to specify the
# CloudFormation template parameters.

# This example launches a 3 node AutoScale group, with a security group, and an InstanceProfile with root permissions.

# If a stack does not exist, it will be created. If it does exist and the template file has changed, the stack will be updated.
# If the parameters are different, the stack will also be updated.

# CloudFormation stacks can take a while to provision; if you are curious about their status, use the AWS
# web console or one of the CloudFormation CLIs.

# Example update -- try first launching the stack with 3 as the ClusterSize. After it is launched, change it to 4
# and run the playbook again.

- name: provision stack
  hosts: localhost
  connection: local
  gather_facts: false

  # Launch the cloudformation-example.json template. Register the output.

  tasks:
    - name: launch ansible cloudformation example
      cloudformation: >
        stack_name="ansible-cloudformation" state=present
        region=us-east-1 disable_rollback=true
        template=files/cloudformation-example.json
      args:
        template_parameters:
          KeyName: jmartin
          DiskType: ephemeral
          InstanceType: m1.small
          ClusterSize: 3
      register: stack
    - name: show stack outputs
      debug: msg="My stack outputs are {{stack.stack_outputs}}"

@@ -1,50 +0,0 @@
---

# this is a bit of an advanced topic.
#
# generally Ansible likes to pass simple key=value arguments to modules. It occasionally comes up though
# that you might want to write a module that takes COMPLEX arguments, like lists and dictionaries. For this to
# happen, at least right now, it should be a Python module, so it can leverage some common code in Ansible that
# makes this easy. If you write a non-Python module, you can still pass data across, but only hashes that
# do not contain lists or other hashes. If you write the Python module, you can do anything.
#
# note that if you were to use BOTH the key=value form and the 'args' form for passing data in, the key=value
# parameters take a higher priority, so you can use them for defaults, which can be useful.

- hosts: all
  user: root
  gather_facts: no

  vars:
    defaults:
      state: stopped
    complex:
      ghostbusters: [ 'egon', 'ray', 'peter', 'winston' ]
      mice: [ 'pinky', 'brain', 'larry' ]

  tasks:

    - name: this is the basic way data passing works for any module
      action: ping data='Hi Mom'

    - name: of course this can also be written like so, which is shorter
      ping: data='Hi Mom'

    - name: but what if you have a complex module that needs complicated data?
      action: ping
      args:
        data:
          moo: cow
          asdf: [1,2,3,4]

    - name: can we make that cleaner? sure!
      action: ping
      args: { data: $complex }

    - name: here is an example of how it works with defaults, notice the key=value format wins
      action: service name=httpd state=running
      args: $defaults

@@ -1,50 +0,0 @@
---
# this is a demo of conditional imports. This is a powerful concept
# and can be used to use the same recipe for different types of hosts,
# based on variables that bubble up from the hosts from tools such
# as ohai or facter.
#
# Here's an example use case:
#
# what to do if the service for apache is named 'httpd' on CentOS
# but is named 'apache' on Debian?


# there is only one play in this playbook, it runs on all hosts
# as root

- hosts: all
  user: root

  # we have a common list of variables stored in /vars/external_vars.yml
  # that we will always import

  # next, we want to import files that are different per operating system
  # and if no per operating system file is found, load a defaults file.
  # for instance, if the OS was "CentOS", we'd try to load vars/CentOS.yml.
  # if that was found, we would immediately stop. However if that wasn't
  # present, we'd try to load vars/defaults.yml. If that in turn was not
  # found, we would fail immediately, because we had gotten to the end of
  # the list without importing anything.

  vars_files:

    - "vars/external_vars.yml"

    - [ "vars/{{ facter_operatingsystem }}.yml", "vars/defaults.yml" ]

  # and this is just a regular task line from a playbook, as we're used to.
  # but with variables in it that come from above. Note that the variables
  # from above are *also* available in templates

  tasks:

    - name: ensure apache is latest
      action: "{{ packager }} pkg={{ apache }} state=latest"

    - name: ensure apache is running
      action: service name={{ apache }} state=running

@@ -1,40 +0,0 @@
---
# this is a demo of conditional executions using 'when' statements, which can skip
# certain tasks on machines/platforms/etc where they do not apply.

- hosts: all
  user: root

  vars:
    favcolor: "red"
    dog: "fido"
    cat: "whiskers"
    ssn: 8675309

  tasks:

    - name: "do this if my favcolor is blue, and my dog is named fido"
      action: shell /bin/false
      when: favcolor == 'blue' and dog == 'fido'

    - name: "do this if my favcolor is not blue, and my dog is named fido"
      action: shell /bin/true
      when: favcolor != 'blue' and dog == 'fido'

    - name: "do this if my SSN is over 9000"
      action: shell /bin/true
      when: ssn > 9000

    - name: "do this if I have one of these SSNs"
      action: shell /bin/true
      when: ssn in [ 8675309, 8675310, 8675311 ]

    - name: "do this if a variable named hippo is NOT defined"
      action: shell /bin/true
      when: hippo is not defined

    - name: "do this if a variable named hippo is defined"
      action: shell /bin/true
      when: hippo is defined

@@ -1,6 +0,0 @@
---

- name: Demonstrate custom jinja2 filters
  hosts: all
  tasks:
    - action: template src=templates/custom-filters.j2 dest=/tmp/custom-filters.txt

@@ -1,39 +0,0 @@
---

# this is an example of how we can perform actions on a given host on behalf of all the hosts
# in a play.
#
# The two main uses of this would be signalling an outage window for hosts that
# we are going to start upgrading, or to take a machine out of rotation by talking to a load
# balancer.
#
# This example cheats by replacing the load balancer script with the 'echo' command,
# leaving actual communication with the load balancer as an exercise to the reader. In reality,
# you could call anything you want, the main thing is that it should do something with
# {{inventory_hostname}}

# NOTE: see batch_size_control.yml for an example of the 'serial' keyword, which you almost certainly
# want to use in this kind of example. Here we have a mocked up example that does something to
# 5 hosts at a time

- hosts: all
  serial: 5

  tasks:

    - name: take the machine out of rotation
      action: command echo taking out of rotation {{inventory_hostname}}
      delegate_to: 127.0.0.1

    # here's an alternate notation if you are delegating to 127.0.0.1, you can use 'local_action'
    # instead of 'action' and leave off the 'delegate_to' part.
    #
    # - local_action: command echo taking out of rotation {{inventory_hostname}}

    - name: do several things on the actual host
      action: command echo hi mom {{inventory_hostname}}

    - name: put machine back into rotation
      action: command echo inserting into rotation {{inventory_hostname}}
      delegate_to: 127.0.0.1

@@ -1,33 +0,0 @@
---

# it is often useful to be able to set the environment for one command and have that environment be totally
# different for another. An example is that you might use an HTTP proxy for some packages but not for others.
#
# in Ansible 1.1 and later, you can pass the environment to any module using either a dictionary variable
# or a dictionary itself.


- hosts: all
  user: root

  # here we make a variable named "env" that is a dictionary
  vars:
    env:
      HI: test2
      http_proxy: http://proxy.example.com:8080

  tasks:

    # here we just define the dictionary directly and use it
    # (here $HI is the shell variable as nothing in Ansible will replace it)

    - shell: echo $HI
      environment:
        HI: test1

    # here we are using the $env variable above

    - shell: echo $HI
      environment: env

@@ -1,53 +0,0 @@
---
# This playbook is an example for deploying multiple instances into EC2/Euca and "doing something" with them.
# - uses the ec2 and ec2_vol modules.
#
# Run this with ansible-playbook and supply the private key for your EC2/Euca user (to access the instance in the second play), e.g.:
# ansible-playbook eucalyptus-ec2-deploy.yml -v --private-key=/path/to/ec2/pri/key

- name: Stage instance(s)
  hosts: local
  connection: local
  user: root
  gather_facts: false

  vars:
    keypair: mykeypair
    instance_type: m1.small
    security_group: default
    image: emi-048B3A37

  # Launch 5 instances with the following parameters. Register the output.

  tasks:
    - name: Launch instance
      local_action: ec2 keypair={{keypair}} group={{security_group}} instance_type={{instance_type}} image={{image}} wait=true count=5
      register: ec2

    # Use with_items to add each instance's public IP to a new hostgroup for use in the next play.

    - name: Add new instances to host group
      local_action: add_host hostname={{item.public_ip}} groupname=deploy
      with_items: ${ec2.instances}

    # Use the ec2_vol module to create volumes for attachment to each instance. Use with_items to attach to each instance (by returned id) launched previously.

    - name: Create a volume and attach
      local_action: ec2_vol volume_size=20 instance={{item.id}}
      with_items: ${ec2.instances}

# This play targets the new host group

- name: Configure instance
  hosts: deploy
  user: root
  gather_facts: True

  # Do some stuff on each instance ....

  tasks:
    - name: Ensure NTP is up and running
      action: service name=ntpd state=started

    - name: Install Apache Web Server
      action: yum pkg=httpd state=latest

@@ -1,18 +0,0 @@
---
# This is a demo of how to manage the selinux context using the file module
- hosts: test
  user: root
  tasks:
    - name: Change setype of /etc/exports to non-default value
      action: file path=/etc/exports setype=etc_t
    - name: Change seuser of /etc/exports to non-default value
      action: file path=/etc/exports seuser=unconfined_u
    - name: Set selinux context back to default value
      action: file path=/etc/exports context=default
    - name: Create empty file
      action: command /bin/touch /tmp/foo
    - name: Change setype of /tmp/foo
      action: file path=/tmp/foo setype=default_t
    - name: Try to set secontext to default, but this will fail
            because of the lack of a default in the policy
      action: file path=/tmp/foo context=default

@@ -1,399 +0,0 @@
{
  "Outputs" : {
    "ClusterSecGroup" : {
      "Description" : "Name of RegionalManagerSecGroup",
      "Value" : { "Ref" : "InstanceSecurityGroup" }
    }
  },
  "AWSTemplateFormatVersion" : "2010-09-09",
  "Description" : "Launches an example cluster",
  "Mappings" : {
    "ebs" : {
      "ap-northeast-1" : { "AMI" : "ami-4e6cd34f" },
      "ap-southeast-1" : { "AMI" : "ami-a6a7e7f4" },
      "eu-west-1" : { "AMI" : "ami-c37474b7" },
      "sa-east-1" : { "AMI" : "ami-1e08d103" },
      "us-east-1" : { "AMI" : "ami-1624987f" },
      "us-west-1" : { "AMI" : "ami-1bf9de5e" },
      "us-west-2" : { "AMI" : "ami-2a31bf1a" }
    },
    "ephemeral" : {
      "ap-northeast-1" : { "AMI" : "ami-5a6cd35b" },
      "ap-southeast-1" : { "AMI" : "ami-a8a7e7fa" },
      "eu-west-1" : { "AMI" : "ami-b57474c1" },
      "sa-east-1" : { "AMI" : "ami-1608d10b" },
      "us-east-1" : { "AMI" : "ami-e8249881" },
      "us-west-1" : { "AMI" : "ami-21f9de64" },
      "us-west-2" : { "AMI" : "ami-2e31bf1e" }
    }
  },
  "Parameters" : {
    "ClusterSize" : {
      "Description" : "Number of nodes in the cluster",
      "Type" : "String"
    },
    "DiskType" : {
      "AllowedValues" : [ "ephemeral", "ebs" ],
      "Default" : "ephemeral",
      "Description" : "Type of Disk to use ( ephemeral/ebs )",
      "Type" : "String"
    },
    "InstanceType" : {
      "AllowedValues" : [ "t1.micro", "m1.small", "m1.medium", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge", "cc1.4xlarge" ],
      "ConstraintDescription" : "must be valid instance type. ",
      "Default" : "m1.large",
      "Description" : "Type of EC2 instance for cluster",
      "Type" : "String"
    },
    "KeyName" : {
      "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the cluster",
      "Type" : "String"
    }
  },
  "Resources" : {
    "ApplicationWaitCondition" : {
      "DependsOn" : "ClusterServerGroup",
      "Properties" : {
        "Handle" : { "Ref" : "ApplicationWaitHandle" },
        "Timeout" : "4500"
      },
      "Type" : "AWS::CloudFormation::WaitCondition"
    },
    "ApplicationWaitHandle" : {
      "Type" : "AWS::CloudFormation::WaitConditionHandle"
    },
    "CFNInitUser" : {
      "Properties" : {
        "Path" : "/",
        "Policies" : [
          {
            "PolicyDocument" : {
              "Statement" : [
                {
                  "Action" : [ "cloudformation:DescribeStackResource", "s3:GetObject" ],
                  "Effect" : "Allow",
                  "Resource" : "*"
                }
              ]
            },
            "PolicyName" : "AccessForCFNInit"
          }
        ]
      },
      "Type" : "AWS::IAM::User"
    },
    "CFNKeys" : {
      "Properties" : {
        "UserName" : { "Ref" : "CFNInitUser" }
      },
      "Type" : "AWS::IAM::AccessKey"
    },
    "ClusterCommunication1" : {
      "Properties" : {
        "FromPort" : "-1",
        "GroupName" : { "Ref" : "InstanceSecurityGroup" },
        "IpProtocol" : "icmp",
        "SourceSecurityGroupName" : { "Ref" : "InstanceSecurityGroup" },
        "ToPort" : "-1"
      },
      "Type" : "AWS::EC2::SecurityGroupIngress"
    },
    "ClusterCommunication2" : {
      "Properties" : {
        "FromPort" : "1",
        "GroupName" : { "Ref" : "InstanceSecurityGroup" },
        "IpProtocol" : "tcp",
        "SourceSecurityGroupName" : { "Ref" : "InstanceSecurityGroup" },
        "ToPort" : "65356"
      },
      "Type" : "AWS::EC2::SecurityGroupIngress"
    },
    "ClusterCommunication3" : {
      "Properties" : {
        "FromPort" : "1",
        "GroupName" : { "Ref" : "InstanceSecurityGroup" },
        "IpProtocol" : "udp",
        "SourceSecurityGroupName" : { "Ref" : "InstanceSecurityGroup" },
        "ToPort" : "65356"
      },
      "Type" : "AWS::EC2::SecurityGroupIngress"
    },
    "InstanceSecurityGroup" : {
      "Properties" : {
        "GroupDescription" : "Enable SSH access via port 22",
        "SecurityGroupIngress" : [
          { "CidrIp" : "0.0.0.0/0", "FromPort" : "22", "IpProtocol" : "tcp", "ToPort" : "22" }
        ]
      },
      "Type" : "AWS::EC2::SecurityGroup"
    },
    "LaunchConfig" : {
      "Properties" : {
        "IamInstanceProfile" : { "Ref" : "RootInstanceProfile" },
        "ImageId" : { "Fn::FindInMap" : [ { "Ref" : "DiskType" }, { "Ref" : "AWS::Region" }, "AMI" ] },
        "InstanceType" : { "Ref" : "InstanceType" },
        "KeyName" : { "Ref" : "KeyName" },
        "SecurityGroups" : [ { "Ref" : "InstanceSecurityGroup" } ],
        "UserData" : {
          "Fn::Base64" : {
            "Fn::Join" : [ "\n", [
              "#!/bin/bash -v",
              "exec > >(tee /var/log/cfn-data.log|logger -t user-data -s 2>/dev/console) 2>&1",
              "",
              "sleep 10",
              "",
              "function retry {",
              " nTrys=0",
              " maxTrys=5",
              " status=256",
              " until [ $status == 0 ] ; do",
              " $1",
              " status=$?",
              " nTrys=$(($nTrys + 1))",
              " if [ $nTrys -gt $maxTrys ] ; then",
              " echo \"Number of re-trys exceeded. Exit code: $status\"",
              " exit $status",
              " fi",
              " if [ $status != 0 ] ; then",
              " echo \"Failed (exit code $status)... retry $nTrys\"",
              " sleep 10",
              " fi",
              " done",
              "}",
              "",
              "yum update -y aws-cfn-bootstrap",
              "",
              "#for all the stuff that complains about sudo and tty",
              "sed -i 's,Defaults requiretty,#Defaults requiretty,g' /etc/sudoers",
              "",
              "function error_exit",
              "{",
              { "Fn::Join" : [ "", [ " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", { "Ref" : "ApplicationWaitHandle" }, "'" ] ] },
              "}",
              "yum update -y aws-cfn-bootstrap",
              "#this runs the first stage of cfinit",
              { "Fn::Join" : [ "", [ "#/opt/aws/bin/cfn-init -c ascending -v --region ", { "Ref" : "AWS::Region" }, " -s ", { "Ref" : "AWS::StackName" }, " -r ", "LaunchConfig", " --access-key ", { "Ref" : "CFNKeys" }, " --secret-key ", { "Fn::GetAtt" : [ "CFNKeys", "SecretAccessKey" ] }, " || error_exit 'Failed to initialize client using cfn-init'" ] ] },
              "",
              "",
              "",
              "result_code=$?",
              { "Fn::Join" : [ "", [ "/opt/aws/bin/cfn-signal -e $result_code '", { "Ref" : "ApplicationWaitHandle" }, "'" ] ] }
            ] ]
          }
        }
      },
      "Type" : "AWS::AutoScaling::LaunchConfiguration"
    },
    "ClusterServerGroup" : {
      "Properties" : {
        "AvailabilityZones" : { "Fn::GetAZs" : "" },
        "LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
        "MaxSize" : { "Ref" : "ClusterSize" },
        "MinSize" : { "Ref" : "ClusterSize" }
      },
      "Type" : "AWS::AutoScaling::AutoScalingGroup"
    },
    "RolePolicies" : {
      "Properties" : {
        "PolicyDocument" : {
          "Statement" : [ { "Action" : "*", "Effect" : "Allow", "Resource" : "*" } ]
        },
        "PolicyName" : "root",
        "Roles" : [ { "Ref" : "RootRole" } ]
      },
      "Type" : "AWS::IAM::Policy"
    },
    "RootInstanceProfile" : {
      "Properties" : {
        "Path" : "/",
        "Roles" : [ { "Ref" : "RootRole" } ]
      },
      "Type" : "AWS::IAM::InstanceProfile"
    },
    "RootRole" : {
      "Properties" : {
        "AssumeRolePolicyDocument" : {
          "Statement" : [
            {
              "Action" : [ "sts:AssumeRole" ],
              "Effect" : "Allow",
              "Principal" : { "Service" : [ "ec2.amazonaws.com" ] }
            }
          ]
        },
        "Path" : "/"
      },
      "Type" : "AWS::IAM::Role"
    }
  }
}

@@ -1,29 +0,0 @@
# (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

class FilterModule(object):
    ''' Custom filters are loaded by FilterModule objects '''

    def filters(self):
        ''' FilterModule objects return a dict mapping filter names to
            filter functions. '''
        return {
            'generate_answer': self.generate_answer,
        }

    def generate_answer(self, value):
        return '42'

@@ -1,16 +0,0 @@
---
- hosts: webservers
  vars:
    - jquery_directory: /var/www/html/javascript
    - person: 'Susie%20Smith'
  tasks:
    - name: Create directory for jQuery
      action: file dest={{jquery_directory}} state=directory mode=0755
    - name: Grab a bunch of jQuery stuff
      action: get_url url=http://code.jquery.com/{{item}} dest={{jquery_directory}} mode=0444
      with_items:
        - jquery.min.js
        - mobile/latest/jquery.mobile.min.js
        - ui/jquery-ui-git.css
    #- name: Pass urlencoded name to CGI
    #  action: get_url url=http://example.com/name.cgi?name='{{person}}' dest=/tmp/test

@@ -1,35 +0,0 @@
---
# Example playbook to demonstrate the group_by action plugin.
#
# as we know, the setup module will automatically run in each play, and sets up various
# facts. We can then create temporary (in memory only) groups based on those facts, which
# are useful ways of selecting similar sets of hosts.
#
# Additionally, we can use the 'register' keyword in Ansible to set similar variables
# and use those for grouping. This is not shown in this example.

- hosts: all

  tasks:

    - name: Create a group of all hosts by operating system
      action: group_by key={{ansible_distribution}}-{{ansible_distribution_version}}

# the following host group does not exist in inventory and was created by the group_by
# module.

- hosts: CentOS-6.2

  tasks:

    - name: ping all CentOS 6.2 hosts
      action: ping

- hosts: CentOS-6.3

  tasks:

    - name: ping all CentOS 6.3 hosts
      action: ping

@@ -1,18 +0,0 @@
---
# This is a demo of how the group command works.

- hosts: all
  user: root
  sudo: yes

  tasks:

    # Walk through group creation, modification, and deletion
    - name: create a group
      action: group name=tset

    # You can only modify the group's gid
    - action: group name=tset gid=7777

    # And finally remove the group
    - action: group name=tset state=absent

@@ -1,10 +0,0 @@
---

# this is an example to show that handlers can be included from yaml files,
# to promote reuse between different plays or even playbooks. They work
# just like normal handlers.

- name: restart apache
  action: service name=httpd state=restarted
- name: restart memcached
  action: service name=memcached state=restarted

@@ -1,91 +0,0 @@
---
# see intro_example.yml first!
# This file explains some more advanced features of playbooks.
# because of the comments it's less concise than it normally is. But feel
# free to comment your playbooks if you like.

- hosts: all

  # we can define variables the normal way...

  vars:
    release: 2.0

  # but they can also come from other files. This can be a relative
  # or absolute path. This is a good way to store 'secret' variable
  # files but still keep the playbook in public source control

  vars_files:
    - vars/external_vars.yml

  # as with before, every play has a list of tasks in it

  tasks:

    # tasks can be written the normal way...

    - name: arbitrary command
      action: command /bin/true

    # or we can promote reuse and simplicity by including tasks
    # from other files, for instance, to reuse common tasks

    - include: tasks/base.yml

    # we could also have done something like:
    # - include: wordpress.yml user=timmy
    # and had access to the template variable $user in the
    # included file, if we wanted to. Variables from vars
    # and vars_files are also available inside include files

  handlers:

    # handlers can also be included from files, to promote reuse
    # and simpler recipes, you may wish to only have one
    # handler file for all your plays and playbooks. This example really
    # doesn't notify any handlers, it is just showing you how they would
    # be included (see intro_example for usage).

    - include: handlers/handlers.yml

    # you can mix things that are directly in the file with things
    # that are included. Order is executed as written, but only
    # handlers that have been notified get executed

    - name: restart foo
      action: service name=foo state=restarted

# ===============================================================

# Here's a second play in the same playbook. This will be run
# after the first play completes on all hosts. You may want
# a different play for each class of systems, or may want a different
# play for each stage in a complex multi-node deployment push
# process. How you use them is up to you.

# any play in a playbook can be executed by a user other than root
# if you want. sudo support is coming too.

- hosts: webservers
  user: mdehaan

  # vars must be specified again for the next play in the playbook
  # but can be reused by including from vars_files if you want
  # you can use vars, vars_files, or both. vars_files overrides
  # those set in vars.

  vars:
    release: 2.0
  vars_files:
    - vars/external_vars.yml

  # these all run as the user 'mdehaan'. If there were any handlers
  # they would as well.

  tasks:

    - name: some random command
      action: command /bin/true

@@ -1,76 +0,0 @@
---
# this is an annotated example of some features available in playbooks
# it shows how to make sure packages are updated, how to make sure
# services are running, and how to template files. It also demos
# change handlers that can restart things (or trigger other actions)
# when resources change. For more advanced examples, see example2.yml

# on all hosts, run as the user root...

- name: example play
  hosts: all
  user: root

  # could also have done:
  # user: mdehaan
  # sudo: yes

  # make these variables available inside of templates
  # for when we use the 'template' action/module later on...

  vars:
    http_port: 80
    max_clients: 200

  # define the tasks that are part of this play...

  tasks:

    # task #1 is to run an arbitrary command
    # we'll simulate a long running task, wait for up to 45 seconds, poll every 5
    # obviously this does nothing useful but you get the idea

    - name: longrunner
      action: command /bin/sleep 15
      async: 45
      poll: 5

    # let's demo file operations.
    #
    # We can 'copy' files or 'template' them instead, using jinja2
    # as the templating engine. This is done using the variables
    # from the vars section above mixed in with variables bubbled up
    # automatically from tools like facter and ohai. 'copy'
    # works just like 'template' but does not do variable substitution.
    #
    # If and only if the file changes, restart apache at the very
    # end of the playbook run

    - name: write some_random_foo configuration
      action: template src=templates/foo.j2 dest=/etc/some_random_foo.conf
      notify:
        - restart apache

    # make sure httpd is installed at the latest version

    - name: install httpd
      action: yum pkg=httpd state=latest

    # make sure httpd is running

    - name: httpd start
      action: service name=httpd state=running

  # handlers are only run when things change, at the very end of each
  # play. Let's define some. The names are significant and must
  # match the 'notify' sections above

  handlers:

    # this particular handler is run when some_random_foo.conf
    # is changed, and only then

    - name: restart apache
      action: service name=httpd state=restarted

@@ -1,10 +0,0 @@
---
# this is a trivial example of how to do a nested loop.

- hosts: all
  tasks:
    - shell: echo "nested test a={{ item[0] }} b={{ item[1] }} c={{ item[2] }}"
      with_nested:
        - [ 'red', 'blue', 'green' ]
        - [ 1, 2, 3 ]
        - [ 'up', 'down', 'strange' ]

@@ -1,20 +0,0 @@
---

# in addition to loop_with_items, the loop that works over a variable, ansible can do more sophisticated looping.

# developer types: these are powered by 'lookup_plugins' should you ever decide to write your own
# see lib/ansible/runner/lookup_plugins/fileglob.py -- they can do basically anything!

- hosts: all
  gather_facts: no

  tasks:

    # this will copy a bunch of config files over -- dir must be created first

    - file: dest=/etc/fooapp state=directory

    - copy: src={{ item }} dest=/etc/fooapp/ owner=root mode=600
      with_fileglob: /playbooks/files/fooapp/*

@@ -1,30 +0,0 @@
---
# this is an example of how to run repeated task elements over lists
# of items, for example, installing multiple packages or configuring
# multiple users

- hosts: all
  user: root

  tasks:

    - name: install packages
      action: yum name={{ item }} state=installed
      with_items:
        - cobbler
        - httpd

    - name: configure users
      action: user name={{ item }} state=present groups=wheel
      with_items:
        - testuser1
        - testuser2

    - name: remove users
      action: user name={{ item }} state=absent
      with_items:
        - testuser1
        - testuser2

@@ -1,18 +0,0 @@
##
# Example Ansible playbook that uses the MySQL module.
#

---
- hosts: all
  user: root

  tasks:

    - name: Create database user
      action: mysql_user user=bob password=12345 priv=*.*:ALL state=present

    - name: Create database
      action: mysql_db db=bobdata state=present

    - name: Ensure no user named 'sally' exists and delete if found.
      action: mysql_user user=sally state=absent

@@ -1,26 +0,0 @@
---
# it is possible to have top level playbook files import other playbook
# files. For example, a playbook called site.yml could include three
# different playbooks, such as webservers, workers, dbservers, etc.
#
# Running the site playbook would run all playbooks, while individual
# playbooks could still be run directly. This is somewhat like
# the tag feature and can be used in conjunction for very fine grained
# control over what you want to target when running ansible.

- name: this is a play at the top level of a file
  hosts: all
  user: root
  tasks:
    - name: say hi
      tags: foo
      action: shell echo "hi..."

# and this is how we include another playbook, be careful and
# don't recurse infinitely or anything. Note you can't use
# any variables in the include path here.

- include: intro_example.yml

# and if we wanted, we can continue with more includes here,
# or more plays inline in this file

@@ -1,25 +0,0 @@
---
#
# NetScaler module example
#

- hosts: web-pool
  serial: 3
  vars:
    nsc_host: nsc.example.com
    nsc_user: admin
    nsc_pass: nimda
    # type of the netscaler object you want to manipulate
    type: service
    # netscaler object name
    name: "{{facter_fqdn}}:8080"

  tasks:
    - name: disable service in the lb
      action: netscaler nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=disable

    - name: deploy new code
      action: shell yum upgrade -y

    - name: enable in the lb
      action: netscaler nsc_host={{nsc_host}} user={{nsc_user}} password={{nsc_pass}} name={{name}} type={{type}} action=enable

@@ -1,41 +0,0 @@
##
# Example Ansible playbook that uses the PostgreSQL module.
#
# This installs PostgreSQL on an Ubuntu system, creates a database called
# "myapp" and a user called "django" with password "mysupersecretpassword"
# with access to the "myapp" database.
#
---
- hosts: webservers
  sudo: yes
  gather_facts: no

  tasks:
    - name: ensure apt cache is up to date
      action: apt update_cache=yes
    - name: ensure packages are installed
      action: apt name={{item}}
      with_items:
        - postgresql
        - libpq-dev
        - python-psycopg2

- hosts: webservers
  sudo: yes
  sudo_user: postgres
  gather_facts: no

  vars:
    dbname: myapp
    dbuser: django
    dbpassword: mysupersecretpassword

  tasks:
    - name: ensure database is created
      action: postgresql_db db={{dbname}}

    - name: ensure user has access to database
      action: postgresql_user db={{dbname}} user={{dbuser}} password={{dbpassword}} priv=ALL

    - name: ensure user does not have unnecessary privilege
      action: postgresql_user user={{dbuser}} role_attr_flags=NOSUPERUSER,NOCREATEDB

@@ -1,60 +0,0 @@
---

# it is possible to ask for variables from the user at the start
# of a playbook run, for example, as part of a release script.

- hosts: all
  user: root

  # regular variables are a dictionary of keys and values

  vars:
    this_is_a_regular_var: 'moo'
    so_is_this: 'quack'

  # alternatively, they can ALSO be passed in from the outside:
  # ansible-playbook foo.yml --extra-vars="foo=100 bar=101"
  # or through external inventory scripts (see online API docs)

  # here's basic mode prompting. Specify a hash of variable names and a prompt for
  # each.
  #
  # vars_prompt:
  #   release_version: "product release version"

  # prompts can also be specified like this, allowing for hiding the prompt as
  # entered. In the future, this may also be used to support crypted variables

  vars_prompt:
    - name: "some_password"
      prompt: "Enter password"
      private: yes

    - name: "release_version"
      prompt: "Product release version"
      default: "my_default_version"
      private: no

    - name: "my_password2"
      prompt: "Enter password2"
      private: yes
      encrypt: "md5_crypt"
      confirm: yes
      salt_size: 7
      salt: "foo"

  # this is just a simple example to show that vars_prompt works, but
  # you might ask for a tag to use with the git module or perhaps
  # a package version to use with the yum module.

  tasks:

    - name: imagine this did something interesting with {{release_version}}
      action: shell echo foo >> /tmp/{{release_version}}-alpha

    - name: look we crypted a password
      action: shell echo my password is {{my_password2}}

@@ -1,39 +0,0 @@
---
- hosts: rabbitmq
  sudo: true
  vars:
    rabbitmq_version: 3.0.2-1

  tasks:
    - name: ensure python-software-properties is installed
      apt: pkg=python-software-properties state=installed

    - name: add rabbitmq official apt repository
      apt_repository: repo='deb http://www.rabbitmq.com/debian/ testing main' state=present

    - name: install rabbitmq
      apt: pkg=rabbitmq-server={{rabbitmq_version}} state=installed force=yes

    - name: enable rabbitmq plugins
      rabbitmq_plugin: names=rabbitmq_management,rabbitmq_tracing,rabbitmq_federation state=enabled
      notify:
        - restart rabbitmq

    - name: add users
      rabbitmq_user: user={{item}} password=changeme tags=administrator,{{item}} vhost=/ configure_priv=.* write_priv=.* read_priv=.* state=present
      with_items:
        - user1
        - user2

    - name: remove default guest user
      rabbitmq_user: user=guest state=absent

    - name: ensure vhost /test is present
      rabbitmq_vhost: name=/test state=present

    - name: set federation local-username
      rabbitmq_parameter: component=federation name=local-username value='"user1"' state=present

  handlers:
    - name: restart rabbitmq
      service: name=rabbitmq-server state=restarted

@@ -1,33 +0,0 @@
# here's a cool advanced topic about how to perform conditional logic in ansible without resorting
# to writing your own module that defines facts. You can do that too, and it's easy to do, but
# often you just want to run a command and then decide whether to run some steps or not. That's
# easy to do, and here we'll show you how.

- name: test playbook
  user: root
  hosts: all

  tasks:

    # it is possible to save the result of any command in a named register. This variable will be made
    # available to tasks and templates made further down in the execution flow.

    - action: shell grep hi /etc/motd
      ignore_errors: yes
      register: motd_result

    # and here we access the register. Note that the variable is structured data because
    # it is a return from the command module. The shell module makes available variables such
    # as 'stdout', 'stderr', and 'rc'.

    # here we run the next action only if the previous grep returned true

    - action: shell echo "motd contains the word hi"
      when: motd_result.rc == 0

    # alternatively:

    - action: shell echo "motd contains the word hi"
      when: motd_result.stdout.find('hi') != -1

@@ -1,2 +0,0 @@
This is a file

@@ -1,10 +0,0 @@
---

- name: blippy
  shell: echo notifier called, and the value of x is '{{ x }}'

# within a role, it's possible to include other task files as well. By default, we
# can reference files in the same directory without doing anything special:

# - include: other.yml

@@ -1,14 +0,0 @@
---

- name: copy operation
  copy: src=foo.txt dest=/tmp/roles_test1.txt

- name: template operation
  template: src=foo.j2 dest=/tmp/roles_test2.txt
  notify:
    - blippy

- name: demo that parameterized roles work
  shell: echo just FYI, param1={{ param1 }}, param2={{ param2 }}

@@ -1 +0,0 @@
I am a {{ ansible_os_family }} distribution.

@@ -1,3 +0,0 @@
---
x: '{{ ansible_machine }}'

@@ -1,71 +0,0 @@
# in Ansible 1.2 and later, roles allow easy best-practices organization of content
# and maximize shareability of ansible building blocks.
#
# suppose a playbook applied to a group of hosts includes two roles, foo and bar.
#
# what do roles do in this case?
#
# listing the roles as foo and bar will auto include the following:
#
# tasks from ./roles/foo/tasks/main.yml, then ./roles/bar/tasks/main.yml
# handlers from ./roles/foo/handlers/main.yml, then ./roles/bar/handlers/main.yml
# vars from ./roles/foo/vars/main.yml, then ./roles/bar/vars/main.yml
#
# should any of these files not exist, that is ok, and they will simply not be loaded.
#
# should the task file in foo/tasks/main.yml want to include subtasks in other files, that
# is also permitted.
#
# templates and copy operations also get smarter about where to look for content when using
# roles.
#
# as an example, a task in foo/tasks/main.yml could copy or template a file by
# referencing a "src=foo.j2" rather than having to explicitly path src=roles/foo/templates/foo.j2.

---

- hosts: all

  pre_tasks:

    # these tasks are executed prior to roles.
    # this might be a good time to signal an outage window or take a host out of a load balanced pool

    - local_action: shell echo "hi this is a pre_task step about {{ inventory_hostname }}"

  roles:

    # a role can be listed flat like this:
    #
    # - common
    # - webservers

    # but you can also pass variables to them, so they can be parameterized. You can call
    # a role more than once with different parameters too. It might look like the section
    # below. Note I can also declare tags at this time.

    - { role: foo, param1: 1000, param2: 2000, tags: [ 'foo', 'bar' ] }
    - { role: foo, param1: 8000, param2: 9000, tags: [ 'baz' ] }

    # add as many roles as you like, roles takes a list of role names.
    # these paths can be qualified, but if bare, it will look for them in
    # roles/{{rolename}} relative to the playbook

  # explicit tasks and handlers can be used, but are not required.
  # they will run after the roles if present.

  tasks:

    # you can still have loose tasks/handlers and they will execute after roles are applied

    - shell: echo 'this is a loose task'

  post_tasks:

    # just to provide a syntactic mirroring to 'pre_tasks', these run absolute last in the play.
    # this might be a good time to put a host back in a load balanced pool or end an outage window

    - local_action: shell echo 'this is a post_task about {{ inventory_hostname }}'

@@ -1,57 +0,0 @@
# in Ansible 1.2 and later, roles allow easy best-practices organization of content
# and maximize shareability of ansible building blocks.
#
# suppose a playbook applied to a group of hosts includes two roles, foo and bar.
#
# what do roles do in this case?
#
# listing the roles as foo and bar will auto include the following:
#
# tasks from ./roles/foo/tasks/main.yml, then ./roles/bar/tasks/main.yml
# handlers from ./roles/foo/handlers/main.yml, then ./roles/bar/handlers/main.yml
# vars from ./roles/foo/vars/main.yml, then ./roles/bar/vars/main.yml
#
# should any of these files not exist, that is ok, and they will simply not be loaded.
#
# should the task file in foo/tasks/main.yml want to include subtasks in other files, that
# is also permitted.
#
# templates and copy operations also get smarter about where to look for content when using
# roles.
#
# as an example, a task in foo/tasks/main.yml could copy or template a file by
# referencing a "src=foo.j2" rather than having to explicitly path src=roles/foo/templates/foo.j2.

---

- hosts: all
  roles:

    # a role can be listed flat like this:
    #
    # - common
    # - webservers

    # but you can also pass variables to them, so they can be parameterized. You can call
    # a role more than once with different parameters too. It might look like this:

    - role: foo
      param1: '{{ item }}'
      param2: '{{ item + "/" + item }}'
      with_items: ['a','b','c']
      when: ansible_os_family == 'RedHat'

    # add as many roles as you like, roles takes a list of role names.
    # these paths can be qualified, but if bare, it will look for them in
    # roles/{{rolename}} relative to the playbook

  # explicit tasks and handlers can be used, but are not required.
  # they will run after the roles if present.

  tasks:

    # you can still have loose tasks/handlers and they will execute after roles

    - shell: echo 'this is a loose task'

@ -1,28 +0,0 @@
---
# this is an example of how to template a file over using some variables derived
# from the system. For instance, if you wanted to have different configuration
# templates by OS version, this is a neat way to do it. Any Ansible facts, facter facts,
# or ohai facts could be used to do this.

- hosts: all

  tasks:

    - name: template a config file
      action: template dest=/etc/imaginary_file.conf
      first_available_file:

        # first see if we have a file for this specific host
        - /srv/whatever/{{ansible_hostname}}.conf

        # next try to load something like CentOS6.2.conf
        - /srv/whatever/{{ansible_distribution}}{{ansible_distribution_version}}.conf

        # next see if there's a CentOS.conf
        - /srv/whatever/{{ansible_distribution}}.conf

        # finally give up and just use something generic
        - /srv/whatever/default

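As a worked illustration of the search order above: on a hypothetical CentOS 6.2 host named web1, the facts would expand roughly as follows, and the first file found is used as the template source:

  # ansible_hostname=web1, ansible_distribution=CentOS, ansible_distribution_version=6.2 (hypothetical host)
  # 1. /srv/whatever/web1.conf
  # 2. /srv/whatever/CentOS6.2.conf
  # 3. /srv/whatever/CentOS.conf
  # 4. /srv/whatever/default
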
@ -1,44 +0,0 @@
---
# tags allow us to run all of a playbook or just part of it.
#
# assume: ansible-playbook tags.yml --tags foo
#
# try this with:
#    --tags foo
#    --tags bar
#    --tags extra
#
# the value of a 'tags:' element can be a string or a list
# of tag names. Variables are not usable in tag names.

- name: example play one
  hosts: all
  user: root

  # any tags applied to the play are shorthand for applying
  # the tag to all tasks in it. Here, each task is given
  # the tag extra

  tags:
    - extra

  tasks:

    # this task will run if you don't specify any tags,
    # if you specify 'foo', or if you specify 'extra'

    - name: hi
      tags: foo
      action: shell echo "first task ran"

- name: example play two
  hosts: all
  user: root
  tasks:
    - name: hi
      tags:
        - bar
      action: shell echo "second task ran"
    - include: tasks/base.yml tags=base

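Putting the tags above together, a rough summary of what each invocation should select, assuming the playbook is saved as tags.yml as the header comment suggests:

  # ansible-playbook tags.yml --tags extra   -> every task in play one (play-level tag)
  # ansible-playbook tags.yml --tags foo     -> only the 'first task ran' task
  # ansible-playbook tags.yml --tags bar     -> only the 'second task ran' task
  # ansible-playbook tags.yml --tags base    -> only the tasks included from tasks/base.yml
  # ansible-playbook tags.yml                -> everything, since no tag filter is applied
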
@ -1,21 +0,0 @@
---

# this is an example of an included tasks file. It contains a flat list of tasks;
# they can notify other tasks, and have full access to variables from 'vars'
# or 'vars_files' directives. Further, if ohai or facter were installed on
# the remote machines, variables from those tools can be accessed on the 'action'
# line or in templates. Just prefix the particular variable with 'facter_' or 'ohai_'.

# possible uses for an included yaml file might be to represent a 'class' of system,
# like defining what makes up a webserver, or you might have a common 'base.yml'
# (like this) that is applied to all your systems.

- name: no selinux
  action: command /usr/sbin/setenforce 0

- name: no iptables
  action: service name=iptables state=stopped

- name: made up task just to show variables work here
  action: command /bin/echo release is $release

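Since the last task above expects a 'release' variable, a minimal sketch of a playbook that includes this file and supplies that variable could look like the following; the host group and value are assumptions:

  ---
  - hosts: all
    user: root
    vars:
      release: 1.0        # hypothetical value echoed by the included task
    tasks:
      - include: tasks/base.yml
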
@ -1 +0,0 @@
1 + 1 = {{ '1+1' | generate_answer }}

@ -1,2 +0,0 @@
# Cron job to git clone/pull a repo and then run locally
{{ schedule }} {{ cron_user }} ansible-pull -d {{ workdir }} -U {{ repo_url }} >>{{ logfile }} 2>&1

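For reference, with purely hypothetical values substituted for the variables, the rendered /etc/cron.d entry would come out looking something like this:

  # Cron job to git clone/pull a repo and then run locally
  */30 * * * * ansible ansible-pull -d /opt/ansible/local -U git://example.com/ops/site-config.git >>/var/log/pull.log 2>&1
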
@ -1,7 +0,0 @@
{{ logfile }} {
   rotate 7
   daily
   compress
   missingok
   notifempty
}

@ -1,10 +0,0 @@
# This is a very simple Jinja2 template representing an imaginary configuration file
# for an imaginary app.

# this is an example of loading a fact from the setup module
system={{ ansible_system }}

# here is a variable that could be set in a playbook or inventory file
http_port={{ http_port }}

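A minimal sketch of a play that supplies http_port and renders this template; the template and destination file names and the port value are assumptions, not part of the original example:

  ---
  - hosts: all
    user: root
    vars:
      http_port: 8080        # hypothetical value
    tasks:
      - name: render the imaginary configuration file
        action: template src=templates/imaginary.j2 dest=/etc/imaginary_app.conf owner=root group=root mode=0644
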
@ -1,7 +0,0 @@
# example of how to get the IP address of every machine in the webservers group
# for use in a template

{% for host in groups['webservers'] %}
HOST: {{ host }} IP: {{ hostvars[host]['ansible_all_ipv4_addresses'][0] }}
{% endfor %}

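Note that hostvars for other machines only contain facts that have already been gathered during the run (or cached), so a sketch of driving this template might first touch the webservers group; the 'reporting' group and file paths are assumptions:

  ---
  # gather facts for the webservers so their hostvars are populated
  - hosts: webservers
    tasks: []

  # then render the list on some other host
  - hosts: reporting
    tasks:
      - name: write out the webserver IP list
        action: template src=templates/webserver_ips.j2 dest=/etc/webserver_ips.txt
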
@ -1,30 +0,0 @@
# this just shows some tricks possible with variables in Ansible 1.2 and later.

---

- hosts: all

  vars:
    a_list:
      - a
      - b
      - c

  tasks:

    - shell: echo hello {{ ansible_hostname.upper() }}

    - shell: echo match
      when: 2 == 2

    - shell: echo no match
      when: 2 == 2 + 1

    - shell: echo {{ ansible_os_family }}

    - shell: echo {{ item }}
      with_items: a_list

    - shell: echo 'RedHat'
      when: ansible_os_family == 'RedHat'

@ -1,38 +0,0 @@
---
# this is a demo of how the user commands work and how to reference salted passwords
# in vars sections. You could also use vars_files if you like (see other examples)

- hosts: all
  user: root
  vars:
    # created with:
    # python -c 'import crypt; print crypt.crypt("This is my Password", "$1$SomeSalt$")'
    password: $1$SomeSalt$UqddPX3r4kH3UL5jq5/ZI.

  tasks:

    # Walk through account creation, modification, and deletion

    - name: test basic user account creation
      action: user name=tset comment=TsetUser group=users shell=/sbin/nologin createhome=no

    # the following is just a simple example of how you don't have to include
    # the 'name' element for each task

    - action: user name=tset comment=NyetUser
    - action: user name=tset password={{password}}

    # The following tasks add the user to supplementary groups.

    # Add the user to the groups dialout and uucp.
    - action: user name=tset groups=dialout,uucp

    # Add the user to the groups dialout and wheel.
    # This will remove tset from the group uucp.
    - action: user name=tset groups=dialout,wheel

    # Add the user to the group uucp. Because append=yes, the user
    # will not be removed from the groups dialout and wheel.
    - action: user name=tset groups=uucp append=yes

    # Finally, remove the user.
    - action: user name=tset state=absent

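As the header comment notes, the salted hash could also live in a vars_files file instead of being inlined in the play; a minimal sketch, with a hypothetical file name that simply defines 'password':

  ---
  - hosts: all
    user: root
    vars_files:
      - vars/user_passwords.yml      # hypothetical file defining 'password'
    tasks:
      - action: user name=tset password={{password}}
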
@ -1,3 +0,0 @@
---
apache: httpd
packager: yum

@ -1,3 +0,0 @@
---
packager: apt
apache: apache

@ -1,3 +0,0 @@
---
alpha: one
beta: two

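Vars files like the ones above are typically selected per platform with 'vars_files', where a nested list means "use the first file that exists". A minimal sketch follows; all file names here are assumptions, since the original paths are not shown in this view:

  ---
  - hosts: all
    vars_files:
      - [ "vars/{{ ansible_os_family }}.yml", "vars/defaults.yml" ]
    tasks:
      - name: show which values were picked up
        action: command /bin/echo {{ packager }} manages {{ apache }}
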
@ -1,33 +0,0 @@
---
##
# Example Ansible playbook that uses the zfs module.
#

- hosts: webservers
  gather_facts: no
  sudo: yes

  vars:
    pool: rpool

  tasks:

    - name: Create a zfs file system
      action: zfs name={{pool}}/var/log/httpd state=present

    - name: Create a zfs file system with a quota of 10GiB and a visible snapdir
      action: zfs name={{pool}}/ansible quota='10G' snapdir=visible state=present

    - name: Create a zfs snapshot of the above file system
      action: zfs name={{pool}}/ansible@mysnapshot state=present

    - name: Create a zfs volume named smallvol with a size of 10MiB
      action: zfs name={{pool}}/smallvol volsize=10M state=present

    - name: Remove the snapshot of rpool/oldfs
      action: zfs name={{pool}}/oldfs@oldsnapshot state=absent

    - name: Remove the file system rpool/oldfs
      action: zfs name={{pool}}/oldfs state=absent
