Mirror of https://github.com/geerlingguy/mac-dev-playbook (synced 2024-11-25 05:10:19 +00:00)
Laying the first stone
Commit 19cc54dc4d
2 changed files with 137 additions and 0 deletions
Vagrantfile (vendored, Normal file, 108 lines added)
@@ -0,0 +1,108 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant::Config.run do |config|
  # All Vagrant configuration is done here. The most common configuration
  # options are documented and commented below. For a complete reference,
  # please see the online documentation at vagrantup.com.

  # Every Vagrant virtual environment requires a box to build off of.
  config.vm.box = 'precise64'
  config.vm.network :bridged, :bridge => 'en1: Wi-Fi (AirPort)'
  config.vm.define :chat1
  config.vm.define :chat2

  # This enables provisioning through Ansible. The only current issue: when this
  # is enabled, Vagrant seems to boot and provision only one VM, not both, and
  # certainly not both concurrently... (A possible per-machine workaround is
  # sketched in the note after this file's diff.)
  # config.vm.provision :ansible do |ansible|
  #   ansible.playbook = 'zeromq.yml'
  #   ansible.hosts = 'chat_clients'
  # end

  # The url from where the 'config.vm.box' box will be fetched if it
  # doesn't already exist on the user's system.
  # config.vm.box_url = 'http://domain.com/path/to/above.box'

  # Boot with a GUI so you can see the screen. (Default is headless)
  # config.vm.boot_mode = :gui

  # Assign this VM to a host-only network IP, allowing you to access it
  # via the IP. Host-only networks can talk to the host machine as well as
  # any other machines on the same network, but cannot be accessed (through this
  # network interface) by any external networks.
  # config.vm.network :hostonly, '192.168.33.10'

  # Assign this VM to a bridged network, allowing you to connect directly to a
  # network using the host's network device. This makes the VM appear as another
  # physical device on your network.
  # config.vm.network :bridged

  # Forward a port from the guest to the host, which allows for outside
  # computers to access the VM, whereas host only networking does not.
  # config.vm.forward_port 80, 8080

  # Share an additional folder to the guest VM. The first argument is
  # an identifier, the second is the path on the guest to mount the
  # folder, and the third is the path on the host to the actual folder.
  # config.vm.share_folder 'v-data', '/vagrant_data', '../data'

  # Enable provisioning with Puppet stand alone. Puppet manifests
  # are contained in a directory path relative to this Vagrantfile.
  # You will need to create the manifests directory and a manifest in
  # the file precise64.pp in the manifests_path directory.
  #
  # An example Puppet manifest to provision the message of the day:
  #
  # # group { 'puppet':
  # #   ensure => 'present',
  # # }
  # #
  # # File { owner => 0, group => 0, mode => 0644 }
  # #
  # # file { '/etc/motd':
  # #   content => 'Welcome to your Vagrant-built virtual machine!
  # #               Managed by Puppet.\n'
  # # }
  #
  # config.vm.provision :puppet do |puppet|
  #   puppet.manifests_path = 'manifests'
  #   puppet.manifest_file = 'precise64.pp'
  # end

  # Enable provisioning with chef solo, specifying a cookbooks path, roles
  # path, and data_bags path (all relative to this Vagrantfile), and adding
  # some recipes and/or roles.
  #
  # config.vm.provision :chef_solo do |chef|
  #   chef.cookbooks_path = '../my-recipes/cookbooks'
  #   chef.roles_path = '../my-recipes/roles'
  #   chef.data_bags_path = '../my-recipes/data_bags'
  #   chef.add_recipe 'mysql'
  #   chef.add_role 'web'
  #
  #   # You may also specify custom JSON attributes:
  #   chef.json = { :mysql_password => 'foo' }
  # end

  # Enable provisioning with chef server, specifying the chef server URL,
  # and the path to the validation key (relative to this Vagrantfile).
  #
  # The Opscode Platform uses HTTPS. Substitute your organization for
  # ORGNAME in the URL and validation key.
  #
  # If you have your own Chef Server, use the appropriate URL, which may be
  # HTTP instead of HTTPS depending on your configuration. Also change the
  # validation key to validation.pem.
  #
  # config.vm.provision :chef_client do |chef|
  #   chef.chef_server_url = 'https://api.opscode.com/organizations/ORGNAME'
  #   chef.validation_key_path = 'ORGNAME-validator.pem'
  # end
  #
  # If you're using the Opscode platform, your validator client is
  # ORGNAME-validator, replacing ORGNAME with your organization name.
  #
  # If you have your own Chef Server, the default validation client name is
  # chef-validator, unless you changed the configuration.
  #
  #   chef.validation_client_name = 'ORGNAME-validator'
end
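
A possible workaround for the single-VM provisioning issue noted in the comment above, not part of this commit: define each machine in its own block and attach the Ansible provisioner to each definition, so every machine Vagrant boots also gets provisioned. The sketch assumes the Vagrant 1.0-era API this Vagrantfile already uses (Vagrant::Config.run, config.vm.define, config.vm.provision :ansible) and reuses the playbook and hosts options from the commented block; the per-machine loop and the node variable name are illustrative only.

# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant::Config.run do |config|
  config.vm.box = 'precise64'

  # Define each machine in its own block and attach the provisioner to it,
  # so `vagrant up` runs the playbook against every machine it brings up.
  %w(chat1 chat2).each do |name|
    config.vm.define name do |node|
      node.vm.network :bridged, :bridge => 'en1: Wi-Fi (AirPort)'
      node.vm.provision :ansible do |ansible|
        ansible.playbook = 'zeromq.yml'
        ansible.hosts = 'chat_clients'
      end
    end
  end
end
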
zeromq.yml (Normal file, 29 lines added)
@@ -0,0 +1,29 @@
# This command will need to be run in order to copy your public key from your management computer to the server:
# scp ~/.ssh/id_rsa.pub username@111.222.333.444:~/.ssh/authorized_keys
---
- hosts: chat_clients
  user: vagrant
  sudo: yes # Is it possible to run only certain actions with sudo in Ansible? (See the note after this playbook.)
  tasks:
  - name: Install zeroMQ dependencies
    action: apt pkg=libtool,autoconf,automake,uuid-dev state=installed
  - name: Get a C++ compiler package # Is this the best way to go about doing this?
    action: apt pkg=build-essential state=installed
  - name: Create directory for zeroMQ build
    action: file path=/zeromq/ state=directory mode=0744
  - name: Get zeroMQ tarball
    action: get_url url=http://download.zeromq.org/zeromq-3.2.2.tar.gz dest=/zeromq/ mode=0744
  - name: Untar and uncompress (g-zip compression) the zeroMQ tarball
    action: command tar -xvf zeromq-3.2.2.tar.gz chdir=/zeromq/
  - name: Run zeroMQ configuration
    action: command ./configure chdir=/zeromq/zeromq-3.2.2/ # Do I have to change the directory for every command performed in this directory? How may I get the current working directory? (See the note after this playbook.)
  - name: Make zeroMQ
    action: command make chdir=/zeromq/zeromq-3.2.2/
  - name: Run a system-wide install of zeroMQ
    action: command make install chdir=/zeromq/zeromq-3.2.2/
  - name: Create necessary links
    action: command ldconfig chdir=/zeromq/zeromq-3.2.2/
  - name: Install PIP and python-dev
    action: apt pkg=python-pip,python-dev state=installed
  - name: Install the Python binding (driver) for zeroMQ
    action: pip name=pyzmq
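
On the sudo question in the play header above: Ansible also accepts sudo at the task level, so privilege escalation can be limited to the tasks that actually need it (worth verifying against the installed Ansible version). A minimal sketch, not part of this commit, reusing the hosts, user, and first apt task from the playbook above; all other tasks are omitted.

---
- hosts: chat_clients
  user: vagrant
  tasks:
  - name: Install zeroMQ dependencies
    action: apt pkg=libtool,autoconf,automake,uuid-dev state=installed
    sudo: yes # Only this task escalates; the play itself stays unprivileged.
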
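On the chdir question above: each command task starts in a fresh process, so the working directory never carries over from one task to the next; chdir= has to be repeated, or the related steps collapsed into a single shell task. The sketch below shows the collapsed form, not the author's approach, assuming the same /zeromq/zeromq-3.2.2/ build path used above; it would replace the separate configure, make, make install, and ldconfig tasks in the list.

  # chdir= is stripped out as a module argument before the rest of the line is passed to the shell.
  - name: Configure, build, install, and ldconfig zeroMQ in one task
    action: shell ./configure && make && make install && ldconfig chdir=/zeromq/zeromq-3.2.2/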