Removed aws and azure resources, unit and integration tests

Signed-off-by: Nikita Mathur <nikita.mathur@chef.io>
This commit is contained in:
Nikita Mathur 2022-03-10 16:14:12 +05:30
parent 7f4271e216
commit 41ba05f901
189 changed files with 0 additions and 19193 deletions

172
Rakefile
View file

@ -277,178 +277,6 @@ namespace :test do
sh("sh", "-c", sh_cmd)
end
# Absolute path to the repository root (the directory holding this Rakefile).
project_dir = File.dirname(__FILE__)
# Terraform-backed AWS integration tests. Two fixture accounts are exercised:
# "default" and "minimal"; each gets setup/dump_attrs/run/cleanup tasks.
namespace :aws do
%w{default minimal}.each do |account|
# Per-account fixture directory and the attributes file handed to InSpec.
integration_dir = File.join(project_dir, "test", "integration", "aws", account)
attribute_file = File.join(integration_dir, ".attribute.yml")
# Build the Terraform fixture stack inside an isolated workspace.
task :"setup:#{account}", :tf_workspace do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must either call the top-level test:aws:#{account} task, or set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Setup"
abort("You must set the environment variable AWS_REGION") unless ENV["AWS_REGION"]
puts "----> Checking for required AWS profile..."
# Fails fast when the expected AWS CLI profile is not configured locally.
sh("aws configure get aws_access_key_id --profile inspec-aws-test-#{account} > /dev/null")
sh("cd #{integration_dir}/build/ && terraform init -upgrade")
sh("cd #{integration_dir}/build/ && terraform workspace new #{tf_workspace}")
sh("cd #{integration_dir}/build/ && AWS_PROFILE=inspec-aws-test-#{account} terraform plan -out inspec-aws-#{account}.plan")
sh("cd #{integration_dir}/build/ && AWS_PROFILE=inspec-aws-test-#{account} terraform apply -auto-approve inspec-aws-#{account}.plan")
Rake::Task["test:aws:dump_attrs:#{account}"].execute
end
# Capture `terraform output` as a YAML-ish attributes file for InSpec.
task :"dump_attrs:#{account}" do
sh("cd #{integration_dir}/build/ && AWS_PROFILE=inspec-aws-test-#{account} terraform output > #{attribute_file}")
raw_output = File.read(attribute_file)
# Crude HCL-to-YAML conversion: rewrite "key = value" as "key : value".
yaml_output = raw_output.gsub(" = ", " : ")
File.open(attribute_file, "w") { |file| file.puts yaml_output }
end
# Execute the InSpec verification profile against the built fixtures.
task :"run:#{account}" do
puts "----> Run"
# Note: ${AWS_REGION} is expanded by the shell, not by Ruby interpolation.
sh("bundle exec inspec exec #{integration_dir}/verify -t aws://${AWS_REGION}/inspec-aws-test-#{account} --attrs #{attribute_file}")
end
# Destroy the fixture stack and remove the Terraform workspace.
task :"cleanup:#{account}", :tf_workspace do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must either call the top-level test:aws:#{account} task, or set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Cleanup"
# NOTE(review): `terraform destroy -force` was removed in Terraform 0.15+
# in favor of `-auto-approve` — confirm against the pinned Terraform version.
sh("cd #{integration_dir}/build/ && AWS_PROFILE=inspec-aws-test-#{account} terraform destroy -force")
sh("cd #{integration_dir}/build/ && terraform workspace select default")
sh("cd #{integration_dir}/build && terraform workspace delete #{tf_workspace}")
end
# Per-account entry point: setup, run, and always clean up afterwards.
task :"#{account}" do
tf_workspace = ENV["INSPEC_TERRAFORM_ENV"] || prompt("Please enter a workspace for your integration tests to run in: ")
begin
Rake::Task["test:aws:setup:#{account}"].execute({ tf_workspace: tf_workspace })
Rake::Task["test:aws:run:#{account}"].execute
rescue
abort("Integration testing has failed for the #{account} account")
ensure
# Cleanup runs even when setup/run raised, to avoid orphaned resources.
Rake::Task["test:aws:cleanup:#{account}"].execute({ tf_workspace: tf_workspace })
end
end
end
end
desc "Perform AWS Integration Tests"
task aws: %i{aws:default aws:minimal}
# Terraform-backed Azure integration tests: workspace setup, tfvars
# generation, plan/apply, attribute dump, InSpec run, and cleanup.
namespace :azure do
# Specify the directory for the integration tests
integration_dir = File.join(project_dir, "test", "integration", "azure")
tf_vars_file = File.join(integration_dir, "build", "terraform.tfvars")
attribute_file = File.join(integration_dir, ".attribute.yml")
# Create an isolated Terraform workspace and build the fixture stack.
task :setup, :tf_workspace do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must either call the top-level test:azure task, or set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Setup Terraform Workspace"
sh("cd #{integration_dir}/build/ && terraform init -upgrade")
sh("cd #{integration_dir}/build/ && terraform workspace new #{tf_workspace}")
Rake::Task["test:azure:vars"].execute
Rake::Task["test:azure:plan"].execute
Rake::Task["test:azure:apply"].execute
end
desc "Generate terraform.tfvars file"
task :vars do |t, args|
# Idempotent: an existing tfvars file is left untouched.
next if File.exist?(tf_vars_file)
puts "----> Generating Vars"
# Obtain Azure credentials from the Train transport's options.
connection = Train.create("azure").connection
creds = connection.options
# Determine the storage account name and the admin password
require "securerandom"
sa_name = ("a".."z").to_a.sample(15).join
admin_password = SecureRandom.alphanumeric 72
# Use the first 4 characters of the storage account to create a suffix
suffix = sa_name[0..3]
content = <<~VARS
subscription_id = "#{creds[:subscription_id]}"
client_id = "#{creds[:client_id]}"
client_secret = "#{creds[:client_secret]}"
tenant_id = "#{creds[:tenant_id]}"
storage_account_name = "#{sa_name}"
admin_password = "#{admin_password}"
suffix = "#{suffix}"
VARS
# Optional region override via AZURE_LOCATION.
content << "location = \"#{ENV["AZURE_LOCATION"]}\"\n" if ENV["AZURE_LOCATION"]
File.write(tf_vars_file, content)
end
desc "generate plan from state using terraform.tfvars file"
task :plan, [:tf_workspace] => [:vars] do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Generating Plan"
sh("cd #{integration_dir}/build/ && terraform plan -out inspec-azure.plan")
end
desc "apply terraform plan"
task :apply, [:tf_workspace] => [:plan] do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Applying Plan"
sh("cd #{integration_dir}/build/ && terraform workspace select #{tf_workspace}")
sh("cd #{integration_dir}/build/ && terraform apply inspec-azure.plan")
Rake::Task["test:azure:dump_attrs"].execute
end
# Capture `terraform output` as a YAML-ish attributes file for InSpec.
task :dump_attrs do
sh("cd #{integration_dir}/build/ && terraform output > #{attribute_file}")
raw_output = File.read(attribute_file)
# Crude HCL-to-YAML conversion: rewrite "key = value" as "key : value".
yaml_output = raw_output.gsub(" = ", " : ")
File.open(attribute_file, "w") { |file| file.puts yaml_output }
end
# Execute the InSpec verification profile against the built fixtures.
task :run do
puts "----> Run"
# NOTE(review): the Azure subscription id is hard-coded here — verify it
# should not come from the credentials/environment instead.
sh("bundle exec inspec exec #{integration_dir}/verify -t azure://1e0b427a-d58b-494e-ae4f-ee558463ebbf")
end
# Destroy the fixture stack, remove the workspace, and drop the tfvars file.
task :cleanup, :tf_workspace do |t, args|
tf_workspace = args[:tf_workspace] || ENV["INSPEC_TERRAFORM_ENV"]
abort("You must either call the top-level test:azure task, or set the INSPEC_TERRAFORM_ENV variable.") unless tf_workspace
puts "----> Cleanup"
# NOTE(review): `terraform destroy -force` was removed in Terraform 0.15+
# in favor of `-auto-approve` — confirm against the pinned Terraform version.
sh("cd #{integration_dir}/build/ && terraform destroy -force ")
sh("cd #{integration_dir}/build/ && terraform workspace select default")
sh("cd #{integration_dir}/build && terraform workspace delete #{tf_workspace}")
File.delete(tf_vars_file)
end
end
desc "Perform Azure Integration Tests"
# Top-level entry point: setup, run, and always clean up afterwards.
task :azure do
tf_workspace = ENV["INSPEC_TERRAFORM_ENV"] || prompt("Please enter a workspace for your integration tests to run in: ")
begin
Rake::Task["test:azure:setup"].execute({ tf_workspace: tf_workspace })
Rake::Task["test:azure:run"].execute
rescue
abort("Integration testing has failed")
ensure
# Cleanup runs even when setup/run raised, to avoid orphaned resources.
Rake::Task["test:azure:cleanup"].execute({ tf_workspace: tf_workspace })
end
end
end
# Print the current version of this gem or update it.

View file

@ -9,20 +9,6 @@
require "inspec/resource"
# Detect if we are running the stripped-down inspec-core
# This relies on AWS being stripped from the inspec-core gem
# The NO_AWS environment variable also forces core-only mode explicitly.
inspec_core_only = ENV["NO_AWS"] || !File.exist?(File.join(File.dirname(__FILE__), "..", "resource_support", "aws.rb"))
# Do not attempt to load cloud resources if we are in inspec-core mode
unless inspec_core_only
require "resource_support/aws"
require "resources/azure/azure_backend"
require "resources/azure/azure_generic_resource"
require "resources/azure/azure_resource_group"
require "resources/azure/azure_virtual_machine"
require "resources/azure/azure_virtual_machine_data_disk"
end
require "inspec/resources/aide_conf"
require "inspec/resources/apache"
require "inspec/resources/apache_conf"

View file

@ -1,76 +0,0 @@
# Main AWS loader file. The intent is for this to be
# loaded only if AWS resources are needed.
require "aws-sdk-core"
require "aws-sdk-cloudtrail"
require "aws-sdk-cloudwatch"
require "aws-sdk-cloudwatchlogs"
require "aws-sdk-costandusagereportservice"
require "aws-sdk-configservice"
require "aws-sdk-ec2"
require "aws-sdk-ecs"
require "aws-sdk-eks"
require "aws-sdk-elasticloadbalancing"
require "aws-sdk-iam"
require "aws-sdk-kms"
require "aws-sdk-rds"
require "aws-sdk-s3"
require "aws-sdk-sqs"
require "aws-sdk-sns"
require "resource_support/aws/aws_backend_factory_mixin"
require "resource_support/aws/aws_resource_mixin"
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
# Load all AWS resources
# TODO: loop over and load entire directory
# for f in ls lib/resources/aws/*; do t=$(echo $f | cut -c 5- | cut -f1 -d. ); echo "require '${t}'"; done
require "resources/aws/aws_billing_report"
require "resources/aws/aws_billing_reports"
require "resources/aws/aws_cloudtrail_trail"
require "resources/aws/aws_cloudtrail_trails"
require "resources/aws/aws_cloudwatch_alarm"
require "resources/aws/aws_cloudwatch_log_metric_filter"
require "resources/aws/aws_config_delivery_channel"
require "resources/aws/aws_config_recorder"
require "resources/aws/aws_ec2_instance"
require "resources/aws/aws_ebs_volume"
require "resources/aws/aws_ebs_volumes"
require "resources/aws/aws_flow_log"
require "resources/aws/aws_ec2_instances"
require "resources/aws/aws_ecs_cluster"
require "resources/aws/aws_eks_cluster"
require "resources/aws/aws_elb"
require "resources/aws/aws_elbs"
require "resources/aws/aws_iam_access_key"
require "resources/aws/aws_iam_access_keys"
require "resources/aws/aws_iam_group"
require "resources/aws/aws_iam_groups"
require "resources/aws/aws_iam_password_policy"
require "resources/aws/aws_iam_policies"
require "resources/aws/aws_iam_policy"
require "resources/aws/aws_iam_role"
require "resources/aws/aws_iam_root_user"
require "resources/aws/aws_iam_user"
require "resources/aws/aws_iam_users"
require "resources/aws/aws_kms_key"
require "resources/aws/aws_kms_keys"
require "resources/aws/aws_rds_instance"
require "resources/aws/aws_route_table"
require "resources/aws/aws_route_tables"
require "resources/aws/aws_s3_bucket"
require "resources/aws/aws_s3_bucket_object"
require "resources/aws/aws_s3_buckets"
require "resources/aws/aws_security_group"
require "resources/aws/aws_security_groups"
require "resources/aws/aws_sns_subscription"
require "resources/aws/aws_sns_topic"
require "resources/aws/aws_sns_topics"
require "resources/aws/aws_sqs_queue"
require "resources/aws/aws_subnet"
require "resources/aws/aws_subnets"
require "resources/aws/aws_vpc"
require "resources/aws/aws_vpcs"

View file

@ -1,12 +0,0 @@
# Common base for AWS API backends. A subclass assigns the AWS SDK client
# class it wraps via the class-level `aws_client_class` accessor, and
# instances obtain a live client through the captured Train transport.
class AwsBackendBase
  # The transport captured from the InSpec runner, or nil when the backend
  # was built without one (e.g. in unit tests).
  attr_reader :aws_transport

  class << self
    # The Aws::*::Client class this backend wraps; set by each subclass.
    attr_accessor :aws_client_class
  end

  # @param inspec [Object, nil] an InSpec runner; when present its backend
  #   is captured as the AWS transport.
  def initialize(inspec = nil)
    @aws_transport = (inspec.backend if inspec)
  end

  # Build (via the transport) an instance of the configured SDK client.
  def aws_service_client
    aws_transport.aws_client(self.class.aws_client_class)
  end
end

View file

@ -1,12 +0,0 @@
# Intended to be pulled in via `extend`, not `include`: the extending class
# gains a class-level registry of "the selected backend" plus a factory
# method to instantiate it. Used so resources can swap in mock backends.
module AwsBackendFactoryMixin
  # Instantiate the currently selected backend with the InSpec context.
  def create(inspec)
    @selected_backend.new(inspec)
  end

  # Register `klass` as the backend that #create will instantiate.
  def select(klass)
    @selected_backend = klass
  end

  # Readable alias used by backend classes at definition time.
  alias_method :set_default_backend, :select
end

View file

@ -1,24 +0,0 @@
require "resource_support/aws/aws_resource_mixin"
require "resource_support/aws/aws_backend_factory_mixin"
# Mixin for "plural" (collection-style) AWS resources: adds the shared
# AwsResourceMixin behavior plus a `table` reader for FilterTable data.
module AwsPluralResourceMixin
  include AwsResourceMixin

  # The row data backing the resource's FilterTable.
  attr_reader :table

  # When a resource class includes this mixin, hang a fresh BackendFactory
  # class off it so backends can be created and swapped (mainly for unit
  # testing) without the resource knowing which backend is selected.
  # TODO: DRY up. This code exists in both the Singular and Plural mixins.
  # We'd like to put it in AwsResourceMixin, but `included` only sees the
  # directly-including class - we can't see second-order includers.
  def self.included(base)
    base.const_set(
      "BackendFactory",
      Class.new(Object) { extend AwsBackendFactoryMixin }
    )
  end
end

View file

@ -1,69 +0,0 @@
# Shared behavior for all legacy AWS resources: parameter validation,
# instance-variable population from validated params, and translation of
# AWS SDK errors into resource failures.
module AwsResourceMixin
  def initialize(resource_params = {})
    Inspec.deprecate(:aws_resources_in_resource_pack,
      "Resource '#{@__resource_name__ ||= self.class.to_s}'")
    # Every validated parameter becomes an ivar of the same name.
    validate_params(resource_params).each do |param, value|
      instance_variable_set(:"@#{param}", value)
    end
    catch_aws_errors { fetch_from_api }
  rescue ArgumentError => e
    # Under a real InSpec runner, surface a ResourceFailed; under unit
    # test (no inspec available) let the ArgumentError escape unchanged.
    if respond_to?(:inspec) && inspec
      raise Inspec::Exceptions::ResourceFailed, e.message
    else
      raise
    end
  end

  # Default implementation of validate_params: accept everything as-is.
  # Resources override this to restrict/normalize their parameters.
  def validate_params(resource_params)
    resource_params
  end

  # Normalize and vet resource parameters. A bare scalar (e.g.
  # aws_thing('id-123')) is accepted when `allowed_scalar_name` is given and
  # the value matches `allowed_scalar_type`; it is folded into a one-entry
  # hash. Raises ArgumentError on a wrong-typed scalar or unknown key.
  # Note: expected keys are pulled out of the caller's hash via #delete.
  def check_resource_param_names(raw_params: {}, allowed_params: [], allowed_scalar_name: nil, allowed_scalar_type: nil)
    if allowed_scalar_name && !raw_params.is_a?(Hash)
      unless raw_params.is_a?(allowed_scalar_type)
        raise ArgumentError, "If you pass a single value to the resource, it must " \
          "be a #{allowed_scalar_type}, not an #{raw_params.class}."
      end
      raw_params = { allowed_scalar_name => raw_params }
    end
    # Collect every expected key that was actually provided.
    recognized_params = allowed_params.each_with_object({}) do |expected_param, seen|
      seen[expected_param] = raw_params.delete(expected_param) if raw_params.key?(expected_param)
    end
    # Anything left over is unwelcome.
    unless raw_params.empty?
      raise ArgumentError, "Unrecognized resource param '#{raw_params.keys.first}'. Expected parameters: #{allowed_params.join(", ")}"
    end
    recognized_params
  end

  # The InSpec runner when available, else nil. Under inspec-cli an
  # `inspec` method exists; under unit tests it does not, and nil is an
  # acceptable value to hand to the backend.
  # TODO: remove with https://github.com/chef/inspec-aws/issues/216
  def inspec_runner
    respond_to?(:inspec) ? inspec : nil
  end

  # Run the block, translating AWS SDK errors into resource failures.
  def catch_aws_errors
    yield
  rescue Aws::Errors::MissingCredentialsError
    # The AWS error here is unhelpful:
    # "unable to sign request without credentials set"
    Inspec::Log.error "It appears that you have not set your AWS credentials. You may set them using environment variables, or using the 'aws://region/aws_credentials_profile' target. See https://docs.chef.io/inspec/platforms/ for details."
    fail_resource("No AWS credentials available")
  rescue Aws::Errors::ServiceError => e
    fail_resource e.message
  end
end

View file

@ -1,27 +0,0 @@
require "resource_support/aws/aws_resource_mixin"
require "resource_support/aws/aws_backend_factory_mixin"
# Mixin for "singular" (single-item) AWS resources: adds the shared
# AwsResourceMixin behavior plus an existence predicate.
module AwsSingularResourceMixin
  include AwsResourceMixin

  # Whether the item was located in the AWS API; @exists is expected to be
  # set by the including resource's fetch_from_api.
  def exists?
    @exists
  end

  # When a resource class includes this mixin, hang a fresh BackendFactory
  # class off it so backends can be created and swapped (mainly for unit
  # testing) without the resource knowing which backend is selected.
  # TODO: DRY up. This code exists in both the Singular and Plural mixins.
  # We'd like to put it in AwsResourceMixin, but `included` only sees the
  # directly-including class - we can't see second-order includers.
  def self.included(base)
    base.const_set(
      "BackendFactory",
      Class.new(Object) { extend AwsBackendFactoryMixin }
    )
  end
end

View file

@ -1,105 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-costandusagereportservice"
# InSpec singular resource describing one AWS Cost & Usage (billing) report
# definition, looked up by report name via the Cost and Usage Report API.
class AwsBillingReport < Inspec.resource(1)
name "aws_billing_report"
supports platform: "aws"
desc "Verifies settings for AWS Cost and Billing Reports."
example <<~EXAMPLE
describe aws_billing_report('inspec1') do
its('report_name') { should cmp 'inspec1' }
its('time_unit') { should cmp 'hourly' }
end
describe aws_billing_report(report: 'inspec1') do
it { should exist }
end
EXAMPLE
include AwsSingularResourceMixin
# Populated by fetch_from_api when the report exists; time_unit, format and
# compression are stored lower-cased.
attr_reader :report_name, :time_unit, :format, :compression, :s3_bucket,
:s3_prefix, :s3_region
# Identifier shown in test output.
def to_s
"AWS Billing Report #{report_name}"
end
# Predicates deliberately return nil (not false) when the report is absent.
def hourly?
exists? ? time_unit.eql?("hourly") : nil
end
def daily?
exists? ? time_unit.eql?("daily") : nil
end
def zip?
exists? ? compression.eql?("zip") : nil
end
def gzip?
exists? ? compression.eql?("gzip") : nil
end
private
# Accepts a scalar report name or { report_name: ... }; it is mandatory.
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: [:report_name],
allowed_scalar_name: :report_name,
allowed_scalar_type: String
)
if validated_params.empty?
raise ArgumentError, "You must provide the parameter 'report_name' to aws_billing_report."
end
validated_params
end
# Locate the report definition and unpack it into ivars (lower-casing the
# time unit, format and compression fields).
def fetch_from_api
report = find_report(report_name)
@exists = !report.nil?
if exists?
@time_unit = report.time_unit.downcase
@format = report.format.downcase
@compression = report.compression.downcase
@s3_bucket = report.s3_bucket
@s3_prefix = report.s3_prefix
@s3_region = report.s3_region
end
end
# Page through describe_report_definitions until the named report is found
# or the API signals no further pages; returns the definition or nil.
def find_report(report_name)
pagination_opts = {}
found_report_def = nil
while found_report_def.nil?
api_result = backend.describe_report_definitions(pagination_opts)
next_token = api_result.next_token
found_report_def = api_result.report_definitions.find { |report_def| report_def.report_name == report_name }
pagination_opts = { next_token: next_token }
next if found_report_def.nil? && next_token # Loop again: didn't find it, but there are more results
break if found_report_def.nil? && next_token.nil? # Give up: didn't find it, no more results
end
found_report_def
end
# Memoized backend instance from the per-class factory.
def backend
@backend ||= BackendFactory.create(inspec_runner)
end
class Backend
# Real AWS implementation; registered as the default backend at load time.
class AwsClientApi < AwsBackendBase
AwsBillingReport::BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CostandUsageReportService::Client
def describe_report_definitions(query = {})
aws_service_client.describe_report_definitions(query)
end
end
end
end

View file

@ -1,74 +0,0 @@
require "inspec/utils/filter"
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-costandusagereportservice"
# InSpec plural resource over all AWS Cost & Usage (billing) report
# definitions, exposed through a FilterTable.
class AwsBillingReports < Inspec.resource(1)
name "aws_billing_reports"
supports platform: "aws"
desc "Verifies settings for AWS Cost and Billing Reports."
example <<~EXAMPLE
describe aws_billing_reports do
its('report_names') { should include 'inspec1' }
its('s3_buckets') { should include 'inspec1-s3-bucket' }
end
describe aws_billing_reports.where { report_name =~ /inspec.*/ } do
its ('report_names') { should include ['inspec1'] }
its ('time_units') { should include ['DAILY'] }
its ('s3_buckets') { should include ['inspec1-s3-bucket'] }
end
EXAMPLE
include AwsPluralResourceMixin
# FilterTable wiring: columns map onto keys of the row hashes stored in
# @table by fetch_from_api.
filtertable = FilterTable.create
filtertable.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
.register_column(:report_names, field: :report_name)
.register_column(:time_units, field: :time_unit, style: :simple)
.register_column(:formats, field: :format, style: :simple)
.register_column(:compressions, field: :compression, style: :simple)
.register_column(:s3_buckets, field: :s3_bucket, style: :simple)
.register_column(:s3_prefixes, field: :s3_prefix, style: :simple)
.register_column(:s3_regions, field: :s3_region, style: :simple)
filtertable.install_filter_methods_on_resource(self, :table)
# This resource takes no parameters; anything passed is an error.
def validate_params(resource_params)
unless resource_params.empty?
raise ArgumentError, "aws_billing_reports does not accept resource parameters."
end
resource_params
end
# Identifier shown in test output.
def to_s
"AWS Billing Reports"
end
# Page through describe_report_definitions, accumulating one row hash per
# report; time_unit and compression are lower-cased in place.
def fetch_from_api
@table = []
pagination_opts = {}
backend = BackendFactory.create(inspec_runner)
loop do
api_result = backend.describe_report_definitions(pagination_opts)
api_result.report_definitions.each do |raw_report|
report = raw_report.to_h
%i{time_unit compression}.each { |field| report[field].downcase! }
@table << report
end
pagination_opts = { next_token: api_result.next_token }
break unless api_result.next_token
end
end
class Backend
# Real AWS implementation; registered as the default backend at load time.
class AwsClientApi < AwsBackendBase
AwsBillingReports::BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CostandUsageReportService::Client
def describe_report_definitions(options = {})
aws_service_client.describe_report_definitions(options)
end
end
end
end

View file

@ -1,97 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-cloudtrail"
# InSpec singular resource for one AWS CloudTrail trail, looked up by name.
class AwsCloudTrailTrail < Inspec.resource(1)
name "aws_cloudtrail_trail"
desc "Verifies settings for an individual AWS CloudTrail Trail"
example <<~EXAMPLE
describe aws_cloudtrail_trail('trail-name') do
it { should exist }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
# Populated by fetch_from_api when the trail exists.
attr_reader :cloud_watch_logs_log_group_arn, :cloud_watch_logs_role_arn, :home_region,
:kms_key_id, :s3_bucket_name, :trail_arn
# Identifier shown in test output.
def to_s
"CloudTrail #{@trail_name}"
end
def multi_region_trail?
@is_multi_region_trail
end
def log_file_validation_enabled?
@log_file_validation_enabled
end
# A trail is considered encrypted when a KMS key is associated with it.
def encrypted?
!kms_key_id.nil?
end
# Whole days since the last CloudWatch Logs delivery for this trail, or
# nil when no delivery timestamp is reported (or the trail is not found).
def delivered_logs_days_ago
query = { name: @trail_name }
catch_aws_errors do
resp = BackendFactory.create(inspec_runner).get_trail_status(query).to_h
((Time.now - resp[:latest_cloud_watch_logs_delivery_time]) / (24 * 60 * 60)).to_i unless resp[:latest_cloud_watch_logs_delivery_time].nil?
rescue Aws::CloudTrail::Errors::TrailNotFoundException
nil
end
end
private
# Accepts a scalar trail name or { trail_name: ... }; it is mandatory.
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: [:trail_name],
allowed_scalar_name: :trail_name,
allowed_scalar_type: String
)
if validated_params.empty?
raise ArgumentError, "You must provide the parameter 'trail_name' to aws_cloudtrail_trail."
end
validated_params
end
# Describe the named trail and unpack the first match into ivars.
# If no trail matches, trail_list[0] is nil and nil.to_h yields {}, so
# @exists ends up false and the remaining ivars stay nil.
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
query = { trail_name_list: [@trail_name] }
resp = backend.describe_trails(query)
@trail = resp.trail_list[0].to_h
@exists = !@trail.empty?
@s3_bucket_name = @trail[:s3_bucket_name]
@is_multi_region_trail = @trail[:is_multi_region_trail]
@trail_arn = @trail[:trail_arn]
@log_file_validation_enabled = @trail[:log_file_validation_enabled]
@cloud_watch_logs_role_arn = @trail[:cloud_watch_logs_role_arn]
@cloud_watch_logs_log_group_arn = @trail[:cloud_watch_logs_log_group_arn]
@kms_key_id = @trail[:kms_key_id]
@home_region = @trail[:home_region]
end
class Backend
# Real AWS implementation; registered as the default backend at load time.
class AwsClientApi < AwsBackendBase
AwsCloudTrailTrail::BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CloudTrail::Client
def describe_trails(query)
aws_service_client.describe_trails(query)
end
def get_trail_status(query)
aws_service_client.get_trail_status(query)
end
end
end
end

View file

@ -1,51 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-cloudtrail"
# InSpec plural resource over all CloudTrail trails in the region.
class AwsCloudTrailTrails < Inspec.resource(1)
name "aws_cloudtrail_trails"
desc "Verifies settings for AWS CloudTrail Trails in bulk"
example <<~EXAMPLE
describe aws_cloudtrail_trails do
it { should exist }
end
EXAMPLE
supports platform: "aws"
include AwsPluralResourceMixin
# This resource takes no parameters; anything passed is an error.
def validate_params(resource_params)
unless resource_params.empty?
raise ArgumentError, "aws_cloudtrail_trails does not accept resource parameters."
end
resource_params
end
# Underlying FilterTable implementation.
filter = FilterTable.create
filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
filter.register_column(:trail_arns, field: :trail_arn)
filter.register_column(:names, field: :name)
filter.install_filter_methods_on_resource(self, :table)
# Identifier shown in test output.
def to_s
"CloudTrail Trails"
end
# Fetch all trails (no filtering) and store the raw list as table rows.
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
@table = backend.describe_trails({}).to_h[:trail_list]
end
class Backend
# Real AWS implementation; registered as the default backend at load time.
class AwsClientApi < AwsBackendBase
AwsCloudTrailTrails::BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CloudTrail::Client
def describe_trails(query)
aws_service_client.describe_trails(query)
end
end
end
end

View file

@ -1,67 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-cloudwatch"
# InSpec singular resource for one CloudWatch alarm, located by the
# metric name + namespace pair it watches.
# NOTE(review): the heredoc below is passed to `desc` but contains example
# code — verify whether it was meant for `example` instead.
class AwsCloudwatchAlarm < Inspec.resource(1)
name "aws_cloudwatch_alarm"
desc <<~EXAMPLE
# Look for a specific alarm
aws_cloudwatch_alarm(
metric_name: 'my-metric-name',
metric_namespace: 'my-metric-namespace',
) do
it { should exist }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
# Populated by fetch_from_api when exactly one alarm matches.
attr_reader :alarm_actions, :alarm_name, :metric_name, :metric_namespace
private
# Both metric_name and metric_namespace are required, and nothing else.
def validate_params(raw_params)
recognized_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: %i{metric_name metric_namespace}
)
validated_params = {}
# Currently you must specify exactly metric_name and metric_namespace
%i{metric_name metric_namespace}.each do |param|
raise ArgumentError, "Missing resource param #{param}" unless recognized_params.key?(param)
validated_params[param] = recognized_params.delete(param)
end
validated_params
end
# Query alarms for the metric. Zero matches -> not exists; more than one
# match raises, since this is a singular resource.
def fetch_from_api
aws_alarms = BackendFactory.create(inspec_runner).describe_alarms_for_metric(
metric_name: @metric_name,
namespace: @metric_namespace
)
if aws_alarms.metric_alarms.empty?
@exists = false
elsif aws_alarms.metric_alarms.count > 1
alarms = aws_alarms.metric_alarms.map(&:alarm_name)
raise "More than one Cloudwatch Alarm was matched. Try using " \
"more specific resource parameters. Alarms matched: #{alarms.join(", ")}"
else
@alarm_actions = aws_alarms.metric_alarms.first.alarm_actions
@alarm_name = aws_alarms.metric_alarms.first.alarm_name
@exists = true
end
end
class Backend
# Real AWS implementation; registered as the default backend at load time.
class AwsClientApi < AwsBackendBase
AwsCloudwatchAlarm::BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CloudWatch::Client
def describe_alarms_for_metric(query)
aws_service_client.describe_alarms_for_metric(query)
end
end
end
end

View file

@ -1,105 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-cloudwatchlogs"
# InSpec singular resource for one CloudWatch Logs metric filter, located
# by filter name, log group name, and/or filter pattern.
class AwsCloudwatchLogMetricFilter < Inspec.resource(1)
name "aws_cloudwatch_log_metric_filter"
desc "Verifies individual Cloudwatch Log Metric Filters"
example <<~EXAMPLE
# Look for a LMF by its filter name and log group name. This combination
# will always either find at most one LMF - no duplicates.
describe aws_cloudwatch_log_metric_filter(
filter_name: 'my-filter',
log_group_name: 'my-log-group'
) do
it { should exist }
end
# Search for an LMF by pattern and log group.
# This could result in an error if the results are not unique.
describe aws_cloudwatch_log_metric_filter(
log_group_name: 'my-log-group',
pattern: 'my-filter'
) do
it { should exist }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
# Populated by fetch_from_api when exactly one filter matches.
attr_reader :filter_name, :log_group_name, :metric_name, :metric_namespace, :pattern
private
# At least one of filter_name, log_group_name, or pattern is required.
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: %i{filter_name log_group_name pattern}
)
if validated_params.empty?
raise ArgumentError, "You must provide either filter_name, log_group, or pattern to aws_cloudwatch_log_metric_filter."
end
validated_params
end
# Query remotely by name/log group, then filter locally by pattern (the
# API offers no remote pattern filter). Exactly one result is required.
def fetch_from_api
# get a backend
backend = BackendFactory.create(inspec_runner)
# Perform query with remote filtering
aws_search_criteria = {}
aws_search_criteria[:filter_name] = filter_name if filter_name
aws_search_criteria[:log_group_name] = log_group_name if log_group_name
begin
aws_results = backend.describe_metric_filters(aws_search_criteria)
rescue Aws::CloudWatchLogs::Errors::ResourceNotFoundException
# A nonexistent log group surfaces as this error, not an empty list.
@exists = false
return
end
# Then perform local filtering
if pattern
aws_results.select! { |lmf| lmf.filter_pattern == pattern }
end
# Check result count. We're a singular resource and can tolerate
# 0 or 1 results, not multiple.
if aws_results.count > 1
raise "More than one result was returned, but aws_cloudwatch_log_metric_filter "\
"can only handle a single AWS resource. Consider passing more resource "\
"parameters to narrow down the search."
elsif aws_results.empty?
@exists = false
else
@exists = true
# Unpack the funny-shaped object we got back from AWS into our instance vars
lmf = aws_results.first
@filter_name = lmf.filter_name
@log_group_name = lmf.log_group_name
@pattern = lmf.filter_pattern # Note inconsistent name
# AWS SDK returns an array of metric transformations
# but only allows one (mandatory) entry, let's flatten that
@metric_name = lmf.metric_transformations.first.metric_name
@metric_namespace = lmf.metric_transformations.first.metric_namespace
end
end
class Backend
# Uses the cloudwatch API to really talk to AWS
class AwsClientApi < AwsBackendBase
BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::CloudWatchLogs::Client
# Translate our search criteria into the API's query shape and return
# just the metric_filters list.
def describe_metric_filters(criteria)
query = {}
query[:filter_name_prefix] = criteria[:filter_name] if criteria[:filter_name]
query[:log_group_name] = criteria[:log_group_name] if criteria[:log_group_name]
# 'pattern' is not available as a remote filter,
# we filter it after the fact locally
# TODO: handle pagination? Max 50/page. Maybe you want a plural resource?
aws_response = aws_service_client.describe_metric_filters(query)
aws_response.metric_filters
end
end
end
end

View file

@ -1,74 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-configservice"
# InSpec singular resource for the AWS Config delivery channel in the
# current region (optionally selected by channel name).
#
# Fix: the `example` snippet previously read `should eq arn:aws:sns:...'`
# with no opening quote, making the documented example a syntax error.
class AwsConfigDeliveryChannel < Inspec.resource(1)
  name "aws_config_delivery_channel"
  desc "Verifies settings for AWS Config Delivery Channel"
  example <<~EXAMPLE
    describe aws_config_delivery_channel do
      it { should exist }
      its('s3_bucket_name') { should eq 'my_bucket' }
      its('sns_topic_arn') { should eq 'arn:aws:sns:us-east-1:721741954427:sns_topic' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  # Frequency tokens reported by the AWS Config API, mapped to hours.
  DELIVERY_FREQUENCIES_IN_HOURS = {
    "One_Hour" => 1,
    "Three_Hours" => 3,
    "Six_Hours" => 6,
    "Twelve_Hours" => 12,
    "TwentyFour_Hours" => 24,
  }.freeze

  # Populated by fetch_from_api when the channel exists.
  attr_reader :channel_name, :s3_bucket_name, :s3_key_prefix, :sns_topic_arn,
    :delivery_frequency_in_hours

  # Identifier shown in test output.
  def to_s
    "Config_Delivery_Channel: #{@channel_name}"
  end

  private

  # Accepts an optional channel name, as a scalar String or
  # { channel_name: ... }. No parameter means "the region's channel".
  def validate_params(raw_params)
    check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:channel_name],
      allowed_scalar_name: :channel_name,
      allowed_scalar_type: String
    )
  end

  # Describe the delivery channel (optionally by name) and unpack the
  # first match; translates the API's frequency token into hours.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    query = @channel_name ? { delivery_channel_names: [@channel_name] } : {}
    response = backend.describe_delivery_channels(query)
    @exists = !response.delivery_channels.empty?
    return unless exists?

    channel = response.delivery_channels.first.to_h
    @channel_name = channel[:name]
    @s3_bucket_name = channel[:s3_bucket_name]
    @s3_key_prefix = channel[:s3_key_prefix]
    @sns_topic_arn = channel[:sns_topic_arn]
    # Unknown/absent frequency tokens yield nil, as before.
    frequency_token = channel.dig(:config_snapshot_delivery_properties, :delivery_frequency)
    @delivery_frequency_in_hours = DELIVERY_FREQUENCIES_IN_HOURS[frequency_token]
  rescue Aws::ConfigService::Errors::NoSuchDeliveryChannelException
    # Asking for a named channel that does not exist raises rather than
    # returning an empty list; treat it as "does not exist".
    @exists = false
  end

  class Backend
    # Real AWS implementation; registered as the default backend at load time.
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::ConfigService::Client

      def describe_delivery_channels(query = {})
        aws_service_client.describe_delivery_channels(query)
      end
    end
  end
end

View file

@ -1,99 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-configservice"
# InSpec singular resource for the AWS Config configuration recorder.
# AWS allows at most one recorder per region, so this behaves as a
# per-region singleton.
class AwsConfigurationRecorder < Inspec.resource(1)
  name "aws_config_recorder"
  desc "Verifies settings for AWS Configuration Recorder"
  example <<~EXAMPLE
    describe aws_config_recorder('My_Recorder') do
      it { should exist }
      it { should be_recording }
      it { should be_all_supported }
      it { should have_include_global_resource_types }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :role_arn, :resource_types, :recorder_name

  def to_s
    "Configuration_Recorder: #{@recorder_name}"
  end

  # Matcher backing `be_recording_all_resource_types`: true when the
  # recording group has all_supported set.
  def recording_all_resource_types?
    @recording_all_resource_types
  end

  # Matcher backing `be_recording_all_global_types`.
  def recording_all_global_types?
    @recording_all_global_types
  end

  # Live recorder status as a Hash; re-fetched from the API on every
  # call (not memoized despite the @status assignment). Returns an
  # empty hash when the recorder does not exist.
  def status
    return {} unless @exists

    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      response = backend.describe_configuration_recorder_status(configuration_recorder_names: [@recorder_name])
      @status = response.configuration_recorders_status.first.to_h
    end
  end

  # Matcher backing `be_recording`; nil (not false) when absent.
  def recording?
    return unless @exists

    status[:recording]
  end

  private

  # Accepts a bare String (recorder name) or a Hash with :recorder_name.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:recorder_name],
      allowed_scalar_name: :recorder_name,
      allowed_scalar_type: String
    )
    validated_params
  end

  # Mixin hook: fetch the recorder (or all recorders when unnamed) and
  # cache its attributes.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    query = @recorder_name ? { configuration_recorder_names: [@recorder_name] } : {}
    response = backend.describe_configuration_recorders(query)
    @exists = !response.configuration_recorders.empty?
    return unless exists?

    # Per-region singleton: more than one result indicates an API anomaly.
    if response.configuration_recorders.count > 1
      raise ArgumentError, "Internal error: unexpectedly received multiple AWS Config Recorder objects from API; expected to be singleton per-region. Please file a bug report at https://github.com/chef/inspec/issues ."
    end

    recorder = response.configuration_recorders.first.to_h
    @recorder_name = recorder[:name]
    @role_arn = recorder[:role_arn]
    @recording_all_resource_types = recorder[:recording_group][:all_supported]
    @recording_all_global_types = recorder[:recording_group][:include_global_resource_types]
    @resource_types = recorder[:recording_group][:resource_types]
  rescue Aws::ConfigService::Errors::NoSuchConfigurationRecorderException
    # Requesting a named recorder that does not exist raises.
    @exists = false
    nil
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::ConfigService::Client

      def describe_configuration_recorders(query)
        aws_service_client.describe_configuration_recorders(query)
      end

      def describe_configuration_recorder_status(query)
        aws_service_client.describe_configuration_recorder_status(query)
      end
    end
  end
end

View file

@ -1,127 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one EBS volume, looked up either by
# volume id (bare String) or by its Name tag (Hash with :name).
class AwsEbsVolume < Inspec.resource(1)
  name "aws_ebs_volume"
  desc "Verifies settings for an EBS volume"
  example <<~EXAMPLE
    describe aws_ebs_volume('vol-123456') do
      it { should be_encrypted }
      its('size') { should cmp 8 }
    end
    describe aws_ebs_volume(name: 'my-volume') do
      its('encrypted') { should eq true }
      its('iops') { should cmp 100 }
    end
  EXAMPLE
  supports platform: "aws"

  # TODO: rewrite to avoid direct injection, match other resources, use AwsSingularResourceMixin
  # opts: volume id String, or Hash with :name (matched against tag:Name).
  # conn: optional injected connection exposing ec2_client / ec2_resource
  # (used by unit tests).
  def initialize(opts, conn = nil)
    @opts = opts
    @display_name = opts.is_a?(Hash) ? @opts[:name] : opts
    @ec2_client = conn ? conn.ec2_client : inspec_runner.backend.aws_client(Aws::EC2::Client)
    @ec2_resource = conn ? conn.ec2_resource : inspec_runner.backend.aws_resource(Aws::EC2::Resource, {})
  end

  # TODO: DRY up, see https://github.com/chef/inspec/issues/2633
  # Copied from resource_support/aws/aws_resource_mixin.rb
  # Converts SDK credential/service errors into InSpec resource failures.
  def catch_aws_errors
    yield
  rescue Aws::Errors::MissingCredentialsError
    # The AWS error here is unhelpful:
    # "unable to sign request without credentials set"
    Inspec::Log.error "It appears that you have not set your AWS credentials. You may set them using environment variables, or using the 'aws://region/aws_credentials_profile' target. See https://docs.chef.io/inspec/platforms/ for details."
    fail_resource("No AWS credentials available")
  rescue Aws::Errors::ServiceError => e
    fail_resource(e.message)
  end

  # TODO: DRY up, see https://github.com/chef/inspec/issues/2633
  # Copied from resource_support/aws/aws_singular_resource_mixin.rb
  def inspec_runner
    # When running under inspec-cli, we have an 'inspec' method that
    # returns the runner. When running under unit tests, we don't
    # have that, but we still have to call this to pass something
    # (nil is OK) to the backend.
    # TODO: remove with https://github.com/chef/inspec-aws/issues/216
    # TODO: remove after rewrite to include AwsSingularResource
    inspec if respond_to?(:inspec)
  end

  # Resolves and memoizes the volume id: by tag:Name filter when opts is
  # a Hash, otherwise the opts String is the id itself.
  def id
    return @volume_id if defined?(@volume_id)

    catch_aws_errors do
      if @opts.is_a?(Hash)
        first = @ec2_resource.volumes(
          {
            filters: [{
              name: "tag:Name",
              values: [@opts[:name]],
            }],
          }
        ).first
        # catch case where the volume is not known
        @volume_id = first.id unless first.nil?
      else
        @volume_id = @opts
      end
    end
  end
  alias volume_id id

  def exists?
    !volume.nil?
  end

  def encrypted?
    volume.encrypted
  end

  # attributes that we want to expose
  %w{
    availability_zone encrypted iops kms_key_id size snapshot_id state volume_type
  }.each do |attribute|
    define_method attribute do
      catch_aws_errors do
        volume.send(attribute) if volume
      end
    end
  end

  # Don't document this - it's a bit hard to use. Our current doctrine
  # is to use dumb things, like arrays of strings - use security_group_ids instead.
  # NOTE(review): these two methods look copy-pasted from aws_ec2_instance;
  # Aws::EC2::Volume does not obviously respond to #security_groups, so
  # they may raise NoMethodError — confirm against the SDK before relying
  # on them.
  def security_groups
    catch_aws_errors do
      @security_groups ||= volume.security_groups.map do |sg|
        { id: sg.group_id, name: sg.group_name }
      end
    end
  end

  def security_group_ids
    catch_aws_errors do
      @security_group_ids ||= volume.security_groups.map(&:group_id)
    end
  end

  # Volume tags as an array of { key:, value: } hashes; memoized.
  def tags
    catch_aws_errors do
      @tags ||= volume.tags.map { |tag| { key: tag.key, value: tag.value } }
    end
  end

  def to_s
    "EBS Volume #{@display_name}"
  end

  private

  # Memoized SDK Volume object for this resource's id.
  def volume
    catch_aws_errors { @volume ||= @ec2_resource.volume(id) }
  end
end

View file

@ -1,69 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource listing all EBS volumes in the region.
class AwsEbsVolumes < Inspec.resource(1)
  name "aws_ebs_volumes"
  desc "Verifies settings for AWS EBS Volumes in bulk"
  example <<~EXAMPLE
    describe aws_ebs_volumes do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # This resource takes no parameters; anything passed in is an error.
  def validate_params(resource_params)
    raise ArgumentError, "aws_ebs_volumes does not accept resource parameters." unless resource_params.empty?

    resource_params
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:volume_ids, field: :volume_id)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "EBS Volumes"
  end

  # Mixin hook: page through DescribeVolumes, accumulating rows in @table.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    page_opts = {}
    loop do
      page = backend.describe_volumes(page_opts)
      @table.concat(unpack_describe_volumes_response(page.volumes))
      break if page.next_token.nil?

      page_opts = { next_token: page.next_token }
    end
  end

  # One row per attachment, keyed by :volume_id.
  # NOTE(review): rows are derived from each volume's attachments, so
  # unattached volumes never appear in the table — confirm intended.
  def unpack_describe_volumes_response(volumes)
    volumes.flat_map do |vol|
      vol.attachments.map { |attachment| { volume_id: attachment.volume_id } }
    end
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      # Thin pass-through to the AWS SDK.
      def describe_volumes(query)
        aws_service_client.describe_volumes(query)
      end
    end
  end
end

View file

@ -1,162 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one EC2 instance, looked up either by
# instance id (bare String) or by its Name tag (Hash with :name).
class AwsEc2Instance < Inspec.resource(1)
  name "aws_ec2_instance"
  desc "Verifies settings for an EC2 instance"
  example <<~EXAMPLE
    describe aws_ec2_instance('i-123456') do
      it { should be_running }
      it { should have_roles }
    end
    describe aws_ec2_instance(name: 'my-instance') do
      it { should be_running }
      it { should have_roles }
    end
  EXAMPLE
  supports platform: "aws"

  # TODO: rewrite to avoid direct injection, match other resources, use AwsSingularResourceMixin
  # opts: instance id String, or Hash with :name (matched against tag:Name).
  # conn: optional injected connection exposing ec2_client / ec2_resource /
  # iam_resource (used by unit tests).
  def initialize(opts, conn = nil)
    @opts = opts
    @opts.is_a?(Hash) ? @display_name = @opts[:name] : @display_name = opts
    @ec2_client = conn ? conn.ec2_client : inspec_runner.backend.aws_client(Aws::EC2::Client)
    @ec2_resource = conn ? conn.ec2_resource : inspec_runner.backend.aws_resource(Aws::EC2::Resource, {})
    @iam_resource = conn ? conn.iam_resource : inspec_runner.backend.aws_resource(Aws::IAM::Resource, {})
  end

  # TODO: DRY up, see https://github.com/chef/inspec/issues/2633
  # Copied from resource_support/aws/aws_resource_mixin.rb
  # Converts SDK credential/service errors into InSpec resource failures.
  def catch_aws_errors
    yield
  rescue Aws::Errors::MissingCredentialsError
    # The AWS error here is unhelpful:
    # "unable to sign request without credentials set"
    Inspec::Log.error "It appears that you have not set your AWS credentials. You may set them using environment variables, or using the 'aws://region/aws_credentials_profile' target. See https://docs.chef.io/inspec/platforms/ for details."
    fail_resource("No AWS credentials available")
  rescue Aws::Errors::ServiceError => e
    fail_resource e.message
  end

  # TODO: DRY up, see https://github.com/chef/inspec/issues/2633
  # Copied from resource_support/aws/aws_singular_resource_mixin.rb
  def inspec_runner
    # When running under inspec-cli, we have an 'inspec' method that
    # returns the runner. When running under unit tests, we don't
    # have that, but we still have to call this to pass something
    # (nil is OK) to the backend.
    # TODO: remove with https://github.com/chef/inspec-aws/issues/216
    # TODO: remove after rewrite to include AwsSingularResource
    inspec if respond_to?(:inspec)
  end

  # Resolves and memoizes the instance id: by tag:Name filter when opts
  # is a Hash, otherwise the opts String is the id itself.
  def id
    return @instance_id if defined?(@instance_id)

    catch_aws_errors do
      if @opts.is_a?(Hash)
        first = @ec2_resource.instances(
          {
            filters: [{
              name: "tag:Name",
              values: [@opts[:name]],
            }],
          }
        ).first
        # catch case where the instance is not known
        @instance_id = first.id unless first.nil?
      else
        @instance_id = @opts
      end
    end
  end
  alias instance_id id

  def exists?
    return false if instance.nil?

    instance.exists?
  end

  # returns the instance state
  def state
    catch_aws_errors do
      instance&.state&.name
    end
  end

  # helper methods for each state, e.g. running? / stopped? / terminated?
  %w{
    pending running shutting-down
    terminated stopping stopped unknown
  }.each do |state_name|
    define_method state_name.tr("-", "_") + "?" do
      state == state_name
    end
  end

  # attributes that we want to expose
  %w{
    public_ip_address private_ip_address key_name private_dns_name
    public_dns_name subnet_id architecture root_device_type
    root_device_name virtualization_type client_token launch_time
    instance_type image_id vpc_id
  }.each do |attribute|
    define_method attribute do
      catch_aws_errors do
        instance.send(attribute) if instance
      end
    end
  end

  # Don't document this - it's a bit hard to use. Our current doctrine
  # is to use dumb things, like arrays of strings - use security_group_ids instead.
  def security_groups
    catch_aws_errors do
      @security_groups ||= instance.security_groups.map do |sg|
        { id: sg.group_id, name: sg.group_name }
      end
    end
  end

  def security_group_ids
    catch_aws_errors do
      @security_group_ids ||= instance.security_groups.map(&:group_id)
    end
  end

  # Instance tags as an array of { key:, value: } hashes; memoized.
  def tags
    catch_aws_errors do
      @tags ||= instance.tags.map { |tag| { key: tag.key, value: tag.value } }
    end
  end

  def to_s
    "EC2 Instance #{@display_name}"
  end

  # Matcher backing `have_roles`: true when the instance's IAM instance
  # profile carries at least one role. The profile name is extracted
  # from the tail of its ARN.
  def has_roles?
    catch_aws_errors do
      instance_profile = instance.iam_instance_profile

      if instance_profile
        roles = @iam_resource.instance_profile(
          instance_profile.arn.gsub(%r{^.*\/}, "")
        ).roles
      else
        roles = nil
      end

      roles && !roles.empty?
    end
  end

  private

  # Memoized SDK Instance object for this resource's id.
  def instance
    catch_aws_errors { @instance ||= @ec2_resource.instance(id) }
  end
end

View file

@ -1,69 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource listing all EC2 instances in the region.
class AwsEc2Instances < Inspec.resource(1)
  name "aws_ec2_instances"
  desc "Verifies settings for AWS EC2 Instances in bulk"
  example <<~EXAMPLE
    describe aws_ec2_instances do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # This resource takes no parameters; anything passed in is an error.
  def validate_params(resource_params)
    raise ArgumentError, "aws_ec2_instances does not accept resource parameters." unless resource_params.empty?

    resource_params
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:instance_ids, field: :instance_id)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "EC2 Instances"
  end

  # Mixin hook: page through DescribeInstances, accumulating rows in @table.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    page_opts = {}
    loop do
      page = backend.describe_instances(page_opts)
      @table.concat(unpack_describe_instances_response(page.reservations))
      break if page.next_token.nil?

      page_opts = { next_token: page.next_token }
    end
  end

  # Reservations group instances by launch request; flatten them into
  # plain row hashes keyed by :instance_id.
  def unpack_describe_instances_response(reservations)
    reservations.flat_map do |reservation|
      reservation.instances.map { |inst| { instance_id: inst.instance_id } }
    end
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      # Thin pass-through to the AWS SDK.
      def describe_instances(query)
        aws_service_client.describe_instances(query)
      end
    end
  end
end

View file

@ -1,87 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ecs"
# InSpec singular resource for an ECS cluster; with no name given the
# account's default cluster is inspected.
class AwsEcsCluster < Inspec.resource(1)
  name "aws_ecs_cluster"
  desc "Verifies settings for an ECS cluster"
  example <<~EXAMPLE
    describe aws_ecs_cluster('default') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :cluster_arn, :cluster_name, :status,
    :registered_container_instances_count, :running_tasks_count,
    :pending_tasks_count, :active_services_count, :statistics

  def to_s
    "AWS ECS cluster #{cluster_name}"
  end

  private

  # Accepts a bare String or a Hash with :cluster_name.
  def validate_params(raw_params)
    check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:cluster_name],
      allowed_scalar_name: :cluster_name,
      allowed_scalar_type: String
    )
  end

  # Mixin hook: look the cluster up and cache its attributes. Cluster
  # names are unique, so the API yields either one match or none.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    # Use default cluster if no cluster name is specified
    request = cluster_name ? { clusters: [cluster_name] } : {}
    matches = backend.describe_clusters(request).clusters
    @exists = (matches.length == 1)
    @exists ? unpack_describe_clusters_response(matches.first) : populate_as_missing
  end

  # Copy each field of the SDK cluster struct into a same-named ivar.
  def unpack_describe_clusters_response(cluster_struct)
    %i{
      cluster_arn cluster_name status registered_container_instances_count
      running_tasks_count pending_tasks_count active_services_count statistics
    }.each do |field|
      instance_variable_set(:"@#{field}", cluster_struct.public_send(field))
    end
  end

  # Neutral defaults so attribute matchers do not hit nil on a miss.
  def populate_as_missing
    @cluster_arn, @cluster_name, @status = "", "", ""
    @registered_container_instances_count = @running_tasks_count = 0
    @pending_tasks_count = @active_services_count = 0
    @statistics = []
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::ECS::Client

      # Thin pass-through to the AWS SDK.
      def describe_clusters(query = {})
        aws_service_client.describe_clusters(query)
      end
    end
  end
end

View file

@ -1,105 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-eks"
# InSpec singular resource for one EKS cluster, looked up by name.
class AwsEksCluster < Inspec.resource(1)
  name "aws_eks_cluster"
  desc "Verifies settings for an EKS cluster"
  example <<~EXAMPLE
    describe aws_eks_cluster('default') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :version, :arn, :cluster_name, :certificate_authority, :name,
    :status, :endpoint, :subnets_count, :subnet_ids, :security_group_ids,
    :created_at, :role_arn, :vpc_id, :security_groups_count, :creating,
    :active, :failed, :deleting

  # Use aliases for matchers (be_active, be_failed, be_creating, be_deleting)
  alias active? active
  alias failed? failed
  alias creating? creating
  alias deleting? deleting

  def to_s
    "AWS EKS cluster #{cluster_name}"
  end

  private

  # Accepts a bare String or a Hash with :cluster_name; the name is required.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:cluster_name],
      allowed_scalar_name: :cluster_name,
      allowed_scalar_type: String
    )
    if validated_params.empty?
      raise ArgumentError, "You must provide a cluster_name to aws_eks_cluster."
    end

    validated_params
  end

  # Mixin hook: fetch the cluster and cache its attributes, including
  # booleans derived from the status string for the state matchers.
  def fetch_from_api # rubocop:disable Metrics/AbcSize
    backend = BackendFactory.create(inspec_runner)
    begin
      params = { name: cluster_name }
      resp = backend.describe_cluster(params)
    rescue Aws::EKS::Errors::ResourceNotFoundException
      @exists = false
      populate_as_missing
      return
    end
    @exists = true
    cluster = resp.to_h[:cluster]
    @version = cluster[:version]
    @name = cluster[:name]
    @arn = cluster[:arn]
    @certificate_authority = cluster[:certificate_authority][:data]
    @created_at = cluster[:created_at]
    @endpoint = cluster[:endpoint]
    @security_group_ids = cluster[:resources_vpc_config][:security_group_ids]
    @subnet_ids = cluster[:resources_vpc_config][:subnet_ids]
    @subnets_count = cluster[:resources_vpc_config][:subnet_ids].length
    @security_groups_count = cluster[:resources_vpc_config][:security_group_ids].length
    @vpc_id = cluster[:resources_vpc_config][:vpc_id]
    @role_arn = cluster[:role_arn]
    @status = cluster[:status]
    # One boolean per lifecycle state, backing the aliased matchers above.
    @active = cluster[:status] == "ACTIVE"
    @failed = cluster[:status] == "FAILED"
    @creating = cluster[:status] == "CREATING"
    @deleting = cluster[:status] == "DELETING"
  end

  # Defaults for a miss: lists empty, scalars nil; name keeps the
  # requested value so to_s stays meaningful.
  def populate_as_missing
    @version = nil
    @name = cluster_name # name is an alias for cluster_name, and it is retained on a miss
    @arn = nil
    @certificate_authority = nil
    @created_at = nil
    @endpoint = nil
    @security_group_ids = []
    @subnet_ids = []
    @subnets_count = nil
    @security_groups_count = nil
    @vpc_id = nil
    @role_arn = nil
    @status = nil
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EKS::Client

      def describe_cluster(query = {})
        aws_service_client.describe_cluster(query)
      end
    end
  end
end

View file

@ -1,85 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-elasticloadbalancing"
# InSpec singular resource for one classic Elastic Load Balancer,
# looked up by name.
class AwsElb < Inspec.resource(1)
  name "aws_elb"
  desc "Verifies settings for AWS Elastic Load Balancer"
  example <<~EXAMPLE
    describe aws_elb('myelb') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :availability_zones, :dns_name, :elb_name, :external_ports,
    :instance_ids, :internal_ports, :security_group_ids,
    :subnet_ids, :vpc_id

  def to_s
    "AWS ELB #{elb_name}"
  end

  private

  # Accepts a bare String or a Hash with :elb_name; the name is required.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:elb_name],
      allowed_scalar_name: :elb_name,
      allowed_scalar_type: String
    )
    if validated_params.empty?
      raise ArgumentError, "You must provide a elb_name to aws_elb."
    end

    validated_params
  end

  # Mixin hook: look up the named load balancer and cache its attributes.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    begin
      lbs = backend.describe_load_balancers(load_balancer_names: [elb_name]).load_balancer_descriptions
      # Load balancer names are unique, so there is at most one result.
      # Determine existence from the response rather than assuming it:
      # previously @exists was set true before inspecting the list, which
      # would crash on lbs.first (nil) if the API ever returned an empty
      # set without raising.
      @exists = !lbs.empty?
      if @exists
        unpack_describe_elbs_response(lbs.first)
      else
        populate_as_missing
      end
    rescue Aws::ElasticLoadBalancing::Errors::LoadBalancerNotFound
      # Unknown names raise rather than returning an empty list.
      @exists = false
      populate_as_missing
    end
  end

  # Copy the SDK LoadBalancerDescription struct into instance variables.
  def unpack_describe_elbs_response(lb_struct)
    @availability_zones = lb_struct.availability_zones
    @dns_name = lb_struct.dns_name
    @external_ports = lb_struct.listener_descriptions.map { |ld| ld.listener.load_balancer_port }
    @instance_ids = lb_struct.instances.map(&:instance_id)
    @internal_ports = lb_struct.listener_descriptions.map { |ld| ld.listener.instance_port }
    @elb_name = lb_struct.load_balancer_name
    @security_group_ids = lb_struct.security_groups
    @subnet_ids = lb_struct.subnets
    @vpc_id = lb_struct.vpc_id
  end

  # Empty defaults for list-valued attributes on a miss, so matchers like
  # `its('instance_ids') { should be_empty }` do not hit nil. Scalar
  # attributes (dns_name, elb_name, vpc_id) remain nil on a miss.
  def populate_as_missing
    @availability_zones = []
    @external_ports = []
    @instance_ids = []
    @internal_ports = []
    @security_group_ids = []
    @subnet_ids = []
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::ElasticLoadBalancing::Client

      def describe_load_balancers(query = {})
        aws_service_client.describe_load_balancers(query)
      end
    end
  end
end

View file

@ -1,84 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-elasticloadbalancing"
# InSpec plural resource over all classic ELBs in the region.
class AwsElbs < Inspec.resource(1)
  name "aws_elbs"
  desc "Verifies settings for AWS ELBs (classic Elastic Load Balancers) in bulk"
  example <<~EXAMPLE
    describe aws_elbs do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # This resource takes no parameters; anything passed in is an error.
  def validate_params(resource_params)
    unless resource_params.empty?
      raise ArgumentError, "aws_elbs does not accept resource parameters."
    end

    resource_params
  end

  # Underlying FilterTable implementation.
  # Modernized from the deprecated add_accessor/add/connect DSL to the
  # current register_* API for consistency with the other plural AWS
  # resources in this tree; `entries` and `where` are installed
  # automatically by install_filter_methods_on_resource.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |table| !table.entries.empty? }
  filter.register_custom_property(:count) { |table| table.params.count }
  filter.register_column(:availability_zones, field: :availability_zones, style: :simple)
    .register_column(:dns_names, field: :dns_name)
    .register_column(:external_ports, field: :external_ports, style: :simple)
    .register_column(:instance_ids, field: :instance_ids, style: :simple)
    .register_column(:internal_ports, field: :internal_ports, style: :simple)
    .register_column(:elb_names, field: :elb_name)
    .register_column(:security_group_ids, field: :security_group_ids, style: :simple)
    .register_column(:subnet_ids, field: :subnet_ids, style: :simple)
    .register_column(:vpc_ids, field: :vpc_id, style: :simple)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "AWS ELBs"
  end

  # Mixin hook: page through DescribeLoadBalancers (marker-based
  # pagination), accumulating rows in @table.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    pagination_opts = {}
    loop do
      api_result = backend.describe_load_balancers(pagination_opts)
      @table += unpack_describe_elbs_response(api_result.load_balancer_descriptions)
      break unless api_result.next_marker

      pagination_opts = { marker: api_result.next_marker }
    end
  end

  # Flatten each SDK LoadBalancerDescription struct into a plain row hash.
  def unpack_describe_elbs_response(load_balancers)
    load_balancers.map do |lb_struct|
      {
        availability_zones: lb_struct.availability_zones,
        dns_name: lb_struct.dns_name,
        external_ports: lb_struct.listener_descriptions.map { |ld| ld.listener.load_balancer_port },
        instance_ids: lb_struct.instances.map(&:instance_id),
        internal_ports: lb_struct.listener_descriptions.map { |ld| ld.listener.instance_port },
        elb_name: lb_struct.load_balancer_name,
        security_group_ids: lb_struct.security_groups,
        subnet_ids: lb_struct.subnets,
        vpc_id: lb_struct.vpc_id,
      }
    end
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::ElasticLoadBalancing::Client

      def describe_load_balancers(query = {})
        aws_service_client.describe_load_balancers(query)
      end
    end
  end
end

View file

@ -1,106 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one VPC Flow Log, looked up by flow log
# id, subnet id, or VPC id.
class AwsFlowLog < Inspec.resource(1)
  name "aws_flow_log"
  supports platform: "aws"
  desc "This resource is used to test the attributes of a Flow Log."
  example <<~EXAMPLE
    describe aws_flow_log('fl-9c718cf5') do
      it { should exist }
    end
  EXAMPLE
  include AwsSingularResourceMixin

  attr_reader :log_group_name, :resource_id, :flow_log_id

  def to_s
    "AWS Flow Log #{id}"
  end

  # Classifies the resource the flow log is attached to by its id prefix:
  # "eni", "subnet", or "vpc" (nil for anything else). Memoized —
  # previously the case statement re-ran its regexes on every call.
  def resource_type
    @resource_type ||=
      case @resource_id
      when /^eni/ then "eni"
      when /^subnet/ then "subnet"
      when /^vpc/ then "vpc"
      end
  end

  # Predicate matchers. The comparisons already yield booleans, so the
  # former `... ? true : false` ternaries were redundant.
  def attached_to_eni?
    resource_type == "eni"
  end

  def attached_to_subnet?
    resource_type == "subnet"
  end

  def attached_to_vpc?
    resource_type == "vpc"
  end

  private

  # Requires exactly one of flow_log_id (also accepted as a bare String),
  # subnet_id, or vpc_id.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: %i{flow_log_id subnet_id vpc_id},
      allowed_scalar_name: :flow_log_id,
      allowed_scalar_type: String
    )
    if validated_params.empty?
      raise ArgumentError,
        "aws_flow_log requires a parameter: flow_log_id, subnet_id, or vpc_id"
    end

    validated_params
  end

  # Mixin hook: fetch the first matching flow log and cache its fields.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    resp = backend.describe_flow_logs(filter_args)
    flow_log = resp.to_h[:flow_logs].first
    @exists = !flow_log.nil?
    unless flow_log.nil?
      @log_group_name = flow_log[:log_group_name]
      @resource_id = flow_log[:resource_id]
      @flow_log_id = flow_log[:flow_log_id]
    end
  end

  # Build the EC2 DescribeFlowLogs filter for whichever lookup key was given.
  def filter_args
    if @flow_log_id
      { filter: [{ name: "flow-log-id", values: [@flow_log_id] }] }
    elsif @subnet_id || @vpc_id
      filter = @subnet_id || @vpc_id
      { filter: [{ name: "resource-id", values: [filter] }] }
    end
  end

  # The identifier the user searched by, whichever was provided.
  def id
    return @flow_log_id if @flow_log_id
    return @subnet_id if @subnet_id

    @vpc_id
  end

  # Unused convenience helper; retained for compatibility.
  def backend
    BackendFactory.create(inspec_runner)
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      AwsFlowLog::BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      def describe_flow_logs(query)
        aws_service_client.describe_flow_logs(query)
      end
    end
  end
end

View file

@ -1,112 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec singular resource for one IAM access key, identified by
# username and/or access key id.
class AwsIamAccessKey < Inspec.resource(1)
  name "aws_iam_access_key"
  desc "Verifies settings for an individual IAM access key"
  example <<~EXAMPLE
    describe aws_iam_access_key(username: 'username', id: 'access-key id') do
      it { should exist }
      it { should_not be_active }
      its('create_date') { should be > Time.now - 365 * 86400 }
      its('last_used_date') { should be > Time.now - 90 * 86400 }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :access_key_id, :create_date, :status, :username
  alias id access_key_id

  # Accepts :username, :id/:access_key_id (aliases), or a bare String
  # access key id. At least one of username / access_key_id is required.
  def validate_params(raw_params)
    recognized_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: %i{username id access_key_id},
      allowed_scalar_name: :access_key_id,
      allowed_scalar_type: String
    )
    # id and access_key_id are aliases; standardize on access_key_id
    recognized_params[:access_key_id] = recognized_params.delete(:id) if recognized_params.key?(:id)

    # Validate format of access_key_id
    if recognized_params[:access_key_id] &&
        recognized_params[:access_key_id] !~ (/^AKIA[0-9A-Z]{16}$/)
      raise ArgumentError, "Incorrect format for Access Key ID - expected AKIA followed " \
        "by 16 letters or numbers"
    end

    # One of username and access_key_id is required
    if recognized_params[:username].nil? && recognized_params[:access_key_id].nil?
      # Fixed typo in the error message ("at lease" -> "at least").
      raise ArgumentError, "You must provide at least one of access_key_id or username to aws_iam_access_key"
    end

    recognized_params
  end

  # Matcher backing `be_active`; nil when the key does not exist.
  def active?
    return nil unless exists?

    status == "Active"
  end

  def to_s
    "IAM Access-Key #{access_key_id}"
  end

  # Lazily fetched via a separate GetAccessKeyLastUsed call; memoized.
  def last_used_date
    return nil unless exists?
    return @last_used_date if defined? @last_used_date

    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      @last_used_date = backend.get_access_key_last_used({ access_key_id: access_key_id }).access_key_last_used.last_used_date
    end
  end

  # Mixin hook: list the user's keys (or all visible keys), filter by id
  # when one was given, and cache the single match's attributes.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    query = {}
    query[:user_name] = username if username
    response = backend.list_access_keys(query)

    access_keys = response.access_key_metadata.select do |key|
      if access_key_id
        key.access_key_id == access_key_id
      else
        true
      end
    end

    if access_keys.empty?
      @exists = false
      return
    end

    if access_keys.count > 1
      raise "More than one access key matched for aws_iam_access_key. Use more specific parameters, such as access_key_id."
    end

    @exists = true
    @access_key_id = access_keys[0].access_key_id
    @username = access_keys[0].user_name
    @create_date = access_keys[0].create_date
    @status = access_keys[0].status
    # Last used date is lazily loaded, separate API call
  rescue Aws::IAM::Errors::NoSuchEntity
    # Unknown username raises; treat as a non-existent resource.
    @exists = false
  end

  # Concrete API backend; swappable for mocks via BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      def list_access_keys(query)
        aws_service_client.list_access_keys(query)
      end

      # Delegation added: last_used_date calls this on the backend, but
      # the default backend previously did not define it.
      def get_access_key_last_used(query)
        aws_service_client.get_access_key_last_used(query)
      end
    end
  end
end

View file

@ -1,153 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec plural resource over IAM access keys (optionally scoped to one
# user), exposing many synthesized columns (ages, activity, last use).
class AwsIamAccessKeys < Inspec.resource(1)
  name "aws_iam_access_keys"
  desc "Verifies settings for AWS IAM Access Keys in bulk"
  example <<~EXAMPLE
    describe aws_iam_access_keys do
      it { should_not exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # Accepts :username, :id/:access_key_id (aliases), :created_date, or a
  # bare String access key id.
  def validate_params(raw_params)
    recognized_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: %i{username id access_key_id created_date},
      allowed_scalar_name: :access_key_id,
      allowed_scalar_type: String
    )
    # id and access_key_id are aliases; standardize on access_key_id
    recognized_params[:access_key_id] = recognized_params.delete(:id) if recognized_params.key?(:id)
    # NOTE: raises a bare RuntimeError here, unlike the ArgumentError used
    # by the singular aws_iam_access_key resource.
    if recognized_params[:access_key_id] &&
        recognized_params[:access_key_id] !~ (/^AKIA[0-9A-Z]{16}$/)
      raise "Incorrect format for Access Key ID - expected AKIA followed " \
        "by 16 letters or numbers"
    end
    recognized_params
  end

  # Mixin hook: the backend's fetch does all the work (user iteration,
  # key listing, synthetic field computation).
  def fetch_from_api
    # TODO: this interface should be normalized to match the AWS API
    criteria = {}
    criteria[:username] = @username if defined? @username
    @table = BackendFactory.create(inspec_runner).fetch(criteria)
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:access_key_ids, field: :access_key_id)
    .register_column(:created_date, field: :create_date)
    .register_column(:created_days_ago, field: :created_days_ago)
    .register_column(:created_with_user, field: :created_with_user)
    .register_column(:created_hours_ago, field: :created_hours_ago)
    .register_column(:usernames, field: :username)
    .register_column(:active, field: :active)
    .register_column(:inactive, field: :inactive)
    .register_column(:last_used_date, field: :last_used_date)
    .register_column(:last_used_hours_ago, field: :last_used_hours_ago)
    .register_column(:last_used_days_ago, field: :last_used_days_ago)
    .register_column(:ever_used, field: :ever_used)
    .register_column(:never_used, field: :never_used)
    .register_column(:user_created_date, field: :user_created_date)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "IAM Access Keys"
  end

  # Internal support class. This is used to fetch
  # the users and access keys. We have an abstract
  # class with a concrete AWS implementation provided here;
  # a few mock implementations are also provided in the unit tests.
  class Backend
    # Implementation of AccessKeyProvider which operates by looping over
    # all users, then fetching their access keys.
    # TODO: An alternate, more scalable implementation could be made
    # using the Credential Report.
    class AwsUserIterator < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      # Returns an Array of row hashes, one per access key.
      # criteria[:username] limits the scan to one user; otherwise all
      # users are paged through via ListUsers.
      def fetch(criteria)
        iam_client = aws_service_client

        user_details = {}
        if criteria.key?(:username)
          begin
            user_details[criteria[:username]] = iam_client.get_user(user_name: criteria[:username]).user
          rescue Aws::IAM::Errors::NoSuchEntity # rubocop:disable Lint/HandleExceptions
            # Swallow - a miss on search results should return an empty table
          end
        else
          pagination_opts = {}
          loop do
            api_result = iam_client.list_users(pagination_opts)
            api_result.users.each do |info|
              user_details[info.user_name] = info
            end
            break unless api_result.is_truncated

            pagination_opts[:marker] = api_result.marker
          end
        end

        access_key_data = []
        user_details.each_key do |username|
          user_keys = iam_client.list_access_keys(user_name: username)
            .access_key_metadata
          user_keys = user_keys.map do |metadata|
            {
              access_key_id: metadata.access_key_id,
              username: username,
              status: metadata.status,
              create_date: metadata.create_date, # DateTime.parse(metadata.create_date),
            }
          end

          # Copy in from user data
          # Synthetics
          user_keys.each do |key_info|
            add_synthetic_fields(key_info, user_details[username])
          end
          access_key_data.concat(user_keys)
        # Block-level rescue (Ruby >= 2.5): a user deleted mid-scan is skipped.
        rescue Aws::IAM::Errors::NoSuchEntity # rubocop:disable Lint/HandleExceptions
          # Swallow - a miss on search results should return an empty table
        end
        access_key_data
      end

      # Derives convenience columns (ages in hours/days, active flags,
      # last-used info) from the raw key metadata and its owning user.
      def add_synthetic_fields(key_info, user_details) # rubocop:disable Metrics/AbcSize
        key_info[:id] = key_info[:access_key_id]
        key_info[:active] = key_info[:status] == "Active"
        key_info[:inactive] = key_info[:status] != "Active"
        key_info[:created_hours_ago] = ((Time.now - key_info[:create_date]) / (60 * 60)).to_i
        key_info[:created_days_ago] = (key_info[:created_hours_ago] / 24).to_i
        key_info[:user_created_date] = user_details[:create_date]
        # "Created with user" means within one hour of the user's creation.
        key_info[:created_with_user] = (key_info[:create_date] - key_info[:user_created_date]).abs < 1.0 / 24.0

        # Last used is a separate API call
        iam_client = aws_service_client
        last_used =
          iam_client.get_access_key_last_used(access_key_id: key_info[:access_key_id])
            .access_key_last_used.last_used_date
        key_info[:ever_used] = !last_used.nil?
        key_info[:never_used] = last_used.nil?
        key_info[:last_used_time] = last_used
        return unless last_used

        key_info[:last_used_hours_ago] = ((Time.now - last_used) / (60 * 60)).to_i
        key_info[:last_used_days_ago] = (key_info[:last_used_hours_ago] / 24).to_i
      end
    end
  end
end

View file

@ -1,62 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec singular resource for a single AWS IAM group.
class AwsIamGroup < Inspec.resource(1)
  name "aws_iam_group"
  desc "Verifies settings for AWS IAM Group"
  example <<~EXAMPLE
    describe aws_iam_group('mygroup') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"

  include AwsSingularResourceMixin

  attr_reader :group_name, :users

  # Rendered as e.g. "IAM Group mygroup" in reporter output.
  def to_s
    "IAM Group #{group_name}"
  end

  private

  # Accepts either a scalar String or a { group_name: ... } hash;
  # raises ArgumentError when no group name is supplied.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:group_name],
      allowed_scalar_name: :group_name,
      allowed_scalar_type: String
    )
    raise ArgumentError, "You must provide a group_name to aws_iam_group." if validated_params.empty?

    validated_params
  end

  # Look the group up via the IAM API. A NoSuchEntity miss marks the
  # resource as non-existent rather than raising.
  def fetch_from_api
    backend = AwsIamGroup::BackendFactory.create(inspec_runner)
    response = backend.get_group(group_name: group_name)
    @exists = true
    @aws_group_struct = response[:group]
    @users = response[:users].map(&:user_name)
  rescue Aws::IAM::Errors::NoSuchEntity
    @exists = false
  end

  # Concrete AWS API implementation, resolved through BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      def get_group(query)
        aws_service_client.get_group(query)
      end
    end
  end
end

View file

@ -1,56 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec plural resource enumerating all AWS IAM groups.
class AwsIamGroups < Inspec.resource(1)
  name "aws_iam_groups"
  desc "Verifies settings for AWS IAM groups in bulk"
  example <<~EXAMPLE
    describe aws_iam_groups do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"

  include AwsPluralResourceMixin

  # This resource takes no parameters; anything supplied is an error.
  def validate_params(resource_params)
    raise ArgumentError, "aws_iam_groups does not accept resource parameters." unless resource_params.empty?

    resource_params
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_column(:group_names, field: :group_name)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "IAM Groups"
  end

  # Page through list_groups until the API reports no more results,
  # accumulating each group as a plain hash row.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    pagination_opts = {}
    loop do
      api_result = backend.list_groups(pagination_opts)
      @table.concat(api_result.groups.map(&:to_h))
      pagination_opts = { marker: api_result.marker }
      break unless api_result.is_truncated
    end
  end

  # Concrete AWS API implementation, resolved through BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      def list_groups(query = {})
        aws_service_client.list_groups(query)
      end
    end
  end
end

View file

@ -1,121 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# Singular resource describing the account-wide IAM password policy.
# Unlike sibling resources it does not use AwsSingularResourceMixin;
# the constructor talks to the API (or an injected mock) directly.
class AwsIamPasswordPolicy < Inspec.resource(1)
name "aws_iam_password_policy"
desc "Verifies iam password policy"
example <<~EXAMPLE
describe aws_iam_password_policy do
its('requires_lowercase_characters?') { should be true }
end
describe aws_iam_password_policy do
its('requires_uppercase_characters?') { should be true }
end
EXAMPLE
supports platform: "aws"
# TODO: rewrite to avoid direct injection, match other resources, use AwsSingularResourceMixin
# conn is only passed by unit tests (a mocked connection). An account with
# no password policy is not an error: @policy stays nil and exists? is false.
def initialize(conn = nil)
catch_aws_errors do
if conn
# We're in a mocked unit test.
@policy = conn.iam_resource.account_password_policy
else
# Don't use the resource approach. It's a CRUD operation
# - if the policy does not exist, you get back a blank object to populate and save.
# Using the Client will throw an exception if no policy exists.
@policy = inspec_runner.backend.aws_client(Aws::IAM::Client).get_account_password_policy.password_policy
end
rescue Aws::IAM::Errors::NoSuchEntity
@policy = nil
end
end
# TODO: DRY up, see https://github.com/chef/inspec/issues/2633
# Copied from resource_support/aws/aws_resource_mixin.rb
# Translates AWS credential/service failures into resource failures
# instead of letting raw SDK exceptions escape.
def catch_aws_errors
yield
rescue Aws::Errors::MissingCredentialsError
# The AWS error here is unhelpful:
# "unable to sign request without credentials set"
Inspec::Log.error "It appears that you have not set your AWS credentials. You may set them using environment variables, or using the 'aws://region/aws_credentials_profile' target. See https://docs.chef.io/inspec/platforms/ for details."
fail_resource("No AWS credentials available")
rescue Aws::Errors::ServiceError => e
fail_resource e.message
end
# TODO: DRY up, see https://github.com/chef/inspec/issues/2633
# Copied from resource_support/aws/aws_singular_resource_mixin.rb
def inspec_runner
# When running under inspec-cli, we have an 'inspec' method that
# returns the runner. When running under unit tests, we don't
# have that, but we still have to call this to pass something
# (nil is OK) to the backend.
# TODO: remove with https://github.com/chef/inspec-aws/issues/216
# TODO: remove after rewrite to include AwsSingularResource
inspec if respond_to?(:inspec)
end
def to_s
"IAM Password-Policy"
end
def exists?
!@policy.nil?
end
#-------------------------- Properties ----------------------------#
def minimum_password_length
@policy.minimum_password_length
end
# Raises unless the policy actually expires passwords — callers should
# check expire_passwords? first.
def max_password_age_in_days
raise "this policy does not expire passwords" unless expire_passwords?
@policy.max_password_age
end
# Raises unless the policy prevents password reuse — check
# prevent_password_reuse? first.
def number_of_passwords_to_remember
raise "this policy does not prevent password reuse" \
unless prevent_password_reuse?
@policy.password_reuse_prevention
end
#-------------------------- Matchers ----------------------------#
# Generate predicate methods (e.g. require_symbols?) straight from the
# SDK struct fields, plus RSpec matcher aliases so profiles can write
# `it { should require_symbols }` instead of `be_require_symbols`.
%i{
require_lowercase_characters
require_uppercase_characters
require_symbols
require_numbers
expire_passwords
}.each do |matcher_stem|
# Create our predicates (for example, 'require_symbols?')
stem_with_question_mark = (matcher_stem.to_s + "?").to_sym
define_method stem_with_question_mark do
@policy.send(matcher_stem)
end
# RSpec will expose that as (for example) `be_require_symbols`.
# To undo that, we have to make a matcher alias.
stem_with_be = ("be_" + matcher_stem.to_s).to_sym
RSpec::Matchers.alias_matcher matcher_stem, stem_with_be
end
# This one has an awkward name mapping
def allow_users_to_change_passwords?
@policy.allow_users_to_change_password
end
RSpec::Matchers.alias_matcher :allow_users_to_change_passwords, :be_allow_users_to_change_passwords
# This one has custom logic and renaming
def prevent_password_reuse?
!@policy.password_reuse_prevention.nil?
end
RSpec::Matchers.alias_matcher :prevent_password_reuse, :be_prevent_password_reuse
end

View file

@ -1,57 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec plural resource enumerating all AWS IAM managed policies.
class AwsIamPolicies < Inspec.resource(1)
  name "aws_iam_policies"
  desc "Verifies settings for AWS IAM Policies in bulk"
  example <<~EXAMPLE
    describe aws_iam_policies do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"

  include AwsPluralResourceMixin

  # This resource takes no parameters; anything supplied is an error.
  def validate_params(resource_params)
    raise ArgumentError, "aws_iam_policies does not accept resource parameters." unless resource_params.empty?

    resource_params
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:policy_names, field: :policy_name)
    .register_column(:arns, field: :arn)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "IAM Policies"
  end

  # Page through list_policies until the API reports no more results,
  # accumulating each policy as a plain hash row.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    pagination_opts = {}
    loop do
      api_result = backend.list_policies(pagination_opts)
      @table.concat(api_result.policies.map(&:to_h))
      pagination_opts = { marker: api_result.marker }
      break unless api_result.is_truncated
    end
  end

  # Concrete AWS API implementation, resolved through BackendFactory.
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      def list_policies(query)
        aws_service_client.list_policies(query)
      end
    end
  end
end

View file

@ -1,311 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
require "json" unless defined?(JSON)
require "set" unless defined?(Set)
require "uri" unless defined?(URI)
# Singular resource for one IAM managed policy, looked up by policy name.
# Supports attachment queries and a statement matcher (has_statement?).
class AwsIamPolicy < Inspec.resource(1)
name "aws_iam_policy"
desc "Verifies settings for individual AWS IAM Policy"
example <<~EXAMPLE
describe aws_iam_policy('AWSSupportAccess') do
it { should be_attached }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
attr_reader :arn, :attachment_count, :default_version_id
# Note that we also accept downcases and symbol versions of these
EXPECTED_CRITERIA = %w{
Action
Effect
Resource
Sid
}.freeze
# Statement attributes that are valid in IAM but that has_statement?
# deliberately refuses to match on (raises ArgumentError).
UNIMPLEMENTED_CRITERIA = %w{
Conditional
NotAction
NotPrincipal
NotResource
Principal
}.freeze
def to_s
"Policy #{@policy_name}"
end
def attached?
attachment_count > 0
end
# attached_users/groups/roles are fetched lazily, all in one API call.
def attached_users
return @attached_users if defined? @attached_users
fetch_attached_entities
@attached_users
end
def attached_groups
return @attached_groups if defined? @attached_groups
fetch_attached_entities
@attached_groups
end
def attached_roles
return @attached_roles if defined? @attached_roles
fetch_attached_entities
@attached_roles
end
def attached_to_user?(user_name)
attached_users.include?(user_name)
end
def attached_to_group?(group_name)
attached_groups.include?(group_name)
end
def attached_to_role?(role_name)
attached_roles.include?(role_name)
end
# The parsed JSON policy document of the default version, fetched lazily
# and memoized. Returns nil when the policy does not exist.
def policy
return nil unless exists?
return @policy if defined?(@policy)
catch_aws_errors do
backend = BackendFactory.create(inspec_runner)
gpv_response = backend.get_policy_version(policy_arn: arn, version_id: default_version_id)
@policy = JSON.parse(URI.decode_www_form_component(gpv_response.policy_version.document))
end
@policy
end
def statement_count
return nil unless exists?
# Typically it is an array of statements
if policy["Statement"].is_a? Array
policy["Statement"].count
else
# But if there is one statement, it is permissable to degenerate the array,
# and place the statement as a hash directly under the 'Statement' key
1
end
end
# Returns true when at least one statement matches ALL supplied criteria
# (Sid / Effect / Action / Resource, in string/symbol/any-case form).
# Returns nil when the policy does not exist.
def has_statement?(provided_criteria = {})
return nil unless exists?
raw_criteria = provided_criteria.dup # provided_criteria is used for output formatting - can't delete from it.
criteria = has_statement__validate_criteria(raw_criteria)
@normalized_statements ||= has_statement__normalize_statements
statements = has_statement__focus_on_sid(@normalized_statements, criteria)
statements.any? do |statement|
true && \
has_statement__effect(statement, criteria) && \
has_statement__array_criterion(:action, statement, criteria) && \
has_statement__array_criterion(:resource, statement, criteria)
end
end
private
# Normalizes recognized criteria keys to downcased symbols, rejects
# unimplemented or unknown keys, and validates the Effect value.
def has_statement__validate_criteria(raw_criteria)
recognized_criteria = {}
EXPECTED_CRITERIA.each do |expected_criterion|
[
expected_criterion,
expected_criterion.downcase,
expected_criterion.to_sym,
expected_criterion.downcase.to_sym,
].each do |variant|
if raw_criteria.key?(variant)
# Always store as downcased symbol
recognized_criteria[expected_criterion.downcase.to_sym] = raw_criteria.delete(variant)
end
end
end
# Special message for valid, but unimplemented statement attributes
UNIMPLEMENTED_CRITERIA.each do |unimplemented_criterion|
[
unimplemented_criterion,
unimplemented_criterion.downcase,
unimplemented_criterion.to_sym,
unimplemented_criterion.downcase.to_sym,
].each do |variant|
if raw_criteria.key?(variant)
raise ArgumentError, "Criterion '#{unimplemented_criterion}' is not supported for performing have_statement queries."
end
end
end
# If anything is left, it's spurious
unless raw_criteria.empty?
raise ArgumentError, "Unrecognized criteria #{raw_criteria.keys.join(", ")} to have_statement. Recognized criteria: #{EXPECTED_CRITERIA.join(", ")}"
end
# Effect has only 2 permitted values
if recognized_criteria.key?(:effect)
unless %w{Allow Deny}.include?(recognized_criteria[:effect])
raise ArgumentError, "Criterion 'Effect' for have_statement must be one of 'Allow' or 'Deny' - got '#{recognized_criteria[:effect]}'"
end
end
recognized_criteria
end
def has_statement__normalize_statements
# Some single-statement policies place their statement
# directly in policy['Statement'], rather than in an
# Array within it. See arn:aws:iam::aws:policy/AWSCertificateManagerReadOnly
# Thus, coerce to Array.
policy["Statement"] = [policy["Statement"]] if policy["Statement"].is_a? Hash
policy["Statement"].map do |statement|
# Coerce some values into arrays
%w{Action Resource}.each do |field|
if statement.key?(field)
statement[field] = Array(statement[field])
end
end
# Symbolize all keys
statement.keys.each do |field|
statement[field.downcase.to_sym] = statement.delete(field)
end
statement
end
end
# When a :sid criterion is given, pre-filter statements by Sid
# (String equality or Regexp match).
def has_statement__focus_on_sid(statements, criteria)
return statements unless criteria.key?(:sid)
sid_seek = criteria[:sid]
statements.select do |statement|
if sid_seek.is_a? Regexp
statement[:sid] =~ sid_seek
else
statement[:sid] == sid_seek
end
end
end
def has_statement__effect(statement, criteria)
!criteria.key?(:effect) || criteria[:effect] == statement[:effect]
end
# Matches an Action/Resource criterion against the statement's (already
# array-coerced) values. Semantics depend on the check's type: String or
# Regexp match any one value; Array of Strings is a set-equality check;
# Array of Regexps requires every value to match every regex.
def has_statement__array_criterion(crit_name, statement, criteria)
return true unless criteria.key?(crit_name)
check = criteria[crit_name]
# This is an array due to normalize_statements
# If it is nil, the statement does not have an entry for that dimension;
# but since we were asked to match on it (on nothing), we
# decide to never match
values = statement[crit_name]
return false if values.nil?
if check.is_a?(String)
# If check is a string, it only has to match one of the values
values.any? { |v| v == check }
elsif check.is_a?(Regexp)
# If check is a regex, it only has to match one of the values
values.any? { |v| v =~ check }
elsif check.is_a?(Array) && check.all? { |c| c.is_a? String }
# If check is an array of strings, perform setwise check
Set.new(values) == Set.new(check)
elsif check.is_a?(Array) && check.all? { |c| c.is_a? Regexp }
# If check is an array of regexes, all values must match all regexes
values.all? { |v| check.all? { |r| v =~ r } }
else
false
end
end
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: [:policy_name],
allowed_scalar_name: :policy_name,
allowed_scalar_type: String
)
if validated_params.empty?
raise ArgumentError, "You must provide the parameter 'policy_name' to aws_iam_policy."
end
validated_params
end
# list_policies has no name filter, so scan pages of up to 1000 until
# the named policy is found or the listing is exhausted.
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
policy = nil
pagination_opts = { max_items: 1000 }
loop do
api_result = backend.list_policies(pagination_opts)
policy = api_result.policies.detect do |p|
p.policy_name == @policy_name
end
break if policy # Found it!
break unless api_result.is_truncated # Not found and no more results
pagination_opts[:marker] = api_result.marker
end
@exists = !policy.nil?
return unless @exists
@arn = policy[:arn]
@default_version_id = policy[:default_version_id]
@attachment_count = policy[:attachment_count]
end
# Populate @attached_users/@attached_groups/@attached_roles in one
# list_entities_for_policy call; all nil when the policy doesn't exist.
def fetch_attached_entities
unless @exists
@attached_groups = nil
@attached_users = nil
@attached_roles = nil
return
end
backend = AwsIamPolicy::BackendFactory.create(inspec_runner)
criteria = { policy_arn: arn }
resp = nil
catch_aws_errors do
resp = backend.list_entities_for_policy(criteria)
end
@attached_groups = resp.policy_groups.map(&:group_name)
@attached_users = resp.policy_users.map(&:user_name)
@attached_roles = resp.policy_roles.map(&:role_name)
end
class Backend
class AwsClientApi < AwsBackendBase
BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::IAM::Client
def get_policy_version(criteria)
aws_service_client.get_policy_version(criteria)
end
def list_policies(criteria)
aws_service_client.list_policies(criteria)
end
def list_entities_for_policy(criteria)
aws_service_client.list_entities_for_policy(criteria)
end
end
end
end

View file

@ -1,60 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# InSpec singular resource for a single AWS IAM role.
class AwsIamRole < Inspec.resource(1)
  name "aws_iam_role"
  desc "Verifies settings for an IAM Role"
  example <<~EXAMPLE
    describe aws_iam_role('my-role') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"

  include AwsSingularResourceMixin

  attr_reader :description, :role_name

  # Rendered as e.g. "IAM Role my-role" in reporter output.
  def to_s
    "IAM Role #{role_name}"
  end

  private

  # Accepts either a scalar String or a { role_name: ... } hash;
  # raises ArgumentError when no role name is supplied.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:role_name],
      allowed_scalar_name: :role_name,
      allowed_scalar_type: String
    )
    raise ArgumentError, "You must provide a role_name to aws_iam_role." if validated_params.empty?

    validated_params
  end

  # Look the role up via the IAM API. A NoSuchEntity miss marks the
  # resource as non-existent rather than raising.
  def fetch_from_api
    role_info = BackendFactory.create(inspec_runner).get_role(role_name: role_name)
    @exists = true
    @description = role_info.role.description
  rescue Aws::IAM::Errors::NoSuchEntity
    @exists = false
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::IAM::Client

      def get_role(query)
        aws_service_client.get_role(query)
      end
    end
  end
end

View file

@ -1,82 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# Singular resource reporting on the account root user's credentials,
# built from the IAM account summary and the virtual-MFA device listing.
class AwsIamRootUser < Inspec.resource(1)
name "aws_iam_root_user"
desc "Verifies settings for AWS root account"
example <<~EXAMPLE
describe aws_iam_root_user do
it { should have_access_key }
end
EXAMPLE
supports platform: "aws"
# TODO: rewrite to avoid direct injection, match other resources, use AwsSingularResourceMixin
# conn is only passed by unit tests (a mocked connection); production use
# obtains an IAM client through the inspec runner backend.
def initialize(conn = nil)
@client = conn ? conn.iam_client : inspec_runner.backend.aws_client(Aws::IAM::Client)
end
# TODO: DRY up, see https://github.com/chef/inspec/issues/2633
# Copied from resource_support/aws/aws_resource_mixin.rb
# Translates AWS credential/service failures into resource failures
# instead of letting raw SDK exceptions escape.
def catch_aws_errors
yield
rescue Aws::Errors::MissingCredentialsError
# The AWS error here is unhelpful:
# "unable to sign request without credentials set"
Inspec::Log.error "It appears that you have not set your AWS credentials. You may set them using environment variables, or using the 'aws://region/aws_credentials_profile' target. See https://docs.chef.io/inspec/platforms/ for details."
fail_resource("No AWS credentials available")
rescue Aws::Errors::ServiceError => e
fail_resource e.message
end
# TODO: DRY up, see https://github.com/chef/inspec/issues/2633
# Copied from resource_support/aws/aws_singular_resource_mixin.rb
def inspec_runner
# When running under inspec-cli, we have an 'inspec' method that
# returns the runner. When running under unit tests, we don't
# have that, but we still have to call this to pass something
# (nil is OK) to the backend.
# TODO: remove with https://github.com/chef/inspec-aws/issues/216
# TODO: remove after rewrite to include AwsSingularResource
inspec if respond_to?(:inspec)
end
def has_access_key?
summary_account["AccountAccessKeysPresent"] == 1
end
def has_mfa_enabled?
summary_account["AccountMFAEnabled"] == 1
end
# if the root account has a Virtual MFA device then it will have a special
# serial number ending in 'root-account-mfa-device'
def has_virtual_mfa_enabled?
mfa_device_pattern = %r{arn:aws:iam::\d{12}:mfa\/root-account-mfa-device}
virtual_mfa_devices.any? { |d| mfa_device_pattern =~ d["serial_number"] }
end
# Hardware MFA is inferred: MFA on, but not via a virtual device.
def has_hardware_mfa_enabled?
has_mfa_enabled? && !has_virtual_mfa_enabled?
end
def to_s
"AWS Root-User"
end
private
# Memoized account summary map from get_account_summary.
def summary_account
catch_aws_errors do
@summary_account ||= @client.get_account_summary.summary_map
end
end
# Memoized list of the account's virtual MFA devices.
def virtual_mfa_devices
catch_aws_errors do
@__virtual_devices ||= @client.list_virtual_mfa_devices.virtual_mfa_devices
end
end
end

View file

@ -1,145 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# Singular resource for one IAM user: console password, MFA devices,
# access keys, and inline/attached policies.
class AwsIamUser < Inspec.resource(1)
name "aws_iam_user"
desc "Verifies settings for AWS IAM user"
example <<~EXAMPLE
describe aws_iam_user(username: 'test_user') do
it { should have_mfa_enabled }
it { should_not have_console_password }
it { should_not have_inline_user_policies }
it { should_not have_attached_user_policies }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
attr_reader :access_keys, :attached_policy_names, :attached_policy_arns, \
:has_console_password, :has_mfa_enabled, :inline_policy_names, :username
alias has_mfa_enabled? has_mfa_enabled
alias has_console_password? has_console_password
# Deprecated property: prefer `username`.
def name
Inspec.deprecate(:properties_aws_iam_user, "The aws_iam_user `name` property is deprecated. Please use `username` instead")
username
end
def to_s
"IAM User #{username}"
end
# nil when the user does not exist, else whether any managed policies
# are attached.
def has_attached_policies?
return nil unless exists?
!attached_policy_names.empty?
end
# nil when the user does not exist, else whether any inline policies exist.
def has_inline_policies?
return nil unless exists?
!inline_policy_names.empty?
end
private
# Accepts username (scalar or keyword) plus deprecated :name/:user
# forms, which are renamed to :username/:aws_user_struct.
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: %i{username aws_user_struct name user},
allowed_scalar_name: :username,
allowed_scalar_type: String
)
# If someone passed :name, rename it to :username
if validated_params.key?(:name)
Inspec.deprecate(:properties_aws_iam_user, "The aws_iam_users `name` property is deprecated. Please use `username` instead")
validated_params[:username] = validated_params.delete(:name)
end
# If someone passed :user, rename it to :aws_user_struct
if validated_params.key?(:user)
Inspec.deprecate(:properties_aws_iam_user, "The aws_iam_users `user` property is deprecated. Please use `aws_user_struct` instead")
validated_params[:aws_user_struct] = validated_params.delete(:user)
end
if validated_params.empty?
raise ArgumentError, "You must provide a username to aws_iam_user."
end
validated_params
end
# Populates all user details. A NoSuchEntity miss on get_user marks the
# resource non-existent with empty collections; a pre-supplied
# aws_user_struct (unit tests / deprecated :user param) skips get_user.
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
@aws_user_struct ||= nil # silence unitialized warning
unless @aws_user_struct
begin
@aws_user_struct = backend.get_user(user_name: username)
rescue Aws::IAM::Errors::NoSuchEntity
@exists = false
@access_keys = []
@inline_policy_names = []
@attached_policy_arns = []
@attached_policy_names = []
return
end
end
# TODO: extract properties from aws_user_struct?
@exists = true
begin
_login_profile = backend.get_login_profile(user_name: username)
@has_console_password = true
# Password age also available here
rescue Aws::IAM::Errors::NoSuchEntity
# No login profile means no console password.
@has_console_password = false
end
mfa_info = backend.list_mfa_devices(user_name: username)
@has_mfa_enabled = !mfa_info.mfa_devices.empty?
# TODO: consider returning InSpec AwsIamAccessKey objects
@access_keys = backend.list_access_keys(user_name: username).access_key_metadata
# If the above call fails, we get nil here; but we promise access_keys will be an array.
@access_keys ||= []
@inline_policy_names = backend.list_user_policies(user_name: username).policy_names
attached_policies = backend.list_attached_user_policies(user_name: username).attached_policies
@attached_policy_arns = attached_policies.map { |p| p[:policy_arn] }
@attached_policy_names = attached_policies.map { |p| p[:policy_name] }
end
# Concrete AWS API implementation, resolved through BackendFactory.
class Backend
class AwsClientApi < AwsBackendBase
BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::IAM::Client
def get_user(criteria)
aws_service_client.get_user(criteria)
end
def get_login_profile(criteria)
aws_service_client.get_login_profile(criteria)
end
def list_mfa_devices(criteria)
aws_service_client.list_mfa_devices(criteria)
end
def list_access_keys(criteria)
aws_service_client.list_access_keys(criteria)
end
def list_user_policies(criteria)
aws_service_client.list_user_policies(criteria)
end
def list_attached_user_policies(criteria)
aws_service_client.list_attached_user_policies(criteria)
end
end
end
end

View file

@ -1,160 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-iam"
# Plural resource over all IAM users. Cheap columns come from list_users;
# expensive per-user columns (console password, MFA, policies) are filled
# in lazily, one fetcher per API call, only when a profile touches them.
class AwsIamUsers < Inspec.resource(1)
name "aws_iam_users"
desc "Verifies settings for AWS IAM users"
example <<~EXAMPLE
describe aws_iam_users.where(has_mfa_enabled?: false) do
it { should_not exist }
end
describe aws_iam_users.where(has_console_password?: true) do
it { should exist }
end
describe aws_iam_users.where(has_inline_policies?: true) do
it { should_not exist }
end
describe aws_iam_users.where(has_attached_policies?: true) do
it { should_not exist }
end
EXAMPLE
supports platform: "aws"
include AwsPluralResourceMixin
# Lazy fetcher: a user has a console password iff get_login_profile
# succeeds (NoSuchEntity means no password).
def self.lazy_get_login_profile(row, _criterion, table)
backend = BackendFactory.create(table.resource.inspec_runner)
begin
_login_profile = backend.get_login_profile(user_name: row[:user_name])
row[:has_console_password] = true
rescue Aws::IAM::Errors::NoSuchEntity
row[:has_console_password] = false
end
row[:has_console_password?] = row[:has_console_password]
end
# Lazy fetcher: MFA enabled iff the user has any MFA devices.
def self.lazy_list_mfa_devices(row, _criterion, table)
backend = BackendFactory.create(table.resource.inspec_runner)
begin
aws_mfa_devices = backend.list_mfa_devices(user_name: row[:user_name])
row[:has_mfa_enabled] = !aws_mfa_devices.mfa_devices.empty?
rescue Aws::IAM::Errors::NoSuchEntity
row[:has_mfa_enabled] = false
end
row[:has_mfa_enabled?] = row[:has_mfa_enabled]
end
# Lazy fetcher: inline policy names for the user.
def self.lazy_list_user_policies(row, _criterion, table)
backend = BackendFactory.create(table.resource.inspec_runner)
row[:inline_policy_names] = backend.list_user_policies(user_name: row[:user_name]).policy_names
row[:has_inline_policies] = !row[:inline_policy_names].empty?
row[:has_inline_policies?] = row[:has_inline_policies]
end
# Lazy fetcher: attached managed policy names/ARNs for the user.
def self.lazy_list_attached_policies(row, _criterion, table)
backend = BackendFactory.create(table.resource.inspec_runner)
attached_policies = backend.list_attached_user_policies(user_name: row[:user_name]).attached_policies
row[:has_attached_policies] = !attached_policies.empty?
row[:has_attached_policies?] = row[:has_attached_policies]
row[:attached_policy_names] = attached_policies.map { |p| p[:policy_name] }
row[:attached_policy_arns] = attached_policies.map { |p| p[:policy_arn] }
end
filter = FilterTable.create
# These are included on the initial fetch
filter.register_column(:usernames, field: :user_name)
.register_column(:username) { |res| res.entries.map { |row| row[:user_name] } } # We should deprecate this; plural resources get plural properties
.register_column(:password_ever_used?, field: :password_ever_used?)
.register_column(:password_never_used?, field: :password_never_used?)
.register_column(:password_last_used_days_ago, field: :password_last_used_days_ago)
# Remaining properties / criteria are handled lazily, grouped by fetcher
filter.register_column(:has_console_password?, field: :has_console_password?, lazy: method(:lazy_get_login_profile))
.register_column(:has_console_password, field: :has_console_password, lazy: method(:lazy_get_login_profile))
filter.register_column(:has_mfa_enabled?, field: :has_mfa_enabled?, lazy: method(:lazy_list_mfa_devices))
.register_column(:has_mfa_enabled, field: :has_mfa_enabled, lazy: method(:lazy_list_mfa_devices))
filter.register_column(:has_inline_policies?, field: :has_inline_policies?, lazy: method(:lazy_list_user_policies))
.register_column(:has_inline_policies, field: :has_inline_policies, lazy: method(:lazy_list_user_policies))
.register_column(:inline_policy_names, field: :inline_policy_names, style: :simple, lazy: method(:lazy_list_user_policies))
filter.register_column(:has_attached_policies?, field: :has_attached_policies?, lazy: method(:lazy_list_attached_policies))
.register_column(:has_attached_policies, field: :has_attached_policies, lazy: method(:lazy_list_attached_policies))
.register_column(:attached_policy_names, field: :attached_policy_names, style: :simple, lazy: method(:lazy_list_attached_policies))
.register_column(:attached_policy_arns, field: :attached_policy_arns, style: :simple, lazy: method(:lazy_list_attached_policies))
filter.install_filter_methods_on_resource(self, :table)
def validate_params(raw_params)
# No params yet
unless raw_params.empty?
raise ArgumentError, "aws_iam_users does not accept resource parameters"
end
raw_params
end
# Page through list_users until is_truncated is false, returning all
# users as hash rows.
def fetch_from_api_paginated(backend)
table = []
page_marker = nil
loop do
api_result = backend.list_users(marker: page_marker)
table += api_result.users.map(&:to_h)
page_marker = api_result.marker
break unless api_result.is_truncated
end
table
end
# Initial fetch: list users, then derive the password-usage columns
# from each row's password_last_used timestamp.
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
@table = fetch_from_api_paginated(backend)
@table.each do |user|
password_last_used = user[:password_last_used]
user[:password_ever_used?] = !password_last_used.nil?
user[:password_never_used?] = password_last_used.nil?
if user[:password_ever_used?]
user[:password_last_used_days_ago] = ((Time.now - password_last_used) / (24 * 60 * 60)).to_i
end
end
@table
end
def to_s
"IAM Users"
end
#===========================================================================#
# Backend Implementation
#===========================================================================#
class Backend
class AwsClientApi < AwsBackendBase
BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::IAM::Client
# TODO: delegate this out
def list_users(query = {})
aws_service_client.list_users(query)
end
def get_login_profile(query)
aws_service_client.get_login_profile(query)
end
def list_mfa_devices(query)
aws_service_client.list_mfa_devices(query)
end
def list_user_policies(query)
aws_service_client.list_user_policies(query)
end
def list_attached_user_policies(query)
aws_service_client.list_attached_user_policies(query)
end
end
end
end

View file

@ -1,100 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-kms"
# Singular resource for one KMS key, identified by key ID or ARN.
# Properties and matchers are derived from describe_key plus
# get_key_rotation_status.
class AwsKmsKey < Inspec.resource(1)
name "aws_kms_key"
desc "Verifies settings for an individual AWS KMS Key"
example <<~EXAMPLE
describe aws_kms_key('arn:aws:kms:us-east-1::key/4321dcba-21io-23de-85he-ab0987654321') do
it { should exist }
end
EXAMPLE
supports platform: "aws"
include AwsSingularResourceMixin
attr_reader :key_id, :arn, :creation_date, :key_usage, :key_state, :description,
:deletion_date, :valid_to, :external, :has_key_expiration, :managed_by_aws,
:has_rotation_enabled, :enabled
# Use aliases for matchers
alias deletion_time deletion_date
alias invalidation_time valid_to
alias external? external
alias enabled? enabled
alias managed_by_aws? managed_by_aws
alias has_key_expiration? has_key_expiration
alias has_rotation_enabled? has_rotation_enabled
def to_s
"KMS Key #{@key_id}"
end
# Whole days since the key was created; nil when creation_date is unknown.
def created_days_ago
((Time.now - creation_date) / (24 * 60 * 60)).to_i unless creation_date.nil?
end
private
def validate_params(raw_params)
validated_params = check_resource_param_names(
raw_params: raw_params,
allowed_params: [:key_id],
allowed_scalar_name: :key_id,
allowed_scalar_type: String
)
if validated_params.empty?
raise ArgumentError, "You must provide the parameter 'key_id' to aws_kms_key."
end
validated_params
end
# Fetch key metadata (note @key_id is overwritten with the canonical ID
# from the response) and the rotation status. NotFoundException marks
# the resource non-existent via the block-level rescue (Ruby 2.6+).
def fetch_from_api
backend = BackendFactory.create(inspec_runner)
query = { key_id: @key_id }
catch_aws_errors do
resp = backend.describe_key(query)
@exists = true
@key = resp.key_metadata.to_h
@key_id = @key[:key_id]
@arn = @key[:arn]
@creation_date = @key[:creation_date]
@enabled = @key[:enabled]
@description = @key[:description]
@key_usage = @key[:key_usage]
@key_state = @key[:key_state]
@deletion_date = @key[:deletion_date]
@valid_to = @key[:valid_to]
@external = @key[:origin] == "EXTERNAL"
@has_key_expiration = @key[:expiration_model] == "KEY_MATERIAL_EXPIRES"
@managed_by_aws = @key[:key_manager] == "AWS"
resp = backend.get_key_rotation_status(query)
# NOTE(review): `resp.empty?` assumes the rotation-status response
# responds to empty? — plain SDK response structs may not; verify
# against the mocked backend's return type.
@has_rotation_enabled = resp.key_rotation_enabled unless resp.empty?
rescue Aws::KMS::Errors::NotFoundException
@exists = false
return
end
end
# Concrete AWS API implementation, resolved through BackendFactory.
class Backend
class AwsClientApi < AwsBackendBase
BackendFactory.set_default_backend(self)
self.aws_client_class = Aws::KMS::Client
def describe_key(query)
aws_service_client.describe_key(query)
end
def get_key_rotation_status(query)
aws_service_client.get_key_rotation_status(query)
end
end
end
end

View file

@ -1,58 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-kms"
# InSpec plural resource listing every KMS key in the account/region.
class AwsKmsKeys < Inspec.resource(1)
  name "aws_kms_keys"
  desc "Verifies settings for AWS KMS Keys in bulk"
  example <<~EXAMPLE
    describe aws_kms_keys do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # This resource takes no parameters; reject anything supplied.
  def validate_params(resource_params)
    raise ArgumentError, "aws_kms_keys does not accept resource parameters." unless resource_params.empty?

    resource_params
  end

  # FilterTable exposes key_arns / key_ids columns over @table.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:key_arns, field: :key_arn)
    .register_column(:key_ids, field: :key_id)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "KMS Keys"
  end

  # Page through ListKeys, accumulating every key entry as a plain Hash.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    opts = { limit: 1000 }
    loop do
      page = backend.list_keys(opts)
      @table.concat(page.keys.map(&:to_h))
      break unless page.truncated

      opts = { marker: page.next_marker }
    end
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::KMS::Client
      def list_keys(query = {})
        aws_service_client.list_keys(query)
      end
    end
  end
end

View file

@ -1,74 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-rds"
# InSpec singular resource auditing one RDS database instance.
class AwsRdsInstance < Inspec.resource(1)
  name "aws_rds_instance"
  desc "Verifies settings for an rds instance"
  example <<~EXAMPLE
    describe aws_rds_instance(db_instance_identifier: 'test-instance-id') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :db_instance_identifier

  def to_s
    "RDS Instance #{@db_instance_identifier}"
  end

  private

  # Accepts a bare string or :db_instance_identifier; the identifier must
  # start with a letter, followed by up to 62 letters/digits/hyphens.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:db_instance_identifier],
      allowed_scalar_name: :db_instance_identifier,
      allowed_scalar_type: String
    )
    if validated_params.empty? || !validated_params.key?(:db_instance_identifier)
      raise ArgumentError, "You must provide an id for the aws_rds_instance."
    end
    # \A/\z anchor the whole string; the previous ^/$ anchors match per line,
    # so an identifier containing an embedded newline could slip through.
    # (The key? guard that used to precede this is redundant — the raise
    # above guarantees the key is present.)
    if validated_params[:db_instance_identifier] !~ /\A[a-z][0-9a-z-]{0,62}\z/
      raise ArgumentError, "aws_rds_instance Database Instance ID must be in the format: start with a letter followed by up to 62 letters/numbers/hyphens."
    end
    validated_params
  end

  # Looks the instance up via DescribeDBInstances; @exists reflects whether
  # the identifier resolved to exactly at least one instance.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    dsg_response = nil
    catch_aws_errors do
      dsg_response = backend.describe_db_instances(db_instance_identifier: db_instance_identifier)
      @exists = true
    rescue Aws::RDS::Errors::DBInstanceNotFound
      @exists = false
      return
    end
    if dsg_response.db_instances.empty?
      @exists = false
      return
    end
    @db_instance_identifier = dsg_response.db_instances[0].db_instance_identifier
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::RDS::Client
      def describe_db_instances(query)
        aws_service_client.describe_db_instances(query)
      end
    end
  end
end

View file

@ -1,67 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one EC2 route table; with no id it reports
# whether any route table exists.
class AwsRouteTable < Inspec.resource(1)
  name "aws_route_table"
  desc "Verifies settings for an AWS Route Table"
  example <<~EXAMPLE
    describe aws_route_table do
      its('route_table_id') { should cmp 'rtb-05462d2278326a79c' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  def to_s
    "Route Table #{@route_table_id}"
  end
  attr_reader :route_table_id, :vpc_id

  private

  # Accepts a bare string or :route_table_id and validates its format.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:route_table_id],
      allowed_scalar_name: :route_table_id,
      allowed_scalar_type: String
    )
    # BUG FIX: the original pattern /^rtb\-([0-9a-f]{17})|(^rtb\-[0-9a-f]{8})$/
    # bound each anchor to only one alternative, so e.g.
    # "rtb-0123456789abcdef0-junk" was accepted. Anchor the whole string and
    # put the alternation inside a group.
    if validated_params.key?(:route_table_id) &&
        validated_params[:route_table_id] !~ /\Artb-(?:[0-9a-f]{8}|[0-9a-f]{17})\z/
      raise ArgumentError,
        "aws_route_table Route Table ID must be in the" \
        ' format "rtb-" followed by 8 or 17 hexadecimal characters.'
    end
    validated_params
  end

  # exists? is true when at least one route table matches the (optional)
  # route-table-id filter.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    if @route_table_id.nil?
      args = nil
    else
      args = { filters: [{ name: "route-table-id", values: [@route_table_id] }] }
    end
    resp = backend.describe_route_tables(args)
    routetable = resp.to_h[:route_tables]
    @exists = !routetable.empty?
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client
      def describe_route_tables(query)
        aws_service_client.describe_route_tables(query)
      end
    end
  end
end

View file

@ -1,64 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource listing EC2 route tables.
class AwsRouteTables < Inspec.resource(1)
  name "aws_route_tables"
  desc "Verifies settings for AWS Route Tables in bulk"
  example <<~EXAMPLE
    describe aws_route_tables do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # FilterTable columns over the raw route-table rows.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:vpc_ids, field: :vpc_id)
    .register_column(:route_table_ids, field: :route_table_id)
  filter.install_filter_methods_on_resource(self, :routes_data)

  # Row data backing the filter table.
  def routes_data
    @table
  end

  def to_s
    "Route Tables"
  end

  private

  # No query criteria are supported yet; only an empty Hash is accepted.
  def validate_params(criteria)
    raise "Unrecognized criteria for fetching Route Tables. Use 'criteria: value' format." unless criteria.is_a?(Hash)
    raise ArgumentError, "aws_route_tables does not currently accept resource parameters." unless criteria.empty?

    criteria
  end

  # Loads every route table as a plain Hash row.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      @table = backend.describe_route_tables({}).to_h[:route_tables]
    end
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::EC2::Client
      def describe_route_tables(query = {})
        aws_service_client.describe_route_tables(query)
      end
    end
  end
end

View file

@ -1,141 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-s3"
# InSpec singular resource auditing one S3 bucket: public exposure via
# ACL/policy, default encryption, and access logging.
class AwsS3Bucket < Inspec.resource(1)
  name "aws_s3_bucket"
  desc "Verifies settings for a s3 bucket"
  example <<~EXAMPLE
    describe aws_s3_bucket(bucket_name: 'test_bucket') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :bucket_name, :has_default_encryption_enabled, :has_access_logging_enabled, :region
  def to_s
    "S3 Bucket #{@bucket_name}"
  end
  # Lazily fetched, memoized list of ACL grants for the bucket.
  def bucket_acl
    catch_aws_errors do
      @bucket_acl ||= BackendFactory.create(inspec_runner).get_bucket_acl(bucket: bucket_name).grants
    end
  end
  # Lazily fetched, normalized policy statements (see fetch_bucket_policy).
  def bucket_policy
    @bucket_policy ||= fetch_bucket_policy
  end
  # RSpec will alias this to be_public
  # Public when the ACL grants to AllUsers/AuthenticatedUsers or the policy
  # has an Allow statement with principal "*".
  def public?
    # first line just for formatting
    false || \
      bucket_acl.any? { |g| g.grantee.type == "Group" && g.grantee.uri =~ /AllUsers/ } || \
      bucket_acl.any? { |g| g.grantee.type == "Group" && g.grantee.uri =~ /AuthenticatedUsers/ } || \
      bucket_policy.any? { |s| s.effect == "Allow" && s.principal == "*" }
  end
  # False for a non-existent bucket; otherwise memoized encryption lookup.
  def has_default_encryption_enabled?
    return false unless @exists
    @has_default_encryption_enabled ||= fetch_bucket_encryption_configuration
  end
  # False for a non-existent bucket; otherwise true when logging is configured.
  def has_access_logging_enabled?
    return false unless @exists
    catch_aws_errors do
      @has_access_logging_enabled ||= !BackendFactory.create(inspec_runner).get_bucket_logging(bucket: bucket_name).logging_enabled.nil?
    end
  end
  private
  # Accepts a bare string or :bucket_name; bucket_name is required.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:bucket_name],
      allowed_scalar_name: :bucket_name,
      allowed_scalar_type: String
    )
    if validated_params.empty? || !validated_params.key?(:bucket_name)
      raise ArgumentError, "You must provide a bucket_name to aws_s3_bucket."
    end
    validated_params
  end
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    # Since there is no basic "get_bucket" API call, use the
    # region fetch as the existence check.
    begin
      @region = backend.get_bucket_location(bucket: bucket_name).location_constraint
    rescue Aws::S3::Errors::NoSuchBucket
      @exists = false
      return
    end
    @exists = true
  end
  # Returns the bucket policy's statements as OpenStructs with downcased
  # keys, or [] when no policy is attached.
  # NOTE(review): the @bucket_policy assignment inside the map block looks
  # accidental — the method's return value (the mapped array) is what
  # bucket_policy memoizes; confirm before relying on @bucket_policy here.
  def fetch_bucket_policy
    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      # AWS SDK returns a StringIO, we have to read()
      raw_policy = backend.get_bucket_policy(bucket: bucket_name).policy
      return JSON.parse(raw_policy.read)["Statement"].map do |statement|
        lowercase_hash = {}
        statement.each_key { |k| lowercase_hash[k.downcase] = statement[k] }
        @bucket_policy = OpenStruct.new(lowercase_hash)
      end
    rescue Aws::S3::Errors::NoSuchBucketPolicy
      @bucket_policy = []
    end
  end
  # True when a server-side encryption configuration is present; false when
  # AWS reports none is configured.
  def fetch_bucket_encryption_configuration
    @has_default_encryption_enabled ||= catch_aws_errors do
      !BackendFactory.create(inspec_runner)
        .get_bucket_encryption(bucket: bucket_name)
        .server_side_encryption_configuration
        .nil?
    rescue Aws::S3::Errors::ServerSideEncryptionConfigurationNotFoundError
      false
    end
  end
  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::S3::Client
      def get_bucket_acl(query)
        aws_service_client.get_bucket_acl(query)
      end
      def get_bucket_location(query)
        aws_service_client.get_bucket_location(query)
      end
      def get_bucket_policy(query)
        aws_service_client.get_bucket_policy(query)
      end
      def get_bucket_logging(query)
        aws_service_client.get_bucket_logging(query)
      end
      def get_bucket_encryption(query)
        aws_service_client.get_bucket_encryption(query)
      end
    end
  end
end

View file

@ -1,87 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-s3"
# InSpec singular resource auditing one S3 object (bucket + key).
class AwsS3BucketObject < Inspec.resource(1)
  name "aws_s3_bucket_object"
  desc "Verifies settings for a s3 bucket object"
  example <<~EXAMPLE
    describe aws_s3_bucket_object(bucket_name: 'bucket_name', key: 'file_name') do
      it { should exist }
      it { should_not be_public }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :bucket_name, :key
  def to_s
    # keep the format that aws uses.
    "s3://#{@bucket_name}/#{@key}"
  end
  # Lazily fetched ACL grants for the object (memoized, even when nil).
  def object_acl
    return @object_acl if defined? @object_acl
    catch_aws_errors do
      @object_acl = BackendFactory.create(inspec_runner).get_object_acl(bucket: bucket_name, key: key).grants
    end
    @object_acl
  end
  # RSpec will alias this to be_public
  # Public when the ACL grants to the AllUsers or AuthenticatedUsers groups.
  def public?
    # first line just for formatting
    false || \
      object_acl.any? { |g| g.grantee.type == "Group" && g.grantee.uri =~ /AllUsers/ } || \
      object_acl.any? { |g| g.grantee.type == "Group" && g.grantee.uri =~ /AuthenticatedUsers/ }
  end
  private
  # Requires both :bucket_name and :key (:id is also tolerated).
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: %i{bucket_name key id}
    )
    if validated_params.empty? || !validated_params.key?(:bucket_name) || !validated_params.key?(:key)
      raise ArgumentError, "You must provide a bucket_name and key to aws_s3_bucket_object."
    end
    validated_params
  end
  # GetObject doubles as the existence probe; a missing bucket or a missing
  # key both mean the resource does not exist.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      # Just use get_object to detect if the bucket exists
      backend.get_object(bucket: bucket_name, key: key)
    rescue Aws::S3::Errors::NoSuchBucket
      @exists = false
      return
    rescue Aws::S3::Errors::NoSuchKey
      @exists = false
      return
    end
    @exists = true
  end
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::S3::Client
      # Used to detect if object exists
      def get_object(query)
        aws_service_client.get_object(query)
      end
      def get_object_acl(query)
        aws_service_client.get_object_acl(query)
      end
    end
  end
end

View file

@ -1,52 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-s3"
# InSpec plural resource listing all S3 buckets in the account.
class AwsS3Buckets < Inspec.resource(1)
  name "aws_s3_buckets"
  desc "Verifies settings for AWS S3 Buckets in bulk"
  # BUG FIX: the example previously invoked the singular resource
  # (aws_s3_bucket), which has no bucket_names property.
  example <<~EXAMPLE
    describe aws_s3_buckets do
      its('bucket_names') { should eq ['my_bucket'] }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # Underlying FilterTable implementation: exposes bucket_names over @table.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:bucket_names, field: :name)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "S3 Buckets"
  end

  # This resource takes no parameters; reject anything supplied.
  def validate_params(resource_params)
    unless resource_params.empty?
      raise ArgumentError, "aws_s3_buckets does not accept resource parameters."
    end
    resource_params
  end

  private

  # One row per bucket from ListBuckets (name + creation_date).
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = backend.list_buckets.buckets.map(&:to_h)
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::S3::Client
      def list_buckets
        aws_service_client.list_buckets
      end
    end
  end
end

View file

@ -1,314 +0,0 @@
require "set" unless defined?(Set)
require "ipaddr" unless defined?(IPAddr)
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one EC2 security group, identified by group id,
# group name, and/or VPC id. Provides rule-matching predicates (allow_in?,
# allow_out?, and the *_only? exact variants) over the group's rule sets.
class AwsSecurityGroup < Inspec.resource(1)
  name "aws_security_group"
  desc "Verifies settings for an individual AWS Security Group."
  example <<~EXAMPLE
    describe aws_security_group('sg-12345678') do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :description, :group_id, :group_name, :vpc_id, :inbound_rules, :outbound_rules, :inbound_rules_count, :outbound_rules_count
  def to_s
    "EC2 Security Group #{@group_id}"
  end
  # True if any inbound rule matches the criteria (port/protocol/ranges/etc.).
  def allow_in?(criteria = {})
    allow(inbound_rules, criteria.dup)
  end
  RSpec::Matchers.alias_matcher :allow_in, :be_allow_in
  # True if any outbound rule matches the criteria.
  def allow_out?(criteria = {})
    allow(outbound_rules, criteria.dup)
  end
  RSpec::Matchers.alias_matcher :allow_out, :be_allow_out
  # Exact-match variant for inbound rules: the selected rule must match the
  # criteria completely, with nothing extra.
  def allow_in_only?(criteria = {})
    allow_only(inbound_rules, criteria.dup)
  end
  RSpec::Matchers.alias_matcher :allow_in_only, :be_allow_in_only
  # Exact-match variant for outbound rules.
  def allow_out_only?(criteria = {})
    allow_only(outbound_rules, criteria.dup)
  end
  RSpec::Matchers.alias_matcher :allow_out_only, :be_allow_out_only
  private
  # Exact matching: requires a single-rule group (or an explicit :position),
  # then delegates to allow() with the internal :exact flag set.
  def allow_only(rules, criteria)
    rules = allow__focus_on_position(rules, criteria)
    # allow_{in_out}_only require either a single-rule group, or you
    # to select a rule using position.
    return false unless rules.count == 1 || criteria.key?(:position)
    if criteria.key?(:security_group)
      if criteria.key?(:position)
        pos = criteria[:position] - 1
      else
        pos = 0
      end
      return false unless rules[pos].key?(:user_id_group_pairs) && rules[pos][:user_id_group_pairs].count == 1
    end
    criteria[:exact] = true
    allow(rules, criteria)
  end
  # True when any rule satisfies every provided criterion.
  def allow(rules, criteria)
    criteria = allow__check_criteria(criteria)
    rules = allow__focus_on_position(rules, criteria)
    rules.any? do |rule|
      matched = true
      matched &&= allow__match_port(rule, criteria)
      matched &&= allow__match_protocol(rule, criteria)
      matched &&= allow__match_ipv4_range(rule, criteria)
      matched &&= allow__match_ipv6_range(rule, criteria)
      matched &&= allow__match_security_group(rule, criteria)
      matched
    end
  end
  # Extracts the recognized criteria keys, raising on any leftovers.
  def allow__check_criteria(raw_criteria)
    allowed_criteria = [
      :from_port,
      :ipv4_range,
      :ipv6_range,
      :security_group,
      :port,
      :position,
      :protocol,
      :to_port,
      :exact, # Internal
    ]
    recognized_criteria = {}
    allowed_criteria.each do |expected_criterion|
      if raw_criteria.key?(expected_criterion)
        recognized_criteria[expected_criterion] = raw_criteria.delete(expected_criterion)
      end
    end
    # Any leftovers are unwelcome
    unless raw_criteria.empty?
      raise ArgumentError, "Unrecognized security group rule 'allow' criteria '#{raw_criteria.keys.join(",")}'. Expected criteria: #{allowed_criteria.join(", ")}"
    end
    recognized_criteria
  end
  # Narrows the rule list to the single rule selected by :position
  # (1-based Integer/String, or the symbols :first / :last).
  def allow__focus_on_position(rules, criteria)
    return rules unless criteria.key?(:position)
    idx = criteria.delete(:position)
    # Normalize to a zero-based numeric index
    case # rubocop: disable Style/EmptyCaseCondition
    when idx.is_a?(Symbol) && idx == :first
      idx = 0
    when idx.is_a?(Symbol) && idx == :last
      idx = rules.count - 1
    when idx.is_a?(String)
      idx = idx.to_i - 1 # We document this as 1-based, so adjust to be zero-based.
    when idx.is_a?(Numeric)
      idx -= 1 # We document this as 1-based, so adjust to be zero-based.
    else
      raise ArgumentError, "aws_security_group 'allow' 'position' criteria must be an integer or the symbols :first or :last"
    end
    unless idx < rules.count
      raise ArgumentError, "aws_security_group 'allow' 'position' criteria #{idx + 1} is out of range - there are only #{rules.count} rules for security group #{group_id}."
    end
    [rules[idx]]
  end
  # Port matching: exact from/to comparison when :exact/:from_port/:to_port
  # are given; otherwise range-membership for :port; otherwise matches all.
  def allow__match_port(rule, criteria) # rubocop: disable Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/AbcSize
    if criteria[:exact] || criteria[:from_port] || criteria[:to_port]
      # Exact match mode
      # :port is shorthand for a single-valued port range.
      criteria[:to_port] = criteria[:from_port] = criteria[:port] if criteria[:port]
      to = criteria[:to_port]
      from = criteria[:from_port]
      # It's a match if neither criteria was specified
      return true if to.nil? && from.nil?
      # Normalize to integers
      to = to.to_i unless to.nil?
      from = from.to_i unless from.nil?
      # It's a match if either was specified and the other was not
      return true if rule[:to_port] == to && from.nil?
      return true if rule[:from_port] == from && to.nil?
      # Finally, both must match.
      rule[:to_port] == to && rule[:from_port] == from
    elsif !criteria[:port]
      # port not specified, match anything
      true
    else
      # Range membership mode
      rule_from = rule[:from_port] || 0
      rule_to = rule[:to_port] || 65535
      (rule_from..rule_to).cover?(criteria[:port].to_i)
    end
  end
  # Protocol matching; "any" is a fluency alias for AWS's "-1".
  def allow__match_protocol(rule, criteria)
    return true unless criteria.key?(:protocol)
    prot = criteria[:protocol]
    # We provide a "fluency alias" for -1 (any).
    prot = "-1" if prot == "any"
    rule[:ip_protocol] == prot
  end
  # Shared matcher for :ipv4_range / :ipv6_range. With :exact, requires set
  # equality; otherwise every queried CIDR must fall inside some rule range.
  def match_ipv4_or_6_range(rule, criteria)
    if criteria.key?(:ipv4_range)
      query = criteria[:ipv4_range]
      query = [query] unless query.is_a?(Array)
      ranges = rule[:ip_ranges].map { |rng| rng[:cidr_ip] }
    else # IPv6
      query = criteria[:ipv6_range]
      query = [query] unless query.is_a?(Array)
      ranges = rule[:ipv_6_ranges].map { |rng| rng[:cidr_ipv_6] }
    end
    if criteria[:exact]
      Set.new(query) == Set.new(ranges)
    else
      # CIDR subset mode
      # "Each of the provided IP ranges must be a member of one of the rule's listed IP ranges"
      query.all? do |candidate|
        candidate = IPAddr.new(candidate)
        ranges.any? do |range|
          range = IPAddr.new(range)
          range.include?(candidate)
        end
      end
    end
  end
  def allow__match_ipv4_range(rule, criteria)
    return true unless criteria.key?(:ipv4_range)
    match_ipv4_or_6_range(rule, criteria)
  end
  def allow__match_ipv6_range(rule, criteria)
    return true unless criteria.key?(:ipv6_range)
    match_ipv4_or_6_range(rule, criteria)
  end
  # True if any of the rule's group pairs matches the queried group id.
  def allow__match_security_group(rule, criteria)
    return true unless criteria.key?(:security_group)
    query = criteria[:security_group]
    return false unless rule[:user_id_group_pairs]
    rule[:user_id_group_pairs].any? { |group| query == group[:group_id] }
  end
  # Accepts :id/:group_id/:group_name/:vpc_id (a bare string means :group_id)
  # and validates the sg-/vpc- id prefixes.
  def validate_params(raw_params)
    recognized_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: %i{id group_id group_name vpc_id},
      allowed_scalar_name: :group_id,
      allowed_scalar_type: String
    )
    # id is an alias for group_id
    recognized_params[:group_id] = recognized_params.delete(:id) if recognized_params.key?(:id)
    if recognized_params.key?(:group_id) && recognized_params[:group_id] !~ /^sg\-[0-9a-f]{8}/
      raise ArgumentError, 'aws_security_group security group ID must be in the format "sg-" followed by 8 hexadecimal characters.'
    end
    if recognized_params.key?(:vpc_id) && recognized_params[:vpc_id] !~ /^vpc\-[0-9a-f]{8}/
      raise ArgumentError, 'aws_security_group VPC ID must be in the format "vpc-" followed by 8 hexadecimal characters.'
    end
    validated_params = recognized_params
    if validated_params.empty?
      # NOTE(review): the trailing ".g_group" in this message looks like a typo.
      raise ArgumentError, "You must provide parameters to aws_security_group, such as group_name, group_id, or vpc_id.g_group."
    end
    validated_params
  end
  # Counts rules as AWS console does: each CIDR range or group pair within
  # each permission entry counts as one rule.
  def count_sg_rules(ip_permissions)
    rule_count = 0
    ip_permissions.each do |ip_permission|
      %i{ip_ranges ipv_6_ranges user_id_group_pairs}.each do |key|
        if ip_permission.key? key
          rule_count += ip_permission[key].length
        end
      end
    end
    rule_count
  end
  # Builds EC2 filters from whichever identifying attributes were provided,
  # then loads the first matching group's metadata and rule sets.
  def fetch_from_api # rubocop: disable Metrics/AbcSize
    backend = BackendFactory.create(inspec_runner)
    # Transform into filter format expected by AWS
    filters = []
    %i{
      description
      group_id
      group_name
      vpc_id
    }.each do |criterion_name|
      instance_var = "@#{criterion_name}".to_sym
      next unless instance_variable_defined?(instance_var)
      val = instance_variable_get(instance_var)
      next if val.nil?
      filters.push(
        {
          name: criterion_name.to_s.tr("_", "-"),
          values: [val],
        }
      )
    end
    dsg_response = backend.describe_security_groups(filters: filters)
    if dsg_response.security_groups.empty?
      @exists = false
      @inbound_rules = []
      @outbound_rules = []
      return
    end
    @exists = true
    @description = dsg_response.security_groups[0].description
    @group_id = dsg_response.security_groups[0].group_id
    @group_name = dsg_response.security_groups[0].group_name
    @vpc_id = dsg_response.security_groups[0].vpc_id
    @inbound_rules = dsg_response.security_groups[0].ip_permissions.map(&:to_h)
    @inbound_rules_count = count_sg_rules(dsg_response.security_groups[0].ip_permissions.map(&:to_h))
    @outbound_rules = dsg_response.security_groups[0].ip_permissions_egress.map(&:to_h)
    @outbound_rules_count = count_sg_rules(dsg_response.security_groups[0].ip_permissions_egress.map(&:to_h))
  end
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::EC2::Client
      def describe_security_groups(query)
        aws_service_client.describe_security_groups(query)
      end
    end
  end
end

View file

@ -1,71 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource listing EC2 security groups.
class AwsSecurityGroups < Inspec.resource(1)
  name "aws_security_groups"
  desc "Verifies settings for AWS Security Groups in bulk"
  example <<~EXAMPLE
    # Verify that you have security groups defined
    describe aws_security_groups do
      it { should exist }
    end
    # Verify you have more than the default security group
    describe aws_security_groups do
      its('entries.count') { should be > 1 }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # FilterTable exposing group_ids over the fetched rows.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:group_ids, field: :group_id)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "EC2 Security Groups"
  end

  private

  # No query criteria are supported yet; only an empty Hash is accepted.
  def validate_params(criteria)
    raise "Unrecognized criteria for fetching Security Groups. Use 'criteria: value' format." unless criteria.is_a?(Hash)
    raise ArgumentError, "aws_ec2_security_groups does not currently accept resource parameters." unless criteria.empty?

    criteria
  end

  # One row per security group, keeping just id, name and VPC.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = backend.describe_security_groups({}).security_groups.map do |sg|
      {
        group_id: sg.group_id,
        group_name: sg.group_name,
        vpc_id: sg.vpc_id,
      }
    end
  end

  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::EC2::Client
      def describe_security_groups(query)
        aws_service_client.describe_security_groups(query)
      end
    end
  end
end

View file

@ -1,82 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-sns"
# InSpec singular resource auditing one SNS subscription by its ARN.
class AwsSnsSubscription < Inspec.resource(1)
  name "aws_sns_subscription"
  desc "Verifies settings for an SNS Subscription"
  example <<~EXAMPLE
    describe aws_sns_subscription('arn:aws:sns:us-east-1::test-topic-01:b214aff5-a2c7-438f-a753-8494493f2ff6') do
      it { should_not have_raw_message_delivery }
      it { should be_confirmation_authenticated }
      its('owner') { should cmp '12345678' }
      its('topic_arn') { should cmp 'arn:aws:sns:us-east-1::test-topic-01' }
      its('endpoint') { should cmp 'arn:aws:sqs:us-east-1::test-queue-01' }
      its('protocol') { should cmp 'sqs' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  # NOTE(review): @arn and @aws_response are never assigned in fetch_from_api
  # (the API response is held in a local variable), so to_s interpolates nil
  # and the aws_response reader returns nil — confirm whether that is intended.
  attr_reader :arn, :owner, :raw_message_delivery, :topic_arn, :endpoint, :protocol,
    :confirmation_was_authenticated, :aws_response
  alias confirmation_authenticated? confirmation_was_authenticated
  alias raw_message_delivery? raw_message_delivery
  def has_raw_message_delivery?
    raw_message_delivery
  end
  def to_s
    "SNS Subscription #{@arn}"
  end
  private
  # Accepts a bare string or :subscription_arn; the ARN is required.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:subscription_arn],
      allowed_scalar_name: :subscription_arn,
      allowed_scalar_type: String
    )
    if validated_params.empty?
      raise ArgumentError, "You must provide a subscription_arn to aws_sns_subscription."
    end
    validated_params
  end
  # Loads subscription attributes; a missing subscription sets @exists = false.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    catch_aws_errors do
      aws_response = backend.get_subscription_attributes(subscription_arn: @subscription_arn).attributes
      @exists = true
      @owner = aws_response["Owner"]
      # Attribute values arrive as strings; compare against "true" for booleans.
      @raw_message_delivery = aws_response["RawMessageDelivery"].eql?("true")
      @topic_arn = aws_response["TopicArn"]
      @endpoint = aws_response["Endpoint"]
      @protocol = aws_response["Protocol"]
      @confirmation_was_authenticated = aws_response["ConfirmationWasAuthenticated"].eql?("true")
    rescue Aws::SNS::Errors::NotFound
      @exists = false
      return
    end
  end
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::SNS::Client
      def get_subscription_attributes(criteria)
        aws_service_client.get_subscription_attributes(criteria)
      end
    end
  end
end

View file

@ -1,57 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-sns"
# InSpec singular resource auditing one SNS topic by ARN.
class AwsSnsTopic < Inspec.resource(1)
  name "aws_sns_topic"
  desc "Verifies settings for an SNS Topic"
  example <<~EXAMPLE
    describe aws_sns_topic('arn:aws:sns:us-east-1:123456789012:some-topic') do
      it { should exist }
      its('confirmed_subscription_count') { should_not be_zero }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :arn, :confirmed_subscription_count

  private

  # Accepts a bare string or :arn and validates the topic ARN shape.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:arn],
      allowed_scalar_name: :arn,
      allowed_scalar_type: String
    )
    # Validate the ARN. \A/\z anchor the whole string; the previous ^/$
    # anchors matched per line, so an ARN with an embedded newline could
    # slip through. A missing :arn is nil, fails the match, and raises here.
    unless validated_params[:arn] =~ /\Aarn:aws:sns:[\w-]+:\d{12}:\S+\z/
      raise ArgumentError, "Malformed ARN for SNS topics. Expected an ARN of the form " \
        "'arn:aws:sns:REGION:ACCOUNT-ID:TOPIC-NAME'"
    end
    validated_params
  end

  # Fetches topic attributes; a missing topic sets @exists = false.
  def fetch_from_api
    aws_response = BackendFactory.create(inspec_runner).get_topic_attributes(topic_arn: @arn).attributes
    @exists = true
    # The response has a plain hash with CamelCase plain string keys and string values
    @confirmed_subscription_count = aws_response["SubscriptionsConfirmed"].to_i
  rescue Aws::SNS::Errors::NotFound
    @exists = false
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::SNS::Client
      def get_topic_attributes(criteria)
        aws_service_client.get_topic_attributes(criteria)
      end
    end
  end
end

View file

@ -1,60 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-sns"
# InSpec plural resource listing all SNS topic ARNs.
class AwsSnsTopics < Inspec.resource(1)
  name "aws_sns_topics"
  desc "Verifies settings for SNS Topics in bulk"
  example <<~EXAMPLE
    describe aws_sns_topics do
      its('topic_arns') { should include '' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin
  # This resource takes no parameters; reject anything supplied.
  def validate_params(resource_params)
    unless resource_params.empty?
      raise ArgumentError, "aws_sns_topics does not accept resource parameters."
    end
    resource_params
  end
  # Pages through ListTopics via next_token until the listing is exhausted.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = []
    pagination_opts = nil
    catch_aws_errors do
      loop do
        api_result = backend.list_topics(pagination_opts)
        @table += api_result.topics.map(&:to_h)
        break if api_result.next_token.nil?
        pagination_opts = { next_token: api_result.next_token }
      end
    end
  end
  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:topic_arns, field: :topic_arn)
  filter.install_filter_methods_on_resource(self, :table)
  def to_s
    "EC2 SNS Topics"
  end
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::SNS::Client
      def list_topics(pagination_opts)
        aws_service_client.list_topics(pagination_opts)
      end
    end
  end
end

View file

@ -1,66 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-sqs"
require "uri" unless defined?(URI)
# InSpec singular resource auditing one SQS queue, looked up by its URL.
class AwsSqsQueue < Inspec.resource(1)
  name "aws_sqs_queue"
  desc "Verifies settings for an SQS Queue"
  # BUG FIX: the example previously read "visiblity_timeout", which does not
  # match the visibility_timeout property and would fail for users copying it.
  example <<~EXAMPLE
    describe aws_sqs_queue('https://sqs.ap-southeast-2.amazonaws.com/519527725796/QueueName') do
      it { should exist }
      its('visibility_timeout') { should be 300}
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin
  attr_reader :arn, :is_fifo_queue, :visibility_timeout, :maximum_message_size, :message_retention_period, :delay_seconds, :receive_message_wait_timeout_seconds, :content_based_deduplication

  private

  # Accepts a bare string or :url; the value must be a well-formed https URL.
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:url],
      allowed_scalar_name: :url,
      allowed_scalar_type: String
    )
    # Validate the URL
    unless validated_params[:url] =~ /\A#{URI::DEFAULT_PARSER.make_regexp(%w{https})}\z/
      # BUG FIX: the message previously said "Expected an ARN" for a URL check.
      raise ArgumentError, "Malformed URL for SQS. Expected a URL of the form " \
        "'https://sqs.ap-southeast-2.amazonaws.com/111212121/MyQeueue'"
    end
    validated_params
  end

  # Loads all queue attributes in one GetQueueAttributes call; a missing
  # queue sets @exists = false.
  def fetch_from_api
    aws_response = BackendFactory.create(inspec_runner).get_queue_attributes(queue_url: @url, attribute_names: ["All"]).attributes
    @exists = true
    @visibility_timeout = aws_response["VisibilityTimeout"].to_i
    @maximum_message_size = aws_response["MaximumMessageSize"].to_i
    @message_retention_period = aws_response["MessageRetentionPeriod"].to_i
    @delay_seconds = aws_response["DelaySeconds"].to_i
    @receive_message_wait_timeout_seconds = aws_response["ReceiveMessageWaitTimeSeconds"].to_i
    # FIFO queues - these attributes only exist for FIFO queues; their
    # presence indicates a FIFO queue.
    @is_fifo_queue = aws_response["FifoQueue"].nil? ? false : true
    @content_based_deduplication = aws_response["ContentBasedDeduplication"].nil? ? false : true
  rescue Aws::SQS::Errors::NonExistentQueue
    @exists = false
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::SQS::Client
      def get_queue_attributes(criteria)
        aws_service_client.get_queue_attributes(criteria)
      end
    end
  end
end

View file

@ -1,92 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one VPC subnet, selected by subnet id.
class AwsSubnet < Inspec.resource(1)
  name "aws_subnet"
  desc "This resource is used to test the attributes of a VPC subnet"
  example <<~EXAMPLE
    describe aws_subnet(subnet_id: 'subnet-12345678') do
      it { should exist }
      its('cidr_block') { should eq '10.0.1.0/24' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  attr_reader :assigning_ipv_6_address_on_creation, :availability_zone, :available_ip_address_count,
    :available, :cidr_block, :default_for_az, :ipv_6_cidr_block_association_set,
    :mapping_public_ip_on_launch, :subnet_id, :vpc_id

  alias available? available
  alias default_for_az? default_for_az
  alias mapping_public_ip_on_launch? mapping_public_ip_on_launch
  alias assigning_ipv_6_address_on_creation? assigning_ipv_6_address_on_creation

  def to_s
    "VPC Subnet #{@subnet_id}"
  end

  private

  # Check the constructor arguments. A :subnet_id (or bare string) is
  # required and must look like "subnet-" followed by 8 hex characters.
  #
  # @raise [ArgumentError] when the id is missing or malformed
  def validate_params(raw_params)
    checked = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:subnet_id],
      allowed_scalar_name: :subnet_id,
      allowed_scalar_type: String
    )
    raise ArgumentError, "You must provide a subnet_id to aws_subnet." if checked.empty?

    unless checked[:subnet_id] =~ /^subnet\-[0-9a-f]{8}/
      raise ArgumentError, 'aws_subnet Subnet ID must be in the format "subnet-" followed by 8 hexadecimal characters.'
    end

    checked
  end

  # Query the AWS API for this subnet and record whether it exists.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    # AWS expects the lookup criteria as a list of filters
    response = backend.describe_subnets(filters: [{ name: "subnet-id", values: [@subnet_id] }])
    @exists = !response.subnets.empty?
    assign_properties(response) if @exists
  end

  # Copy the fields of the first (only) matching subnet onto the instance
  # variables that back the attr_readers.
  def assign_properties(ds_response)
    subnet = ds_response.subnets[0]
    @vpc_id = subnet.vpc_id
    @subnet_id = subnet.subnet_id
    @cidr_block = subnet.cidr_block
    @availability_zone = subnet.availability_zone
    @available_ip_address_count = subnet.available_ip_address_count
    @default_for_az = subnet.default_for_az
    @mapping_public_ip_on_launch = subnet.map_public_ip_on_launch
    @available = subnet.state == "available"
    @ipv_6_cidr_block_association_set = subnet.ipv_6_cidr_block_association_set
    @assigning_ipv_6_address_on_creation = subnet.assign_ipv_6_address_on_creation
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      def describe_subnets(query)
        aws_service_client.describe_subnets(query)
      end
    end
  end
end

View file

@ -1,56 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource over all VPC subnets visible to the account.
class AwsSubnets < Inspec.resource(1)
  name "aws_subnets"
  desc "Verifies settings for VPC Subnets in bulk"
  example <<~EXAMPLE
    # you should be able to test the cidr_block of a subnet
    describe aws_subnets.where(vpc_id: 'vpc-123456789') do
      its('subnet_ids') { should eq ['subnet-12345678', 'subnet-87654321'] }
      its('cidr_blocks') { should eq ['172.31.96.0/20'] }
      its('states') { should_not include 'pending' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # This plural resource takes no constructor parameters.
  # (Fix: the error message previously referred to the resource by the stale
  # name "aws_vpc_subnets" rather than "aws_subnets".)
  #
  # @raise [ArgumentError] when any parameter is supplied
  def validate_params(resource_params)
    unless resource_params.empty?
      raise ArgumentError, "aws_subnets does not accept resource parameters."
    end

    resource_params
  end

  # Fetch every subnet and load the rows into the filter table.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)
    @table = backend.describe_subnets.subnets.map(&:to_h)
  end

  # Underlying FilterTable implementation.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:vpc_ids, field: :vpc_id)
    .register_column(:subnet_ids, field: :subnet_id)
    .register_column(:cidr_blocks, field: :cidr_block)
    .register_column(:states, field: :state)
  filter.install_filter_methods_on_resource(self, :table)

  def to_s
    "EC2 VPC Subnets"
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend self
      self.aws_client_class = Aws::EC2::Client

      def describe_subnets(query = {})
        aws_service_client.describe_subnets(query)
      end
    end
  end
end

View file

@ -1,77 +0,0 @@
require "resource_support/aws/aws_singular_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec singular resource for one VPC; defaults to the region's default VPC
# when no :vpc_id is supplied.
class AwsVpc < Inspec.resource(1)
  name "aws_vpc"
  desc "Verifies settings for AWS VPC"
  example <<~EXAMPLE
    describe aws_vpc do
      it { should be_default }
      its('cidr_block') { should cmp '10.0.0.0/16' }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsSingularResourceMixin

  def to_s
    "VPC #{vpc_id}"
  end

  attr_reader :cidr_block, :dhcp_options_id, :instance_tenancy, :is_default,
    :state, :vpc_id

  alias default? is_default

  private

  # Validate the constructor arguments: an optional :vpc_id (or bare string).
  #
  # @raise [ArgumentError] when the VPC ID is malformed
  def validate_params(raw_params)
    validated_params = check_resource_param_names(
      raw_params: raw_params,
      allowed_params: [:vpc_id],
      allowed_scalar_name: :vpc_id,
      allowed_scalar_type: String
    )
    # Fix: the previous pattern /^vpc\-([0-9a-f]{8})|(^vpc\-[0-9a-f]{17})$/
    # bound the alternation loosely, so the first branch was not anchored at
    # the end: IDs with trailing garbage or 9-16 hex characters slipped
    # through. Anchor the whole alternation over the full string instead.
    if validated_params.key?(:vpc_id) && validated_params[:vpc_id] !~ /\Avpc\-(?:[0-9a-f]{8}|[0-9a-f]{17})\z/
      raise ArgumentError, 'aws_vpc VPC ID must be in the format "vpc-" followed by 8 or 17 hexadecimal characters.'
    end

    validated_params
  end

  # Look up the VPC (by id, or the default VPC when none was given) and
  # populate the readers. Sets @exists accordingly.
  def fetch_from_api
    backend = BackendFactory.create(inspec_runner)

    if @vpc_id.nil?
      filter = { name: "isDefault", values: ["true"] }
    else
      filter = { name: "vpc-id", values: [@vpc_id] }
    end

    resp = backend.describe_vpcs({ filters: [filter] })

    # resp.vpcs[0] is nil when nothing matched; nil.to_h is {} so @exists
    # becomes false.
    vpc = resp.vpcs[0].to_h
    @exists = !vpc.empty?
    return unless @exists

    @cidr_block = vpc[:cidr_block]
    @dhcp_options_id = vpc[:dhcp_options_id]
    @instance_tenancy = vpc[:instance_tenancy]
    @is_default = vpc[:is_default]
    @state = vpc[:state]
    @vpc_id = vpc[:vpc_id]
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      def describe_vpcs(query)
        aws_service_client.describe_vpcs(query)
      end
    end
  end
end

View file

@ -1,55 +0,0 @@
require "resource_support/aws/aws_plural_resource_mixin"
require "resource_support/aws/aws_backend_base"
require "aws-sdk-ec2"
# InSpec plural resource over all VPCs in the region.
class AwsVpcs < Inspec.resource(1)
  name "aws_vpcs"
  desc "Verifies settings for AWS VPCs in bulk"
  example <<~EXAMPLE
    describe aws_vpcs do
      it { should exist }
    end
  EXAMPLE
  supports platform: "aws"
  include AwsPluralResourceMixin

  # Underlying FilterTable implementation.
  table_filter = FilterTable.create
  table_filter.register_custom_matcher(:exists?) { |obj| !obj.entries.empty? }
  table_filter.register_column(:cidr_blocks, field: :cidr_block)
    .register_column(:vpc_ids, field: :vpc_id)
  # We need a dummy here, so FilterTable will define and populate the dhcp_options_id field
  table_filter.register_column(:dummy, field: :dhcp_options_id)
    .register_column(:dhcp_options_ids) { |obj| obj.entries.map(&:dhcp_options_id).uniq }
  table_filter.install_filter_methods_on_resource(self, :table)

  # This plural resource does not accept any constructor parameters yet.
  def validate_params(raw_params)
    raise ArgumentError, "aws_vpcs does not accept resource parameters" unless raw_params.empty?

    raw_params
  end

  def to_s
    "VPCs"
  end

  # Pull every VPC and load the rows into the filter table.
  def fetch_from_api
    response = BackendFactory.create(inspec_runner).describe_vpcs
    @table = response.to_h[:vpcs].map(&:to_h)
  end

  # Uses the SDK API to really talk to AWS
  class Backend
    class AwsClientApi < AwsBackendBase
      BackendFactory.set_default_backend(self)
      self.aws_client_class = Aws::EC2::Client

      def describe_vpcs(query = {})
        aws_service_client.describe_vpcs(query)
      end
    end
  end
end

View file

@ -1,379 +0,0 @@
# Base class for Azure Resources. This allows the generic class to work
# as well as the specific target resources for Azure Resources
#
# @author Russell Seymour
module Inspec::Resources
  # Base class for Azure Resources. This allows the generic class to work
  # as well as the specific target resources for Azure Resources.
  #
  # @author Russell Seymour
  class AzureResourceBase < Inspec.resource(1)
    attr_reader :opts, :client, :azure

    # Constructor that retrieves the specified resource
    #
    # The opts hash should contain the following
    #   :group_name - name of the resource group in which to look for items
    #   :type - the type of Azure resource to look for
    #   :apiversion - API version to use when looking for a specific resource
    #   :name - name of the resource to find
    #
    # @author Russell Seymour
    #
    # @param [Hash] opts Hashtable of options as highlighted above
    # rubocop:disable Metrics/AbcSize
    def initialize(opts)
      # declare the hashtable of counts
      @counts = {}
      @total = 0
      @opts = opts

      # Determine if the environment variables for the options have been set.
      # When present, an environment variable overrides the passed-in option.
      option_var_names = {
        group_name: "AZURE_RESOURCE_GROUP_NAME",
        name: "AZURE_RESOURCE_NAME",
        type: "AZURE_RESOURCE_TYPE",
        apiversion: "AZURE_RESOURCE_API_VERSION",
      }
      option_var_names.each do |option_name, env_var_name|
        opts[option_name] = ENV[env_var_name] unless ENV[env_var_name].nil?
      end

      # Cache the Azure transport backend and its management client.
      @azure = inspec.backend
      @client = azure.azure_client
      @failed_resource = false
    end

    # Whether a previous Azure API call failed (set by catch_azure_errors).
    def failed_resource?
      @failed_resource
    end

    # Run the given block, converting Azure SDK operation errors into a
    # failed InSpec resource instead of letting the exception escape.
    # Returns nil when an error was caught.
    def catch_azure_errors
      yield
    rescue MsRestAzure::AzureOperationError => e
      # e.message is actually a massive stringified JSON, which might be useful in the future.
      # You want error_message here.
      fail_resource e.error_message
      @failed_resource = true
      nil
    end

    # Return information about the resource group.
    # The group's attributes are attached to self as singleton methods.
    def resource_group
      catch_azure_errors do
        resource_group = client.resource_groups.get(opts[:group_name])

        # create the methods for the resource group object
        dm = AzureResourceDynamicMethods.new
        dm.create_methods(self, resource_group)
      end
    end

    # List the resources of the resource group, filtered by :type/:name.
    # With exactly one match, its attributes become singleton methods on
    # self; with several, rows are collected into @probes for a FilterTable
    # and per-namespace count objects are exposed.
    def resources
      resources = nil
      catch_azure_errors do
        resources = client.resources.list_by_resource_group(opts[:group_name])
      end
      return if failed_resource?

      # filter the resources based on the type, and the name if they been specified
      resources = filter_resources(resources, opts)

      # if there is one resource then define methods on this class
      if resources.count == 1
        @total = 1

        resource = nil
        catch_azure_errors do
          # get the apiversion for the resource, if one has not been specified
          apiversion = azure.get_api_version(resources[0].type, opts)

          # get the resource by id so it can be interrogated
          resource = client.resources.get_by_id(resources[0].id, apiversion)
        end
        return if failed_resource?

        dm = AzureResourceDynamicMethods.new
        dm.create_methods(self, resource)
      else
        # As there are many resources, parse each one so that it can be
        # interrogated by the FilterTable
        # @probes = parse_resources(resources, azure)
        @probes = resources.each.map do |item|
          # update the total
          @total += 1

          # determine the counts for each type
          # NOTE(review): `counts` is expected to be a reader over @counts;
          # the attr is declared by subclasses, not here — confirm callers.
          namespace, type_name = item.type.split(/\./)
          counts.key?(namespace) ? false : counts[namespace] = {}
          counts[namespace].key?(type_name) ? counts[namespace][type_name] += 1 : counts[namespace][type_name] = 1

          # get the detail about the resource
          apiversion = azure.get_api_version(item.type, opts)
          resource = client.resources.get_by_id(item.id, apiversion)

          # parse the resource (parse_resource is supplied by the subclass)
          parse_resource(resource)
        end.compact

        # Iterate around the counts and create the necessary classes
        counts.each do |namespace, ns_counts|
          define_singleton_method namespace do
            AzureResourceTypeCounts.new(ns_counts)
          end
        end
      end
    end

    # Does the resource have any tags?
    #
    # If it is a Hashtable then it does not, because there was nothing to parse so there is not
    # a nested object to work with
    #
    # @author Russell Seymour
    def has_tags?
      tags.is_a?(Hash) ? false : true
    end

    # Returns how many tags have been set on the resource
    #
    # @author Russell Seymour
    def tag_count
      tags.count
    end

    # It is necessary to be able to test the tags of a resource. It is possible to say if the
    # resource has tags or not, and it is possible to check that the tags include a specific tag.
    # However the value is not accessible; this function creates methods for all the tags that
    # are available.
    #
    # The format of the method name is '<TAG_NAME>_tag' and will return the value of that tag
    #
    # Disabling rubocop check. If this is set as a normal if..then..end statement there is a
    # violation stating it should use a guard. When using a guard it throws this error
    #
    # @author Russell Seymour
    def create_tag_methods
      # Iterate around the items of the tags and create the necessary access methods
      if defined?(tags.item)
        tags.item.each do |name, value|
          method_name = format("%s_tag", name)
          define_singleton_method method_name do
            value
          end
        end
      end
    end

    private

    # Filter the resources that are returned by the options that have been specified
    #
    def filter_resources(resources, opts)
      if opts[:type] && opts[:name]
        resources.select { |r| r.type == opts[:type] && r.name == opts[:name] }
      elsif opts[:type]
        resources.select { |r| r.type == opts[:type] }
      elsif opts[:name]
        resources.select { |r| r.name == opts[:name] }
      else
        resources
      end
    end
  end
end
# Class to create methods on the calling object at run time.
# Each of the Azure Resources have different attributes and properties, and they all need
# to be testable. To do this no methods are hardcoded, each on is craeted based on the
# information returned from Azure.
#
# The class is a helper class essentially as it creates the methods on the calling class
# rather than itself. This means that there is less duplication of code and it can be
# reused easily.
#
# @author Russell Seymour
# @since 0.2.0
# Class to create methods on the calling object at run time.
# Each of the Azure Resources have different attributes and properties, and they all need
# to be testable. To do this no methods are hardcoded; each one is created based on the
# information returned from Azure.
#
# The class is a helper class essentially as it creates the methods on the calling class
# rather than itself. This means that there is less duplication of code and it can be
# reused easily.
#
# @author Russell Seymour
# @since 0.2.0
class AzureResourceDynamicMethods
  # Given the calling object and its data, create the methods on the object according
  # to the data that has been retrieved. Various types of data can be returned so the method
  # checks the type to ensure that the necessary methods are configured correctly
  #
  # @param object [AzureResourceProbe, AzureResource] The object on which the methods should be created
  # @param data [Object] The data from which the methods should be created
  def create_methods(object, data)
    # Check the type of data as this affects the setup of the methods
    # If it is an Azure Generic Resource then setup methods for each of
    # the instance variables
    case data.class.to_s
    when /^Azure::Resources::Mgmt::.*::Models::GenericResource$/,
         /^Azure::Resources::Mgmt::.*::Models::ResourceGroup$/
      # iterate around the instance variables
      data.instance_variables.each do |var|
        create_method(object, var.to_s.delete("@"), data.instance_variable_get(var))
      end
    # When the data is a Hash object iterate around each of the key value pairs and
    # create a method for each one.
    when "Hash"
      data.each do |key, value|
        create_method(object, key, value)
      end
    end
  end

  private

  # Method that is responsible for creating the method on the calling object. This is
  # because some nesting maybe required. For example if the value is a Hash then it will
  # need to have an AzureResourceProbe created for each key, whereas if it is a simple
  # string then the value just needs to be returned
  #
  # @private
  #
  # @param object [AzureResourceProbe, AzureResource] Object on which the methods need to be created
  # @param name [String] The name of the method
  # @param value [Object] The value that needs to be returned by the method
  def create_method(object, name, value)
    # Create the necessary method based on the var that has been passed
    # Test the value for its type so that the method can be setup correctly
    case value.class.to_s
    when "String", "Integer", "TrueClass", "FalseClass", "Fixnum"
      object.define_singleton_method name do
        value
      end
    when "Hash"
      # An empty hash is returned as-is; a populated one becomes a nested probe
      value.count == 0 ? return_value = value : return_value = AzureResourceProbe.new(value)
      object.define_singleton_method name do
        return_value
      end
    when /^Azure::Resources::Mgmt::.*::Models::ResourceGroupProperties$/
      # This is a special case where the properties of the resource group is not a simple JSON model
      # This is because the plugin is using the Azure SDK to get this information so it is an SDK object
      # that has to be interrogated in a different way. This is the only object type that behaves like this
      value.instance_variables.each do |var|
        create_method(object, var.to_s.delete("@"), value.instance_variable_get(var))
      end
    when "Array"
      # Some things are just string or integer arrays
      # Check this by seeing if the first element is a string / integer / boolean or
      # a hashtable
      # This may not be the best method, but short of testing all elements in the array, this is
      # the quickest test
      # NOTE(review): an empty array falls into the probe branch and yields [],
      # since there is nothing to wrap — confirm that is the intended behavior.
      case value[0].class.to_s
      when "String", "Integer", "TrueClass", "FalseClass", "Fixnum"
        probes = value
      else
        probes = []
        value.each do |value_item|
          probes << AzureResourceProbe.new(value_item)
        end
      end
      object.define_singleton_method name do
        probes
      end
    end
  end
end
# Class object to maintain a count of the Azure Resource types that are found
# when a less specific test is carried out. For example if all the resoures of a resource
# group are called for, there will be various types and number of those types.
#
# Each type is namespaced, so for example a virtual machine has the type 'Microsoft.Compute/virtualMachines'
# This is broken down into the 'Microsoft' class with the type 'Compute/virtualMachines'
# This has been done for two reasons:
# 1. Enable the dotted notation to work in the test
# 2. Allow third party resource types ot be catered for if they are ever enabled by Microsoft
#
# @author Russell Seymour
# @since 0.2.0
# Holds the per-namespace counts of Azure resource types found by a broad
# query, exposing each type as a reader method so dotted notation works in
# tests (e.g. Microsoft.Compute/virtualMachines).
#
# @author Russell Seymour
# @since 0.2.0
class AzureResourceTypeCounts
  # Build a counter object for one namespace of Azure resource types.
  #
  # The supplied hash maps a resource type to how many of that type were
  # found, e.g.:
  #   {
  #     "Compute/virtualMachines" => 2,
  #     "Network/networkInterfaces" => 3
  #   }
  # A singleton reader is defined for each type, returning its count.
  #
  # @param counts [Hash] resource type => count
  #
  # @return [AzureResourceTypeCounts]
  def initialize(counts)
    counts.each_pair do |resource_type, total|
      define_singleton_method(resource_type) { total }
    end
  end
end
# Class object that is created for each element that is returned by Azure.
# This is what is interrogated by InSpec. If they are nested hashes, then this results
# in nested AzureResourceProbe objects.
#
# For example, if the following was seen in an Azure Resource
# properties -> storageProfile -> imageReference
# Would result in the following nestec classes
# AzureResource -> AzureResourceProbe -> AzureResourceProbe
#
# The methods for each of the classes are dynamically defined at run time and will
# match the items that are retrieved from Azure. See the 'test/integration/verify/controls' for
# examples
#
# This class will not be called externally
#
# @author Russell Seymour
# @since 0.2.0
# @attr_reader string name Name of the Azure resource
# @attr_reader string type Type of the Azure Resource
# @attr_reader string location Location in Azure of the resource
# Class object that is created for each element that is returned by Azure.
# This is what is interrogated by InSpec. Nested hashes result in nested
# AzureResourceProbe objects. Methods are defined dynamically at run time to
# match the items retrieved from Azure.
#
# @author Russell Seymour
# @since 0.2.0
# @attr_reader [String] name Name of the Azure resource
# @attr_reader [String] type Type of the Azure Resource
# @attr_reader [String] location Location in Azure of the resource
class AzureResourceProbe
  attr_reader :name, :type, :location, :item, :count

  # Initialize method for the class. Accepts an item, be it a scalar value, hash or Azure object
  # It will then create the necessary dynamic methods so that they can be called in the tests
  # This is accomplished by calling AzureResourceDynamicMethods
  #
  # @param item [Object] The item from which the class will be initialized
  #
  # @return AzureResourceProbe
  def initialize(item)
    dm = AzureResourceDynamicMethods.new
    dm.create_methods(self, item)

    # Set the item as a property on the class
    # This is so that it is possible to interrogate what has been added to the class and isolate them from
    # the standard methods that a Ruby class has.
    # This is used for checking Tags on a resource for example
    # It also allows direct access if so required
    @item = item

    # Set how many items have been set
    @count = item.length
  end

  # Allows resources to respond to the include test
  # This means that things like tags can be checked for and then their value tested
  #
  # @author Russell Seymour
  #
  # @param [String] key Name of the item to look for in the @item property
  def include?(key)
    @item.key?(key)
  end

  # Given a string like `computer_name`, return the camelCase version, e.g.
  # computerName
  #
  # @param data [String] Data that needs to be converted from snake_case to camelCase
  #
  # @return [String]
  def camel_case(data)
    camel_case_data = data.split("_").inject([]) { |buffer, e| buffer.push(buffer.empty? ? e : e.capitalize) }.join
    # Ensure that gb (as in gigabytes) is uppercased.
    # Fix: this previously used the character class /[gb]/, which uppercased
    # every individual 'g' and 'b' in the string (e.g. "background" became
    # "BackGround") instead of the "gb" unit suffix.
    camel_case_data.gsub(/gb/i, &:upcase)
  end
end

View file

@ -1,55 +0,0 @@
require "resources/azure/azure_backend"
require "inspec/utils/filter"
module Inspec::Resources
  # Catch-all resource that can interrogate any resource type via the generic
  # Azure Resource Manager API. Deprecated (see initialize).
  class AzureGenericResource < AzureResourceBase
    name "azure_generic_resource"
    desc '
      InSpec Resource to interrogate any Resource type in Azure
    '
    supports platform: "azure"

    attr_accessor :filter, :total, :counts, :name, :type, :location, :probes

    # Constructor. Options are passed through to AzureResourceBase which
    # resolves the resource group and the matching resource(s), then tag
    # accessor methods are generated.
    #
    # @param opts [Hash] options such as :group_name, :name, :type, :apiversion
    def initialize(opts = {})
      Inspec.deprecate(:resource_azure_generic_resource)

      # Call the parent class constructor
      super(opts)

      # Get the resource group
      resource_group

      # Get the resources
      resources

      # Create the tag methods
      create_tag_methods
    end

    # Define the filter table so that it can be interrogated
    @filter = FilterTable.create
    @filter.register_filter_method(:contains)
      .register_column(:type, field: "type")
      .register_column(:name, field: "name")
      .register_column(:location, field: "location")
      .register_column(:properties, field: "properties")
    @filter.install_filter_methods_on_resource(self, :probes)

    # Turn one Azure resource object into the hash row stored in the filter
    # table (:probes). Called by AzureResourceBase#resources.
    #
    # @param resource [Object] resource as returned by the Azure SDK
    # @return [Hash] row describing the resource
    def parse_resource(resource)
      # return a hash of information
      parsed = {
        "location" => resource.location,
        "name" => resource.name,
        "type" => resource.type,
        "exist?" => true,
        "properties" => AzureResourceProbe.new(resource.properties),
      }
      parsed
    end
  end
end

View file

@ -1,151 +0,0 @@
require "resources/azure/azure_backend"
module Inspec::Resources
  # Resource providing metadata about one Azure resource group, including
  # per-type resource counts and has_xxx? convenience matchers.
  class AzureResourceGroup < AzureResourceBase
    name "azure_resource_group"
    desc '
      InSpec Resource to get metadata about a specific Resource Group
    '
    supports platform: "azure"

    attr_reader :name, :location, :id, :total, :counts, :mapping

    # Constructor to get the resource group itself and perform some analysis on the
    # resources that are in the resource group.
    #
    # This analysis is defined by the mapping hashtable which is used to define
    # the 'has_xxx?' methods (see AzureResourceGroup#create_has_methods) and return
    # the counts for each type
    #
    # @author Russell Seymour
    #
    # @param [Hash] opts options; :name (or :group_name) selects the group
    def initialize(opts)
      # Accept :name as an alias for :group_name
      opts.key?(:name) ? opts[:group_name] = opts[:name] : false

      # Ensure that the opts only have the name of the resource group set
      opts.select! { |k, _v| k == :group_name }
      super(opts)

      # set the mapping for the Azure Resources
      @mapping = {
        nic: "Microsoft.Network/networkInterfaces",
        vm: "Microsoft.Compute/virtualMachines",
        extension: "Microsoft.Compute/virtualMachines/extensions",
        nsg: "Microsoft.Network/networkSecurityGroups",
        vnet: "Microsoft.Network/virtualNetworks",
        managed_disk: "Microsoft.Compute/disks",
        managed_disk_image: "Microsoft.Compute/images",
        sa: "Microsoft.Storage/storageAccounts",
        public_ip: "Microsoft.Network/publicIPAddresses",
      }

      # Get information about the resource group itself
      resource_group

      # Get information about the resources in the resource group
      resources

      # Call method to create the has_xxxx? methods
      create_has_methods

      # Call method to allow access to the tag values
      create_tag_methods
    end

    # Return the provisioning state of the resource group
    #
    # @author Russell Seymour
    def provisioning_state
      properties.provisioningState
    end

    # Analyze the fully qualified id of the resource group to return the subscription id
    # that this resource group is part of
    #
    # The format of the id is
    #   /subscriptions/<SUBSCRIPTION_ID>/resourceGroups/<RESOURCE_GROUP_NAME>
    #
    # @author Russell Seymour
    def subscription_id
      id.split(%r{\/}).reject(&:empty?)[1]
    end

    # Method to parse the resources that have been returned
    # This allows the calculations of the amount of resources to be determined
    #
    # @author Russell Seymour
    #
    # @param [Hash] resource A hashtable representing the resource group
    def parse_resource(resource)
      # return a hash of information
      parsed = {
        "name" => resource.name,
        "type" => resource.type,
      }
      parsed
    end

    # This method catches the xxx_count calls that are made on the resource.
    #
    # The method that is called is stripped of '_count' and then compared with the
    # mappings table. If that type exists then the number of those items is returned.
    # However if that type is not in the Resource Group then the method will raise
    # a NoMethodError exception
    #
    # @author Russell Seymour
    #
    # @param [Symbol] method_id The name of the method that was called
    def method_missing(method_id)
      # Determine the mapping_key based on the method_id
      mapping_key = method_id.to_s.chomp("_count").to_sym
      if mapping.key?(mapping_key)
        # based on the method id get the namespace and type
        namespace, type_name = mapping[mapping_key].split(/\./)

        # check that the type_name is defined, if not return 0
        if send(namespace).methods.include?(type_name.to_sym)
          # return the count for the method id
          send(namespace).send(type_name)
        else
          0
        end
      else
        msg = format("undefined method `%s` for %s", method_id, self.class)
        raise NoMethodError, msg
      end
    end

    # Fix: keep respond_to? consistent with the dynamic xxx_count methods
    # that method_missing above handles.
    def respond_to_missing?(method_id, include_private = false)
      mapping.key?(method_id.to_s.chomp("_count").to_sym) || super
    end

    private

    # For each of the mappings this method creates the has_xxx? method. This allows the use
    # of the following type of test
    #
    #   it { should have_nics }
    #
    # For example, it will create a has_nics? method that returns a boolean to state if the
    # resource group has any nics at all.
    #
    # @author Russell Seymour
    # @private
    def create_has_methods
      return if failed_resource?

      # Create the has methods for each of the mappings
      # This is a quick test to show that the resource group has at least one of these things
      mapping.each do |name, type|
        # Determine the name of the method name
        method_name = format("has_%ss?", name)
        namespace, type_name = type.split(/\./)

        # use the namespace and the type_name to determine if the resource group has this type or not
        result = send(namespace).methods.include?(type_name.to_sym) ? true : false
        define_singleton_method method_name do
          result
        end
      end
    end
  end
end

View file

@ -1,262 +0,0 @@
require "resources/azure/azure_backend"
module Inspec::Resources
class AzureVirtualMachine < AzureResourceBase
name "azure_virtual_machine"
desc '
InSpec Resource to test Azure Virtual Machines
'
supports platform: "azure"
# Constructor for the resource. This calls the parent constructor to
# get the generic resource for the specified machine. This will provide
# static methods that are documented
#
# @author Russell Seymour
def initialize(opts = {})
# The generic resource needs to pass back a Microsoft.Compute/virtualMachines object so force it
opts[:type] = "Microsoft.Compute/virtualMachines"
super(opts)
# Find the virtual machines
resources
create_tag_methods
end
# Method to catch calls that are not explicitly defined.
# This allows the simple attributes of the virtual machine to be read without having
# to define each one in turn.
#
# rubocop:disable Metrics/AbcSize
#
# @param symobl method_id The symbol of the method that has been called
#
# @return Value of attribute that has been called
def method_missing(method_id)
# Depending on the method that has been called, determine what value should be returned
# These are set as camel case methods to comply with rubocop
image_reference_attrs = %w{sku publisher offer}
osdisk_attrs = %w{os_type caching create_option disk_size_gb}
hardware_profile_attrs = %w{vm_size}
os_profile_attrs = %w{computer_name admin_username}
osdisk_managed_disk_attrs = %w{storage_account_type}
# determine the method name to call by converting the snake_case to camelCase
# method_name = self.camel_case(method_id.to_s)
method_name = method_id.to_s.split("_").inject([]) { |buffer, e| buffer.push(buffer.empty? ? e : e.capitalize) }.join
method_name.end_with?("Gb") ? method_name.gsub!(/Gb/, &:upcase) : false
if image_reference_attrs.include?(method_id.to_s)
properties.storageProfile.imageReference.send(method_name)
elsif osdisk_attrs.include?(method_id.to_s)
properties.storageProfile.osDisk.send(method_name)
elsif hardware_profile_attrs.include?(method_id.to_s)
properties.hardwareProfile.send(method_name)
elsif os_profile_attrs.include?(method_id.to_s)
properties.osProfile.send(method_name)
elsif osdisk_managed_disk_attrs.include?(method_id.to_s)
properties.storageProfile.osDisk.managedDisk.send(method_name)
end
end
# Return the name of the os disk
#
# @return string Name of the OS disk
def os_disk_name
properties.storageProfile.osDisk.name
end
# Determine if the OS disk is a managed disk
#
# @return boolean
def has_managed_osdisk?
defined?(properties.storageProfile.osDisk.managedDisk)
end
# Does the machine have any NICs connected
#
# @return boolean
def has_nics?
properties.networkProfile.networkInterfaces.count != 0
end
# How many NICs are connected to the machine
#
# @return integer
def nic_count
properties.networkProfile.networkInterfaces.count
end
# Return an array of the connected NICs so that it can be tested to ensure
# the machine is connected properly
#
# @return array Array of NIC names connected to the machine
def connected_nics
nic_names = []
properties.networkProfile.networkInterfaces.each do |nic|
nic_names << nic.id.split(%r{/}).last
end
nic_names
end
# Whether the machine has data disks or not
#
# @return boolean
def has_data_disks?
properties.storageProfile.dataDisks.count != 0
end
# How many data disks are connected
#
# @return integer
def data_disk_count
properties.storageProfile.dataDisks.count
end
# Does the machine allow password authentication
#
# This allows the use of
# it { should have_password_authentication }
# within the InSpec profile
#
# @return boolean
def has_password_authentication?
password_authentication?
end
# Determine if the machine allows password authentication
#
# @return boolean
def password_authentication?
# if the osProfile property has a linuxConfiguration section then interrogate that
# otherwise it is a Windows machine and that always has password auth
if defined?(properties.osProfile.linuxConfiguration)
!properties.osProfile.linuxConfiguration.disablePasswordAuthentication
else
true
end
end
# Has the machine been given Custom Data at creation
#
# This allows the use of
# it { should have_custom_data }
# within the InSpec Profile
#
# @return boolean
def has_custom_data?
custom_data?
end
# Determine if custom data has been set
#
# @return boolean
def custom_data?
if defined?(properties.osProfile.CustomData)
true
else
false
end
end
# Are any SSH Keys assigned to the machine
#
# This allows the use of
# it { should have_ssh_keys }
# within the InSpec Profile
#
# @return boolean
def has_ssh_keys?
ssh_keys?
end
# Determine if any ssh keys have been asigned to the machine
#
# @return boolean
def ssh_keys?
if defined?(properties.osProfile.linuxConfiguration.ssh)
properties.osProfile.linuxConfiguration.ssh.publicKeys != 0
else
false
end
end
# Return the number of ssh keys that have been assigned to the machine
#
# @return integer
def ssh_key_count
if defined?(properties.osProfile.linuxConfiguration.ssh)
properties.osProfile.linuxConfiguration.ssh.publicKeys.count
else
0
end
end
# Determine is the specified key is in the ssh_keys list
#
# @return array Array of the public keys that are assigned to allow for testing of that key
def ssh_keys
# iterate around the keys
keys = []
properties.osProfile.linuxConfiguration.ssh.publicKeys.each do |key|
keys << key.keyData
end
keys
end
# Does the machine have boot diagnostics enabled
#
# @return boolean
def has_boot_diagnostics?
if defined?(properties.diagnosticsProfile)
properties.diagnosticsProfile.bootDiagnostics.enabled
else
false
end
end
# Return the URI that has been set for the boot diagnostics storage
#
# @return string
def boot_diagnostics_storage_uri
properties.diagnosticsProfile.bootDiagnostics.storageUri
end
# If this is a windows machine, returns whether the agent was provisioned or not
#
# @return boolean
def has_provision_vmagent?
if defined?(properties.osProfile.windowsConfiguration)
properties.osProfile.windowsConfiguration.provisionVMAgent
else
false
end
end
# For a Windows machine, are automatic agent updates enabled?
#
# @return [Boolean] false when there is no windowsConfiguration; otherwise the
#   enableAutomaticUpdates value from the configuration
def has_automatic_agent_update?
  return false unless defined?(properties.osProfile.windowsConfiguration)

  properties.osProfile.windowsConfiguration.enableAutomaticUpdates
end
# For a Windows machine, have WinRM options been set?
#
# NOTE(review): despite the boolean doc, when winrm is configured this returns
# the winrm protocol value (truthy), not literal true — preserved as-is since
# callers may rely on the value.
#
# @return [Boolean, Object] false when winrm is not configured
def has_winrm_options?
  winrm_configured = defined?(properties.osProfile.windowsConfiguration) &&
    defined?(properties.osProfile.windowsConfiguration.winrm)
  winrm_configured ? properties.osProfile.windowsConfiguration.winrm.protocol : false
end
end
end

View file

@ -1,131 +0,0 @@
require "resources/azure/azure_backend"
require "uri" unless defined?(URI)
module Inspec::Resources
# InSpec resource exposing the data disks attached to an Azure virtual machine
# as a filter table, so per-disk assertions can be written in profiles.
class AzureVirtualMachineDataDisk < AzureResourceBase
  name "azure_virtual_machine_data_disk"
  desc '
InSpec Resource to ensure that the data disks attached to a machine are correct
'
  supports platform: "azure"

  # Filter table: one row per data disk (built by #datadisk_details), with a
  # custom exists? matcher so `describe ... { it { should exist } }` works.
  filter = FilterTable.create
  filter.register_custom_matcher(:exists?) { |x| !x.entries.empty? }
  filter.register_column(:disk, field: :disk)
    .register_column(:number, field: :number)
    .register_column(:name, field: :name)
    .register_column(:size, field: :size)
    .register_column(:vhd_uri, field: :vhd_uri)
    .register_column(:storage_account_name, field: :storage_account_name)
    .register_column(:lun, field: :lun)
    .register_column(:caching, field: :caching)
    .register_column(:create_option, field: :create_option)
    .register_column(:is_managed_disk?, field: :is_managed_disk?)
    .register_column(:storage_account_type, field: :storage_account_type)
    .register_column(:subscription_id, field: :subscription_id)
    .register_column(:resource_group, field: :resource_group)
  filter.install_filter_methods_on_resource(self, :datadisk_details)

  # Constructor. Calls the parent constructor to get the generic resource for
  # the specified machine, then resolves the machine's resources up front.
  #
  # @param opts [Hash] options passed through to AzureResourceBase
  # @author Russell Seymour
  def initialize(opts = {})
    # The generic resource needs to pass back a Microsoft.Compute/virtualMachines object so force it
    opts[:type] = "Microsoft.Compute/virtualMachines"
    super(opts)
    # Get the data disks
    resources
  end

  # Build the rows for the filter table so assertions can be performed.
  #
  # @return [Array<Hash>, nil] one parsed hash per data disk; nil when the
  #   underlying resource lookup failed
  # @author Russell Seymour
  def datadisk_details
    return if failed_resource?

    # Iterate around the data disks on the machine
    properties.storageProfile.dataDisks.each_with_index.map do |datadisk, index|
      parse_datadisk(datadisk, index)
    end
  end

  # Does the machine have any data disks attached?
  #
  # @return [Boolean]
  def has_data_disks?
    !entries.empty?
  end

  # Number of data disks attached to the machine.
  #
  # @return [Integer]
  def count
    entries.count
  end

  # Is the machine using managed disks for any of its data disks?
  #
  # Idiom fix: replaces `entries.each.select { ... }` followed by
  # `result.empty? ? false : true` with the equivalent Enumerable#any?.
  #
  # @return [Boolean]
  def has_managed_disks?
    entries.any? { |e| e[:is_managed_disk?] }
  end

  private

  # Parse one data disk into a row hash, distinguishing managed disks from
  # disks backed by a storage-account VHD.
  #
  # @param disk [Object] details of the disk from the machine's storageProfile
  # @param index [Integer] zero-based position of this disk on the machine
  # @return [Hash]
  def parse_datadisk(disk, index)
    # Common attributes shared by both disk types
    parsed = {
      disk: index,
      number: index + 1,
      lun: disk.lun,
      name: disk.name,
      size: disk.diskSizeGB,
      caching: disk.caching,
      create_option: disk.createOption,
    }
    if defined?(disk.vhd)
      # A vhd block means the disk lives in a storage account, not managed
      parsed[:is_managed_disk?] = false
      # Parse the disk URI so the storage account name can be extracted
      uri = URI.parse(disk.vhd.uri)
      parsed[:vhd_uri] = disk.vhd.uri
      parsed[:storage_account_name] = uri.host.split(".").first
    elsif defined?(disk.managedDisk)
      parsed[:is_managed_disk?] = true
      parsed[:storage_account_type] = disk.managedDisk.storageAccountType
      parsed[:id] = disk.managedDisk.id
      # The resource ID encodes subscription (part 1) and resource group (part 3)
      id_parts = parsed[:id].split(%r{/}).reject(&:empty?)
      parsed[:subscription_id] = id_parts[1]
      parsed[:resource_group] = id_parts[3]
    end
    parsed
  end
end
end

View file

@ -1,102 +0,0 @@
require "resource_support/aws"
module MockAwsBillingReports
# Mock backend returning zero Cost & Usage report definitions.
class Empty < AwsBackendBase
# @return [DescribeReportDefinitionsResponse] with an empty report_definitions list
def describe_report_definitions(_query)
Aws::CostandUsageReportService::Types::DescribeReportDefinitionsResponse.new(report_definitions: [])
end
end
# Mock backend returning two fixed report definitions (one HOURLY/ZIP,
# one DAILY/GZIP) in a single, unpaginated response.
class Basic < AwsBackendBase
# @return [DescribeReportDefinitionsResponse] with the two fixture reports
def describe_report_definitions(_query)
Aws::CostandUsageReportService::Types::DescribeReportDefinitionsResponse
.new(report_definitions:
[
Aws::CostandUsageReportService::Types::ReportDefinition.new(
report_name: "inspec1",
time_unit: "HOURLY",
format: "textORcsv",
compression: "ZIP",
s3_bucket: "inspec1-s3-bucket",
s3_prefix: "inspec1/accounting",
s3_region: "us-east-1"
),
Aws::CostandUsageReportService::Types::ReportDefinition.new(
report_name: "inspec2",
time_unit: "DAILY",
format: "textORcsv",
compression: "GZIP",
s3_bucket: "inspec2-s3-bucket",
s3_prefix: "inspec2/accounting",
s3_region: "us-west-1"
),
])
end
end
# This backend will always respond with 5 reports per page, as if the `max_results`
# option was passed to `#describe_report_definitions`. I chose 5 because when using
# `max_results` in the real world it seems to only accept a value of 5.
#
# == Returns:
# A Aws::CostandUsageReportService::Types::DescribeReportDefinitionsResponse object with two instance
# properties:
#  `report_definitions` An Array that includes a single page of 5 Reports.
#  `next_token` A String set to the start of the next page. When `next_token` is nil, there are no more pages.
#
class Paginated < AwsBackendBase
  # Generate a set of 12 report definitions (2 fixed + 10 randomized) and
  # shuffle their order so tests cannot depend on ordering.
  def generate_definitions
    definitions = []
    definitions << Aws::CostandUsageReportService::Types::ReportDefinition.new(
      report_name: "inspec1",
      time_unit: "HOURLY",
      format: "textORcsv",
      compression: "ZIP",
      s3_bucket: "inspec1-s3-bucket",
      s3_prefix: "inspec1/accounting",
      s3_region: "us-east-1"
    )
    definitions << Aws::CostandUsageReportService::Types::ReportDefinition.new(
      report_name: "inspec2",
      time_unit: "DAILY",
      format: "textORcsv",
      compression: "GZIP",
      s3_bucket: "inspec2-s3-bucket",
      s3_prefix: "inspec2/accounting",
      s3_region: "us-west-1"
    )
    (3..12).each do |i|
      definitions <<
        Aws::CostandUsageReportService::Types::ReportDefinition.new(
          report_name: "inspec#{i}",
          time_unit: %w{HOURLY DAILY}.sample,
          format: "textORcsv",
          compression: %w{ZIP GZIP}.sample,
          s3_bucket: "inspec#{i}-s3-bucket",
          s3_prefix: "inspec#{i}",
          s3_region: "us-east-1"
        )
    end
    definitions.shuffle
  end

  # Return one page of (up to) 5 definitions starting at options[:next_token].
  #
  # @param options [Hash] :next_token — starting index of the page (default 0)
  # @return [DescribeReportDefinitionsResponse] page plus next_token (nil on last page)
  def describe_report_definitions(options = {})
    @definitions ||= generate_definitions
    starting_position = options.fetch(:next_token, 0)
    selected_definitions = @definitions.slice(starting_position, 5).compact
    next_token = starting_position + 5
    # Off-by-one fix: the original used `count < next_token`, which kept a
    # non-nil token when the total is an exact multiple of the page size,
    # handing callers a spurious empty final page. Exhausted when >=.
    next_token = nil if next_token >= @definitions.count
    response = Aws::CostandUsageReportService::Types::DescribeReportDefinitionsResponse
      .new(report_definitions: selected_definitions)
    response.next_token = next_token
    response
  end
end
end

View file

@ -1,108 +0,0 @@
# Testing Against AWS - Integration Testing
## Problem Statement
We want to be able to test AWS-related InSpec resources against AWS itself. This means we need to create constructs ("test fixtures") in AWS to examine using InSpec. For cost management, we also want to be able to destroy the test fixtures once testing is complete, so we are not billed for idle resources.
## General Approach
We use Terraform to setup test fixtures in AWS, then run a defined set of InSpec controls against these (which should all pass), and finally tear down the test fixtures with Terraform. For fixtures that cannot be managed by Terraform, we manually setup fixtures using instructions below.
We use the AWS CLI credentials system to manage credentials.
### Installing Terraform
Download [Terraform](https://www.terraform.io/downloads.html). We require at least v0.10. To install and choose from multiple Terraform versions, consider using [tfenv](https://github.com/kamatama41/tfenv).
### Installing AWS CLI
Install the [AWS CLI](http://docs.aws.amazon.com/cli/latest/userguide/installing.html). We will store profiles for testing in the `~/.aws/credentials` file.
## Limitations
There are some things that we can't (or very much shouldn't) do via Terraform - like manipulating the root account MFA settings.
Also, there are some singleton resources (such as the default VPC, or Config status) that we should not manipulate without consequences.
## Current Solution
Our solution is to create two AWS accounts, each dedicated to the task of integration testing inspec-aws.
In the "default" account, we setup all fixtures that can be handled by Terraform. For any remaining fixtures,
such as enabling MFA on the root account, we manually set one value in the "default" account, and manually set the opposing value in the "minimal" account. This allows us to perform testing on any reachable resource or property, regardless of whether or not Terraform can manage it.
All tests (and test fixtures) that do not require special handling are placed in the "default" set. That includes both positive and negative checks.
Note that some tests will fail for the first day or two after you set up the accounts, due to the tests checking properties such as the last usage time of an access key, for example.
Additionally, the first time you run the tests, you will need to accept the user agreement in the AWS marketplace for the linux AMIs we use. You'll need to do it 4 times, once for each of debian and centos on the two accounts.
### Creating the Default account
Follow these instructions carefully. Do not perform any action not specified.
1. Create an AWS account. Make a note of the account email and root password in a secure secret storage system.
2. Create an IAM user named `test-fixture-maker`.
* Enable programmatic access (to generate an access key)
* Direct-attach the policy AdministratorAccess
* Note the access key and secret key ID that are generated.
3. Using the aws command line tool, store the access key and secret key in a profile with a special name:
`aws configure --profile inspec-aws-test-default`
#### Test Fixtures for the Default Account
1. As the root user, enable a virtual MFA device.
2. Create an IAM user named 'test-user-last-key-use'.
* Enable programmatic access (to generate an access key)
* Note the access key and secret key ID that are generated.
* Direct-attach the policy AmazonEC2ReadOnlyAccess
* Using the AWS CLI and the credentials, execute the command `aws ec2 describe-instances`.
* The goal here is to have an access key that was used at one point.
### Creating the Minimal Account
Follow these instructions carefully. Do not perform any action not specified.
1. Create an AWS account. Make a note of the account email and root password in a secure secret storage system.
2. Create an IAM user named `test-fixture-maker`.
* Enable programmatic access (to generate an access key)
* Direct-attach the policy AdministratorAccess
* Note the access key and secret key ID that are generated.
3. Using the aws command line tool, store the access key and secret key in a profile with a special name:
`aws configure --profile inspec-aws-test-minimal`
#### Test Fixtures for the Minimal Account
1. Create an Access Key for the root user. You do not have to save the access key.
## Running the integration tests
To run all AWS integration tests, run:
```
bundle exec rake test:aws
```
To run the tests against one account only:
```
bundle exec rake test:aws:default
```
or
```
bundle exec rake test:aws:minimal
```
Each account has separate tasks for setup, running the tests, and cleanup. You may run them separately:
```
bundle exec rake test:aws:setup:default
bundle exec rake test:aws:run:default
bundle exec rake test:aws:cleanup:default
```

View file

@ -1,22 +0,0 @@
# Pin Terraform and the AWS provider to the versions these fixtures were written for.
terraform {
required_version = "~> 0.11.0"
}
provider "aws" {
# was 1.13.0
version = "= 1.42.0"
}
# Expose the account ID and region of the active credentials as outputs for the tests.
data "aws_caller_identity" "creds" {}
output "aws_account_id" {
value = "${data.aws_caller_identity.creds.account_id}"
}
data "aws_region" "current" {}
output "aws_region" {
value = "${data.aws_region.current.name}"
}
data "aws_availability_zones" "available" {}

View file

@ -1,230 +0,0 @@
# S3 bucket receiving CloudTrail logs. The bucket policy allows CloudTrail to
# check the bucket ACL and to write log objects with bucket-owner-full-control.
resource "aws_s3_bucket" "trail_1_bucket" {
bucket = "${terraform.env}-trail-01-bucket"
force_destroy = true
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::${terraform.env}-trail-01-bucket"
},
{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::${terraform.env}-trail-01-bucket/*",
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
POLICY
}
# Role assumed by CloudTrail so it can deliver events to CloudWatch Logs.
resource "aws_iam_role" "cloud_watch_logs_role" {
name = "${terraform.env}-cloud-watch-logs-role"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
POLICY
}
# Inline policy letting the role create log streams and put events into the
# trail's log group.
resource "aws_iam_role_policy" "cloud_watch_logs_role_policy" {
depends_on = ["aws_iam_role.cloud_watch_logs_role"]
name = "${terraform.env}-cloud-watch-logs-role-policy"
role = "${terraform.env}-cloud-watch-logs-role"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailCreateLogStream",
"Effect": "Allow",
"Action": [
"logs:CreateLogStream"
],
"Resource": [
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.creds.account_id}:log-group:${aws_cloudwatch_log_group.trail_1_log_group.name}:log-stream:${data.aws_caller_identity.creds.account_id}_CloudTrail_${data.aws_region.current.name}*"
]
},
{
"Sid": "AWSCloudTrailPutLogEvents",
"Effect": "Allow",
"Action": [
"logs:PutLogEvents"
],
"Resource": [
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.creds.account_id}:log-group:${aws_cloudwatch_log_group.trail_1_log_group.name}:log-stream:${data.aws_caller_identity.creds.account_id}_CloudTrail_${data.aws_region.current.name}*"
]
}
]
}
POLICY
}
resource "aws_cloudwatch_log_group" "trail_1_log_group" {
name = "${terraform.env}-trail-01-log-group"
}
# KMS key used to encrypt trail 1's log files; key policy mirrors the one the
# CloudTrail console generates.
resource "aws_kms_key" "trail_1_key" {
description = "${terraform.env}-trail-01-key"
deletion_window_in_days = 10
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "Key policy created by CloudTrail",
"Statement": [
{
"Sid": "Enable IAM User Permissions",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::${data.aws_caller_identity.creds.account_id}:root"
},
"Action": "kms:*",
"Resource": "*"
},
{
"Sid": "Allow CloudTrail to encrypt logs",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "kms:GenerateDataKey*",
"Resource": "*",
"Condition": {
"StringLike": {
"kms:EncryptionContext:aws:cloudtrail:arn": "arn:aws:cloudtrail:*:${data.aws_caller_identity.creds.account_id}:trail/*"
}
}
},
{
"Sid": "Allow CloudTrail to describe key",
"Effect": "Allow",
"Principal": {
"Service": "cloudtrail.amazonaws.com"
},
"Action": "kms:DescribeKey",
"Resource": "*"
},
{
"Sid": "Allow principals in the account to decrypt log files",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": [
"kms:Decrypt",
"kms:ReEncryptFrom"
],
"Resource": "*",
"Condition": {
"StringEquals": {
"kms:CallerAccount": "${data.aws_caller_identity.creds.account_id}"
},
"StringLike": {
"kms:EncryptionContext:aws:cloudtrail:arn": "arn:aws:cloudtrail:*:${data.aws_caller_identity.creds.account_id}:trail/*"
}
}
},
{
"Sid": "Allow alias creation during setup",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "kms:CreateAlias",
"Resource": "*",
"Condition": {
"StringEquals": {
"kms:ViaService": "ec2.${data.aws_region.current.name}.amazonaws.com",
"kms:CallerAccount": "${data.aws_caller_identity.creds.account_id}"
}
}
}
]
}
POLICY
}
# Trail 1: fully-featured fixture (multi-region, validated, encrypted, wired
# to CloudWatch Logs).
resource "aws_cloudtrail" "trail_1" {
depends_on = ["aws_iam_role_policy.cloud_watch_logs_role_policy"]
name = "${terraform.env}-trail-01"
s3_bucket_name = "${aws_s3_bucket.trail_1_bucket.id}"
include_global_service_events = true
enable_logging = true
is_multi_region_trail = true
enable_log_file_validation = true
cloud_watch_logs_group_arn = "${aws_cloudwatch_log_group.trail_1_log_group.arn}"
cloud_watch_logs_role_arn = "${aws_iam_role.cloud_watch_logs_role.arn}"
kms_key_id = "${aws_kms_key.trail_1_key.arn}"
}
# Trail 2: minimal fixture sharing trail 1's bucket.
resource "aws_cloudtrail" "trail_2" {
name = "${terraform.env}-trail-02"
s3_bucket_name = "${aws_s3_bucket.trail_1_bucket.id}"
}
# Outputs consumed by the InSpec verification profile.
output "cloudtrail_trail_1_name" {
value = "${aws_cloudtrail.trail_1.name}"
}
output "cloudtrail_trail_1_arn" {
value = "${aws_cloudtrail.trail_1.arn}"
}
output "cloudtrail_trail_1_s3_bucket_name" {
value = "${aws_s3_bucket.trail_1_bucket.id}"
}
output "cloudtrail_trail_1_key_arn" {
value = "${aws_kms_key.trail_1_key.arn}"
}
output "cloudtrail_trail_1_cloud_watch_logs_group_arn" {
value = "${aws_cloudwatch_log_group.trail_1_log_group.arn}"
}
output "cloudtrail_trail_1_cloud_watch_logs_role_arn" {
value = "${aws_iam_role.cloud_watch_logs_role.arn}"
}
output "cloudtrail_trail_2_s3_bucket_name" {
value = "${aws_s3_bucket.trail_1_bucket.id}"
}
output "cloudtrail_trail_2_name" {
value = "${aws_cloudtrail.trail_2.name}"
}
output "cloudtrail_trail_2_arn" {
value = "${aws_cloudtrail.trail_2.arn}"
}

View file

@ -1,95 +0,0 @@
# Contains resources and outputs related to testing the aws_cloudwatch_* resources.
#======================================================#
# Log Metric Filters
#======================================================#
#----------------------- Recall -----------------------#
# Fixture notes:
# LMF 1 recalled by filter name and log group
# LMF 2 recalled by pattern
# NOTE(review): both LMFs deliberately share the name "${terraform.env}_lmf";
# recall therefore disambiguates by log group (LMF 1) or pattern (LMF 2).
resource "aws_cloudwatch_log_metric_filter" "lmf_1" {
name = "${terraform.env}_lmf"
pattern = "testpattern01"
log_group_name = "${aws_cloudwatch_log_group.lmf_lg_1.name}"
metric_transformation {
name = "${terraform.env}_testmetric_1"
namespace = "${terraform.env}_YourNamespace_1"
value = "1"
}
}
output "log_metric_filter_1_name" {
value = "${aws_cloudwatch_log_metric_filter.lmf_1.name}"
}
resource "aws_cloudwatch_log_group" "lmf_lg_1" {
name = "${terraform.env}_lmf_lg_1"
}
output "log_metric_filter_1_log_group_name" {
value = "${aws_cloudwatch_log_group.lmf_lg_1.name}"
}
output "log_metric_filter_1_metric_name" {
value = "${terraform.env}_testmetric_1"
}
resource "aws_cloudwatch_log_metric_filter" "lmf_2" {
name = "${terraform.env}_lmf"
pattern = "${terraform.env}testpattern02"
log_group_name = "${aws_cloudwatch_log_group.lmf_lg_2.name}"
metric_transformation {
name = "${terraform.env}_testmetric_3"
namespace = "${terraform.env}_YourNamespace_3"
value = "1"
}
}
output "log_metric_filter_2_name" {
value = "${aws_cloudwatch_log_metric_filter.lmf_2.name}"
}
resource "aws_cloudwatch_log_group" "lmf_lg_2" {
name = "${terraform.env}_lmf_lg_2"
}
output "log_metric_filter_2_log_group_name" {
value = "${aws_cloudwatch_log_group.lmf_lg_2.name}"
}
output "log_metric_filter_2_pattern" {
value = "${terraform.env}testpattern02"
}
#======================================================#
# Cloudwatch Alarms
#======================================================#
# Alarm watching the metric emitted by LMF 1 above.
resource "aws_cloudwatch_metric_alarm" "alarm_1" {
alarm_name = "${terraform.env}-test-alarm-01"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "2"
metric_name = "${terraform.env}_testmetric_1"
namespace = "${terraform.env}_YourNamespace_1"
period = "120"
statistic = "Average"
threshold = "80"
alarm_description = "This metric is a test metric"
insufficient_data_actions = []
}
output "cloudwatch_alarm_1_name" {
value = "${terraform.env}-test-alarm-01"
}
output "cloudwatch_alarm_1_namespace" {
value = "${terraform.env}_YourNamespace_1"
}
output "cloudwatch_alarm_1_metric_name" {
value = "${terraform.env}_testmetric_1"
}
View file

@ -1,113 +0,0 @@
#======================================================#
# Configuration Recorder
#======================================================#
# NOTE(review): AWS allows only one Config recorder per account, so this
# recorder (and its role) is shared by the delivery channel below.
resource "aws_config_configuration_recorder" "config_recorder" {
name = "config_recorder"
role_arn = "${aws_iam_role.role_for_config_recorder.arn}"
}
# Role assumed by the AWS Config service.
resource "aws_iam_role" "role_for_config_recorder" {
name = "role_for_config_recorder"
assume_role_policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "config.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
POLICY
}
output "role_for_config_recorder_arn" {
value = "${aws_iam_role.role_for_config_recorder.arn}"
}
output "config_recorder_name" {
value = "${aws_config_configuration_recorder.config_recorder.name}"
}
#======================================================#
# Configuration Delivery Channel
#======================================================#
# Note that since AWS accounts can only have one Config Recorder,
# we have to re-use it here (as well as its role).
resource "aws_config_delivery_channel" "delivery_channel_01" {
name = "delivery_channel_01"
s3_bucket_name = "${aws_s3_bucket.bucket_for_delivery_channel.bucket}"
depends_on = ["aws_config_configuration_recorder.config_recorder"]
sns_topic_arn = "${aws_sns_topic.sns_topic_for_delivery_channel.arn}"
snapshot_delivery_properties = {
delivery_frequency = "TwentyFour_Hours"
}
}
output "delivery_channel_01_name" {
value = "${aws_config_delivery_channel.delivery_channel_01.id}"
}
output "config_recorder_for_delivery_channel_role_arn" {
value = "${aws_iam_role.role_for_config_recorder.arn}"
}
#======================================================#
# IAM Roles
#======================================================#
# Grants the recorder role write access to the delivery-channel bucket.
resource "aws_iam_role_policy" "policy_for_delivery_channel" {
name = "policy_for_delivery_channel"
role = "${aws_iam_role.role_for_config_recorder.id}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:*"
],
"Effect": "Allow",
"Resource": [
"${aws_s3_bucket.bucket_for_delivery_channel.arn}",
"${aws_s3_bucket.bucket_for_delivery_channel.arn}/*"
]
}
]
}
POLICY
}
#=================================================================#
# Config S3 Buckets
#=================================================================#
resource "aws_s3_bucket" "bucket_for_delivery_channel" {
bucket = "inspec-bucket-for-delivery-channel-${terraform.env}.chef.io"
acl = "public-read"
force_destroy = true
}
output "s3_bucket_for_delivery_channel_name" {
value = "${aws_s3_bucket.bucket_for_delivery_channel.id}"
}
#===========================================================================#
# SNS Topic
#===========================================================================#
resource "aws_sns_topic" "sns_topic_for_delivery_channel" {
name = "${terraform.env}-sns_topic_for_delivery_channel"
}
output "sns_topic_for_delivery_channel_arn" {
value = "${aws_sns_topic.sns_topic_for_delivery_channel.arn}"
}

View file

@ -1,584 +0,0 @@
# Contains resources and outputs related to testing the aws_ec2_* resources.
#======================================================#
# EC2 Instances
#======================================================#
# Test fixture info:
# instance | OS | has_role? | instance_type
# -----------------------------------------------
# alpha | debian | N | t2.micro
# beta | centos | Y | t2.small
resource "aws_instance" "alpha" {
ami = "${data.aws_ami.debian.id}"
instance_type = "t2.micro"
tags {
Name = "${terraform.env}.alpha"
X-Project = "inspec"
}
depends_on = [ "aws_subnet.subnet_01" ]
}
resource "aws_instance" "beta" {
ami = "${data.aws_ami.centos.id}"
instance_type = "t2.small"
iam_instance_profile = "${aws_iam_instance_profile.profile_for_ec2_with_role.name}"
tags {
Name = "${terraform.env}.beta"
X-Project = "inspec"
}
depends_on = [ "aws_subnet.subnet_01" ]
}
#----------------------- Recall -----------------------#
# Using Alpha for recall
output "ec2_instance_recall_hit_name" {
value = "${aws_instance.alpha.tags.Name}"
}
output "ec2_instance_recall_hit_id" {
value = "${aws_instance.alpha.id}"
}
output "ec2_instance_recall_miss" {
value = "i-06b4bc106e0d03dfd"
}
#----------------- has_role property ------------------#
# No role
output "ec2_instance_no_role_id" {
value = "${aws_instance.alpha.id}"
}
# Has a role
resource "aws_iam_role" "role_for_ec2_with_role" {
name = "${terraform.env}.role_for_ec2_with_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_iam_instance_profile" "profile_for_ec2_with_role" {
name = "${terraform.env}.profile_for_ec2_with_role"
role = "${aws_iam_role.role_for_ec2_with_role.name}"
}
output "ec2_instance_has_role_id" {
value = "${aws_instance.beta.id}"
}
#-------------------- instance_type property -----------------------#
output "ec2_instance_type_t2_micro_id" {
value = "${aws_instance.alpha.id}"
}
output "ec2_instance_type_t2_small_id" {
value = "${aws_instance.beta.id}"
}
#---------------------- image_id property --------------------------#
# Debian
data "aws_ami" "debian" {
most_recent = true
owners = ["679593333241"]
filter {
name = "name"
values = ["debian-jessie-amd64-hvm-*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
}
output "ec2_ami_id_debian" {
value = "${data.aws_ami.debian.id}"
}
output "ec2_instance_debian_id" {
value = "${aws_instance.alpha.id}"
}
# Centos
data "aws_ami" "centos" {
most_recent = true
owners = ["679593333241"]
filter {
name = "name"
values = ["CentOS Linux 7 x86_64 HVM EBS*"]
}
filter {
name = "architecture"
values = ["x86_64"]
}
filter {
name = "root-device-type"
values = ["ebs"]
}
}
output "ec2_ami_id_centos" {
value = "${data.aws_ami.centos.id}"
}
output "ec2_instance_centos_id" {
value = "${aws_instance.beta.id}"
}
#============================================================#
# Security Groups
#============================================================#
# Look up the default VPC and the default security group for it
data "aws_vpc" "default" {
default = "true"
}
data "aws_security_group" "default" {
vpc_id = "${data.aws_vpc.default.id}"
name = "default"
}
output "ec2_security_group_default_vpc_id" {
value = "${data.aws_vpc.default.id}"
}
output "ec2_security_group_default_group_id" {
value = "${data.aws_security_group.default.id}"
}
resource "aws_vpc" "non_default" {
cidr_block = "172.32.0.0/16"
}
output "vpc_default_vpc_id" {
value = "${data.aws_vpc.default.id}"
}
output "vpc_default_cidr_block" {
value = "${data.aws_vpc.default.cidr_block}"
}
output "vpc_default_dhcp_options_id" {
value = "${data.aws_vpc.default.dhcp_options_id}"
}
output "vpc_non_default_vpc_id" {
value = "${aws_vpc.non_default.id}"
}
output "vpc_non_default_cidr_block" {
value = "${aws_vpc.non_default.cidr_block}"
}
output "vpc_non_default_instance_tenancy" {
value = "${aws_vpc.non_default.instance_tenancy}"
}
output "vpc_non_default_dhcp_options_id" {
value = "${aws_vpc.non_default.dhcp_options_id}"
}
# Create a security group with a known description
# in the default VPC
resource "aws_security_group" "alpha" {
name = "${terraform.env}-alpha"
description = "SG alpha"
vpc_id = "${data.aws_vpc.default.id}"
}
output "ec2_security_group_alpha_group_id" {
value = "${aws_security_group.alpha.id}"
}
output "ec2_security_group_alpha_group_name" {
value = "${aws_security_group.alpha.name}"
}
# Create another security group
# in the default VPC
resource "aws_security_group" "beta" {
name = "${terraform.env}-beta"
description = "SG beta"
vpc_id = "${data.aws_vpc.default.id}"
}
output "ec2_security_group_beta_group_id" {
value = "${aws_security_group.beta.id}"
}
output "ec2_security_group_beta_group_name" {
value = "${aws_security_group.beta.name}"
}
# Create third security group
# in the default VPC
resource "aws_security_group" "gamma" {
name = "${terraform.env}-gamma"
description = "SG gamma"
vpc_id = "${data.aws_vpc.default.id}"
}
output "ec2_security_group_gamma_group_id" {
value = "${aws_security_group.gamma.id}"
}
output "ec2_security_group_gamma_group_name" {
value = "${aws_security_group.gamma.name}"
}
# NOTE: AWS (in the console and CLI) creates SGs with a default
# allow all egress. Terraform removes that rule, unless you specify it here.
# Populate SG Alpha with some rules
# SG alpha: world-open HTTP ingress fixture.
resource "aws_security_group_rule" "alpha_http_world" {
type = "ingress"
from_port = "80"
to_port = "80"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.alpha.id}"
}
# SG alpha: SSH ingress restricted to a single /24.
resource "aws_security_group_rule" "alpha_ssh_in" {
type = "ingress"
from_port = "22"
to_port = "22"
protocol = "tcp"
cidr_blocks = ["10.1.2.0/24"]
security_group_id = "${aws_security_group.alpha.id}"
}
# SG alpha: egress rule covering a port RANGE (6000-6007) with both IPv4
# and IPv6 CIDR lists — exercises multi-cidr and ipv6 matching in tests.
resource "aws_security_group_rule" "alpha_x11" {
description = "Only allow X11 out for some reason"
type = "egress"
from_port = "6000"
to_port = "6007"
protocol = "tcp"
cidr_blocks = ["10.1.2.0/24", "10.3.2.0/24"]
ipv6_cidr_blocks = ["2001:db8::/122"]
security_group_id = "${aws_security_group.alpha.id}"
}
# SG alpha: full TCP port range (0-65535) ingress from one IPv4 CIDR.
resource "aws_security_group_rule" "alpha_all_ports" {
type = "ingress"
from_port = "0"
to_port = "65535"
protocol = "tcp"
cidr_blocks = ["10.1.2.0/24"]
security_group_id = "${aws_security_group.alpha.id}"
}
# SG alpha: full TCP port range ingress, IPv6-only variant.
resource "aws_security_group_rule" "alpha_piv6_all_ports" {
type = "ingress"
from_port = "0"
to_port = "65535"
protocol = "tcp"
ipv6_cidr_blocks = ["2001:db8::/122"]
security_group_id = "${aws_security_group.alpha.id}"
}
# Populate SG Beta with some rules
resource "aws_security_group_rule" "beta_http_world" {
type = "ingress"
from_port = "80"
to_port = "80"
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
security_group_id = "${aws_security_group.beta.id}"
}
# SG beta: ingress sourced from ANOTHER security group (alpha) rather than
# a CIDR — exercises source_security_group_id matching.
resource "aws_security_group_rule" "beta_ssh_in_alfa" {
type = "ingress"
from_port = "22"
to_port = "22"
protocol = "tcp"
source_security_group_id = "${aws_security_group.alpha.id}"
security_group_id = "${aws_security_group.beta.id}"
}
resource "aws_security_group_rule" "beta_all_ports_in_gamma" {
type = "ingress"
from_port = "0"
to_port = "65535"
protocol = "tcp"
source_security_group_id = "${aws_security_group.gamma.id}"
security_group_id = "${aws_security_group.beta.id}"
}
# Populate SG Gamma with a rule
resource "aws_security_group_rule" "gamma_ssh_in_alfa" {
type = "ingress"
from_port = "22"
to_port = "22"
protocol = "tcp"
source_security_group_id = "${aws_security_group.alpha.id}"
security_group_id = "${aws_security_group.gamma.id}"
}
#============================================================#
# VPC Subnets
#============================================================#
# Base CIDR carved into /28 child subnets via cidrsubnet(var.base_cidr, 8, n)
# throughout this file; override with a tfvars file if it collides.
variable "base_cidr" {
default = "172.31.48.0/20"
}
resource "aws_subnet" "subnet_01" {
vpc_id = "${data.aws_vpc.default.id}"
cidr_block = "${cidrsubnet(var.base_cidr, 8, 0)}"
tags {
Name = "${terraform.env}.subnet_01"
}
}
# Re-output any VPC ID for subnet listings
output "subnet_vpc_id" {
# Use the default VPC since it is guaranteed
value = "${data.aws_vpc.default.id}"
}
output "subnet_01_id" {
value = "${aws_subnet.subnet_01.id}"
}
output "subnet_01_az" {
value = "${aws_subnet.subnet_01.availability_zone}"
}
#============================================================#
# ELB testing
#============================================================#
# Use default VPC - "${data.aws_vpc.default.id}"
# Use two subnets.
# Fixture data:
# ELB alpha
# - single subnet on AZ a
# - not externally facing
# - sends 80 to 8080
# - zero instances
# ELB beta
# - dual subnet on AZ a and c
# - externally facing
# - sends 80 to 80
# - two instances
# A pair of subnets
resource "aws_subnet" "elb_a" {
vpc_id = "${data.aws_vpc.default.id}"
availability_zone = "${data.aws_region.current.name}a"
cidr_block = "${cidrsubnet(var.base_cidr, 8, 1)}"
tags {
Name = "${terraform.env}.elb_a"
}
}
output "elb_subnet_a_id" {
value = "${aws_subnet.elb_a.id}"
}
output "elb_vpc_id" {
value = "${data.aws_vpc.default.id}"
}
resource "aws_subnet" "elb_c" {
vpc_id = "${data.aws_vpc.default.id}"
availability_zone = "${data.aws_region.current.name}c"
cidr_block = "${cidrsubnet(var.base_cidr, 8, 2)}"
tags {
Name = "${terraform.env}.elb_c"
}
}
output "elb_subnet_c_id" {
value = "${aws_subnet.elb_c.id}"
}
# A security group for the ELB so it is accessible via the web
resource "aws_security_group" "elb_world_to_lb" {
vpc_id = "${data.aws_vpc.default.id}"
# HTTP access from anywhere
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# outbound internet access
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
output "elb_security_group_to_lb_id" {
value = "${aws_security_group.elb_world_to_lb.id}"
}
# SG applied to backend instances: accepts 80 and 8080 from within the VPC
# only (both listener targets used by the two ELBs below).
resource "aws_security_group" "elb_lb_to_instances" {
vpc_id = "${data.aws_vpc.default.id}"
# HTTP access from the VPC
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["${data.aws_vpc.default.cidr_block}"]
}
ingress {
from_port = 8080
to_port = 8080
protocol = "tcp"
cidr_blocks = ["${data.aws_vpc.default.cidr_block}"]
}
# outbound internet access
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
output "elb_security_group_to_instances_id" {
value = "${aws_security_group.elb_lb_to_instances.id}"
}
# ELB alpha: single subnet, no registered instances, 80 -> 8080.
resource "aws_elb" "alpha" {
name = "${terraform.env}-alpha"
subnets = [
"${aws_subnet.elb_a.id}",
]
security_groups = [
"${aws_security_group.elb_world_to_lb.id}",
]
instances = []
listener {
instance_port = 8080
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
output "elb_alpha_name" {
value = "${aws_elb.alpha.name}"
}
output "elb_alpha_dns_name" {
value = "${aws_elb.alpha.dns_name}"
}
# ELB beta: two subnets (AZ a and c), two registered instances, 80 -> 80.
resource "aws_elb" "beta" {
name = "${terraform.env}-beta"
subnets = [
"${aws_subnet.elb_a.id}",
"${aws_subnet.elb_c.id}",
]
security_groups = [
"${aws_security_group.elb_world_to_lb.id}",
]
instances = [
"${aws_instance.elb_beta_1.id}",
"${aws_instance.elb_beta_2.id}",
]
listener {
instance_port = 80
instance_protocol = "http"
lb_port = 80
lb_protocol = "http"
}
}
output "elb_beta_name" {
value = "${aws_elb.beta.name}"
}
output "elb_beta_dns_name" {
value = "${aws_elb.beta.dns_name}"
}
# NOTE(review): beta_1 lands in subnet elb_c and beta_2 in elb_a (names and
# subnets are crossed) — harmless for the tests, but confirm if intentional.
resource "aws_instance" "elb_beta_1" {
instance_type = "t2.micro"
ami = "${data.aws_ami.debian.id}"
subnet_id = "${aws_subnet.elb_c.id}"
vpc_security_group_ids = [
"${aws_security_group.elb_lb_to_instances.id}",
]
tags {
Name = "${terraform.env}.elb_beta_1"
}
}
output "elb_beta_instance_1_id" {
value = "${aws_instance.elb_beta_1.id}"
}
resource "aws_instance" "elb_beta_2" {
instance_type = "t2.micro"
ami = "${data.aws_ami.debian.id}"
subnet_id = "${aws_subnet.elb_a.id}"
vpc_security_group_ids = [
"${aws_security_group.elb_lb_to_instances.id}",
]
tags {
Name = "${terraform.env}.elb_beta_2"
}
}
output "elb_beta_instance_2_id" {
value = "${aws_instance.elb_beta_2.id}"
}

View file

@ -1,173 +0,0 @@
# Contains resources and outputs related to testing the aws_eks_cluster resources.
#======================================================#
# EKS variables
#======================================================#
# Dummy account numbers / ARNs below are fixture data for the aws-auth
# configmap inputs; manage_aws_auth is disabled so they are never applied.
variable "eks_map_accounts" {
description = "Additional AWS account numbers to add to the aws-auth configmap."
type = "list"
default = [
"777777777777",
"888888888888",
]
}
variable "eks_map_roles" {
description = "Additional IAM roles to add to the aws-auth configmap."
type = "list"
default = [
{
role_arn = "arn:aws:iam::66666666666:role/role1"
username = "role1"
group = "system:masters"
},
]
}
variable "eks_map_users" {
description = "Additional IAM users to add to the aws-auth configmap."
type = "list"
default = [
{
user_arn = "arn:aws:iam::66666666666:user/user1"
username = "user1"
group = "system:masters"
},
{
user_arn = "arn:aws:iam::66666666666:user/user2"
username = "user2"
group = "system:masters"
},
]
}
#======================================================#
# EKS Cluster
#======================================================#
locals {
cluster_name = "test-eks-inspec-${terraform.env}"
worker_groups = [
{
instance_type = "t2.small"
additional_userdata = "echo foo bar"
subnets = "${join(",", module.eks_vpc.private_subnets)}"
additional_security_group_ids = "${aws_security_group.eks_worker_group_mgmt_one.id},${aws_security_group.eks_worker_group_mgmt_two.id}"
},
]
tags = {
Environment = "test-eks-${terraform.env}"
}
}
resource "aws_security_group" "eks_worker_group_mgmt_one" {
name_prefix = "eks_worker_group_mgmt_one-${terraform.env}"
description = "SG to be applied to all *nix machines"
vpc_id = "${module.eks_vpc.vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
]
}
}
resource "aws_security_group" "eks_worker_group_mgmt_two" {
name_prefix = "eks_worker_group_mgmt_two-${terraform.env}"
vpc_id = "${module.eks_vpc.vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"192.168.0.0/16",
]
}
}
resource "aws_security_group" "eks_all_worker_mgmt" {
name_prefix = "eks_all_worker_management-${terraform.env}"
vpc_id = "${module.eks_vpc.vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
}
# Private-only VPC for the cluster; NAT disabled to keep fixture cost down.
module "eks_vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "1.14.0"
name = "eks-test-vpc"
cidr = "10.0.0.0/16"
azs = ["${data.aws_availability_zones.available.names[0]}", "${data.aws_availability_zones.available.names[1]}", "${data.aws_availability_zones.available.names[2]}"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = []
enable_nat_gateway = false
tags = "${merge(local.tags, map("kubernetes.io/cluster/${local.cluster_name}", "shared"))}"
}
output "eks_vpc_id" {
value = "${module.eks_vpc.vpc_id}"
}
output "eks_vpc_subnets" {
value = "${module.eks_vpc.private_subnets}"
}
module "eks" {
source = "terraform-aws-modules/eks/aws"
version = "1.6.0"
cluster_name = "${local.cluster_name}"
subnets = ["${module.eks_vpc.private_subnets}"]
tags = "${local.tags}"
vpc_id = "${module.eks_vpc.vpc_id}"
worker_groups = "${local.worker_groups}"
worker_group_count = "1"
worker_additional_security_group_ids = ["${aws_security_group.eks_all_worker_mgmt.id}"]
map_roles = "${var.eks_map_roles}"
map_users = "${var.eks_map_users}"
map_accounts = "${var.eks_map_accounts}"
manage_aws_auth = false
}
output "eks_cluster_id" {
value = "${module.eks.cluster_id}"
}
# NOTE(review): intentionally re-exports cluster_id as the "name" output —
# presumably the module's cluster_id equals the cluster name; confirm against
# terraform-aws-modules/eks 1.6.0 before relying on it.
output "eks_cluster_name" {
value = "${module.eks.cluster_id}"
}
output "eks_cluster_security_group_id" {
value = "${module.eks.cluster_security_group_id}"
}
output "eks_worker_security_group_id" {
value = "${module.eks.worker_security_group_id}"
}
output "eks_cluster_endpoint" {
value = "${module.eks.cluster_endpoint}"
}
output "eks_cluster_certificate" {
value = "${module.eks.cluster_certificate_authority_data}"
}

View file

@ -1,82 +0,0 @@
# VPC-level flow log: captures ALL traffic for the default VPC into the
# shared CloudWatch log group below.
resource "aws_flow_log" "flow_log_alpha_vpc_log" {
log_group_name = "${aws_cloudwatch_log_group.flow_log_alpha_log_group.name}"
iam_role_arn = "${aws_iam_role.flow_log_alpha_role.arn}"
vpc_id = "${data.aws_vpc.default.id}"
traffic_type = "ALL"
}
resource "aws_subnet" "flow_log_alpha_subnet" {
vpc_id = "${data.aws_vpc.default.id}"
cidr_block = "172.31.112.0/24"
}
# Subnet-level flow log: same log group and role, scoped to one subnet.
resource "aws_flow_log" "flow_log_alpha_subnet_log" {
log_group_name = "${aws_cloudwatch_log_group.flow_log_alpha_log_group.name}"
iam_role_arn = "${aws_iam_role.flow_log_alpha_role.arn}"
subnet_id = "${aws_subnet.flow_log_alpha_subnet.id}"
traffic_type = "ALL"
}
resource "aws_cloudwatch_log_group" "flow_log_alpha_log_group" {
name = "flow_log_alpha_log_group"
}
# Role assumable only by the VPC Flow Logs service principal.
resource "aws_iam_role" "flow_log_alpha_role" {
name = "flow_log_alpha_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "vpc-flow-logs.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
# Inline policy granting the role the CloudWatch Logs write/describe calls
# the flow-log delivery needs.
resource "aws_iam_role_policy" "flow_log_alpha_policy" {
name = "flow_log_alpha_policy"
role = "${aws_iam_role.flow_log_alpha_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents",
"logs:DescribeLogGroups",
"logs:DescribeLogStreams"
],
"Effect": "Allow",
"Resource": "*"
}
]
}
EOF
}
output "flow_log_alpha_vpc_log_id" {
value = "${aws_flow_log.flow_log_alpha_vpc_log.id}"
}
output "flow_log_alpha_subnet_log_id" {
value = "${aws_flow_log.flow_log_alpha_subnet_log.id}"
}
output "flow_log_alpha_subnet_id" {
value = "${aws_subnet.flow_log_alpha_subnet.id}"
}
output "flow_log_vpc_id" {
value = "${data.aws_vpc.default.id}"
}

View file

@ -1,422 +0,0 @@
# Contains resources and outputs related to testing the aws_iam_* resources.
# Throwaway PGP public key (base64, no armor) used only to encrypt generated
# login-profile passwords / access-key secrets for this fixture account.
variable "login_profile_pgp_key" {
type = "string"
# You may override this using a tfvars file or other approaches.
default = "mQINBFit+9sBEAC7Aj1/IqLBMupJ/ESurbFy/h5Nukxd2c5JmzyIXbEgjnjrZCpFDCZ9fHYsEchzO9e9u+RiqJE78/Rp3PJjQeJnA4fln/XxK8K7U/Vyi9p725blielNsqRr6ERQZlbBb8uPHHd5YKOOSt+fLQuG2n/Ss13W5WKREpMLkzd80Uyl6Yofsguj8YdKvExV5akvi2VrZcHBIhmbjU+R33kDOuNlHGx4fhVHhydegog0nQnB48hRJQgbMPoMlySM666JDW4DmePms56M7IUDHFCH+oMGCGTdcuzo4BQwv6TMS6mZM3QVtnyEI5rVmbfkhc70ChqYbFB8isvmsLTRvJXdhyrXHA+YjiN3yMOq1oE/N85ug3D5tp9+yT7O+hu+vmgZ1oqRamuwExPZsmfwWd4lcTbu8sRMQy6J9H7b3ZPaN/cr0uO8RE5e1u7EhewV2+07glW7nuXY5DqPCvyIHqOINHvIh7uMWbAdYIiy73GMaNP3W3b/HQOXwdFz8N0kxT3AgTw+vJ5kiCzpG6gwJeFZtke2zzd5WDqUSs0uaCwEyR5FkB9H3YwNawZ1n1lzuTFcxVpnjLc6TOsrWtQ5Ccy9MFHOp/mxtnsOc/Le6YmcAK3xJ4FvSrOzyWH1Jc01wHmG1kLWznDW8+xFj+Zki+g/h0XtezVErmlffvqYT8cT1npeuwARAQABtCJpbnNwZWMtYXdzIDxpbnNwZWMtYXdzQGluc3BlYy5jb20+iQI4BBMBAgAiBQJYrfvbAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCbG1xp7O1xwOK4D/4riU9Bs3ZF6e5lO2SzwBS6m+9aFBGkVZGndcMW+k05ksKmyOuYjbyukeHlRxVeVKpbOxJSIKoame+7LNmtlK/0y+kvKN1hkmLas0yZcTlS4V6mJRTR9DXKsIVjlbvQQ3iqHSqZSqg0UbVDjG3PaupWqlBW3pqb1lisDcTWKmltaOigCJsmpiOA23+SEYjTzXzV5wpBGPTFnyhPD+cjh0AZIC0+/u0zA1ycMUFP1d1p+DDQQuhqV5CHMbdExdyScpPnJU7tLoFytiwhVkbgUG11CoVHfFYac0Eome4jW5TFwfrg5leZob6xWUaJrQa+GKB8TVbW7ytQG0s1zQFUIhBdl975ftHAhyy7yerNXW2asgnQ6XiFbWK8RI/pPnktbc9upRb1roegye+Rp79ocmFe0nnzgsE74JFqlPoG4qglicuzcBMpCyRfixfdQIa1uyxOHHUvYhyzAKrEIsSeJfD4t3scypo4j0Kx3eG0ejRszpdVNVLJOHHAMXbgJBhHufQHX+4ZruI8+CqQ3rJsHezJOX3gH8GP0jkmTEj+ZiTE9tyoHSjwHTSIVKaadlLN+XUcvDnAK38UEo2+CxEnbsURe0mJsdvzN7SFw/DnQle4w3L4vqjvsGxM2xc/uqIpXIxmBd8yf8T4J8taZX2DNtN8Tgz2yiWFTjHCG9lzPZmwabkCDQRYrfvbARAAy24tShvJmUCMB+QfnZV9dTjB6ZY9chdvQaeejotQY4cnw8AU8J38niydEeU4QpUWyrNa0WM4mtY/naR1Q216KVvDQTgcWFRuxs7VzyAf4slVRa2H6VdNRUx9m3jCpzoWku3TtXlOV0P9gRb7LWESX6Xp62nO5A/6wYDLLWD1pGWSdetQrTsGKy9F0rHr4WGRGQlvPg4x523LLkIV6+7TmHCUuvi6SY4ZtX2pLZ/cooX/Dw8LHwG7a6d9WIdbBGsU5z4wltc1CjwAY9M4FfDjnL5vp/jhHrmzna/rh2PI4AP16te/YR8s1ybWHacHgjKGN4Wtq/GywcGUxVPIlXaUbCz9uDGt/b19JxptOONcdgjFv1AQkAcrGehNlEsiDkaSqSaqbjWZ2RCICu2HPvxBBBxowJtpu3gDG69tKvuSPbFn2fYxs98X8DQsXIFEb7A5ZJmPgpigRAiPG
hBo/llZBw8aGrd1ZCUSreEasQkVkLiXoCOgby16IROFnxhqfD6z8qr08beHgifzBVqwPQ8cUpLEOvX/kqH7vcqSOMI6RanXzrVWiuy0HFVlMHPF5RV7JZBSEr/ZkElducC3LeY6t5X5yViVlIvP+6M4U9iIkuCPdBnt350quKGnZWqhkMoLLFDl7Q++83SSc1/u3iyqzFGzF3VFE2pA6OSpIYFJMFUAEQEAAYkCHwQYAQIACQUCWK372wIbDAAKCRCbG1xp7O1xwMOJD/4iEpEMzMINqTkB7UWJyZxvJ3q353SASPD78TTrh9Yp+dWwSPLgqygxDToPVOTgW1FEli3VY24ibHG6BSA6WTQFD/Gf2Z2AVEdNaIAWLjz5GNG0fSJfLy/W4umPN4RCjd7A4OYoFVLU4Wr042Cb3L6/wQojZF7qiDK9quvySmJgOQHW+/ToxV3BXtm+YSxSOVLNuMr7+FaIcmtrLLYgp38x3ST6jeJGiFQRHDjtc8VoKaIpQZkBqWCQZYk+medoOqAYEBKxNUWOiof04kOJUvNQ6jTimIOpuYVpllRi3CorSavwk68cCtqTS7GDwfky14rL6FYDzhh/POBv2u7WepZ7sFSAg9hhHq+8Gy/e5kNPpVg7vmNsXbcNX9VnGSsg8GEoEnKJ3vLV/hrpGlFkQ87ppOVQ7qQlVFvbodA85xs3OWCevvUQYYqyrmbV1PKdMoXaRZRexY6EHuUSBrtXuprwXuKEa1ELu5LbmzN008BJTKVLlf2jhbGvt9yH2QhPzeFHlLz5r0tc/3cxJx2S0Sz0varCsfN2knOazjxIW/l3RYkXfNF26vF2eaJuCeakeAqPVBnG3b1KPEcwVLSidu44TLfZ4x3DtHE4oZb+OfV4Q/1uUy7qu5QpUwI+JAsJUWbeWhXBOTmMgXfoI1M9ns+yR/IrZtC4+SVN9C0PBGeLMQ=="
}
#======================================================#
# Account Password Policy
#======================================================#
# Only one of these is allowed
resource "aws_iam_account_password_policy" "fixture" {
minimum_password_length = 10
require_lowercase_characters = true
require_numbers = true
require_uppercase_characters = true
require_symbols = true
allow_users_to_change_password = true
max_password_age = 365
password_reuse_prevention = 7
}
#======================================================#
# IAM Users
#======================================================#
#----------------------- Recall -----------------------#
resource "aws_iam_user" "recall_hit" {
name = "${terraform.env}.recall_hit"
}
output "iam_user_recall_hit" {
value = "${aws_iam_user.recall_hit.name}"
}
# Deliberately NOT backed by a resource — a name that must not exist.
output "iam_user_recall_miss" {
value = "${terraform.env}.recall_miss"
}
#---------- Property - has_console_password -----------#
# TODO: test for no console password user
resource "aws_iam_user" "has_console_password" {
name = "${terraform.env}.has_console_password"
force_destroy = true
}
# Login profile gives the user a console password; the secret is encrypted
# with the fixture PGP key above.
resource "aws_iam_user_login_profile" "login_profile_for_password_user" {
user = "${aws_iam_user.has_console_password.name}"
pgp_key = "${var.login_profile_pgp_key}"
}
output "iam_user_has_console_password" {
value = "${aws_iam_user.has_console_password.name}"
}
#------------- Property - has_mfa_enabled -------------#
# TODO: manual fixture for positive test of mfa_enabled
# TODO: tests and fixtures for hardware mfa
resource "aws_iam_user" "no_mfa_enabled" {
name = "${terraform.env}.no_mfa_enabled"
}
output "iam_user_no_mfa_enabled" {
value = "${aws_iam_user.no_mfa_enabled.name}"
}
#------------- Property - access_keys -------------#
resource "aws_iam_user" "with_access_key" {
name = "${terraform.env}.with_access_key"
}
resource "aws_iam_access_key" "access_key_for_user" {
user = "${aws_iam_user.with_access_key.name}"
pgp_key = "${var.login_profile_pgp_key}"
}
output "iam_user_with_access_key" {
value = "${aws_iam_user.with_access_key.name}"
}
resource "aws_iam_user" "without_access_key" {
name = "${terraform.env}.without_access_key"
}
output "iam_user_without_access_key" {
value = "${aws_iam_user.without_access_key.name}"
}
#======================================================#
# IAM Access Keys
#======================================================#
output "iam_access_key_recall_hit" {
value = "${aws_iam_access_key.access_key_for_user.id}"
}
# Syntactically valid access key ID that is never created.
output "iam_access_key_recall_miss" {
value = "AKIAFAKEFAKEFAKEFAKE"
}
#======================================================#
# IAM Groups
#======================================================#
resource "aws_iam_group" "administrators" {
name = "${terraform.env}.administrators"
}
output "iam_group_administrators" {
value = "${aws_iam_group.administrators.name}"
}
#======================================================#
# IAM Policies
#======================================================#
# Test fixtures:
# Note: Principal is not allowed on an IAM Policy. (May be allowed on a role? certainly on s3 bucket?)
# alpha
# has 2 statements
# one is a wildcard on ec2
# both have IDs
# one is a resource full wildcard
# one is Allow, one is Deny
# scalar values throughout
# beta
# one statement
# list values for Resource and Action
# gamma
# allow all
resource "aws_iam_policy" "alpha" {
name = "${terraform.env}-alpha"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "alpha01",
"Action": "ec2:Describe*",
"Effect": "Allow",
"Resource": "*"
},
{
"Sid": "alpha02",
"Action": "s3:GetObject",
"Effect": "Deny",
"Resource": "arn:aws:s3:::bobs-stuff"
}
]
}
EOF
}
output "aws_iam_policy_alpha_name" {
value = "${aws_iam_policy.alpha.name}"
}
resource "aws_iam_policy" "beta" {
name = "${terraform.env}-beta"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "beta01",
"Action": [
"ec2:DescribeSubnets",
"ec2:DescribeSecurityGroups"
],
"Effect": "Deny",
"Resource": [
"arn:aws:ec2:::*",
"*"
]
}
]
}
EOF
}
output "aws_iam_policy_beta_name" {
value = "${aws_iam_policy.beta.name}"
}
# gamma intentionally grants */* — fixture for "allow all" detection tests.
resource "aws_iam_policy" "gamma" {
name = "${terraform.env}-gamma"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "gamma01",
"Action": "*",
"Effect": "Allow",
"Resource": "*"
}
]
}
EOF
}
output "aws_iam_policy_gamma_name" {
value = "${aws_iam_policy.gamma.name}"
}
#======================================================#
# IAM Group Memberships
#======================================================#
# Puts the recall_hit user into the administrators group.
resource "aws_iam_group_membership" "administrators_membership" {
name = "administrators_membership"
users = [
"${aws_iam_user.recall_hit.name}",
]
group = "${aws_iam_group.administrators.name}"
}
#======================================================#
# User Policies - Attached and Inline
#======================================================#
# Naming scheme: 1i_1a means one
# inline policy, one attached policy, etc
#------------------------------------------#
# A user with neither attached nor inline
#------------------------------------------#
# Reuses recall_hit, which has no policies of either kind.
output "iam_user_for_policies_0i_0a_name" {
value = "${aws_iam_user.recall_hit.name}"
}
#------------------------------------------#
# A user with one inline and no attached
#------------------------------------------#
resource "aws_iam_user" "for_policies_1i_0a" {
name = "${terraform.env}_user_for_policies_1i_0a"
}
resource "aws_iam_user_policy" "test_1i_0a" {
name = "test_1i_0a_1"
user = "${aws_iam_user.for_policies_1i_0a.name}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "ec2:Describe*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
output "iam_user_for_policies_1i_0a_name" {
value = "${aws_iam_user.for_policies_1i_0a.name}"
}
#------------------------------------------#
# A user with two inlines and no attached
#------------------------------------------#
resource "aws_iam_user" "for_policies_2i_0a" {
name = "${terraform.env}_user_for_policies_2i_0a"
}
resource "aws_iam_user_policy" "test_2i_0a_1" {
name = "test_2i_0a_1"
user = "${aws_iam_user.for_policies_2i_0a.name}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "s3:List*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
resource "aws_iam_user_policy" "test_2i_0a_2" {
name = "test_2i_0a_2"
user = "${aws_iam_user.for_policies_2i_0a.name}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "rds:Describe*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
output "iam_user_for_policies_2i_0a_name" {
value = "${aws_iam_user.for_policies_2i_0a.name}"
}
#------------------------------------------#
# A user with one inline and one attached
#------------------------------------------#
resource "aws_iam_user" "for_policies_1i_1a" {
name = "${terraform.env}_user_for_policies_1i_1a"
}
resource "aws_iam_user_policy" "test_1i_1a" {
name = "test_1i_1a_1"
user = "${aws_iam_user.for_policies_1i_1a.name}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "sns:Describe*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
resource "aws_iam_policy" "user_attached_1i_1a_1" {
name = "${terraform.env}_user_attached_1i_1a_1"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "ec2:Describe*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
resource "aws_iam_user_policy_attachment" "attch_1i_1a_1" {
user = "${aws_iam_user.for_policies_1i_1a.name}"
policy_arn = "${aws_iam_policy.user_attached_1i_1a_1.arn}"
}
output "iam_policy_user_attached_1i_1a_1_arn" {
value = "${aws_iam_policy.user_attached_1i_1a_1.arn}"
}
output "iam_policy_user_attached_1i_1a_1_name" {
value = "${aws_iam_policy.user_attached_1i_1a_1.name}"
}
output "iam_user_for_policies_1i_1a_name" {
value = "${aws_iam_user.for_policies_1i_1a.name}"
}
#------------------------------------------#
# A user with no inline and two attached
#------------------------------------------#
resource "aws_iam_user" "for_policies_0i_2a" {
name = "${terraform.env}_user_for_policies_0i_2a"
}
# We'd like to test with an AWS-Managed policy, but
# the old AWS TF plugin we're using doesn't support the data lookup.
# For now, hardcode until https://github.com/chef/inspec/issues/2884
# data "aws_iam_policy" "user_attached_0i_2a_1" {
# arn = "arn:aws:iam::aws:policy/ReadOnlyAccess"
# }
variable "aws_iam_policy_user_attached_0i_2a_1_arn" {
default = "arn:aws:iam::aws:policy/ReadOnlyAccess"
}
variable "aws_iam_policy_user_attached_0i_2a_1_name" {
default = "ReadOnlyAccess"
}
resource "aws_iam_policy" "user_attached_0i_2a_2" {
name = "${terraform.env}_user_attached_0i_2a_2"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{ "Action": "ec2:Describe*", "Effect": "Allow", "Resource": "*" }
]
}
EOF
}
resource "aws_iam_user_policy_attachment" "attch_0i_2a_1" {
user = "${aws_iam_user.for_policies_0i_2a.name}"
policy_arn = "${var.aws_iam_policy_user_attached_0i_2a_1_arn}"
}
resource "aws_iam_user_policy_attachment" "attch_0i_2a_2" {
user = "${aws_iam_user.for_policies_0i_2a.name}"
policy_arn = "${aws_iam_policy.user_attached_0i_2a_2.arn}"
}
output "iam_policy_user_attached_0i_2a_1_arn" {
value = "${var.aws_iam_policy_user_attached_0i_2a_1_arn}"
}
output "iam_policy_user_attached_0i_2a_1_name" {
value = "${var.aws_iam_policy_user_attached_0i_2a_1_name}"
}
output "iam_policy_user_attached_0i_2a_2_arn" {
value = "${aws_iam_policy.user_attached_0i_2a_2.arn}"
}
output "iam_policy_user_attached_0i_2a_2_name" {
value = "${aws_iam_policy.user_attached_0i_2a_2.name}"
}
output "iam_user_for_policies_0i_2a_name" {
value = "${aws_iam_user.for_policies_0i_2a.name}"
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.3 KiB

View file

@ -1,43 +0,0 @@
#===========================================================================#
# KMS Key
#===========================================================================#
# kms_key_1: enabled, rotation on; kms_key_2: disabled, rotation off —
# the positive/negative pair for enabled/rotation matchers.
resource "aws_kms_key" "kms_key_1" {
description = "${terraform.env}-kms_key_1"
deletion_window_in_days = 10
key_usage = "ENCRYPT_DECRYPT"
is_enabled = true
enable_key_rotation = true
}
resource "aws_kms_key" "kms_key_2" {
description = "${terraform.env}-kms_key_2"
deletion_window_in_days = 10
key_usage = "ENCRYPT_DECRYPT"
is_enabled = false
enable_key_rotation = false
}
output "kms_key_recall_hit_arn" {
value = "${aws_kms_key.kms_key_1.arn}"
}
output "kms_key_enabled_key_id" {
value = "${aws_kms_key.kms_key_1.key_id}"
}
output "kms_key_disabled_key_id" {
value = "${aws_kms_key.kms_key_2.key_id}"
}
output "kms_key_enabled_key_description" {
value = "${terraform.env}-kms_key_1"
}
output "kms_key_enabled_arn" {
value = "${aws_kms_key.kms_key_1.arn}"
}
output "kms_key_disabled_arn" {
value = "${aws_kms_key.kms_key_2.arn}"
}

View file

@ -1,26 +0,0 @@
# Contains resources and outputs related to testing the aws_rds_* resources.
#======================================================#
# RDS Instances
#======================================================#
# Minimal throwaway MySQL instance. Credentials below are fixture-only
# (the instance is created in an isolated test account and destroyed after
# the run) — never reuse them for real infrastructure.
resource "aws_db_instance" "default" {
allocated_storage = 20
storage_type = "gp2"
engine = "mysql"
engine_version = "5.6.37"
instance_class = "db.t2.micro"
identifier = "test-instance-id"
name = "test_instance"
username = "testuser"
password = "testpassword"
parameter_group_name = "default.mysql5.6"
skip_final_snapshot = true
depends_on = [ "aws_subnet.subnet_01" ]
}
output "rds_db_instance_id" {
description = "The RDS instance ID"
value = "${aws_db_instance.default.id}"
}

View file

@ -1,44 +0,0 @@
#===========================================================================#
# Route Tables
#===========================================================================#
# Look up the internet gateway already attached to the default VPC.
data "aws_internet_gateway" "default" {
filter {
name = "attachment.vpc-id"
values = ["${data.aws_vpc.default.id}"]
}
}
# Two identical route tables so plural-resource tests see more than one.
resource "aws_route_table" "rtb" {
vpc_id = "${data.aws_vpc.default.id}"
route {
cidr_block = "172.32.1.0/24"
gateway_id = "${data.aws_internet_gateway.default.id}"
}
}
resource "aws_route_table" "rtb2" {
vpc_id = "${data.aws_vpc.default.id}"
route {
cidr_block = "172.32.1.0/24"
gateway_id = "${data.aws_internet_gateway.default.id}"
}
}
output "route_table_1_id" {
value = "${aws_route_table.rtb.id}"
}
output "route_table_2_id" {
value = "${aws_route_table.rtb2.id}"
}
output "route_table_1_propagating_vgws" {
value = "${aws_route_table.rtb.propagating_vgws}"
}
output "route_table_1_vpc_id" {
value = "${aws_route_table.rtb.vpc_id}"
}

View file

@ -1,194 +0,0 @@
#=================================================================#
# S3 Bucket
#=================================================================#
# One bucket per ACL / feature combination under test. NOTE(review): the
# "acess" misspellings in resource and output names below are load-bearing —
# the verify profile keys on these output names, so do not rename.
resource "aws_s3_bucket" "public" {
bucket = "inspec-testing-public-${terraform.env}.chef.io"
acl = "public-read"
}
output "s3_bucket_public_name" {
value = "${aws_s3_bucket.public.id}"
}
output "s3_bucket_public_region" {
value = "${aws_s3_bucket.public.region}"
}
resource "aws_s3_bucket" "private" {
bucket = "inspec-testing-private-${terraform.env}.chef.io"
acl = "private"
}
output "s3_bucket_private_name" {
value = "${aws_s3_bucket.private.id}"
}
resource "aws_s3_bucket" "public_bucket_for_objects" {
bucket = "inspec-testing-public-objects-${terraform.env}.chef.io"
acl = "public-read"
}
output "s3_bucket_public_for_objects" {
value = "${aws_s3_bucket.public_bucket_for_objects.id}"
}
resource "aws_s3_bucket" "auth" {
bucket = "inspec-testing-auth-${terraform.env}.chef.io"
acl = "authenticated-read"
}
output "s3_bucket_auth_name" {
value = "${aws_s3_bucket.auth.id}"
}
# Private ACL but a public bucket policy (attached below) — the "mixed" case.
resource "aws_s3_bucket" "private_acl_public_policy" {
bucket = "inspec-testing-mixed-01-${terraform.env}.chef.io"
acl = "private"
}
output "s3_bucket_private_acl_public_policy_name" {
value = "${aws_s3_bucket.private_acl_public_policy.id}"
}
# Target bucket for server access logs; log-delivery-write ACL required.
resource "aws_s3_bucket" "log_bucket" {
bucket = "inspec-testing-log-bucket-${terraform.env}.chef.io"
force_destroy = true
acl = "log-delivery-write"
}
output "s3_bucket_log_bucket_name" {
value = "${aws_s3_bucket.log_bucket.id}"
}
resource "aws_s3_bucket" "acess_logging_enabled" {
bucket = "inspec-testing-acess-logging-enabled-${terraform.env}.chef.io"
acl = "private"
logging {
target_bucket = "${aws_s3_bucket.log_bucket.id}"
target_prefix = "log/"
}
}
output "s3_bucket_access_logging_enabled_name" {
value = "${aws_s3_bucket.acess_logging_enabled.id}"
}
resource "aws_s3_bucket" "acess_logging_not_enabled" {
bucket = "inspec-testing-acess-logging-not-enabled-${terraform.env}.chef.io"
acl = "private"
}
output "s3_bucket_access_logging_not_enabled_name" {
value = "${aws_s3_bucket.acess_logging_not_enabled.id}"
}
resource "aws_s3_bucket" "default_encryption_enabled" {
bucket = "inspec-testing-defencrypt-enabled-${terraform.env}.chef.io"
acl = "private"
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "aws:kms"
}
}
}
}
output "s3_bucket_default_encryption_enabled_name" {
value = "${aws_s3_bucket.default_encryption_enabled.id}"
}
resource "aws_s3_bucket" "default_encryption_not_enabled" {
bucket = "inspec-testing-defencrypt-not-enabled-${terraform.env}.chef.io"
acl = "private"
}
output "s3_bucket_default_encryption_not_enabled_name" {
value = "${aws_s3_bucket.default_encryption_not_enabled.id}"
}
#=================================================================#
# S3 Bucket Policies
#=================================================================#
resource "aws_s3_bucket_policy" "allow" {
bucket = "${aws_s3_bucket.public.id}"
policy =<<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowGetObject",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::${aws_s3_bucket.public.id}/*"
}
]
}
POLICY
}
resource "aws_s3_bucket_policy" "deny" {
bucket = "${aws_s3_bucket.private.id}"
policy =<<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "DenyGetObject",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::${aws_s3_bucket.private.id}/*"
}
]
}
POLICY
}
# Public-read policy on the private-ACL bucket (the mixed fixture above).
resource "aws_s3_bucket_policy" "allow-02" {
bucket = "${aws_s3_bucket.private_acl_public_policy.id}"
policy =<<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowGetObject",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetObject",
"Resource": "arn:aws:s3:::${aws_s3_bucket.private_acl_public_policy.id}/*"
}
]
}
POLICY
}
#=================================================================#
# S3 Bucket Object
#=================================================================#
# Same local file uploaded twice with different object ACLs.
resource "aws_s3_bucket_object" "inspec_logo_public" {
bucket = "${aws_s3_bucket.public_bucket_for_objects.id}"
key = "inspec-logo-public"
source = "inspec-logo.png"
acl = "public-read"
}
resource "aws_s3_bucket_object" "inspec_logo_private" {
bucket = "${aws_s3_bucket.public_bucket_for_objects.id}"
key = "inspec-logo-private"
source = "inspec-logo.png"
acl = "private"
}
output "s3_bucket_object_inspec_logo_public" {
value = "${aws_s3_bucket_object.inspec_logo_public.id}"
}
output "s3_bucket_object_inspec_logo_private" {
value = "${aws_s3_bucket_object.inspec_logo_private.id}"
}

View file

@ -1,63 +0,0 @@
#===========================================================================#
# SNS Topic
#===========================================================================#
# Test fixture:
# sns_topic_1 has one confirmed subscription
# sns_topic_2 has no subscriptions
resource "aws_sns_topic" "sns_test_topic_1" {
name = "${terraform.env}-test-topic-01"
}
output "sns_topic_recall_hit_arn" {
value = "${aws_sns_topic.sns_test_topic_1.arn}"
}
resource "aws_sqs_queue" "sqs_test_queue_1" {
name = "${terraform.env}-test-queue-01"
}
# SQS subscriptions auto-confirm, giving topic 1 its confirmed subscription.
resource "aws_sns_topic_subscription" "sqs_test_queue_01_sub" {
topic_arn = "${aws_sns_topic.sns_test_topic_1.arn}"
protocol = "sqs"
endpoint = "${aws_sqs_queue.sqs_test_queue_1.arn}"
}
output "sns_topic_with_subscription_arn" {
value = "${aws_sns_topic.sns_test_topic_1.arn}"
}
resource "aws_sns_topic" "sns_test_topic_2" {
name = "${terraform.env}-test-topic-02"
}
output "sns_topic_no_subscription_arn" {
value = "${aws_sns_topic.sns_test_topic_2.arn}"
}
# Third topic/queue pair whose SUBSCRIPTION arn is the object under test.
resource "aws_sns_topic" "topic_for_sub_03" {
name = "${terraform.env}-topic_for_sub_3_test"
}
resource "aws_sqs_queue" "sqs_for_sub_03" {
name = "${terraform.env}-sqs_for_sub_03"
}
resource "aws_sns_topic_subscription" "subscription_3" {
topic_arn = "${aws_sns_topic.topic_for_sub_03.arn}"
protocol = "sqs"
endpoint = "${aws_sqs_queue.sqs_for_sub_03.arn}"
}
output "sns_subscription_03_arn" {
value = "${aws_sns_topic_subscription.subscription_3.arn}"
}
output "sns_topic_3_arn" {
value = "${aws_sns_topic.topic_for_sub_03.arn}"
}
output "sqs_for_sub_03_arn" {
value = "${aws_sqs_queue.sqs_for_sub_03.arn}"
}

View file

@ -1,33 +0,0 @@
#===========================================================================#
# SQS QUeue
#===========================================================================#
# Test fixture:
# sqs_queue_1 is a non-fifo queue
# sqs_queue_2 is a fifo queue
resource "aws_sqs_queue" "sqs_queue_1" {
name = "sqs_queue_1"
delay_seconds = 0
max_message_size = 262144 # 256 KB
message_retention_seconds = 345600 # 4 days
receive_wait_time_seconds = 2
visibility_timeout_seconds = 300 # 5 minutes
}
output "sqs_queue_1_url" {
value = "${aws_sqs_queue.sqs_queue_1.id}"
}
resource "aws_sqs_queue" "sqs_queue_2" {
name = "sqs_queue_2.fifo"
fifo_queue = true
content_based_deduplication = true
}
output "sqs_queue_2_url" {
value = "${aws_sqs_queue.sqs_queue_2.id}"
}

View file

@ -1,71 +0,0 @@
fixtures = {}
[
'cloudtrail_trail_1_cloud_watch_logs_group_arn',
'cloudtrail_trail_1_cloud_watch_logs_role_arn',
'cloudtrail_trail_1_key_arn',
'cloudtrail_trail_1_name',
'cloudtrail_trail_1_arn',
'cloudtrail_trail_1_s3_bucket_name',
'cloudtrail_trail_2_name',
'cloudtrail_trail_2_arn',
'cloudtrail_trail_2_s3_bucket_name'
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/cloudtrail.tf',
)
end
control "aws_cloudtrail_trail recall" do
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_1_name']) do
it { should exist}
end
describe aws_cloudtrail_trail(trail_name: fixtures['cloudtrail_trail_2_name']) do
it { should exist }
end
describe aws_cloudtrail_trail('non-existent-trail') do
it { should_not exist }
end
end
control "aws_cloudtrail_trail properties" do
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_1_name']) do
its('s3_bucket_name') { should eq fixtures['cloudtrail_trail_1_s3_bucket_name'] }
its('trail_arn') { should eq fixtures['cloudtrail_trail_1_arn'] }
its('cloud_watch_logs_role_arn') { should eq fixtures['cloudtrail_trail_1_cloud_watch_logs_role_arn'] }
its('cloud_watch_logs_log_group_arn') { should eq fixtures['cloudtrail_trail_1_cloud_watch_logs_group_arn']}
its('kms_key_id') { should eq fixtures['cloudtrail_trail_1_key_arn'] }
its('delivered_logs_days_ago') { should eq 0 }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_2_name']) do
its('s3_bucket_name') { should eq fixtures['cloudtrail_trail_2_s3_bucket_name'] }
its('trail_arn') { should eq fixtures['cloudtrail_trail_2_arn'] }
its('cloud_watch_logs_role_arn') { should be_nil }
its('cloud_watch_logs_log_group_arn') { should be_nil}
its('kms_key_id') { should be_nil }
end
end
control "aws_cloudtrail_trail matchers" do
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_1_name']) do
it { should be_encrypted }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_2_name']) do
it { should_not be_encrypted }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_1_name']) do
it { should be_multi_region_trail }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_2_name']) do
it { should_not be_multi_region_trail }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_1_name']) do
it { should be_log_file_validation_enabled }
end
describe aws_cloudtrail_trail(fixtures['cloudtrail_trail_2_name']) do
it { should_not be_log_file_validation_enabled }
end
end

View file

@ -1,5 +0,0 @@
# Smoke test for the plural resource: the test account is expected to have
# at least one CloudTrail trail (created by the build fixtures).
control "aws_cloudtrail_trails recall" do
describe aws_cloudtrail_trails do
it { should exist }
end
end

View file

@ -1,29 +0,0 @@
# Integration tests for the aws_cloudwatch_alarm singular InSpec resource.
fixtures = {}
[
'cloudwatch_alarm_1_name',
'cloudwatch_alarm_1_metric_name',
'cloudwatch_alarm_1_namespace',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/cloudwatch.tf',
)
end
# Alarms are looked up by metric name + namespace, not by alarm name.
control 'aws_cloudwatch_alarm recall' do
describe aws_cloudwatch_alarm(
metric_name: fixtures['cloudwatch_alarm_1_metric_name'],
metric_namespace: fixtures['cloudwatch_alarm_1_namespace'],
) do
it { should exist }
end
describe aws_cloudwatch_alarm(
metric_name: 'NopeNope',
metric_namespace: 'Nope',
) do
it { should_not exist }
end
end

View file

@ -1,74 +0,0 @@
# Integration tests for the aws_cloudwatch_log_metric_filter resource.
fixtures = {}
[
'log_metric_filter_1_name',
'log_metric_filter_1_log_group_name',
'log_metric_filter_1_metric_name',
'log_metric_filter_2_name',
'log_metric_filter_2_log_group_name',
'log_metric_filter_2_pattern',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/cloudwatch.tf',
)
end
#----------------------- Recall -----------------------#
# Filter 1 is found by name + log group; filter 2 by pattern alone.
control "aws_cloudwatch_log_metric_filter recall" do
describe aws_cloudwatch_log_metric_filter(
filter_name: fixtures['log_metric_filter_1_name'],
log_group_name: fixtures['log_metric_filter_1_log_group_name'],
) do
it { should exist }
end
describe aws_cloudwatch_log_metric_filter(
pattern: fixtures['log_metric_filter_2_pattern'],
) do
it { should exist }
end
end
#----------------------- pattern property -----------------------#
# NOTE(review): the expected pattern is hard-coded here rather than taken
# from a fixture; ../build/cloudwatch.tf must keep 'testpattern01' in sync.
control "aws_cloudwatch_log_metric_filter pattern property" do
describe aws_cloudwatch_log_metric_filter(
filter_name: fixtures['log_metric_filter_1_name'],
log_group_name: fixtures['log_metric_filter_1_log_group_name'],
) do
its('pattern') { should cmp 'testpattern01' }
end
end
#----------------------- metric_name property -----------------------#
control "aws_cloudwatch_log_metric_filter metric_name property" do
describe aws_cloudwatch_log_metric_filter(
filter_name: fixtures['log_metric_filter_1_name'],
log_group_name: fixtures['log_metric_filter_1_log_group_name'],
) do
its('metric_name') { should cmp fixtures['log_metric_filter_1_metric_name'] }
end
end
#----------------------- log_group_name property -----------------------#
control "aws_cloudwatch_log_metric_filter log_group_name property" do
describe aws_cloudwatch_log_metric_filter(
filter_name: fixtures['log_metric_filter_2_name'],
log_group_name: fixtures['log_metric_filter_2_log_group_name'],
) do
its('log_group_name') { should cmp fixtures['log_metric_filter_2_log_group_name'] }
end
end
#----------------------- filter_name property -----------------------#
control "aws_cloudwatch_log_metric_filter filter_name property" do
describe aws_cloudwatch_log_metric_filter(
filter_name: fixtures['log_metric_filter_2_name'],
log_group_name: fixtures['log_metric_filter_2_log_group_name'],
) do
its('filter_name') { should cmp fixtures['log_metric_filter_2_name'] }
end
end

View file

@ -1,59 +0,0 @@
# Integration tests for the aws_config_delivery_channel singular resource.
# AWS Config allows at most one delivery channel per account/region, so the
# resource also supports a no-argument "singleton" lookup.
fixtures = {}
[
'delivery_channel_01_name',
'config_recorder_for_delivery_channel_role_arn',
's3_bucket_for_delivery_channel_name',
'delivery_channel_01_bucket_prefix',
'sns_topic_for_delivery_channel_arn'
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/iam.tf',
)
end
#======================================================#
# aws_config_delivery_channel - Singular
#======================================================#
#------------------- Recall / Miss -------------------#
control "aws_config_delivery_channel recall" do
# Test default singleton return
describe aws_config_delivery_channel do
it { should exist }
end
# Test scalar param
describe aws_config_delivery_channel(fixtures['delivery_channel_01_name']) do
it { should exist }
end
# Test hash parameter
describe aws_config_delivery_channel(channel_name: fixtures['delivery_channel_01_name']) do
it { should exist }
end
# Test recorder that doesn't exist
describe aws_config_delivery_channel(channel_name: 'NonExistentChannel') do
it { should_not exist }
end
end
#------------------- Properties -------------------#
control "aws_config_delivery_channel properties" do
describe aws_config_delivery_channel(fixtures['delivery_channel_01_name']) do
its('channel_name') { should eq fixtures['delivery_channel_01_name'] }
its('s3_bucket_name') { should eq fixtures['s3_bucket_for_delivery_channel_name'] }
its('s3_key_prefix') { should eq nil }
its('sns_topic_arn') { should eq fixtures['sns_topic_for_delivery_channel_arn'] }
its('delivery_frequency_in_hours') { should eq 24 }
its('delivery_frequency_in_hours') { should be > 3 }
end
describe aws_config_delivery_channel do
its('channel_name') { should eq fixtures['delivery_channel_01_name'] }
end
end

View file

@ -1,62 +0,0 @@
# Integration tests for the aws_config_recorder singular resource.
# Like the delivery channel, the Config recorder is a per-account/region
# singleton, so a no-argument lookup is supported.
fixtures = {}
[
'role_for_config_recorder_arn',
'config_recorder_name',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/config.tf',
)
end
#======================================================#
# aws_config_recorder - Singular
#======================================================#
#------------------- Recall / Miss -------------------#
control "aws_config_recorder recall" do
# Get the singleton if you don't pass a name
describe aws_config_recorder do
it { should exist }
end
# Test scalar param
describe aws_config_recorder(fixtures['config_recorder_name']) do
it { should exist }
end
# Test hash parameter
describe aws_config_recorder(recorder_name: fixtures['config_recorder_name']) do
it { should exist }
end
# Test recorder that doesn't exist
describe aws_config_recorder(recorder_name: 'NonExistentRecorder') do
it { should_not exist }
end
end
#------------------- Properties -------------------#
control "aws_config_recorder properties" do
describe aws_config_recorder do
its('recorder_name') { should eq fixtures['config_recorder_name'] }
end
describe aws_config_recorder(fixtures['config_recorder_name']) do
its('recorder_name') { should eq fixtures['config_recorder_name'] }
its('role_arn') { should eq fixtures['role_for_config_recorder_arn'] }
its('resource_types') { should eq [] }
end
end
#------------------- Matchers -------------------#
# NOTE(review): expecting recording_all_resource_types while NOT recording
# looks surprising — presumably the fixture recorder is configured for all
# resource types but left stopped; confirm against ../build/config.tf.
control "aws_config_recorder matchers" do
describe aws_config_recorder(fixtures['config_recorder_name']) do
it { should_not be_recording }
it { should be_recording_all_resource_types }
it { should_not be_recording_all_global_types }
end
end

View file

@ -1,77 +0,0 @@
# Integration tests for the aws_ec2_instance singular resource.
fixtures = {}
[
'ec2_instance_recall_hit_id',
'ec2_instance_recall_hit_name',
'ec2_instance_recall_miss',
'ec2_instance_no_role_id',
'ec2_instance_has_role_id',
'ec2_instance_type_t2_micro_id',
'ec2_instance_type_t2_small_id',
'ec2_instance_centos_id',
'ec2_ami_id_centos',
'ec2_instance_debian_id',
'ec2_ami_id_debian',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/ec2.tf',
)
end
#------------------- Recall / Miss -------------------#
# NOTE(review): control title "aws_ec_instance" appears to be a typo for
# "aws_ec2_instance"; renaming would change the reported control ID, so it
# is flagged rather than fixed here.
control "aws_ec_instance - Recall" do
describe aws_ec2_instance(fixtures['ec2_instance_recall_miss']) do
it { should_not exist }
end
# Recall by ID
describe aws_ec2_instance(fixtures['ec2_instance_recall_hit_id']) do
it { should exist }
end
# Recall by Name tag
describe aws_ec2_instance(name: fixtures['ec2_instance_recall_hit_name']) do
it { should exist }
end
end
# TODO: Most properties are untested. Some to consider including:
# security_groups
# state
# vpc_id
# tags
#----------------- has_role property ------------------#
control "aws_ec2_instance - has_role property" do
describe aws_ec2_instance(fixtures['ec2_instance_has_role_id']) do
it { should have_roles } # TODO: this is a misnomer, you may have only one role attached
end
describe aws_ec2_instance(fixtures['ec2_instance_no_role_id']) do
it { should_not have_roles } # TODO: this is a misnomer, you may have only one role attached
end
end
#----------------- instance_type property ------------------#
control "aws_ec2_instance - instance_type property" do
describe aws_ec2_instance(fixtures['ec2_instance_type_t2_micro_id']) do
its('instance_type') { should eq 't2.micro' }
end
describe aws_ec2_instance(fixtures['ec2_instance_type_t2_small_id']) do
its('instance_type') { should eq 't2.small' }
end
end
#-------------------- image_id property --------------------#
control "aws_ec2_instance - image_id property" do
describe aws_ec2_instance(fixtures['ec2_instance_centos_id']) do
its('image_id') { should eq fixtures['ec2_ami_id_centos'] }
end
describe aws_ec2_instance(fixtures['ec2_instance_debian_id']) do
its('image_id') { should eq fixtures['ec2_ami_id_debian'] }
end
end

View file

@ -1,30 +0,0 @@
# Integration tests for the aws_ec2_instances plural resource.
fixtures = {}
[
'ec2_instance_recall_miss',
'ec2_instance_centos_id',
'ec2_instance_debian_id',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/ec2.tf',
)
end
#------------------- Recall / Miss -------------------#
# NOTE(review): control titles "aws_ec_instances" appear to be typos for
# "aws_ec2_instances"; renaming would change the reported control IDs.
control "aws_ec_instances - Recall" do
describe aws_ec2_instances do
it { should exist }
end
end
#------------------- Property instance_ids -------------#
control "aws_ec_instances - instance_ids property" do
describe aws_ec2_instances do
its('instance_ids') { should_not be_empty }
its('instance_ids') { should include fixtures['ec2_instance_centos_id'] }
its('instance_ids') { should include fixtures['ec2_instance_debian_id'] }
its('instance_ids') { should_not include fixtures['ec2_instance_recall_miss'] }
its('instance_ids') { should_not include nil }
end
end

View file

@ -1,45 +0,0 @@
# Integration tests for the aws_eks_cluster singular resource.
fixtures = {}
[
'eks_cluster_id',
'eks_cluster_name',
'eks_cluster_security_group_id',
'eks_vpc_subnets',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/eks.tf',
)
end
control "aws_eks_cluster recall" do
describe aws_eks_cluster(fixtures['eks_cluster_id']) do
it { should exist }
end
describe aws_eks_cluster('i-dont-exist') do
it { should_not exist }
end
end
# Status may legitimately still be CREATING right after terraform apply.
control "aws_eks_cluster properties" do
describe aws_eks_cluster(fixtures['eks_cluster_id']) do
its('name') { should eq fixtures['eks_cluster_name'] }
its('status') { should be_in %w(ACTIVE CREATING) }
its('subnets_count') { should eq 3 }
its('security_groups_count') { should eq 1 }
fixtures['eks_vpc_subnets'].each do |subnet|
its('subnet_ids') { should include (subnet) }
end
end
end
control "aws_eks_cluster matchers" do
describe aws_eks_cluster(fixtures['eks_cluster_id']) do
it { should exist }
it { should be_active }
end
end

View file

@ -1,86 +0,0 @@
# Integration tests for the aws_elb singular resource.
# Fixture topology: ELB "alpha" spans one AZ and has no instances;
# ELB "beta" spans two AZs and has two registered instances.
# NOTE(review): the description strings point at ../build/ec2.tf; the ELB
# fixtures may actually live in an elb.tf — confirm in the build directory.
fixtures = {}
[
'aws_region',
'elb_subnet_a_id',
'elb_subnet_c_id',
'elb_alpha_name',
'elb_alpha_dns_name',
'elb_beta_name',
'elb_beta_dns_name',
'elb_beta_instance_1_id',
'elb_beta_instance_2_id',
'elb_security_group_to_instances_id',
'elb_security_group_to_lb_id',
'elb_vpc_id',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/ec2.tf',
)
end
#------------------- Search / Recall -------------------#
control "aws_elb - search" do
describe aws_elb(fixtures['elb_alpha_name']) do
it { should exist }
end
describe aws_elb(elb_name: fixtures['elb_beta_name']) do
it { should exist }
end
describe aws_elb('nonesuch') do
it { should_not exist }
end
end
# #------------------- Properties -------------#
control "aws_elb properties" do
describe aws_elb(fixtures['elb_alpha_name']) do
its('availability_zones') { should include fixtures['aws_region'] + 'a' }
its('availability_zones.count') { should cmp 1 }
its('dns_name') { should match /elb\.amazonaws\.com$/ }
its('external_ports') { should include 80 }
its('external_ports.count') { should cmp 1 }
its('instance_ids') { should be_empty }
its('internal_ports') { should include 8080 }
its('internal_ports.count') { should cmp 1 }
its('security_group_ids') { should include fixtures['elb_security_group_to_lb_id']}
its('security_group_ids.count') { should cmp 1 }
its('subnet_ids') { should include fixtures['elb_subnet_a_id']}
its('subnet_ids.count') { should cmp 1 }
its('vpc_id') { should cmp fixtures['elb_vpc_id']}
end
describe aws_elb(fixtures['elb_beta_name']) do
its('availability_zones') { should include fixtures['aws_region'] + 'a' }
its('availability_zones') { should include fixtures['aws_region'] + 'c' }
its('availability_zones.count') { should cmp 2 }
its('dns_name') { should match /elb\.amazonaws\.com$/ }
its('external_ports') { should include 80 }
its('external_ports.count') { should cmp 1 }
its('instance_ids') { should include fixtures['elb_beta_instance_1_id']}
its('instance_ids') { should include fixtures['elb_beta_instance_2_id']}
its('instance_ids.count') { should cmp 2 }
its('internal_ports') { should include 80 }
its('internal_ports.count') { should cmp 1 }
its('security_group_ids') { should include fixtures['elb_security_group_to_lb_id']}
its('security_group_ids.count') { should cmp 1 }
its('subnet_ids') { should include fixtures['elb_subnet_a_id']}
its('subnet_ids') { should include fixtures['elb_subnet_c_id']}
its('subnet_ids.count') { should cmp 2 }
its('vpc_id') { should cmp fixtures['elb_vpc_id']}
end
# A miss returns an "empty" resource: list properties are empty, scalars nil.
describe aws_elb('nonesuch') do
its('availability_zones') { should be_empty }
its('dns_name') { should be_nil }
its('external_ports') { should be_empty }
its('instance_ids') { should be_empty }
its('internal_ports') { should be_empty }
its('security_group_ids') { should be_empty }
its('subnet_ids') { should be_empty }
its('vpc_id') { should be_nil }
end
end

View file

@ -1,128 +0,0 @@
# Integration tests for the aws_elbs plural resource.
# Fixture topology (shared with the singular aws_elb tests):
# alpha — AZ "a" only, no instances, internal port 8080;
# beta — AZs "a" and "c", two instances, internal port 80.
fixtures = {}
[
'aws_region',
'elb_subnet_a_id',
'elb_subnet_c_id',
'elb_alpha_name',
'elb_alpha_dns_name',
'elb_beta_name',
'elb_beta_dns_name',
'elb_beta_instance_1_id',
'elb_beta_instance_2_id',
'elb_security_group_to_instances_id',
'elb_security_group_to_lb_id',
'elb_vpc_id',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/ec2.tf',
)
end
#------------------- Recall / Miss -------------------#
control "aws_elbs - Recall" do
describe aws_elbs do
it { should exist }
end
describe aws_elbs.where(elb_name: 'nonesuch') do
it { should_not exist }
end
end
#------------------- Filtering -------------------#
# Exercises both hash-style and block-style FilterTable filtering.
control "aws_elbs - filtering" do
elbs = aws_elbs
# Alpha is in a only
# Beta is in a and c
region = fixtures['aws_region']
describe elbs.where { availability_zones.include? region + 'a' } do
its('elb_names') { should include fixtures['elb_alpha_name']}
its('elb_names') { should include fixtures['elb_beta_name']}
end
describe elbs.where { availability_zones.include? region + 'c' } do
its('elb_names') { should_not include fixtures['elb_alpha_name']}
its('elb_names') { should include fixtures['elb_beta_name']}
end
describe elbs.where(dns_name: /com$/) do
its('count') { should cmp 2 }
end
# Both listen on 80
describe elbs.where { external_ports.include? 80 } do
its('count') { should cmp 2 }
end
# Alpha has no instances, beta has two
describe elbs.where { instance_ids.count == 0 } do
its('count') { should cmp 1 }
its('elb_names') { should include fixtures['elb_alpha_name'] }
end
describe elbs.where { instance_ids.count == 2 } do
its('count') { should cmp 1 }
its('elb_names') { should include fixtures['elb_beta_name'] }
its('instance_ids') { should include fixtures['elb_beta_instance_1_id'] }
its('instance_ids') { should include fixtures['elb_beta_instance_2_id'] }
its('instance_ids.count') { should cmp 2 }
end
# Alpha uses 8080, beta uses 80
describe elbs.where { internal_ports.include? 8080 } do
its('count') { should cmp 1 }
its('elb_names') { should include fixtures['elb_alpha_name'] }
end
# Both have the same
describe elbs.where { security_group_ids.count == 1 } do
its('count') { should cmp 2 }
its('elb_names') { should include fixtures['elb_alpha_name'] }
its('elb_names') { should include fixtures['elb_beta_name'] }
end
# Alpha is in A only
# Beta is in A and C
describe elbs.where { subnet_ids.include? fixtures['elb_subnet_a_id']} do
its('count') { should cmp 2 }
its('elb_names') { should include fixtures['elb_alpha_name'] }
its('elb_names') { should include fixtures['elb_beta_name'] }
end
describe elbs.where { subnet_ids.include? fixtures['elb_subnet_c_id']} do
its('count') { should cmp 1 }
its('elb_names') { should_not include fixtures['elb_alpha_name'] }
its('elb_names') { should include fixtures['elb_beta_name'] }
end
describe elbs.where(vpc_id: fixtures['elb_vpc_id']) do
its('count') { should cmp 2 }
its('elb_names') { should include fixtures['elb_alpha_name'] }
its('elb_names') { should include fixtures['elb_beta_name'] }
end
end
# #------------------- Properties -------------#
# Plural properties are flattened/deduplicated across all matched ELBs.
control "aws_elbs properties" do
describe aws_elbs do
its('availability_zones') { should include fixtures['aws_region'] + 'a' }
its('availability_zones') { should include fixtures['aws_region'] + 'c' }
its('availability_zones.count') { should cmp 2 }
its('dns_names') { should include(a_string_ending_with('elb.amazonaws.com'))}
its('dns_names.count') { should cmp 2 }
its('external_ports') { should include 80 }
its('external_ports.count') { should cmp 1 }
its('instance_ids') { should include fixtures['elb_beta_instance_1_id']}
its('instance_ids') { should include fixtures['elb_beta_instance_2_id']}
its('instance_ids.count') { should cmp 2 }
its('internal_ports') { should include 80 }
its('internal_ports') { should include 8080 }
its('internal_ports.count') { should cmp 2 }
its('security_group_ids') { should include fixtures['elb_security_group_to_lb_id']}
its('security_group_ids.count') { should cmp 1 }
its('subnet_ids') { should include fixtures['elb_subnet_a_id']}
its('subnet_ids') { should include fixtures['elb_subnet_c_id']}
its('subnet_ids.count') { should cmp 2 }
its('vpc_ids') { should include fixtures['elb_vpc_id']}
its('vpc_ids.count') { should cmp 1 }
end
end

View file

@ -1,49 +0,0 @@
# Integration tests for the aws_flow_log singular resource.
# Fixtures: one flow log attached to a VPC, one attached to a subnet.
fixtures = {}
%w[flow_log_alpha_vpc_log_id flow_log_alpha_subnet_log_id
flow_log_alpha_subnet_id flow_log_vpc_id].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/flow_log.tf',
)
end
control 'aws_flow_log exists' do
describe aws_flow_log(fixtures['flow_log_alpha_vpc_log_id']) do
it { should exist}
end
end
# 'fl-1122aabb' is a syntactically valid but nonexistent flow log ID.
control 'aws_flow_log should not exist' do
describe aws_flow_log(flow_log_id: 'fl-1122aabb') do
it { should_not exist }
end
end
control 'aws_flow_log search by flow_log_id exists' do
describe aws_flow_log(flow_log_id: fixtures['flow_log_alpha_vpc_log_id']) do
it { should exist }
it { should be_attached_to_vpc }
its('flow_log_id') { should cmp fixtures['flow_log_alpha_vpc_log_id'] }
its('log_group_name') { should cmp 'flow_log_alpha_log_group' }
its('resource_id') { should cmp fixtures['flow_log_vpc_id'] }
its('resource_type') { should cmp 'vpc' }
end
end
control 'aws_flow_log search by subnet exists' do
describe aws_flow_log(subnet_id: fixtures['flow_log_alpha_subnet_id']) do
it { should exist }
it { should be_attached_to_subnet }
its('flow_log_id') { should cmp fixtures['flow_log_alpha_subnet_log_id'] }
its('log_group_name') { should cmp 'flow_log_alpha_log_group' }
its('resource_id') { should cmp fixtures['flow_log_alpha_subnet_id'] }
its('resource_type') { should cmp 'subnet' }
end
end
control 'aws_flow_log search by vpc exists' do
describe aws_flow_log(vpc_id: fixtures['flow_log_vpc_id']) do
it { should exist }
end
end

View file

@ -1,43 +0,0 @@
# Integration tests for the aws_iam_access_key singular resource.
fixtures = {}
[
'iam_user_with_access_key',
'iam_user_without_access_key',
'iam_user_recall_miss',
'iam_access_key_recall_hit',
'iam_access_key_recall_miss',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/iam.tf',
)
end
#======================================================#
# IAM Access Key - Singular
#======================================================#
#------------------- Recall / Miss -------------------#
control "aws_iam_access_key recall" do
# Neither user nor access key ID exist
describe aws_iam_access_key(username: fixtures['iam_user_recall_miss'], id: fixtures['iam_access_key_recall_miss']) do
it { should_not exist }
end
# User exists but has no keys
describe aws_iam_access_key(username: fixtures['iam_user_without_access_key'], id: fixtures['iam_access_key_recall_miss']) do
it { should_not exist }
end
# User exists and has an access key
describe aws_iam_access_key(username: fixtures['iam_user_with_access_key'], id: fixtures['iam_access_key_recall_hit']) do
it { should exist }
end
end
#------------- Property - create_date -------------#
# TODO: create_date tests
#------------- Property - last_used_date -------------#
# TODO: last_used_date tests

View file

@ -1,58 +0,0 @@
# Integration tests for the aws_iam_access_keys plural resource.
fixtures = {}
[
'iam_user_with_access_key',
'iam_user_without_access_key',
'iam_access_key_recall_hit',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/iam.tf',
)
end
#======================================================#
# IAM Access Key - Plural
#======================================================#
control 'IAM Access Keys - fetch all' do
describe aws_iam_access_keys do
it { should exist }
end
end
control 'IAM Access Keys - Client-side filtering' do
all_keys = aws_iam_access_keys
describe all_keys.where(username: fixtures['iam_user_with_access_key']) do
its('entries.length') { should be 1 }
its('access_key_ids.first') { should eq fixtures['iam_access_key_recall_hit'] }
end
describe all_keys.where(created_days_ago: 0) do
it { should exist }
end
describe all_keys.where { active } do
it { should exist }
end
# This would presumably refer to your test-user-last-key-use IAM user
# This test will fail if you have very recently setup your
# testing environment
describe all_keys.where { ever_used }
.where { last_used_days_ago > 0 } do
it { should exist }
end
describe all_keys.where { created_with_user } do
it { should exist }
end
end
# Server-side filtering: the username is passed to the AWS API call itself.
control 'IAM Access Keys - fetch-time filtering' do
describe aws_iam_access_keys(username: fixtures['iam_user_with_access_key']) do
its('entries.length') { should be 1 }
its('access_key_ids.first') { should eq fixtures['iam_access_key_recall_hit'] }
end
describe aws_iam_access_keys(username: fixtures['iam_user_without_access_key']) do
it { should_not exist }
end
end

View file

@ -1,27 +0,0 @@
# Integration tests for the aws_iam_group singular resource.
# Fixture values are InSpec inputs backed by Terraform outputs (../build/iam.tf).
fixtures = {}
%w{iam_group_administrators iam_user_recall_hit}.each do |fixture_name|
  fixtures[fixture_name] = input(
    fixture_name,
    default: "default.#{fixture_name}",
    description: 'See ../build/iam.tf',
  )
end
admin_group = fixtures['iam_group_administrators']
control "aws_iam_group recall" do
  # The fixture group should be found; a made-up name should miss.
  describe aws_iam_group(admin_group) do
    it { should exist }
  end
  describe aws_iam_group('fakegroup') do
    it { should_not exist }
  end
end
control "aws_iam_group properties test" do
  # The fixture user is a member of the administrators fixture group.
  describe aws_iam_group(admin_group) do
    its('users') { should include fixtures['iam_user_recall_hit'] }
  end
end

View file

@ -1,31 +0,0 @@
# Integration tests for the aws_iam_groups plural resource.
fixtures = {}
[
'iam_group_administrators',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/iam.tf',
)
end
# Search by exact group name and by a regex that should match nothing.
control "aws_iam_groups search" do
describe aws_iam_groups do
it { should exist }
end
describe aws_iam_groups.where(group_name: fixtures['iam_group_administrators']) do
it { should exist }
its('count') { should cmp 1 }
end
describe aws_iam_groups.where(group_name: /fakegroup/) do
it { should_not exist }
end
end
control "aws_iam_groups properties test" do
describe aws_iam_groups do
its('group_names') { should include fixtures['iam_group_administrators'] }
end
end

View file

@ -1,33 +0,0 @@
# Integration tests for the aws_iam_password_policy singleton resource.
# There are other tests in the "minimal" test account.
#---------------------- Recall ------------------------#
# Password policy is a per-account singleton. If it's been configured, it exists.
control "aws_iam_password_policy existence" do
describe aws_iam_password_policy do
it { should exist }
end
end
#------------- Properties -------------#
# Expected values match the policy configured by the build fixtures.
control "aws_iam_password_policy properties" do
describe aws_iam_password_policy do
its('max_password_age_in_days') { should cmp 365 }
its('number_of_passwords_to_remember') { should cmp 7 }
end
end
#------------- Matchers - Positive Case -------------#
control "aws_iam_password_policy matchers" do
describe aws_iam_password_policy do
it { should require_lowercase_characters }
it { should require_uppercase_characters }
it { should require_numbers }
it { should require_symbols }
it { should allow_users_to_change_passwords }
it { should expire_passwords }
it { should prevent_password_reuse }
end
end

View file

@ -1,5 +0,0 @@
# Smoke test for the plural resource: every AWS account has managed policies,
# so the collection should always be non-empty.
control "aws_iam_policies recall" do
describe aws_iam_policies do
it { should exist }
end
end

View file

@ -1,110 +0,0 @@
# Integration tests for the aws_iam_policy singular resource.
# Uses both AWS-managed policies (stable names) and two fixture policies.
fixtures = {}
[
'aws_iam_policy_alpha_name',
'aws_iam_policy_beta_name',
].each do |fixture_name|
fixtures[fixture_name] = input(
fixture_name,
default: "default.#{fixture_name}",
description: 'See ../build/iam.tf',
)
end
control "aws_iam_policy recall" do
describe aws_iam_policy("AWSSupportAccess") do
it { should exist }
end
describe aws_iam_policy(policy_name: "AWSSupportAccess") do
it { should exist }
end
describe aws_iam_policy(fixtures['aws_iam_policy_alpha_name']) do
it { should exist }
end
end
# NOTE(review): these property expectations depend on the fixture account
# state (the "test-fixture-maker" user holding AdministratorAccess).
control "aws_iam_policy properties" do
describe aws_iam_policy("AdministratorAccess") do
its('arn') { should cmp "arn:aws:iam::aws:policy/AdministratorAccess" }
its('default_version_id') { should cmp 'v1' }
its('attachment_count') { should cmp 1 }
its('attached_users') { should include "test-fixture-maker" }
its('attached_groups') { should be_empty }
its('attached_roles') { should be_empty }
end
describe aws_iam_policy(fixtures['aws_iam_policy_alpha_name']) do
its('statement_count') { should cmp 2 }
its('policy') { should be_kind_of(Hash) }
end
end
control "aws_iam_policy matchers" do
describe aws_iam_policy("AdministratorAccess") do
it { should be_attached }
end
describe aws_iam_policy("AdministratorAccess") do
it { should be_attached_to_user("test-fixture-maker") }
end
describe aws_iam_policy(fixtures['aws_iam_policy_alpha_name']) do
it { should have_statement('Resource' => '*')}
it { should have_statement('Resource' => '*', 'Sid' => 'alpha01')}
it { should have_statement('Resource' => 'arn:aws:s3:::bobs-stuff', 'Sid' => 'alpha02')}
it { should have_statement('Effect' => 'Allow', 'Sid' => 'alpha01')}
it { should have_statement('Effect' => 'Deny', 'Sid' => 'alpha02')}
it { should have_statement('Action' => 'ec2:Describe*', 'Sid' => 'alpha01')}
it { should_not have_statement('Action' => 'ec2:Describe')}
it { should have_statement('Action' => /^ec2:Describe\*$/, 'Sid' => 'alpha01')}
it { should have_statement('Action' => /^ec2:.+$/, 'Sid' => 'alpha01')}
it { should have_statement('Action' => 'ec2:Describe*', 'Resource' => '*', 'Effect' => 'Allow') }
it { should_not have_statement('Action' => 'ec2:Describe*', 'Resource' => 'arn:aws:s3:::bobs-stuff') }
end
describe aws_iam_policy(fixtures['aws_iam_policy_beta_name']) do
it { should have_statement('Action' => 'ec2:DescribeSubnets')}
it { should have_statement('Action' => 'ec2:DescribeSecurityGroups')}
# Array indicates all must match
it { should_not have_statement('Action' => ['ec2:DescribeSecurityGroups'])}
it { should have_statement('Action' => ['ec2:DescribeSubnets', 'ec2:DescribeSecurityGroups'])}
it { should have_statement('Action' => ['ec2:DescribeSecurityGroups', 'ec2:DescribeSubnets'])}
it { should have_statement('Resource' => 'arn:aws:ec2:::*')}
it { should have_statement('Resource' => '*')}
it { should_not have_statement('Resource' => ['*'])}
it { should have_statement('Resource' => ['arn:aws:ec2:::*', '*'])}
it { should have_statement('Resource' => ['*', 'arn:aws:ec2:::*'])}
end
# AWSCertificateManagerReadOnly has an odd shape:
# its Statement list is not an array, but a hash - it's a degenerate form.
# {
# "Version": "2012-10-17",
# "Statement": {
# "Effect": "Allow",
# "Action": [
# "acm:DescribeCertificate",
# "acm:ListCertificates",
# "acm:GetCertificate",
# "acm:ListTagsForCertificate"
# ],
# "Resource": "*"
# }
# }
describe aws_iam_policy('AWSCertificateManagerReadOnly') do
its('statement_count') { should cmp 1 }
it { should have_statement 'Effect' => 'Allow', 'Action' => 'acm:GetCertificate' }
end
# This policy has a statement with a NotAction, and no Action
# We don't yet support NotAction
# But if you ask for action, it should not match, and also not explode
# arn:aws:iam::aws:policy/PowerUserAccess
describe aws_iam_policy('PowerUserAccess') do
it { should_not have_statement 'Action' => 'iam:*' }
end
end

View file

@ -1,6 +0,0 @@
control "AWS IAM Role search for default AWS role" do
  # The AWSServiceRoleForRDS service-linked role should exist because the
  # fixture build creates an RDS instance.
  describe aws_iam_role("AWSServiceRoleForRDS") do
    it { should exist }
  end
end

View file

@ -1,35 +0,0 @@
# Map each Terraform output name to an InSpec input (see ../build/iam.tf).
fixtures = %w[aws_account_id].each_with_object({}) do |fixture_name, memo|
  memo[fixture_name] = input(
    fixture_name,
    default: "default.#{fixture_name}",
    description: "See ../build/iam.tf",
  )
end
#------------- Property - has_mfa_enabled -------------#
# The negative case lives in the 'minimal' test set; see
# TESTING_AGAINST_AWS.md for fixture instructions.
control "aws_iam_root_user has_mfa_enabled property" do
  describe aws_iam_root_user do
    it { should have_mfa_enabled }
  end
end

#---------- Property - has_virtual_mfa_enabled ----------#
# The negative case lives in the 'minimal' test set.
control "aws_iam_root_user has_virtual_mfa_enabled property" do
  describe aws_iam_root_user do
    it { should have_virtual_mfa_enabled }
  end
end

#------------- Property - has_access_key -------------#
# The positive case lives in the 'minimal' test set.
control "aws_iam_root_user has_access_key property" do
  describe aws_iam_root_user do
    it { should_not have_access_key }
  end
end

View file

@ -1,111 +0,0 @@
# Inputs wired to Terraform outputs (see ../build/iam.tf).
# Naming scheme: "1i_1a" means one inline policy and one attached policy, etc.
fixture_names = %w[
  iam_user_recall_hit
  iam_user_recall_miss
  iam_user_no_mfa_enabled
  iam_user_has_console_password
  iam_user_with_access_key
  iam_user_for_policies_0i_0a_name
  iam_user_for_policies_1i_0a_name
  iam_user_for_policies_2i_0a_name
  iam_user_for_policies_1i_1a_name
  iam_user_for_policies_0i_2a_name
  iam_policy_user_attached_1i_1a_1_arn
  iam_policy_user_attached_1i_1a_1_name
  iam_policy_user_attached_0i_2a_1_arn
  iam_policy_user_attached_0i_2a_1_name
  iam_policy_user_attached_0i_2a_2_arn
  iam_policy_user_attached_0i_2a_2_name
]
fixtures = fixture_names.each_with_object({}) do |fixture_name, memo|
  memo[fixture_name] = input(
    fixture_name,
    default: "default.#{fixture_name}",
    description: "See ../build/iam.tf",
  )
end
#------------------- Recall / Miss -------------------#
control "aws_iam_user recall" do
  # A user created by the fixtures resolves...
  describe aws_iam_user(username: fixtures["iam_user_recall_hit"]) do
    it { should exist }
  end
  # ...and a name that was never created does not.
  describe aws_iam_user(username: fixtures["iam_user_recall_miss"]) do
    it { should_not exist }
  end
end
control "aws_iam_user properties" do
  #------------- Property - has_mfa_enabled -------------#
  # TODO: fixture and test for has_mfa_enabled
  describe aws_iam_user(username: fixtures["iam_user_no_mfa_enabled"]) do
    it { should_not have_mfa_enabled }
    it { should_not have_console_password } # TODO: this is working by accident, we should have a dedicated fixture
  end

  #---------- Property - has_console_password -----------#
  describe aws_iam_user(username: fixtures["iam_user_has_console_password"]) do
    it { should have_console_password }
  end

  #------------- Property - access_keys -------------#
  # Every access key owned by the fixture user should be active.
  aws_iam_user(username: fixtures["iam_user_with_access_key"]).access_keys.each do |access_key|
    describe access_key.access_key_id do
      subject { access_key }
      its("status") { should eq "Active" }
    end
  end

  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
  # Inline and Attached Policies
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
  # Each pair is [inline_count, attached_count]; fixture names encode the
  # combination as "<i>i_<a>a" (e.g. "1i_1a" = 1 inline, 1 attached).
  [
    [0, 0],
    [1, 0],
    [2, 0],
    [1, 1],
    [0, 2],
  ].each do |inline_count, attached_count|
    combo_as_string = "#{inline_count}i_#{attached_count}a"
    # CHANGED: use the keyword form (username:) for consistency with every
    # other aws_iam_user lookup in this file (was the bare positional form).
    describe aws_iam_user(username: fixtures["iam_user_for_policies_#{combo_as_string}_name"]) do
      # Inline policy flag and count
      if inline_count > 0
        it { should have_inline_policies }
      else
        it { should_not have_inline_policies }
      end
      its("inline_policy_names.count") { should eq inline_count }
      # Inline fixture policies are named "test_<combo>_<n>"
      (1..inline_count).each do |idx|
        its("inline_policy_names") { should include "test_#{combo_as_string}_#{idx}" }
      end
      # Attached policy flag and count
      if attached_count > 0
        it { should have_attached_policies }
      else
        it { should_not have_attached_policies }
      end
      its("attached_policy_names.count") { should eq attached_count }
      # Expected attached policy name(s) and ARN(s) come from the fixtures
      (1..attached_count).each do |idx|
        its("attached_policy_arns") { should include fixtures["iam_policy_user_attached_#{combo_as_string}_#{idx}_arn"] }
        its("attached_policy_names") { should include fixtures["iam_policy_user_attached_#{combo_as_string}_#{idx}_name"] }
      end
    end
  end
end

View file

@ -1,55 +0,0 @@
# Inputs wired to Terraform outputs (see ../build/iam.tf).
# Naming scheme: "1i_1a" means one inline policy and one attached policy, etc.
fixtures = %w[
  iam_user_for_policies_0i_0a_name
  iam_user_for_policies_1i_0a_name
  iam_user_for_policies_2i_0a_name
  iam_user_for_policies_1i_1a_name
  iam_user_for_policies_0i_2a_name
  iam_policy_user_attached_1i_1a_1_arn
  iam_policy_user_attached_1i_1a_1_name
  iam_policy_user_attached_0i_2a_1_arn
  iam_policy_user_attached_0i_2a_1_name
  iam_policy_user_attached_0i_2a_2_arn
  iam_policy_user_attached_0i_2a_2_name
].each_with_object({}) do |fixture_name, memo|
  memo[fixture_name] = input(
    fixture_name,
    default: "default.#{fixture_name}",
    description: "See ../build/iam.tf",
  )
end
control "aws_iam_users filtering" do
  # Users that have a console password but no MFA device enabled.
  describe aws_iam_users.where(has_console_password?: true)
                        .where(has_mfa_enabled?: false) do
    it { should exist }
  end

  # Fixture users matching /user_for_policies/ that carry inline policies.
  describe aws_iam_users.where { user_name =~ /user_for_policies/ }
                        .where(has_inline_policies: true) do
    its("entries.count") { should eq 3 }
    %w[
      iam_user_for_policies_1i_0a_name
      iam_user_for_policies_1i_1a_name
    ].each do |fixture_name|
      its("usernames") { should include fixtures[fixture_name] }
    end
    its("usernames") { should_not include fixtures["iam_user_for_policies_0i_2a_name"] }
    its("inline_policy_names.count") { should eq 4 }
    its("inline_policy_names") { should include "test_1i_0a_1" }
    its("attached_policy_names.count") { should eq 1 }
  end

  # Fixture users matching /user_for_policies/ that carry attached policies.
  describe aws_iam_users.where { user_name =~ /user_for_policies/ }
                        .where(has_attached_policies: true) do
    # We have 2 users from our fixtures
    its("entries.count") { should eq 2 }
    %w[
      iam_user_for_policies_0i_2a_name
      iam_user_for_policies_1i_1a_name
    ].each do |fixture_name|
      its("usernames") { should include fixtures[fixture_name] }
    end
    its("usernames") { should_not include fixtures["iam_user_for_policies_1i_0a_name"] }
    its("attached_policy_names.count") { should eq 3 }
    its("attached_policy_names") { should include fixtures["iam_policy_user_attached_1i_1a_1_name"] }
    its("attached_policy_arns.count") { should eq 3 }
    its("attached_policy_arns") { should include fixtures["iam_policy_user_attached_1i_1a_1_arn"] }
  end
end

View file

@ -1,51 +0,0 @@
# Inputs wired to Terraform outputs (see ../build/kms.tf).
fixtures = %w[
  kms_key_enabled_arn
  kms_key_enabled_key_id
  kms_key_disabled_key_id
  kms_key_enabled_key_description
].each_with_object({}) do |fixture_name, memo|
  memo[fixture_name] = input(
    fixture_name,
    default: "default.#{fixture_name}",
    description: "See ../build/kms.tf",
  )
end
control "aws_kms_key recall" do
  # Positional and keyword lookups should both resolve the fixture key.
  describe aws_kms_key(fixtures["kms_key_enabled_key_id"]) do
    it { should exist }
  end
  describe aws_kms_key(key_id: fixtures["kms_key_enabled_key_id"]) do
    it { should exist }
  end
  # An unknown key id must not resolve.
  describe aws_kms_key("non-existent-key") do
    it { should_not exist }
  end
end
control "aws_kms_key properties" do
  describe aws_kms_key(fixtures["kms_key_enabled_key_id"]) do
    its("key_id") { should eq fixtures["kms_key_enabled_key_id"] }
    its("arn") { should eq fixtures["kms_key_enabled_arn"] }
    its("description") { should eq fixtures["kms_key_enabled_key_description"] }
    # CHANGED: was `should eq 0`, which only passes on the calendar day the
    # Terraform fixture created the key — flaky for any longer-lived test
    # environment. A non-negative age is the strongest non-flaky assertion.
    its("created_days_ago") { should be >= 0 }
    its("key_usage") { should eq "ENCRYPT_DECRYPT" }
    its("key_state") { should eq "Enabled" }
  end
end
control "aws_kms_key matchers" do
  # The enabled fixture key: customer-managed, non-external, no expiration.
  describe aws_kms_key(fixtures["kms_key_enabled_key_id"]) do
    it { should be_enabled }
    it { should_not be_external }
    it { should_not be_managed_by_aws }
    it { should_not have_key_expiration }
  end
  # Rotation is configured on the enabled fixture key...
  describe aws_kms_key(fixtures["kms_key_enabled_key_id"]) do
    it { should have_rotation_enabled }
  end
  # ...but not on the disabled one.
  describe aws_kms_key(fixtures["kms_key_disabled_key_id"]) do
    it { should_not have_rotation_enabled }
  end
end

Some files were not shown because too many files have changed in this diff Show more