HistoryPurge: Clearing 219 old commits

This commit is contained in:
xpk 2024-10-24 23:09:21 +08:00
commit d08b7cac59
Signed by: xpk
GPG Key ID: CD4FF6793F09AB86
348 changed files with 376141 additions and 0 deletions

1
.gitignore vendored Normal file
View File

@ -0,0 +1 @@
.git

3
.gitmodules vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "aws/aws-inventory"]
path = aws/aws-inventory
url = https://xpk.headdesk.me/git/xpk/aws-inventory.git

12
LICENSE Normal file
View File

@ -0,0 +1,12 @@
BSD Zero Clause License
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.

5
README.md Normal file
View File

@ -0,0 +1,5 @@
# General dumping of code
URL: https://xpk.headdesk.me/git/xpk/dumps
## Test
Testing commit after migrating to kubernetes

15
aws-ssm/ssm-test.yaml Normal file
View File

@ -0,0 +1,15 @@
# SSM command document: runs a shell script that touches /tmp/ssm-test,
# restricted to Linux instances via the precondition.
# NOTE(review): original indentation was lost in extraction; structure below is
# reconstructed — confirm against the repository copy before applying.
---
schemaVersion: "2.2"
description: "SSM runCommand test"
mainSteps:
  - action: "aws:runShellScript"
    name: "Test"
    inputs:
      runCommand:
        -
          #!/usr/bin/env bash
          /bin/touch /tmp/ssm-test
    precondition:
      StringEquals:
        - "platformType"
        - "Linux"

464
aws/AwsEnvReview.py Executable file
View File

@ -0,0 +1,464 @@
#!/usr/bin/python3
"""
Review AWS environment based on 6 WAR pillars, namely:
1. Operational Excellence
2. Security
3. Reliability
4. Performance Efficiency
5. Cost Optimization
6. Sustainability
"""
import boto3
import botocore
import jmespath
import re
from pprint import pprint
from datetime import date
def printTitle(title):
    """Print *title* upper-cased between two '=' rules sized to the title."""
    rule = "=" * len(title)
    print("\n")
    print(rule)
    print(title.upper())
    print(rule)
    return
def printSubTitle(title):
    """Print *title* framed by a leading and a trailing blank line."""
    print(f"\n{title}\n")
    return
def getAllRegions(myclient):
    """Return the names of all regions enabled for this account,
    using the supplied EC2 client's describe_regions call."""
    resp = myclient.describe_regions(AllRegions=False)
    return jmespath.search("Regions[*].RegionName", resp)
def getAgeFromDate(inputDate):
    """Return the whole number of days from *inputDate* (a datetime) to today."""
    return (date.today() - inputDate.date()).days
def printResult(content: list, header: str):
    """Print *content* rows as a comma-separated table under an
    "Index, " + *header* line, or a thumbs-up message when empty."""
    header = "Index, " + header
    if not content:
        print("👍 No issue found.")
        return
    print(header)
    print("-" * len(header))
    for idx, row in enumerate(content, start=1):
        print(idx, *row, sep=", ")
    return
# Resolve the account id once and enumerate every enabled region, so each
# per-service check below can iterate them.
sts = boto3.client("sts")
aid = sts.get_caller_identity().get("Account")
client = boto3.client('ec2', region_name="us-east-1")
regions = getAllRegions(client)

print("AWS Environment Review - " + str(date.today()) + "\n\n")
printTitle("Ec2 service review")

# Instances in "stopped" state (their EBS volumes are still billed).
printSubTitle("[Cost Optimization] Instances stopped for over 14 days - Consider backing up and terminate instances "
              "or use AutoScalingGroup to spin up and down instances as needed.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            if i[0].get("State").get("Name") == "stopped":
                # NOTE(review): age is derived from UsageOperationUpdateTime,
                # which approximates (but is not exactly) the stop time.
                outTable.append([r, aid, i[0].get("InstanceId"), getAgeFromDate(i[0].get("UsageOperationUpdateTime"))])
printResult(outTable, "Region, AccountID, InstanceId, DaysStopped")

# Instances still allowing the unauthenticated IMDSv1 metadata endpoint.
printSubTitle("[Security] Insecure IDMSv1 allowed - Consider requiring IDMSv2. For more information, "
              "see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            # HttpTokens == "optional" means IMDSv1 is still permitted.
            if i[0].get("MetadataOptions").get("HttpTokens") == "optional":
                outTable.append([r, aid, i[0].get("InstanceId"), i[0].get("MetadataOptions").get("HttpTokens") ])
printResult(outTable, "Region, AccountID, InstanceId, IDMSv2")

# Previous-generation instance families.
printSubTitle("[Sustainability] Use of previous generation instance type - "
              "Consider using current generation instances")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            if re.search("^(t1|t2|m3|m1|m2|m4|c1|c2|c3|c4|r3|r4|i2)", i[0].get("InstanceType")) is not None:
                outTable.append([r, aid, i[0].get("InstanceId"), i[0].get("InstanceType")])
printResult(outTable, "Region, AccountID, InstanceId, InstanceType")

# Volumes in "available" state are attached to nothing but still billed.
printSubTitle("[Cost Optimization] Unattached EBS volumes - Consider backing up the volumes and delete them")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_volumes(
        Filters=[
            {
                'Name': 'status',
                'Values': ['available']
            }
        ]
    )
    for i in response.get("Volumes"):
        outTable.append([r, aid, i.get("VolumeId"), i.get("Size"), i.get("VolumeType")])
printResult(outTable, "Region, AccountID, VolumeId, Size, VolumeType")

# Snapshots older than a year, excluding the ones AWS Backup manages.
printSubTitle("[Cost Optimization] EBS snapshots more than 365 days old - "
              "Consider removing snapshots if no longer needed")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_snapshots(
        OwnerIds=[aid]
    )
    for i in response.get("Snapshots"):
        if getAgeFromDate(i.get("StartTime")) > 365 and i.get("Description") != "This snapshot is created by the AWS Backup service.":
            # Description truncated to 70 chars to keep the table readable.
            outTable.append([r, aid, i.get("SnapshotId"), i.get("Description")[:70], getAgeFromDate(i.get("StartTime"))])
printResult(outTable, "Region, AccountID, SnapshotId, Description, SnapshotAge")

# In-use volumes without encryption at rest.
printSubTitle("[Security] Unencrypted EBS volumes - Consider replacing volume with encrypted ones. "
              "One can do so by stopping the Ec2 instance, creating snapshot for the unencrypted volume, "
              "copy the snapshot to a new encrypted snapshot, create a volume from the encrypted snapshot,"
              "detach the original volume and attach the encrypted volume. Remember to clean up the volumes"
              "and snapshots afterwards.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_volumes(
        Filters=[
            {
                'Name': 'encrypted',
                'Values': ['false']
            },
            {
                'Name': 'status',
                'Values': ['in-use']
            }
        ]
    )
    for i in response.get("Volumes"):
        outTable.append([r, aid, i.get("VolumeId"), i.get("Size"), i.get("VolumeType")])
printResult(outTable, "Region, AccountID, VolumeId, Size, VolumeType")

# Elastic IPs not associated with anything (billed while idle).
printSubTitle("[Cost Optimization] Unused Elastic IP - Consider deleting unused EIP")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_addresses()
    for i in response.get("Addresses"):
        if i.get("AssociationId") is None:
            outTable.append([r, aid, i.get("PublicIp")])
printResult(outTable, "Region, AccountID, PublicIp")

printTitle("Security group review")

# World-open ingress rules, ignoring the plain web ports 80/443.
printSubTitle("[Security] Security group rules allowing ingress from 0.0.0.0/0 - Consider setting more restrictive rules "
              "allowing access from specific sources.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_security_group_rules()
    for sgr in jmespath.search("SecurityGroupRules[?IsEgress==`false`]", response):
        # The IsEgress re-check is redundant with the JMESPath filter above.
        if (not sgr.get("IsEgress")
                and sgr.get("CidrIpv4") == "0.0.0.0/0"
                and sgr.get("FromPort") != 443
                and sgr.get("ToPort") != 443
                and sgr.get("FromPort") != 80
                and sgr.get("ToPort") != 80):
            outTable.append([r, aid, sgr.get("GroupId"), sgr.get("SecurityGroupRuleId"), sgr.get("FromPort"), sgr.get("ToPort")])
printResult(outTable, "Region, AccountID, SecurityGroup, Rule, FromPort, ToPort")
printTitle("Rds service review")

# RDS instances/clusters without storage encryption.
printSubTitle("[Security] Unencrypted RDS instances - Consider encrypting RDS instances. For more detail, see "
              "https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/encrypt-an-existing-amazon-rds-for-postgresql-db-instance.html")
outTable = []
for r in regions:
    client = boto3.client('rds', region_name=r)
    response = client.describe_db_instances()
    # StorageEncrypted is a boolean in the API response; the previous
    # comparison against the string "False" could never match, so no
    # unencrypted database was ever reported.
    for i in response.get("DBInstances"):
        if not i.get("StorageEncrypted"):
            outTable.append([r, aid, i.get("DBInstanceIdentifier"), i.get("Engine")])
    response = client.describe_db_clusters()
    for i in response.get("DBClusters"):
        if not i.get("StorageEncrypted"):
            outTable.append([r, aid, i.get("DBClusterIdentifier"), i.get("Engine")])
printResult(outTable, "Region, AccountID, DBIdentifier, Engine")
# Multi-AZ disabled on RDS instances/clusters.
printSubTitle("[Reliability] RDS instance running in single availability zone - "
              "Consider enabling multi-az for production use.")
outTable = []
for r in regions:
    client = boto3.client('rds', region_name=r)
    response = client.describe_db_instances()
    for i in response.get("DBInstances"):
        if not i.get("MultiAZ"):
            outTable.append([r, aid, i.get("DBInstanceIdentifier"), i.get("Engine")])
    response = client.describe_db_clusters()
    for i in response.get("DBClusters"):
        if not i.get("MultiAZ"):
            outTable.append([r, aid, i.get("DBClusterIdentifier"), i.get("Engine")])
printResult(outTable, "Region, AccountID, DBIdentifier, Engine")

printTitle("Lambda service review")

# Functions on runtimes that are out of (or near the end of) support.
printSubTitle("[Security] Outdated Lambda runtime - Consider changing to currently supported Lambda runtime versions, "
              "listed on https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html")
outTable = []
for r in regions:
    client = boto3.client('lambda', region_name=r)
    response = client.list_functions()
    for i in response.get("Functions"):
        # Runtime is absent for container-image functions.
        if i.get("Runtime") is not None:
            # NOTE(review): the unescaped '.' in "python3.[678]" matches any
            # character; harmless for real runtime ids, but worth tightening.
            if re.search("python2|python3.[678]|java8|nodejs[468]|nodejs1[024]|dotnet6", i.get("Runtime")) is not None:
                outTable.append([r, aid, i.get("FunctionName"), i.get("Runtime")])
printResult(outTable, "Region, AccountID, FunctionName, Runtime")

printTitle("Iam service review")

# Access keys older than 180 days (IAM is global - single regional client).
printSubTitle("[Security] Iam user access key not rotated for 180 days - Consider rotating access key")
outTable = []
client = boto3.client('iam', region_name="us-east-1")
listUsers = client.list_users()
users = jmespath.search("Users[*].UserName", listUsers)
for u in users:
    response = client.list_access_keys(UserName=u)
    for i in response.get("AccessKeyMetadata"):
        if getAgeFromDate(i.get("CreateDate")) > 180:
            outTable.append([aid, u, i.get("AccessKeyId"), getAgeFromDate(i.get("CreateDate"))])
printResult(outTable, "AccountID, UserName, AccessKeyId, AccessKeyAge")

# Principals with the AWS-managed AdministratorAccess policy attached.
printSubTitle("[Security] Iam AdministratorAccess policy attached - Consider granting minimum privileges "
              "to users/groups/roles. AWS managed policies for job functions are recommended. See "
              "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html")
outTable = []
client = boto3.client('iam', region_name="us-east-1")
entityResp = client.list_entities_for_policy(
    PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess'
)
for group in jmespath.search("PolicyGroups[*].GroupName", entityResp):
    outTable.append([aid, "Group", group])
for user in jmespath.search("PolicyUsers[*].UserName", entityResp):
    outTable.append([aid, "User", user])
for role in jmespath.search("PolicyRoles[*].RoleName", entityResp):
    outTable.append([aid, "Role", role])
printResult(outTable, "AccountID, Type, Name")

printTitle("Cloudwatch service review")

# Log groups that keep data forever (no retention policy).
printSubTitle("[Cost Optimization] Cloudwatch LogGroups without retention period - Consider setting retention")
outTable = []
for r in regions:
    client = boto3.client('logs', region_name=r)
    response = client.describe_log_groups()
    for i in response.get("logGroups"):
        if i.get("retentionInDays") is None:
            # storedBytes reported as whole MiB.
            outTable.append([r, aid, i.get("logGroupName"), int(round(i.get("storedBytes")/1024/1024,0))])
printResult(outTable, "Region, AccountID, LogGroup, SizeMiB")

# Log groups without a KMS key configured.
printSubTitle("[Security] Cloudwatch LogGroups unencrypted - Consider encrypting LogGroups")
outTable = []
for r in regions:
    client = boto3.client('logs', region_name=r)
    response = client.describe_log_groups()
    for i in response.get("logGroups"):
        if i.get("kmsKeyId") is None:
            outTable.append([r, aid, i.get("logGroupName")])
printResult(outTable, "Region, AccountID, LogGroup")

printTitle("Backup service review")

# Regions that have Ec2/Rds instances but no AWSBackup plan at all.
printSubTitle("[Reliability] Ec2/Rds instances found but AWSBackup plan missing - "
              "Consider setting up AWSBackup plans to backup AWS resources.")
outTable = []
for r in regions:
    client = boto3.client('backup', region_name=r)
    response = client.list_backup_plans()
    if len(response.get("BackupPlansList")) <= 0:
        ec2client = boto3.client("ec2", region_name=r)
        ec2resp = ec2client.describe_instances()
        ec2instances = jmespath.search("Reservations[*].Instances[*]", ec2resp)
        rdsclient = boto3.client("rds", region_name=r)
        rdsresp = rdsclient.describe_db_instances()
        rdsinstances = rdsresp.get("DBInstances")
        instanceCount = len(ec2instances) + len(rdsinstances)
        if instanceCount >= 1:
            outTable.append([r, aid, "AWSBackup plan missing", instanceCount])
printResult(outTable, "Region, AccountID, BackupPlan, Ec2RdsInstances")
printTitle("S3 service review")

# Buckets with no bucket policy attached.
printSubTitle("[Security] S3 bucket policy missing - Consider creating bucket policy and restrict access to bucket")
outTable = []
client = boto3.client('s3', region_name="us-east-1")
response = client.list_buckets()
for i in jmespath.search("Buckets[*].Name", response):
    try:
        policyResp = client.get_bucket_policy(Bucket=i)
    except botocore.exceptions.ClientError:
        # get_bucket_policy raises a ClientError when no policy exists; catch
        # that specifically instead of a bare except (which also swallowed
        # KeyboardInterrupt and genuine bugs).
        outTable.append([aid, i])
printResult(outTable, "AccountID, BucketName")
printTitle("ElastiCache review")

# Cache nodes whose type does not look like a Graviton family.
printSubTitle("[Sustainability] ElastiCache instances on x64 platform - Consider Graviton instances "
              "such as t4g/r7g to optimize your infrastructure investment.")
outTable = []
for r in regions:
    client = boto3.client('elasticache', region_name=r)
    response = client.describe_cache_clusters()
    for i in response.get("CacheClusters"):
        # Graviton node types carry a 'g' after the generation digit,
        # e.g. cache.t4g.small; anything else is assumed x64.
        if re.search("[0-9]g.", i.get("CacheNodeType")) is None:
            outTable.append([r, aid, i.get("CacheClusterId"), i.get("CacheNodeType")])
printResult(outTable, "Region, AccountID, CacheClusterId, CacheNodeType")

printTitle("LoadBalancer service review")

# Target groups with zero registered targets.
printSubTitle("[Cost Optimization] LB Target group without targets - Consider removing empty target groups")
outTable = []
for r in regions:
    client = boto3.client('elbv2', region_name=r)
    response = client.describe_target_groups()
    for i in response.get("TargetGroups"):
        tgResp = client.describe_target_health(TargetGroupArn=i.get("TargetGroupArn"))
        if len(jmespath.search("TargetHealthDescriptions[*].Target", tgResp)) == 0:
            outTable.append([r, aid, i.get("TargetGroupName")])
printResult(outTable, "Region, AccountID, TargetGroup")
printTitle("KMS service review")

# Customer-managed keys with automatic rotation disabled.
printSubTitle("[Security] Customer Managed Keys do not have auto rotation enabled - "
              "Consider enabling auto key rotation. When a key is rotated, previous ones "
              "are still kept within AWS to allow data retrival.")
outTable = []
for r in regions:
    client = boto3.client('kms', region_name=r)
    response = client.list_keys()
    for i in jmespath.search("Keys[*].KeyId", response):
        try:
            keyResp = client.describe_key(KeyId=i)
            # KeyMetadata.Enabled and KeyRotationEnabled are booleans; the old
            # string comparisons ('== "True"', '!= "False"') never matched as
            # intended, and the latter selected keys WITH rotation enabled
            # instead of those without.
            if (keyResp.get("KeyMetadata").get("Enabled")
                    and keyResp.get("KeyMetadata").get("KeyManager") == "CUSTOMER"):
                krResp = client.get_key_rotation_status(KeyId=i)
                if not krResp.get("KeyRotationEnabled"):
                    outTable.append([r, aid, i])
        except botocore.exceptions.ClientError:
            # Keys we lack permission on (or that are pending deletion) cannot
            # be inspected; skip them instead of aborting the whole review.
            pass
printResult(outTable, "Region, AccountID, KeyId")
printTitle("ApiGateway service review")

# Private REST APIs with no resource policy attached.
printSubTitle("[Security] ApiGateway resource policy missing - Consider restricting access to private API with a "
              "policy. Private Api should be accessed through Vpc endpoint and a policy ensures the Api cannot "
              "be accessed otherwise. For more detail, see "
              "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-resource-policies-examples.html")
outTable = []
for r in regions:
    client = boto3.client('apigateway', region_name=r)
    response = client.get_rest_apis()
    for i in response.get("items"):
        # "policy" is None/absent when no resource policy is attached; the
        # previous len(i.get("policy")) raised TypeError on such APIs.
        if "PRIVATE" in i.get("endpointConfiguration").get("types") and not i.get("policy"):
            outTable.append([r, aid, i.get("name")])
printResult(outTable, "Region, AccountID, PrivateApiName")
printTitle("Cloudtrail service review")

# Trails without a KMS key configured.
printSubTitle("[Security] Cloudtrail not encrypted - Consider enabling encryption for cloudtrail")
outTable = []
for r in regions:
    client = boto3.client('cloudtrail', region_name=r)
    response = client.describe_trails()
    for i in response.get("trailList"):
        if i.get("KmsKeyId") is None:
            outTable.append([r, aid, i.get("Name")])
printResult(outTable, "Region, AccountID, Trail")
# At least one multi-region trail should exist account-wide.
printSubTitle("[Security] Multi-Region cloudtrail not enabled - Consider enabling Multi-Region for at least 1 cloudtrail")
outTable = []
multiRegionTrailCount = 0
for r in regions:
    client = boto3.client('cloudtrail', region_name=r)
    response = client.describe_trails()
    for i in response.get("trailList"):
        if i.get("IsMultiRegionTrail"):
            multiRegionTrailCount += 1
# Decide once after tallying ALL regions: the previous per-region check of the
# cumulative counter flagged every region scanned before the first multi-region
# trail was encountered, producing false positives even when one existed.
if multiRegionTrailCount <= 0:
    outTable.append(["all", aid, "Missing multi region trail"])
printResult(outTable, "Region, AccountID, Status")
printTitle("Vpc service review")

# Site-to-site VPN connections with fewer than two tunnels.
printSubTitle("[Reliability] Insufficient VPN tunnels - Consider having 2 tunnels for each site VPN connection. "
              "AWS performs VPN tunnel endpoint maintenance rather frequently. Having 2 tunnel reduces the risk "
              "of service interruption.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_vpn_connections()
    for i in response.get("VpnConnections"):
        if len(jmespath.search("Options.TunnelOptions[*].OutsideIpAddress", i)) < 2:
            outTable.append([r, aid, i.get("VpnConnectionId"), len(jmespath.search("Options.TunnelOptions[*].OutsideIpAddress", i))])
printResult(outTable, "Region, AccountID, VpnConnection, TunnelCount")

printTitle("Eks service review")

# Managed node groups still on AL2 AMIs.
printSubTitle("[Sustainability] Eks node running on AmazonLinux2 (AL2) - Consider using AmazonLinux2023. "
              "AL2's end of life date is 2025-06-30. AmazonLinux2023 runs on newer kernel and libraries, "
              "which offers better performance and security.")
outTable = []
for r in regions:
    client = boto3.client('eks', region_name=r)
    response = client.list_clusters()
    for cluster in response.get("clusters"):
        ngsResp = client.list_nodegroups(clusterName=cluster)
        for ng in ngsResp.get("nodegroups"):
            ngResp = client.describe_nodegroup(
                clusterName=cluster,
                nodegroupName=ng
            )
            # amiType values prefixed AL2_ (e.g. AL2_x86_64) are AmazonLinux2.
            if re.search("^AL2_", ngResp.get("nodegroup").get("amiType")):
                outTable.append([r, aid, cluster, ng, ngResp.get("nodegroup").get("amiType")])
printResult(outTable, "Region, AccountID, Cluster, NodeGroup, AmiType")
# Clusters whose control plane is older than Kubernetes 1.28.
printSubTitle("[Sustainability] Eks control plane version outdated - Consider using upgrading Eks cluster. "
              "Reference https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html for a list "
              "of current versions. Reference https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html "
              "for upgrade instructions.")
outTable = []
for r in regions:
    client = boto3.client('eks', region_name=r)
    response = client.list_clusters()
    for cluster in response.get("clusters"):
        clusterResp = client.describe_cluster(name=cluster)
        version = jmespath.search("cluster.version", clusterResp)
        # Compare (major, minor) numerically: the previous float() comparison
        # mis-ordered versions (float("1.9") > float("1.28"), yet 1.9 is far older).
        if tuple(int(p) for p in version.split(".")[:2]) < (1, 28):
            outTable.append([r, aid, cluster, clusterResp.get("cluster").get("version")])
printResult(outTable, "Region, AccountID, Cluster, Version")
# TODO
"""
- config enabled for all regions
- list users/groups/roles with administrator access
"""

479
aws/AwsEnvReviewMarkdown.py Executable file
View File

@ -0,0 +1,479 @@
#!/usr/bin/python3
"""
Review AWS environment based on 6 WAR pillars, namely:
1. Operational Excellence
2. Security
3. Reliability
4. Performance Efficiency
5. Cost Optimization
6. Sustainability
"""
import boto3
import botocore
import jmespath
import re
from pprint import pprint
from datetime import date
from mdutils.mdutils import MdUtils
def printTitle(level: int, title: str):
    """Emit *title* into the module-level mdFile: a markdown header for
    level 1-2, a plain paragraph otherwise."""
    if level > 2:
        mdFile.new_paragraph(title)
    else:
        mdFile.new_header(level=level, title=title)
    return
def getAllRegions(myclient):
    """Return the names of all regions enabled for this account,
    using the supplied EC2 client's describe_regions call."""
    resp = myclient.describe_regions(AllRegions=False)
    return jmespath.search("Regions[*].RegionName", resp)
def getAgeFromDate(inputDate):
    """Return the whole number of days from *inputDate* (a datetime) to today."""
    return (date.today() - inputDate.date()).days
def printResult(content: list, header: str):
    """Render *content* rows into the module-level mdFile as a markdown table
    under "Item," + *header*, or a clapping-hands paragraph when empty.

    Note: each row list is mutated in place (an item number is inserted at
    position 0), matching the original behavior.
    """
    if not content:
        mdFile.new_paragraph("👏 No issue found.")
        return
    cells = ("Item," + header).split(",")
    ncols = len(cells)
    for idx, row in enumerate(content, start=1):
        row.insert(0, idx)
        cells.extend(row)
    mdFile.new_line()
    mdFile.new_table(columns=ncols, rows=len(content) + 1, text=cells, text_align='left')
    return
# Markdown report accumulator; every printTitle/printResult call below writes
# into this object.
# NOTE(review): no mdFile.create_md_file() call is visible in this chunk —
# confirm the report is actually flushed to disk somewhere below.
mdFile = MdUtils(file_name='AwsReviewReport.md', title='Aws Review ' + str(date.today()))

# Resolve the account id once and enumerate every enabled region.
sts = boto3.client("sts")
aid = sts.get_caller_identity().get("Account")
client = boto3.client('ec2', region_name="us-east-1")
regions = getAllRegions(client)
mdFile.write("-" * 5)
printTitle(1, "Ec2 service review")

# Instances in "stopped" state (their EBS volumes are still billed).
printTitle(2, "[Cost Optimization] Instances stopped for over 14 days")
printTitle(3, "Consider backing up and terminate instances "
              "or use AutoScalingGroup to spin up and down instances as needed.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            if i[0].get("State").get("Name") == "stopped":
                # NOTE(review): UsageOperationUpdateTime approximates stop time.
                outTable.append([r, aid, i[0].get("InstanceId"), getAgeFromDate(i[0].get("UsageOperationUpdateTime"))])
printResult(outTable, "Region, AccountID, InstanceId, DaysStopped")

# Instances still allowing the unauthenticated IMDSv1 metadata endpoint.
printTitle(2, "[Security] Insecure IDMSv1 allowed")
printTitle(3, "Consider requiring IDMSv2. For more information, "
              "see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            # HttpTokens == "optional" means IMDSv1 is still permitted.
            if i[0].get("MetadataOptions").get("HttpTokens") == "optional":
                outTable.append([r, aid, i[0].get("InstanceId"), i[0].get("MetadataOptions").get("HttpTokens")])
printResult(outTable, "Region, AccountID, InstanceId, IDMSv2")

# Previous-generation instance families.
printTitle(2,"[Sustainability] Use of early generation instance type")
printTitle(3, "Consider using current generation instances")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_instances()
    if len(response.get("Reservations")) > 0:
        for i in jmespath.search("Reservations[*].Instances[*]", response):
            if re.search("^(t1|t2|m3|m1|m2|m4|c1|c2|c3|c4|r3|r4|i2)", i[0].get("InstanceType")) is not None:
                outTable.append([r, aid, i[0].get("InstanceId"), i[0].get("InstanceType")])
printResult(outTable, "Region, AccountID, InstanceId, InstanceType")

# Volumes in "available" state are attached to nothing but still billed.
printTitle(2, "[Cost Optimization] Unattached EBS volumes")
printTitle(3, "Consider backing up the volumes and delete them")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_volumes(
        Filters=[
            {
                'Name': 'status',
                'Values': ['available']
            }
        ]
    )
    for i in response.get("Volumes"):
        outTable.append([r, aid, i.get("VolumeId"), i.get("Size"), i.get("VolumeType")])
printResult(outTable, "Region, AccountID, VolumeId, Size, VolumeType")

# Snapshots older than a year, excluding the ones AWS Backup manages.
printTitle(2, "[Cost Optimization] EBS snapshots more than 365 days old")
printTitle(3,"Consider removing snapshots if no longer needed")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_snapshots(
        OwnerIds=[aid]
    )
    for i in response.get("Snapshots"):
        if getAgeFromDate(i.get("StartTime")) > 365 and i.get(
                "Description") != "This snapshot is created by the AWS Backup service.":
            # Description truncated to 70 chars to keep the table readable.
            outTable.append(
                [r, aid, i.get("SnapshotId"), i.get("Description")[:70], getAgeFromDate(i.get("StartTime"))])
printResult(outTable, "Region, AccountID, SnapshotId, Description, SnapshotAge")

# In-use volumes without encryption at rest.
printTitle(2, "[Security] Unencrypted EBS volumes")
printTitle(3, "Consider replacing volume with encrypted ones. "
              "One can do so by stopping the Ec2 instance, creating snapshot for the unencrypted volume, "
              "copy the snapshot to a new encrypted snapshot, create a volume from the encrypted snapshot,"
              "detach the original volume and attach the encrypted volume. Remember to clean up the volumes"
              "and snapshots afterwards.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_volumes(
        Filters=[
            {
                'Name': 'encrypted',
                'Values': ['false']
            },
            {
                'Name': 'status',
                'Values': ['in-use']
            }
        ]
    )
    for i in response.get("Volumes"):
        outTable.append([r, aid, i.get("VolumeId"), i.get("Size"), i.get("VolumeType")])
printResult(outTable, "Region, AccountID, VolumeId, Size, VolumeType")

# Elastic IPs not associated with anything (billed while idle).
printTitle(2, "[Cost Optimization] Unused Elastic IP")
printTitle(3, "Consider deleting unused EIP")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_addresses()
    for i in response.get("Addresses"):
        if i.get("AssociationId") is None:
            outTable.append([r, aid, i.get("PublicIp")])
printResult(outTable, "Region, AccountID, PublicIp")

printTitle(1, "Security group review")

# World-open ingress rules, ignoring the plain web ports 80/443.
printTitle(2, "[Security] Security group rules allowing ingress from 0.0.0.0/0")
printTitle(3, "Consider setting more restrictive rules allowing access from specific sources.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_security_group_rules()
    for sgr in jmespath.search("SecurityGroupRules[?IsEgress==`false`]", response):
        # The IsEgress re-check is redundant with the JMESPath filter above.
        if (not sgr.get("IsEgress")
                and sgr.get("CidrIpv4") == "0.0.0.0/0"
                and sgr.get("FromPort") != 443
                and sgr.get("ToPort") != 443
                and sgr.get("FromPort") != 80
                and sgr.get("ToPort") != 80):
            outTable.append(
                [r, aid, sgr.get("GroupId"), sgr.get("SecurityGroupRuleId"), sgr.get("FromPort"), sgr.get("ToPort")])
printResult(outTable, "Region, AccountID, SecurityGroup, Rule, FromPort, ToPort")
printTitle(1, "Rds service review")

# RDS instances/clusters without storage encryption.
printTitle(2, "[Security] Unencrypted RDS instances")
printTitle(3, "Consider encrypting RDS instances. For more detail, see "
              "https://docs.aws.amazon.com/prescriptive-guidance/latest/patterns/encrypt-an-existing-amazon-rds-for-postgresql-db-instance.html")
outTable = []
for r in regions:
    client = boto3.client('rds', region_name=r)
    response = client.describe_db_instances()
    # StorageEncrypted is a boolean in the API response; the previous
    # comparison against the string "False" could never match, so no
    # unencrypted database was ever reported.
    for i in response.get("DBInstances"):
        if not i.get("StorageEncrypted"):
            outTable.append([r, aid, i.get("DBInstanceIdentifier"), i.get("Engine")])
    response = client.describe_db_clusters()
    for i in response.get("DBClusters"):
        if not i.get("StorageEncrypted"):
            outTable.append([r, aid, i.get("DBClusterIdentifier"), i.get("Engine")])
printResult(outTable, "Region, AccountID, DBIdentifier, Engine")
# Multi-AZ disabled on RDS instances/clusters.
printTitle(2, "[Reliability] RDS instance running in single availability zone")
printTitle(3, "Consider enabling multi-az for production use.")
outTable = []
for r in regions:
    client = boto3.client('rds', region_name=r)
    response = client.describe_db_instances()
    for i in response.get("DBInstances"):
        if not i.get("MultiAZ"):
            outTable.append([r, aid, i.get("DBInstanceIdentifier"), i.get("Engine")])
    response = client.describe_db_clusters()
    for i in response.get("DBClusters"):
        if not i.get("MultiAZ"):
            outTable.append([r, aid, i.get("DBClusterIdentifier"), i.get("Engine")])
printResult(outTable, "Region, AccountID, DBIdentifier, Engine")

printTitle(1, "Lambda service review")

# Functions on runtimes that are out of support.
printTitle(2, "[Security] Outdated Lambda runtime")
printTitle(3, "Consider changing to currently supported Lambda runtime versions, "
              "listed on https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html")
outTable = []
for r in regions:
    client = boto3.client('lambda', region_name=r)
    response = client.list_functions()
    for i in response.get("Functions"):
        # Runtime is absent for container-image functions.
        if i.get("Runtime") is not None:
            if re.search("python2|python3.[678]|java8|nodejs[468]|nodejs1[024]|dotnet6", i.get("Runtime")) is not None:
                outTable.append([r, aid, i.get("FunctionName"), i.get("Runtime")])
printResult(outTable, "Region, AccountID, FunctionName, Runtime")
printTitle(1, "Iam service review")

# Access keys older than 180 days (IAM is global - single regional client).
printTitle(2, "[Security] Iam user access key not rotated for 180 days")
printTitle(3, "Consider rotating access key")
outTable = []
client = boto3.client('iam', region_name="us-east-1")
listUsers = client.list_users()
users = jmespath.search("Users[*].UserName", listUsers)
for u in users:
    response = client.list_access_keys(UserName=u)
    for i in response.get("AccessKeyMetadata"):
        if getAgeFromDate(i.get("CreateDate")) > 180:
            outTable.append([aid, u, i.get("AccessKeyId"), getAgeFromDate(i.get("CreateDate"))])
printResult(outTable, "AccountID, UserName, AccessKeyId, AccessKeyAge")

# Principals with the AWS-managed AdministratorAccess policy attached.
printTitle(2, "[Security] Iam AdministratorAccess policy attached")
printTitle(3, "Consider granting minimum privileges "
              "to users/groups/roles. AWS managed policies for job functions are recommended. See "
              "https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_job-functions.html")
outTable = []
client = boto3.client('iam', region_name="us-east-1")
entityResp = client.list_entities_for_policy(
    PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess'
)
for group in jmespath.search("PolicyGroups[*].GroupName", entityResp):
    outTable.append([aid, "Group", group])
for user in jmespath.search("PolicyUsers[*].UserName", entityResp):
    outTable.append([aid, "User", user])
for role in jmespath.search("PolicyRoles[*].RoleName", entityResp):
    outTable.append([aid, "Role", role])
printResult(outTable, "AccountID, Type, Name")
printTitle(1, "Cloudwatch service review")

# Log groups that keep data forever (no retention policy).
printTitle(2, "[Cost Optimization] Cloudwatch LogGroups without retention period")
printTitle(3, "Consider setting retention")
outTable = []
for r in regions:
    client = boto3.client('logs', region_name=r)
    response = client.describe_log_groups()
    for i in response.get("logGroups"):
        if i.get("retentionInDays") is None:
            # storedBytes reported as whole MiB.
            outTable.append([r, aid, i.get("logGroupName"), int(round(i.get("storedBytes") / 1024 / 1024, 0))])
printResult(outTable, "Region, AccountID, LogGroup, SizeMiB")

# Log groups without a KMS key configured.
printTitle(2, "[Security] Cloudwatch LogGroups unencrypted")
printTitle(3, "Consider encrypting LogGroups")
outTable = []
for r in regions:
    client = boto3.client('logs', region_name=r)
    response = client.describe_log_groups()
    for i in response.get("logGroups"):
        if i.get("kmsKeyId") is None:
            outTable.append([r, aid, i.get("logGroupName")])
printResult(outTable, "Region, AccountID, LogGroup")

printTitle(1, "Backup service review")

# Regions that have Ec2/Rds instances but no AWSBackup plan at all.
printTitle(2, "[Reliability] Ec2/Rds instances found but AWSBackup plan missing")
printTitle(3, "Consider setting up AWSBackup plans to backup AWS resources.")
outTable = []
for r in regions:
    client = boto3.client('backup', region_name=r)
    response = client.list_backup_plans()
    if len(response.get("BackupPlansList")) <= 0:
        ec2client = boto3.client("ec2", region_name=r)
        ec2resp = ec2client.describe_instances()
        ec2instances = jmespath.search("Reservations[*].Instances[*]", ec2resp)
        rdsclient = boto3.client("rds", region_name=r)
        rdsresp = rdsclient.describe_db_instances()
        rdsinstances = rdsresp.get("DBInstances")
        instanceCount = len(ec2instances) + len(rdsinstances)
        if instanceCount >= 1:
            outTable.append([r, aid, "AWSBackup plan missing", instanceCount])
printResult(outTable, "Region, AccountID, BackupPlan, Ec2RdsInstances")
printTitle(1, "S3 service review")

# Buckets with no bucket policy attached.
printTitle(2, "[Security] S3 bucket policy missing")
printTitle(3, "Consider creating bucket policy and restrict access to bucket")
outTable = []
client = boto3.client('s3', region_name="us-east-1")
response = client.list_buckets()
for i in jmespath.search("Buckets[*].Name", response):
    try:
        policyResp = client.get_bucket_policy(Bucket=i)
    except botocore.exceptions.ClientError:
        # get_bucket_policy raises a ClientError when no policy exists; catch
        # that specifically instead of a bare except (which also swallowed
        # KeyboardInterrupt and genuine bugs).
        outTable.append([aid, i])
printResult(outTable, "AccountID, BucketName")
printTitle(1, "ElastiCache review")
printTitle(2, "[Sustainability] ElastiCache instances on x64 platform")
printTitle(3, "Consider Graviton instances such as t4g/r7g to optimize your infrastructure investment.")
outTable = []
# Graviton node families carry a digit followed by "g" (e.g. cache.r7g.large);
# node types not matching the pattern are reported as x64.
gravitonPattern = re.compile("[0-9]g.")
for r in regions:
    client = boto3.client('elasticache', region_name=r)
    response = client.describe_cache_clusters()
    for cluster in response.get("CacheClusters"):
        nodeType = cluster.get("CacheNodeType")
        if not gravitonPattern.search(nodeType):
            outTable.append([r, aid, cluster.get("CacheClusterId"), nodeType])
printResult(outTable, "Region, AccountID, CacheClusterId, CacheNodeType")
printTitle(1, "LoadBalancer service review")
printTitle(2, "[Cost Optimization] LB Target group without targets")
printTitle(3, "Consider removing empty target groups")
outTable = []
for r in regions:
    client = boto3.client('elbv2', region_name=r)
    response = client.describe_target_groups()
    for tg in response.get("TargetGroups"):
        health = client.describe_target_health(TargetGroupArn=tg.get("TargetGroupArn"))
        # An empty TargetHealthDescriptions list means no registered targets.
        if not jmespath.search("TargetHealthDescriptions[*].Target", health):
            outTable.append([r, aid, tg.get("TargetGroupName")])
printResult(outTable, "Region, AccountID, TargetGroup")
# KMS review: report enabled customer-managed keys without auto rotation.
printTitle(1, "KMS service review")
printTitle(2, "[Security] Customer Managed Keys do not have auto rotation enabled")
printTitle(3, "Consider enabling auto key rotation. When a key is rotated, previous ones "
              "are still kept within AWS to allow data retrival.")
outTable = []
for r in regions:
    client = boto3.client('kms', region_name=r)
    response = client.list_keys()
    for i in jmespath.search("Keys[*].KeyId", response):
        try:
            meta = client.describe_key(KeyId=i).get("KeyMetadata")
            # boto3 returns real booleans here. The original compared
            # Enabled == "True" (always False, so nothing was ever
            # reported) and KeyRotationEnabled != "False" (always True,
            # and inverted vs. the finding title).
            if meta.get("Enabled") and meta.get("KeyManager") == "CUSTOMER":
                krResp = client.get_key_rotation_status(KeyId=i)
                if not krResp.get("KeyRotationEnabled"):
                    outTable.append([r, aid, i])
        except botocore.exceptions.ClientError:
            # e.g. AccessDenied on keys whose key policy excludes this caller.
            pass
printResult(outTable, "Region, AccountID, KeyId")
# API Gateway review: private REST APIs must carry a resource policy.
printTitle(1, "ApiGateway service review")
printTitle(2, "[Security] ApiGateway resource policy missing")
printTitle(3, "Consider restricting access to private API with a "
              "policy. Private Api should be accessed through Vpc endpoint and a policy ensures the Api cannot "
              "be accessed otherwise. For more detail, see "
              "https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-resource-policies-examples.html")
outTable = []
for r in regions:
    client = boto3.client('apigateway', region_name=r)
    response = client.get_rest_apis()
    for api in response.get("items"):
        # get_rest_apis omits the "policy" key entirely when no resource
        # policy is attached, so api.get("policy") is None there and the
        # original len(None) raised TypeError. A truthiness test covers
        # both the missing-key and empty-string cases.
        if "PRIVATE" in api.get("endpointConfiguration").get("types") and not api.get("policy"):
            outTable.append([r, aid, api.get("name")])
printResult(outTable, "Region, AccountID, PrivateApiName")
printTitle(1, "Cloudtrail service review")
printTitle(2, "[Security] Cloudtrail not encrypted")
printTitle(3, "Consider enabling encryption for cloudtrail")
outTable = []
for r in regions:
    client = boto3.client('cloudtrail', region_name=r)
    response = client.describe_trails()
    # A trail without a KmsKeyId is not SSE-KMS encrypted.
    outTable.extend(
        [r, aid, trail.get("Name")]
        for trail in response.get("trailList")
        if trail.get("KmsKeyId") is None
    )
printResult(outTable, "Region, AccountID, Trail")
printTitle(2, "[Security] Multi-Region cloudtrail not enabled")
printTitle(3, "Consider enabling Multi-Region for at least 1 cloudtrail")
outTable = []
# A single multi-region trail anywhere in the account satisfies this check,
# so count across every region before deciding.
multiRegionTrailCount = 0
for r in regions:
    client = boto3.client('cloudtrail', region_name=r)
    response = client.describe_trails()
    for i in response.get("trailList"):
        if i.get("IsMultiRegionTrail"):
            multiRegionTrailCount += 1
if multiRegionTrailCount <= 0:
    # NOTE(review): r here is whatever region the loop ended on, so the
    # Region column of this finding is arbitrary; this also assumes
    # `regions` is non-empty (otherwise r is undefined) -- confirm.
    outTable.append([r, aid, "Missing multi region trail"])
printResult(outTable, "Region, AccountID, Status")
printTitle(1, "Vpc service review")
printTitle(2, "[Reliability] Insufficient VPN tunnels")
printTitle(3, "Consider having 2 tunnels for each site VPN connection. "
              "AWS performs VPN tunnel endpoint maintenance rather frequently. Having 2 tunnel reduces the risk "
              "of service interruption.")
outTable = []
for r in regions:
    client = boto3.client('ec2', region_name=r)
    response = client.describe_vpn_connections()
    for conn in response.get("VpnConnections"):
        # One outside IP address per configured tunnel.
        tunnels = jmespath.search("Options.TunnelOptions[*].OutsideIpAddress", conn)
        if len(tunnels) < 2:
            outTable.append([r, aid, conn.get("VpnConnectionId"), len(tunnels)])
printResult(outTable, "Region, AccountID, VpnConnection, TunnelCount")
printTitle(1, "Eks service review")
printTitle(2, "[Sustainability] Eks node running on AmazonLinux2 (AL2)")
printTitle(3, "Consider using AmazonLinux2023. "
              "AL2's end of life date is 2025-06-30. AmazonLinux2023 runs on newer kernel and libraries, "
              "which offers better performance and security.")
outTable = []
for r in regions:
    client = boto3.client('eks', region_name=r)
    for cluster in client.list_clusters().get("clusters"):
        for ng in client.list_nodegroups(clusterName=cluster).get("nodegroups"):
            detail = client.describe_nodegroup(clusterName=cluster, nodegroupName=ng)
            amiType = detail.get("nodegroup").get("amiType")
            # AL2-based AMI types start with "AL2_" (e.g. AL2_x86_64).
            if re.search("^AL2_", amiType):
                outTable.append([r, aid, cluster, ng, amiType])
printResult(outTable, "Region, AccountID, Cluster, NodeGroup, AmiType")
printTitle(2, "[Sustainability] Eks control plane version outdated")
printTitle(3, "Consider using upgrading Eks cluster. "
              "Reference https://docs.aws.amazon.com/eks/latest/userguide/kubernetes-versions.html for a list "
              "of current versions. Reference https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html "
              "for upgrade instructions.")
# Minimum acceptable control-plane version, as a (major, minor) tuple.
EKS_MIN_VERSION = (1, 28)
outTable = []
for r in regions:
    client = boto3.client('eks', region_name=r)
    response = client.list_clusters()
    for cluster in response.get("clusters"):
        clusterResp = client.describe_cluster(name=cluster)
        version = jmespath.search("cluster.version", clusterResp)
        # Compare numerically per component. The original float(version)
        # comparison misorders versions whose minor part has a different
        # digit count (e.g. "1.100" -> 1.1, which would wrongly compare
        # below 1.28).
        versionTuple = tuple(int(part) for part in version.split(".")[:2])
        if versionTuple < EKS_MIN_VERSION:
            outTable.append([r, aid, cluster, clusterResp.get("cluster").get("version")])
printResult(outTable, "Region, AccountID, Cluster, Version")
# Finalize: flush the accumulated markdown report to disk.
mdFile.create_md_file()
print("Report written to AwsReviewReport.md")
# TODO
"""
- config enabled for all regions
"""

22
aws/add-sg.sh Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash
#
# script to add 1 SG to all instances.
# this scripts takes 2 arguments, first is the aws profile name, second is the SG to add.
# e.g. ./add-sg.sh acme sg-1234567
#
# you will need awscli for this script to work, and an aws profile
# associated with an IAM user with the AmazonEC2FullAccess policy
AWSPROFILE=$1
aws --profile=$AWSPROFILE ec2 describe-instances --output json \
| jq ".[][].Instances[].InstanceId" -r | while read l; do
  # Space-separated list of the instance's current security group ids.
  SG=$(aws --profile=$AWSPROFILE ec2 describe-instances --instance-ids $l --output json | jq ".[][].Instances[].SecurityGroups[].GroupId" -r | xargs)
  echo "Existing SGs on $l: $SG"
  # NOTE(review): substring match -- an SG id that is a prefix of another
  # id would also match; confirm this is acceptable.
  if [[ $SG == *$2* ]]; then
    echo "$2 already associated, do nothing"
    continue
  fi
  # modify-instance-attribute replaces the whole SG list, so pass the
  # existing groups plus the new one.
  aws --profile=$AWSPROFILE ec2 modify-instance-attribute --instance-id $l --groups $SG $2
  echo "New SGs on $l: $(aws --profile=$AWSPROFILE ec2 describe-instances --instance-ids $l --output json | jq ".[][].Instances[].SecurityGroups[].GroupId" -r | xargs)"
done

53
aws/allocation.txt Normal file
View File

@ -0,0 +1,53 @@
"eipalloc-0a5057ea75a8d68df"
"eipalloc-0b67a2e81123b0dfa"
"eipalloc-0ffe0084439dabfa5"
"eipalloc-05841bc3e86c0a02c"
"eipalloc-0c8c1cd56cc6812f0"
"eipalloc-0554a730c84838cb2"
"eipalloc-06400a193622f18ef"
"eipalloc-0943a26dff737c3bb"
"eipalloc-077d3ec5fd5d5815f"
"eipalloc-0ea7ba83567d8dc77"
"eipalloc-00d4f1e5bfe2a29ac"
"eipalloc-0f4ea29293daafa0c"
"eipalloc-0f17f811a17375567"
"eipalloc-08c66c8d4793f600f"
"eipalloc-052aee7ebca95a297"
"eipalloc-0290936c16a783f48"
"eipalloc-0cce181acf4f5228b"
"eipalloc-0b04be0d39e8b6805"
"eipalloc-0ff81f6efa6fb6601"
"eipalloc-0780c63f667aa53b1"
"eipalloc-0edd524a2afdfc7c0"
"eipalloc-0f3c83df95bd6ac76"
"eipalloc-0dc990d8299cade51"
"eipalloc-0a01d13764e9bb4d9"
"eipalloc-09b9958ddb3fb2b81"
"eipalloc-0662c94b444ee6fd7"
"eipalloc-06aeba0dd6bd37bbb"
"eipalloc-0064badc3c20d01cb"
"eipalloc-062f18a351eb3c44b"
"eipalloc-0d7fabd6b6736d8ba"
"eipalloc-08d9a5fd5a24f6410"
"eipalloc-0adda055467e5e5a6"
"eipalloc-01a938dfb927c8f3f"
"eipalloc-03cb14ef5313e4675"
"eipalloc-046e405d363fd4c4f"
"eipalloc-0b986e4e5ffd5b1e2"
"eipalloc-0ec66214895ce0c7c"
"eipalloc-01ea4b48ec61a3068"
"eipalloc-06abef49b076dd20c"
"eipalloc-056470c9f98dce3fc"
"eipalloc-01e870c66848a8991"
"eipalloc-036e72654499bb46a"
"eipalloc-0d52b854250ed9d83"
"eipalloc-01185fd790b008301"
"eipalloc-015859de51da5208c"
"eipalloc-00ee026701f21593f"
"eipalloc-00855bb5bae5e0e8b"
"eipalloc-06038eeb961c72d1a"
"eipalloc-0e92317ee06b38396"
"eipalloc-03a47ae15fccad2c5"
"eipalloc-0d13f893a9ac741f0"
"eipalloc-041d3f17fa7e019f5"
"eipalloc-033c782d570cd537d"

View File

@ -0,0 +1,102 @@
{
"metrics": {
"append_dimensions": {
"AutoScalingGroupName": "${aws:AutoScalingGroupName}",
"ImageId": "${aws:ImageId}",
"InstanceId": "${aws:InstanceId}",
"InstanceType": "${aws:InstanceType}"
},
"aggregation_dimensions": [
[
"InstanceId"
],
[
"InstanceType"
],
[
"AutoScalingGroupName"
],
[
"InstanceId",
"InstanceType"
]
],
"metrics_collected": {
"collectd": {
"metrics_aggregation_interval": 60
},
"procstat": [
{
"pattern": "/usr/sbin/sshd",
"measurement": [
"cpu_usage",
"memory_rss"
]
},
{
"pattern": "crond",
"measurement": [
"cpu_usage",
"memory_rss"
],
"metrics_collection_interval": 10
}
],
"cpu": {
"measurement": [
"cpu_usage_idle",
"cpu_usage_iowait",
"cpu_usage_user",
"cpu_usage_system"
],
"metrics_collection_interval": 60,
"resources": [
"*"
],
"totalcpu": false
},
"disk": {
"measurement": [
"used_percent",
"inodes_free"
],
"metrics_collection_interval": 60,
"resources": [
"*"
],
"ignore_file_system_types": [
"sysfs",
"devtmpfs",
"tmpfs"
]
},
"diskio": {
"measurement": [
"io_time"
],
"metrics_collection_interval": 60,
"resources": [
"*"
]
},
"mem": {
"measurement": [
"mem_used_percent"
],
"metrics_collection_interval": 60
},
"statsd": {
"metrics_aggregation_interval": 60,
"metrics_collection_interval": 60,
"service_address": ":8125"
},
"swap": {
"measurement": [
"swap_used_percent"
],
"metrics_collection_interval": 60
}
}
}
}

16
aws/assume-role.py Normal file
View File

@ -0,0 +1,16 @@
import json
import boto3
def lambda_handler(event, context):
    """
    Assume a fixed cross-account role and print shell `export` lines for
    the temporary credentials (key id, secret, session token, region).
    """
    # NOTE(review): printing secrets from a Lambda sends them to CloudWatch
    # Logs -- confirm this is only used for local/manual debugging.
    sts_client = boto3.client('sts')
    assumed_role_object=sts_client.assume_role(
        RoleArn="arn:aws:iam::111111111111:role/rolex",
        RoleSessionName="lambda"
    )
    print("export AWS_ACCESS_KEY_ID=" + assumed_role_object['Credentials']['AccessKeyId'])
    print("export AWS_SECRET_ACCESS_KEY=" + assumed_role_object['Credentials']['SecretAccessKey'])
    print("export AWS_SESSION_TOKEN=" + assumed_role_object['Credentials']['SessionToken'])
    print("export AWS_DEFAULT_REGION=ap-east-1")

9
aws/aws-config-status.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
# Check config recorder status in all regions
# For each region, print whether the recorder is running and whether it
# records global resource types (IAM etc.).
aws --region=us-east-1 ec2 describe-regions --query Regions[].RegionName --output text | sed -e 's/\t/\n/g' | while read r; do
  echo "$r"
  echo "Recorder on: $(aws --region $r configservice describe-configuration-recorder-status --query ConfigurationRecordersStatus[].recording --output text)"
  echo "Recording global resources: $(aws --region $r configservice describe-configuration-recorders --query ConfigurationRecorders[].recordingGroup.includeGlobalResourceTypes --output text)"
done

3
aws/aws-endpoint-inventory.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Count VPC endpoints per service name across all regions; GNU parallel
# fans out one describe-vpc-endpoints call per region.
aws ec2 describe-regions --query Regions[].RegionName --output text | tr '\t' '\n' | parallel \
aws ec2 --region {} describe-vpc-endpoints --query VpcEndpoints[].ServiceName --output text | tr '\t' '\n' | sort | uniq -c

View File

@ -0,0 +1,53 @@
#!/usr/bin/python3
"""Poll AWS service-health RSS feeds, persist entries into a local sqlite
database (aws-rss.db), and push any not-yet-notified entries to a
Telegram chat via the Bot API."""
import feedparser
import sqlite3
import smtplib
import json
import requests
# Feeds to watch.
feeds = ['https://status.aws.amazon.com/rss/ec2-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/rds-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/vpnvpc-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/directconnect-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/directconnect-ap-southeast-1.rss']
dbconn = sqlite3.connect('aws-rss.db')
# Insert every entry; re-inserting an already-seen id violates the table's
# uniqueness constraint, which we deliberately ignore.
for f in feeds:
    NewsFeed = feedparser.parse(f)
    for e in NewsFeed.entries:
        try:
            dbconn.execute("insert into rss values (?,?,?,?,false)", (e.id, e.published, e.title, e.summary))
        except sqlite3.IntegrityError:
            pass
dbconn.commit()
results = dbconn.execute('select * from rss where notified = false')
msg = " "
records = results.fetchall()
if len(records) == 0:
    print("All events already notified")
    quit()
# NOTE(review): bot token redacted -- replace botXXX before use.
url = 'https://api.telegram.org/botXXX/sendMessage'
for r in records:
    lineBreak = "\n"
    # Row layout: r[0]=id, r[1]=published, r[2]=title, r[3]=summary.
    content = lineBreak.join((
        r[0].split('#')[1].split('-')[0].upper() + " // <b>" + r[2] + "</b>",
        "PublishTime: " + r[1],
        "<pre>",
        r[3],
        "</pre>"))
    tgMessage = {
        "chat_id": 1111111,
        "parse_mode": "HTML",
        "text": content
    }
    requests.post(url, json=tgMessage)
dbconn.execute('update rss set notified = true where notified = false')
dbconn.commit()
dbconn.close()

58
aws/aws-health-events.sh Normal file
View File

@ -0,0 +1,58 @@
#!/usr/bin/python3
"""Poll AWS service-health RSS feeds, persist entries into a local sqlite
database (aws-rss.db), and email a JSON digest of any not-yet-notified
entries via the local SMTP server."""
import feedparser
import sqlite3
import smtplib
import json
from email.message import EmailMessage
from email.headerregistry import Address
# Feeds to watch.
feeds = ['https://status.aws.amazon.com/rss/ec2-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/rds-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/vpnvpc-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/directconnect-ap-east-1.rss',
         'https://status.aws.amazon.com/rss/directconnect-ap-southeast-1.rss']
dbconn = sqlite3.connect('aws-rss.db')
# Insert every entry; re-inserting an already-seen id violates the table's
# uniqueness constraint, which we deliberately ignore.
for f in feeds:
    NewsFeed = feedparser.parse(f)
    for e in NewsFeed.entries:
        try:
            dbconn.execute("insert into rss values (?,?,?,?,false)", (e.id, e.published, e.title, e.summary))
        except sqlite3.IntegrityError:
            pass
dbconn.commit()
results = dbconn.execute('select * from rss where notified = false')
msg = " "
records = results.fetchall()
if len(records) == 0:
    print("All events already notified")
    quit()
content_list = []
for r in records:
    # Row layout: r[0]=id, r[1]=published, r[2]=title, r[3]=summary.
    map1 = {
        'Service': r[0].split('#')[1].split('-')[0].upper(),
        'PublishTime': r[1],
        # De-fang the URL so mail clients do not auto-link it.
        'Link': r[0].replace("http","h t t p"),
        'Title': r[2],
        'Summary': r[3]
    }
    content_list.append(map1)
dbconn.execute('update rss set notified = true where notified = false')
dbconn.commit()
dbconn.close()
email = EmailMessage()
email['Subject'] = "[ALERT] AWS event detected!"
email['From'] = 'DoNotReply@racker.pro'
email['To'] = 'user@domain.com'
email.set_content(json.dumps(content_list, sort_keys=False, indent=2, ensure_ascii = False))
s = smtplib.SMTP('localhost')
s.send_message(email)

1
aws/aws-inventory Submodule

@ -0,0 +1 @@
Subproject commit 122fe95f953aa539f5a671c3db911eda2cf322de

View File

@ -0,0 +1,32 @@
#!/usr/bin/env python3
"""Dump every resource AWS Config has discovered as CSV
(ResourceType, ResourceId), skipping bookkeeping resource types."""
import boto3
import pandas as pd
import csv
client = boto3.client('config')
resp = client.get_discovered_resource_counts()
# print('totalDiscoveredResources', resp['totalDiscoveredResources'], sep=": ")
results = []
for item in resp['resourceCounts']:
    paginator = client.get_paginator('list_discovered_resources')
    page_iterator = paginator.paginate(resourceType=item['resourceType'])
    for page in page_iterator:
        for res in page['resourceIdentifiers']:
            # Skip compliance/snapshot record types that drown out real assets.
            if item['resourceType'] in [
                "AWS::Config::ResourceCompliance",
                "AWS::Backup::RecoveryPoint",
                "AWS::RDS::DBSnapshot",
                "AWS::SSM::ManagedInstanceInventory",
                "AWS::SSM::AssociationCompliance",
                "AWS::SSM::PatchCompliance",
                "AWS::IAM::Policy"
            ]:
                continue
            # Prefer the human-readable name, fall back to the raw id.
            preferResName = res.get('resourceName', res.get('resourceId'))
            results += [[item['resourceType'], preferResName]]
df = pd.DataFrame(results, columns=['ResourceType', 'ResourceId'])
# print(df)
print(df.to_csv(index=False, quoting=csv.QUOTE_NONNUMERIC))

40
aws/aws-inventory.sh Executable file
View File

@ -0,0 +1,40 @@
#!/bin/bash
# Pretty-printer: reads stdin, prints "## <title> (<line count>)" followed
# by the body rendered as an aligned, indented table.
function formatprint() {
  cat - > /tmp/formatprint.tmp
  echo "## $1 ($(cat /tmp/formatprint.tmp | wc -l))"
  #cat /tmp/formatprint.tmp | sed -e 's/^/ /g'
  cat /tmp/formatprint.tmp | column -t -s, | sed -e 's/^/ /g'
  rm -f /tmp/formatprint.tmp
}
# Generate inventory of ec2, rds, lb, and s3 buckets.
aws --region=us-east-1 ec2 describe-regions --query Regions[].RegionName --output text | sed -e 's/\t/\n/g' | while read r; do
  export AWS_DEFAULT_REGION=$r
  echo "---"
  echo "# Region: $r"
  echo "---"
  aws ec2 describe-instances --query 'Reservations[*].Instances[*].[InstanceId, Tags[?Key==`Name`].Value[] | [0], PlatformDetails, InstanceType,PrivateIpAddress]' --output json | jq -cr '.[][] | @csv' | tr -d '[\" ' | formatprint EC2
  aws rds describe-db-instances --query 'DBInstances[*].[DBInstanceIdentifier, DBInstanceClass, Engine, AllocatedStorage]' --output json | jq -cr '.[]|@csv' | tr -d \" | formatprint RDS
  aws elasticache describe-cache-clusters --query 'CacheClusters[*].[CacheClusterId, CacheNodeType, Engine, EngineVersion]' --output json | jq -cr '.[]|@csv' | tr -d \" | formatprint ElastiCache
  aws elb describe-load-balancers --query 'LoadBalancerDescriptions[*].[LoadBalancerName,DNSName,Scheme]' --output json | jq -cr '.[]|@csv' | tr -d \" | formatprint ELB
  aws elbv2 describe-load-balancers --query 'LoadBalancers[*].[LoadBalancerName,DNSName,Scheme]' --output json | jq -cr '.[]|@csv' | tr -d \" | formatprint ALB
  aws ec2 describe-vpcs --query 'Vpcs[*].[VpcId, CidrBlock]' --output json | jq -cr '.[]|@csv' | tr -d \" | formatprint VPC
  # Buckets, CloudFront and Route53 are global but listed once per region here.
  aws s3api list-buckets --output text | awk '{print $NF}' | formatprint S3Bucket
  aws ecs list-clusters | jq -cr '.[][]' | awk -F/ '{print $NF}' | formatprint ECS
  aws eks list-clusters | jq '.[][]' | awk -F/ '{print $NF}' | formatprint EKS
  aws cloudfront list-distributions --query 'DistributionList.Items[*].[DomainName]' --output text | formatprint CloudFront
  aws --no-cli-pager route53 list-hosted-zones --query 'HostedZones[*].[Name,ResourceRecordSetCount]' --output json | jq -cr '.[]|@csv' | tr -d \" | sort -k1 | formatprint R53-RecordCount
done
# echo "# IAM roles"
# aws iam list-roles | jq -cr '.Roles[] | .RoleName' | grep -v AWSServiceRoleFor

62
aws/aws-inventory2.sh Executable file
View File

@ -0,0 +1,62 @@
#!/bin/bash
# Per-region resource lister; global resources (R53/S3/CloudFront) are
# printed only when handling us-east-1 so they appear exactly once.
function list-resources-in-region {
  echo "***"
  echo -e "Region: $1"
  echo "***"
  echo -e "\n## EC2:"
  aws --region=$1 ec2 describe-instances --query 'Reservations[*].Instances[*].[InstanceId, Tags[?Key==`Name`].Value[] | [0], PlatformDetails, InstanceType,PrivateIpAddress]' --output json | jq -cr '.[][] | @tsv' | tr -d '[\" '
  echo -e "\n## RDS:"
  aws --region=$1 rds describe-db-instances --query 'DBInstances[*].[DBInstanceIdentifier, DBInstanceClass, Engine, AllocatedStorage]' --output json | jq -cr '.[]|@tsv' | tr -d \"
  echo -e "\n## ElastiCache:"
  aws --region=$1 elasticache describe-cache-clusters --query 'CacheClusters[*].[CacheClusterId, CacheNodeType, Engine, EngineVersion]' --output json | jq -cr '.[]|@tsv' | tr -d \"
  echo -e "\n## ELB:"
  aws --region=$1 elb describe-load-balancers --query 'LoadBalancerDescriptions[*].[LoadBalancerName,DNSName,Scheme]' --output json | jq -cr '.[]|@tsv' | tr -d \"
  echo -e "\n## ALB:"
  aws --region=$1 elbv2 describe-load-balancers --query 'LoadBalancers[*].[LoadBalancerName,DNSName,Scheme]' --output json | jq -cr '.[]|@tsv' | tr -d \"
  echo -e "\n## VPC:"
  aws --region=$1 ec2 describe-vpcs --query 'Vpcs[*].[VpcId, CidrBlock]' --output json | jq -cr '.[]|@tsv' | tr -d \"
  echo -e "\n## ECS_Clusters:"
  aws --region=$1 ecs list-clusters | jq -cr '.[][]' | awk -F/ '{print $NF}'
  echo -e "\n## EKS_Clusters:"
  aws --region=$1 eks list-clusters | jq '.[][]' | awk -F/ '{print $NF}'
  echo -e "\n## EFS:"
  aws --region=$1 efs describe-file-systems --output text
  echo -e "\n## EMR:"
  aws --region=$1 emr list-clusters --output text
  # global resources
  if [ $1 == "us-east-1" ]; then
    echo -e "\n## Route53_zones:"
    aws --region=$1 --no-cli-pager route53 list-hosted-zones --query 'HostedZones[].Name' --output text | tr '\t' '\n'
    echo -e "\n## S3_Buckets:"
    aws --region=$1 s3api list-buckets --output text | awk '{print $NF}'
    echo -e "\n## Cloudfront:"
    aws --region=$1 cloudfront list-distributions --query 'DistributionList.Items[*].[DomainName]' --output text
  fi
}
# Generate inventory of ec2, rds, lb, and s3 buckets.
# export -f makes the function visible to the subshells GNU parallel spawns.
export -f list-resources-in-region
aws --region=us-east-1 ec2 describe-regions --query Regions[].RegionName --output text | sed -e 's/\t/\n/g' | parallel list-resources-in-region {}
#aws --region=us-east-1 ec2 describe-regions --query Regions[].RegionName --output text | sed -e 's/\t/\n/g' | while read r; do
# list-resources-in-region $r > /tmp/aws-inventory-$r.txt &
#done
# echo "This may take a moment..."
# sleep 50
#cat /tmp/aws-inventory*.txt
#rm -f /tmp/aws-inventory*.txt

11
aws/aws-inventory3.sh Executable file
View File

@ -0,0 +1,11 @@
#!/bin/bash
# List AWS Config discovered resource ids grouped by type, skipping the
# bookkeeping types in exclude_services.
exclude_services=("AWS::AppConfig::DeploymentStrategy" "AWS::Athena::WorkGroup" "AWS::Cassandra::Keyspace" "AWS::CloudWatch::Alarm" "AWS::CodeDeploy::DeploymentConfig" "AWS::Config::ResourceCompliance" "AWS::EC2::DHCPOptions" "AWS::EC2::EC2Fleet" "AWS::EC2::LaunchTemplate" "AWS::EC2::NetworkAcl" "AWS::EC2::NetworkInsightsPath" "AWS::EC2::RouteTable" "AWS::EC2::SubnetRouteTableAssociation" "AWS::EventSchemas::Registry" "AWS::IAM::Policy" "AWS::RDS::DBSubnetGroup" "AWS::S3::AccountPublicAccessBlock" "AWS::Route53Resolver::ResolverRuleAssociation" "AWS::Route53Resolver::ResolverRule" "AWS::EC2::FlowLog" "AWS::Events::Rule" "AWS::SecretsManager::Secret" "AWS::SSM::PatchCompliance" "AWS::SSM::ManagedInstanceInventory" "AWS::SSM::AssociationCompliance" "AWS::IAM::Role" "AWS::RDS::DBSnapshot" "AWS::EC2::NetworkInterface" "AWS::Backup::RecoveryPoint" "AWS::Route53Resolver::ResolverRuleAssociation" "AWS::Events::EventBus" "AWS::GuardDuty::IPSet" "AWS::Config::ConfigurationRecorder" "AWS::Backup::BackupSelection" "AWS::KMS::Key" )
aws configservice get-discovered-resource-counts | jq -cr '.resourceCounts[] | .resourceType' | while read r; do
  # Substring match of $r against the flattened exclude list.
  if [[ " ${exclude_services[@]} " =~ "${r}" ]]; then
    continue
  fi
  echo "* $r"
  aws configservice list-discovered-resources --resource-type $r | jq -cr '.resourceIdentifiers[] | .resourceId' | nl
done

51
aws/aws-org-dump.py Executable file
View File

@ -0,0 +1,51 @@
#!/usr/bin/python3
import boto3
def recurseChildren(ouid: str, level: int) -> None:
    """
    Recurse down the AWS organization tree, printing each OU and the
    accounts attached directly under each parent.

    Fix: the original called printChildAccounts(ouid, level) inside the
    `for ou in children` loop when ouid was an OU, so a parent's direct
    accounts were printed once per child OU. They are now printed exactly
    once per parent, before descending.

    :param ouid: Parent OUID (root "r-..." or OU "ou-...")
    :param level: Used internally for printing dots
    :return: None
    """
    global client
    children = client.list_organizational_units_for_parent(ParentId=ouid).get('OrganizationalUnits')
    # Accounts attached directly to this parent -- printed once, whether or
    # not the parent also has child OUs.
    printChildAccounts(ouid, level)
    for ou in children:
        print('.' * 2 * level, ou.get('Name'), ou.get('Id'))
        recurseChildren(ou.get('Id'), level + 1)
def printChildAccounts(ouid: str, level: int) -> None:
    """
    Print every account attached directly under the given parent, indented
    with dots according to tree depth.

    :param ouid: Parent OUID
    :param level: Used internally for printing dots
    :return: None
    """
    global client
    indent = '.' * 2 * level
    for account in client.list_accounts_for_parent(ParentId=ouid).get('Accounts'):
        print(indent, account.get('Name'), account.get('Id'))
if __name__ == '__main__':
    # Entry point: locate the organization root, then walk the OU tree.
    client = boto3.client('organizations')
    response = client.list_roots()
    rootId = response['Roots'][0]['Id']
    print('Root', rootId, sep=": ")
    recurseChildren(rootId, 1)

23
aws/aws-role-policies.sh Executable file
View File

@ -0,0 +1,23 @@
#!/bin/bash
# Print each IAM role with its attached managed policies and its inline
# policy names.
function formatprint() {
  cat - > /tmp/formatprint.tmp
  echo "# $1 ($(cat /tmp/formatprint.tmp | wc -l))"
  cat /tmp/formatprint.tmp | sed -e 's/^/ /g'
  rm -f /tmp/formatprint.tmp
}
# Generate inventory of ec2, rds, lb, and s3 buckets.
# aws eks list-clusters | jq '.[][]' | awk -F/ '{print $NF}' | formatprint EKS
# IAM roles"
# aws iam list-roles | jq -cr '.Roles[] | .RoleName' | grep -v AWSServiceRoleFor
# IAM users
aws iam list-roles --page-size 100| jq -cr '.Roles[] | .RoleName ' | while read r; do
  echo "Role: $r"
  aws iam list-attached-role-policies --role-name $r | jq -cr '.AttachedPolicies[] | .PolicyArn' | formatprint RoleManagedPolicies
  aws iam list-role-policies --role-name $r | jq -cr '.PolicyNames[]' | formatprint RoleInlinePolicies
  echo ""
done

View File

@ -0,0 +1 @@
# Export up to 500 HIGH-severity Security Hub findings as CSV:
# account id, title, resource id(s), severity label.
aws securityhub get-findings --filters '{"SeverityLabel":[{"Value": "HIGH","Comparison":"EQUALS"}]}' --max-items 500 | jq -cr '.Findings[] | [.AwsAccountId,.Title,.Resources[].Id,.Severity.Label] | @csv'

28
aws/aws-users-policies.sh Executable file
View File

@ -0,0 +1,28 @@
#!/bin/bash
# Print each IAM user with their attached/inline policies, plus the
# attached/inline policies of every group the user belongs to.
function formatprint() {
  cat - > /tmp/formatprint.tmp
  echo "# $1 ($(cat /tmp/formatprint.tmp | wc -l))"
  cat /tmp/formatprint.tmp | sed -e 's/^/ /g'
  rm -f /tmp/formatprint.tmp
}
# Generate inventory of ec2, rds, lb, and s3 buckets.
# aws eks list-clusters | jq '.[][]' | awk -F/ '{print $NF}' | formatprint EKS
# IAM roles"
# aws iam list-roles | jq -cr '.Roles[] | .RoleName' | grep -v AWSServiceRoleFor
# IAM users
aws iam list-users | jq -cr '.Users[] | .UserName' | while read u; do
  echo "User: $u"
  aws iam list-attached-user-policies --user-name $u | jq -cr '.AttachedPolicies[] | .PolicyArn' | formatprint UserManagedPolicies
  aws iam list-user-policies --user-name $u | jq -cr '.PolicyNames[]' | formatprint UserInlinePolicies
  aws iam list-groups-for-user --user-name $u | jq -cr '.Groups[] | .GroupName' | while read g; do
    echo "Groups: $g"
    aws iam list-attached-group-policies --group-name $g | jq -cr '.AttachedPolicies[] | .PolicyArn' | formatprint GroupManagedPolicies
    aws iam list-group-policies --group-name $g | jq -cr '.PolicyNames[]' | formatprint GroupInlinePolicies
  done
  echo ""
done

BIN
aws/co.db Normal file

Binary file not shown.

29
aws/cost-optimization.sh Executable file
View File

@ -0,0 +1,29 @@
#!/bin/bash
# Work in progress. This script does not work with organization. SP routines have not been written.
# Compare running EC2 instance counts against purchased RIs per type, via a
# throwaway sqlite database.
# Create sqlite database
rm -f co.db
sqlite3 co.db 'create table instances(type varchar(20) primary key,running int, ri int);'
# Count instance types in region
echo "Getting list of Ec2 instances..."
aws ec2 describe-instances --query 'Reservations[].Instances[].InstanceType' --filter Name=instance-state-name,Values=running --output text | tr '\t' '\n' | sort | uniq -c | while read count type; do
  echo "insert into instances values(\"$type\", $count,0);"
done | sqlite3 co.db
# List RI in region
echo "Getting list of RI..."
aws ec2 describe-reserved-instances --query ReservedInstances[].[InstanceCount,InstanceType] --output text | while read count type; do
  echo "update instances set ri = $count where type = \"$type\";"
done | sqlite3 co.db
# List ISP
echo "Getting ISP..."
aws savingsplans describe-savings-plans
# List CSP
echo "Getting CSP..."
aws savingsplans describe-savings-plans
# List table: running minus reserved = RI purchase candidates.
sqlite3 -header -column co.db "select type, running, ri, running - ri as candidates from instances;"

1
aws/dump-scp.sh Executable file
View File

@ -0,0 +1 @@
# Dump every Service Control Policy in the organization to <policy-id>.txt.
aws organizations list-policies --filter SERVICE_CONTROL_POLICY | jq -cr '.Policies[] | .Id' | while read i; do aws organizations describe-policy --policy-id $i --output text | tee $i.txt; done

3
aws/ec2-inventory.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Dump all EC2 instances (id, IPs, Name tag, type, AZ, platform, state) to
# ec2-inventory.csv.
# NOTE(review): `.Tags[]` errors if an instance has no Tags at all, and the
# Name column is omitted when the tag is missing -- confirm fleet always
# carries a Name tag.
aws ec2 describe-instances | jq -cr '.Reservations[].Instances[] | [.InstanceId,.PublicIpAddress,.PrivateIpAddress,(.Tags[] | select(.Key=="Name")| .Value), .InstanceType, .Placement.AvailabilityZone, .Platform, .State.Name] | @csv' > ec2-inventory.csv

6
aws/ec2-with-public-ip.sh Executable file
View File

@ -0,0 +1,6 @@
#!/usr/bin/env bash
# Print the account id, then every instance that has a public IP address.
aws sts get-caller-identity --query Account
aws ec2 describe-instances --query 'Reservations[].Instances[?PublicIpAddress != `null`].[InstanceId, PublicIpAddress]' --output text
echo "= = ="

View File

@ -0,0 +1,6 @@
# Export the default version of every customer-managed IAM policy to
# iam-policies/<name>.json (the redirection fails unless that directory
# already exists).
aws iam list-policies --scope Local | jq -cr '.Policies[].Arn' | while read i; do
  VER=$(aws iam get-policy --policy-arn $i | jq -cr .Policy.DefaultVersionId)
  POLNAME=$(echo $i | awk -F/ '{print $NF}')
  aws iam get-policy-version --policy-arn $i --version-id $VER > iam-policies/$POLNAME.json
done

6
aws/find-unused-sg.sh Executable file
View File

@ -0,0 +1,6 @@
#!/bin/bash
# For each security group, print how many network interfaces reference it;
# a count of 0 means the group is unused.
aws ec2 describe-security-groups | jq -cr '.SecurityGroups[].GroupId' | while read s; do
  echo -n "$s: "
  aws ec2 describe-network-interfaces --filters Name=group-id,Values=$s | jq -cr '[.NetworkInterfaces[].NetworkInterfaceId] | length'
done

8
aws/gzip-file.py Executable file
View File

@ -0,0 +1,8 @@
#!/usr/bin/python3
"""Gzip-compress test.txt into test.gz."""
import gzip
import shutil
with open('test.txt', 'rb') as source, gzip.open('test.gz', 'wb') as target:
    shutil.copyfileobj(source, target)

115
aws/iam-last-activity.py Executable file
View File

@ -0,0 +1,115 @@
#!/usr/bin/python3
from datetime import datetime
import boto3
import jmespath
import time
import re
# dump user/group/role last activity
def generateLastAccessed(myclient: boto3.client, arn: str, myAccountId: str) -> list[str]:
    """
    Start an IAM "service last accessed" job for the entity at *arn*, wait
    for it to complete, and return the de-duplicated identifiers of the
    customer policies granting access to the services the entity used.

    Fixes: the original ignored the *myclient* parameter and used the
    global `client`; it also issued a dead get_service_last_accessed_details
    call before the polling loop.

    :param myclient: boto3 IAM client to use for all calls
    :param arn: ARN of the IAM user/group/role to analyze
    :param myAccountId: account id, used to keep only same-account policy ARNs
    :return: unique identifiers -- "INLINE:<name>" or a policy ARN
    """
    response = myclient.generate_service_last_accessed_details(
        Arn=arn,
        Granularity='SERVICE_LEVEL')
    jobId = response.get("JobId")
    # Poll until the asynchronous job completes.
    while True:
        time.sleep(2)
        accessDetails = myclient.get_service_last_accessed_details(JobId=jobId, MaxItems=20)
        if accessDetails.get("JobStatus") == "COMPLETED":
            break
    r2 = myclient.list_policies_granting_service_access(
        Arn=arn,
        ServiceNamespaces=jmespath.search("ServicesLastAccessed[*].ServiceNamespace", accessDetails)
    )
    returnString = []
    for p in jmespath.search("PoliciesGrantingServiceAccess[*].Policies[]", r2):
        if p.get("PolicyType") == "INLINE":
            returnString.append("INLINE:" + p.get("PolicyName"))
        elif myAccountId in p.get("PolicyArn"):
            # Keep only customer-managed policies in this account.
            returnString.append(p.get("PolicyArn"))
    # dict preserves insertion order, so this de-dupes while keeping order.
    return list(dict.fromkeys(returnString))
def formatDate(myTime) -> str:
    """
    Render a datetime as an ISO date string, or "Never" when absent.

    The original was annotated -> str but returned a datetime.date object
    (and annotated the parameter as the `time` module); str(date) prints
    identically, so returning the isoformat string preserves all existing
    print() output while honoring the annotation.

    :param myTime: a datetime.datetime, or None
    :return: "YYYY-MM-DD" or "Never"
    """
    if myTime is None:
        return "Never"
    return myTime.date().isoformat()
def getPolicyUpdateTime(myClient: boto3.client, arn: str) -> str:
    # Fetch the managed policy's last-modified timestamp.
    # NOTE(review): actually returns a datetime.date despite the str
    # annotation; callers only print it, where it renders as YYYY-MM-DD.
    resp = myClient.get_policy(PolicyArn=arn)
    return resp.get("Policy").get("UpdateDate").date()
def heading(title: str) -> None:
    """Print *title* framed between two 40-character separator rules."""
    rule = "=" * 40
    print(rule)
    print("**", title, "**")
    print(rule)
sts = boto3.client('sts')
accountId = sts.get_caller_identity()["Account"]
client = boto3.client('iam')
# --- Users: creation, password/access-key last-used, and policy usage ---
heading("Users")
entity = client.list_users()
for u in jmespath.search("Users[*]", entity):
    accessKeyQuery = client.list_access_keys(UserName=u.get('UserName'))
    keys = accessKeyQuery.get("AccessKeyMetadata")  # NOTE(review): unused
    print("UserName", u.get("UserName"), sep=": ")
    print("CreateDate", formatDate(u.get("CreateDate")), sep=": ")
    print("PasswordLastUsed", formatDate(u.get("PasswordLastUsed")), sep=": ")
    # Only run the (slow, job-based) policy last-accessed lookup for users
    # that have signed in and have no inactive key.
    doPolicyLastUsed = False if u.get("PasswordLastUsed") is None else True
    for k in accessKeyQuery.get("AccessKeyMetadata"):
        print("AccessKeyId", k.get("AccessKeyId"), sep=": ")
        print("AccessKeyStatus", k.get("Status"), sep=": ")
        if k.get("Status") == "Inactive":
            doPolicyLastUsed = False
        print("AccessKeyCreateDate", formatDate(k.get("CreateDate")), sep=": ")
        akLastUsedQuery = client.get_access_key_last_used(AccessKeyId=k.get("AccessKeyId"))
        print("AccessKeyLastUsed", formatDate(akLastUsedQuery.get("AccessKeyLastUsed").get("LastUsedDate")), sep=": ")
    if doPolicyLastUsed:
        lastAccessed = generateLastAccessed(client, u.get("Arn"), accountId)
        if len(lastAccessed) > 0:
            print("CustomerPolicyLastUsed and PolicyLastModified:")
            for p in lastAccessed:
                # Inline policies have no ARN to look up an UpdateDate for.
                if "INLINE" not in p:
                    print(p, getPolicyUpdateTime(client, p), sep=", ")
                else:
                    print(p)
    print("-" * 10)
# --- Groups: name and creation date only ---
heading("Groups")
entity = client.list_groups()
print("GroupName", "CreateDate", sep=", ")
for g in jmespath.search("Groups[*]", entity):
    print(g.get("GroupName"), formatDate(g.get("CreateDate")), sep=", ")
# --- Roles: skip service-linked and SSO-reserved roles by path ---
heading("Roles")
entity = client.list_roles()
for r in jmespath.search("Roles[*]", entity):
    if re.match("^/.*service-role/.*", r.get("Path")) is not None or re.match("^/aws-reserved/",
                                                                              r.get("Path")) is not None:
        continue
    getRoleQuery = client.get_role(RoleName=r.get("RoleName"))
    r1 = getRoleQuery.get("Role")
    print("RoleName", r1.get("RoleName"), sep=": ")
    print("CreateDate", formatDate(r1.get("CreateDate")), sep=": ")
    print("RoleLastUsed", formatDate(jmespath.search("RoleLastUsed.LastUsedDate", r1)), sep=": ")
    # Only analyze policy usage for roles that have actually been assumed.
    if jmespath.search("RoleLastUsed.LastUsedDate", r1) is not None:
        lastAccessed = generateLastAccessed(client, r1.get("Arn"), accountId)
        if len(lastAccessed) > 0:
            print("CustomerPolicyLastUsed and PolicyLastModified:")
            for p in lastAccessed:
                if "INLINE" not in p:
                    print(p, getPolicyUpdateTime(client, p), sep=", ")
                else:
                    print(p)
    print("-" * 10)

15
aws/iam-user-audit.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
# Audit IAM users: emit one CSV row per user (account, user, password-last-used,
# NA, NA) plus one row per access key (account, user, NA, key-id, key-last-used).
# Requires awscli and jq; credentials must allow iam:List* / iam:GetAccessKeyLastUsed.
AID=$(aws sts get-caller-identity --query Account --output text)
# dump list of user to temp file
aws iam list-users | jq -cr '.Users[] | [.UserName, .PasswordLastUsed // "NoPassword"] | @csv' > /tmp/iusers.txt
cat /tmp/iusers.txt | while read line; do
  USER=$(echo $line | awk -F, '{print $1}' | tr -d \")
  # Look this user's PasswordLastUsed back up in the dump, keeping only the
  # date portion (field before the ISO-8601 'T').
  PLU=$(grep "$USER\"," /tmp/iusers.txt | awk -F, '{print $2}' | awk -FT '{print $1}' | tr -d \")
  echo "$AID, $USER, $PLU, NA, NA"
  # One row per access key with its last-used date.
  aws iam list-access-keys --user-name $USER --query AccessKeyMetadata[].AccessKeyId --output text | tr '\t' '\n' | while read k; do
    echo "$AID, $USER, NA, $k, $(aws iam get-access-key-last-used --access-key-id $k --query AccessKeyLastUsed.LastUsedDate | awk -FT '{print $1}' | tr -d \")"
  done
done

View File

@ -0,0 +1,26 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ec2:DescribeReservedInstances",
"ec2:ModifyReservedInstances",
"ec2:PurchaseReservedInstancesOffering",
"ec2:DescribeInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeReservedInstancesOfferings"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"rds:PurchaseReservedDBInstancesOffering",
"rds:Describe*"
],
"Resource": "*"
}
]
}

33
aws/kms-create-import-key.sh Executable file
View File

@ -0,0 +1,33 @@
#!/bin/bash
# Create a KMS key with EXTERNAL origin, import locally generated key material,
# and attach an alias. Requires openssl, awscli, jq and base64.
if [ $# -lt 2 ]; then
  echo "This tool requires openssl, awscli, jq and base64."
  echo "One can generate a key using openssl rand -out PlaintextKeyMaterial.bin 32"
  echo "Usage: kms-create-import-key.sh key-file key-alias"
  # Non-zero exit so callers/scripts can detect the usage error (was 'exit 0').
  exit 1
fi
keyAlias=$2
# Create the key shell and stash its id for the follow-up calls.
aws kms create-key --origin EXTERNAL --description "Customer managed key" | jq -cr .KeyMetadata.KeyId > keyid.txt
# Fetch the RSA wrapping key and import token that protect the material in transit.
aws kms get-parameters-for-import --key-id $(cat keyid.txt) \
--wrapping-algorithm RSAES_OAEP_SHA_256 \
--wrapping-key-spec RSA_2048 > import.json
cat import.json | jq -cr .PublicKey | base64 -d > PublicKey.bin
cat import.json | jq -cr .ImportToken | base64 -d > ImportToken.bin
# Wrap (encrypt) the plaintext key material with the downloaded public key.
openssl pkeyutl -encrypt -in $1 -inkey PublicKey.bin -keyform DER \
-pubin -out EncryptedKeyMaterial.bin -pkeyopt rsa_padding_mode:oaep -pkeyopt rsa_oaep_md:sha256
aws kms import-key-material --key-id $(cat keyid.txt) \
--encrypted-key-material fileb://EncryptedKeyMaterial.bin \
--import-token fileb://ImportToken.bin \
--expiration-model KEY_MATERIAL_DOES_NOT_EXPIRE
aws kms create-alias --alias-name "alias/$keyAlias" --target-key-id $(cat keyid.txt)
aws kms describe-key --key-id $(cat keyid.txt)
# Clean up transient artifacts (the plaintext key file $1 is left untouched).
rm -f EncryptedKeyMaterial.bin ImportToken.bin PublicKey.bin import.json keyid.txt

View File

@ -0,0 +1,56 @@
# reference: https://aws.amazon.com/premiumsupport/knowledge-center/start-stop-lambda-eventbridge/
import boto3
import os
import json
import time
import logging
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def start_instances(instances: list[str]) -> dict:
    """Start the given EC2 instances and return the raw API response."""
    client = boto3.client('ec2', region_name=os.environ['AWS_REGION'])
    response = client.start_instances(InstanceIds=instances)
    client.close()
    return response
def stop_instances(instances: list[str]) -> dict:
    """Stop the given EC2 instances and return the raw API response."""
    client = boto3.client('ec2', region_name=os.environ['AWS_REGION'])
    response = client.stop_instances(InstanceIds=instances)
    client.close()
    return response
def instance_status(instances: list[str]) -> str:
    """Return a JSON string of {id, name, state} for the given instances.

    Sleeps briefly first so the describe call reflects the state transition
    that was just requested by start/stop.
    """
    time.sleep(10)  # give the EC2 API a moment to reflect the new state
    ec2 = boto3.client('ec2', region_name=os.environ['AWS_REGION'])
    ec2Array: list[dict] = []
    response = ec2.describe_instances(InstanceIds=instances)
    for r in response['Reservations']:
        for i in r['Instances']:
            # .get() instead of ['Tags']: untagged instances carry no 'Tags'
            # key and previously raised KeyError here.
            # NOTE(review): instances without a 'Name' tag are still omitted
            # from the report -- confirm that is acceptable.
            for t in i.get('Tags', []):
                if t['Key'] == 'Name':
                    ec2Array.append({'id': i['InstanceId'], 'name': t['Value'], 'state': i['State']['Name']})
    ec2.close()
    return json.dumps(ec2Array)
def lambda_handler(event, context):
    """EventBridge entry point: start or stop the configured EC2 instances.

    The instance id list comes from the 'instances' environment variable
    (a JSON array of strings); event['action'] selects 'start' or 'stop'.
    Returns the EC2 API response, or an explanatory string for any other
    (or missing) action.
    """
    instances: list[str] = json.loads(os.environ['instances'])
    # .get() so a malformed event yields the explanatory message instead of
    # a KeyError (the original crashed when 'action' was absent).
    action = event.get('action')
    if action == 'start':
        logger.info('Starting instances: ' + str(instances))
        resp = start_instances(instances)
        logger.info(instance_status(instances))
    elif action == 'stop':
        logger.info('Stopping instances: ' + str(instances))
        resp = stop_instances(instances)
        logger.info(instance_status(instances))
    else:
        resp = "Event action not provided"
    return resp

50
aws/lambda-s3-email.py Normal file
View File

@ -0,0 +1,50 @@
import os.path
import boto3
import gzip
import shutil
from botocore.exceptions import ClientError
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
s3 = boto3.client("s3")
def lambda_handler(event, context):
    """Gzip the S3 object that triggered this Lambda and email it via SES.

    Triggered by an S3 put event: downloads the object to /tmp, compresses it,
    and sends it as an attachment from mail_sender to mail_recipient.
    """
    mail_sender = "abc@abc.com"
    mail_recipient = "efg@efg.com"
    aws_region = "ap-east-1"
    mail_subject = "Monthly billing csv 410429265162"
    FILEOBJ = event["Records"][0]
    BUCKET_NAME = str(FILEOBJ['s3']['bucket']['name'])
    KEY = str(FILEOBJ['s3']['object']['key'])
    FILE_NAME = os.path.basename(KEY)
    temp_file = '/tmp/' + FILE_NAME
    s3.download_file(BUCKET_NAME, KEY, temp_file)
    # /tmp is the only writable path in Lambda. The original wrote the gzip to
    # the (read-only) working directory as 'billing-csv.gz' but then attached
    # '/tmp/billing-csv.gz' -- the send always failed. Write and attach the
    # same /tmp path.
    ATTACHMENT = '/tmp/billing-csv.gz'
    with open(temp_file, 'rb') as f_in:
        with gzip.open(ATTACHMENT, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
    BODY_TEXT = "The Object file was uploaded to S3"
    client = boto3.client('ses', region_name=aws_region)
    msg = MIMEMultipart()
    # Add subject, from and to lines.
    msg['Subject'] = mail_subject
    msg['From'] = mail_sender
    msg['To'] = mail_recipient
    textpart = MIMEText(BODY_TEXT)
    msg.attach(textpart)
    # Close the attachment handle deterministically (was left open).
    with open(ATTACHMENT, 'rb') as f_att:
        att = MIMEApplication(f_att.read())
    # Use the bare file name, not the /tmp path, as the attachment's name.
    att.add_header('Content-Disposition', 'attachment', filename=os.path.basename(ATTACHMENT))
    msg.attach(att)
    print(msg)
    try:
        response = client.send_raw_email(
            Source=mail_sender,
            Destinations=[mail_sender, mail_recipient],
            RawMessage={'Data': msg.as_string()}
        )
    except ClientError as e:
        print(e.response['Error']['Message'])
    else:
        print("Email sent! Message ID:", response['MessageId'])

View File

@ -0,0 +1,18 @@
#!/usr/bin/env bash
# Print a numbered inventory of common AWS resources (EC2, RDS, Lambda, S3,
# EFS, VPC) for the current account/region. Requires awscli and jq.
# 'nl' is aliased to use '. ' as the separator; expand_aliases makes the
# alias effective inside a non-interactive script.
alias nl="nl -s '. '"
shopt -s expand_aliases
echo "As of $(date), the following resources are detected on your AWS account $(aws sts get-caller-identity | jq .Account)"
echo ""
echo ec2:
aws ec2 describe-instances | jq -cr '.Reservations[] | .Instances[].InstanceId' | nl
echo rds:
aws rds describe-db-instances | jq -cr '.DBInstances[] | .DBInstanceIdentifier' | nl
echo lambda:
aws lambda list-functions | jq -cr '.Functions[] | .FunctionName' | nl
echo s3:
aws s3api list-buckets | jq -cr '.Buckets[] | .Name' | nl
echo efs:
aws efs describe-file-systems | jq -cr '.FileSystems[] | .FileSystemId' | nl
echo vpc:
aws ec2 describe-vpcs | jq -cr '.Vpcs[] | .VpcId' | nl

10
aws/list-unused-sg.sh Executable file
View File

@ -0,0 +1,10 @@
#!/bin/bash
# Count how many ENIs reference each security group; groups printed with a
# count of 0 are candidates for deletion. Output sorted by count (field 3).
TMPFILE=/dev/shm/enisg.lst
aws ec2 describe-network-interfaces --query NetworkInterfaces[].Groups --output text > $TMPFILE
for sg in $(aws ec2 describe-security-groups --query 'SecurityGroups[*].GroupId' --output text); do
  echo -n "$sg : "
  # NOTE(review): plain grep matches substrings; sg- ids are fixed-length so
  # collisions are unlikely, but 'grep -cw' would be stricter.
  grep -c $sg $TMPFILE
done | sort -k3 -n
rm -f $TMPFILE

4
aws/listInstances.sh Executable file
View File

@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Reads 'aws ec2 describe-instances' JSON on stdin and prints one line per
# non-Windows instance: id, public IP, private IP, Name tag (if any), type,
# AZ and state. Usage: aws ec2 describe-instances | ./listInstances.sh
jq -c '.Reservations[].Instances[] | select(.Platform!="windows") | [.InstanceId,.PublicIpAddress,.PrivateIpAddress,(.Tags[] | select(.Key=="Name")| .Value)?, .InstanceType, .Placement.AvailabilityZone, .State.Name]' | tr -d '[["]'

72
aws/metrics.json Normal file
View File

@ -0,0 +1,72 @@
{
"Metrics": [
{
"Namespace": "CWAgent",
"MetricName": "Memory % Committed Bytes In Use",
"Dimensions": [
{
"Name": "InstanceId",
"Value": "i-050d4adeafaa53cd0"
},
{
"Name": "objectname",
"Value": "Memory"
},
{
"Name": "ImageId",
"Value": "ami-0bb821cb43852704d"
},
{
"Name": "InstanceType",
"Value": "t3.large"
}
]
},
{
"Namespace": "CWAgent",
"MetricName": "Memory % Committed Bytes In Use",
"Dimensions": [
{
"Name": "InstanceId",
"Value": "i-050d4adeafaa53cd0"
}
]
},
{
"Namespace": "CWAgent",
"MetricName": "LogicalDisk % Free Space",
"Dimensions": [
{
"Name": "InstanceId",
"Value": "i-050d4adeafaa53cd0"
}
]
},
{
"Namespace": "CWAgent",
"MetricName": "LogicalDisk % Free Space",
"Dimensions": [
{
"Name": "instance",
"Value": "C:"
},
{
"Name": "InstanceId",
"Value": "i-050d4adeafaa53cd0"
},
{
"Name": "objectname",
"Value": "LogicalDisk"
},
{
"Name": "ImageId",
"Value": "ami-0bb821cb43852704d"
},
{
"Name": "InstanceType",
"Value": "t3.large"
}
]
}
]
}

53
aws/public-ips Normal file
View File

@ -0,0 +1,53 @@
43.204.26.15
43.204.154.79
15.207.215.132
3.111.102.180
3.111.9.137
13.234.95.59
3.111.208.83
35.154.17.21
13.232.84.95
13.126.211.148
3.111.115.157
3.111.183.35
13.235.176.20
3.109.149.228
3.111.124.193
3.110.26.128
3.111.222.108
3.111.241.224
43.204.205.1
65.0.212.10
3.6.32.23
15.207.189.166
3.109.130.3
13.235.226.61
3.111.148.80
3.111.124.249
3.111.212.74
52.66.119.127
3.111.180.154
3.108.169.208
3.111.209.74
3.111.250.88
13.234.183.132
43.204.8.135
3.108.214.127
3.111.154.131
43.204.119.5
13.126.248.99
3.111.140.112
43.204.4.54
13.127.80.244
43.205.36.29
43.205.13.140
13.127.138.166
3.109.229.90
43.205.37.245
65.1.5.211
15.206.197.33
43.204.249.129
43.205.26.115
65.1.233.123
3.108.139.126
65.0.59.28

8
aws/rds-custom-alarm.sh Normal file
View File

@ -0,0 +1,8 @@
#!/bin/bash
# Publish a per-RDS-instance custom CloudWatch metric reflecting availability:
# RdsInstanceUnavailable when the instance status is anything but 'available',
# RdsInstanceAvailable otherwise. Intended to be run on a schedule and alarmed.
aws rds describe-db-instances | jq -cr '.DBInstances[].DBInstanceIdentifier' | while read i; do
  dbStatus=$(aws rds describe-db-instances --db-instance-identifier $i --query DBInstances[].DBInstanceStatus --output text)
  # Quote the variable: an empty status (API hiccup) previously produced a
  # '[: !=: unary operator expected' error instead of a metric.
  if [ "$dbStatus" != "available" ]; then
    aws cloudwatch put-metric-data --metric-name RdsInstanceUnavailable --namespace Custom/RDS --value 1 --dimensions DBInstanceIdentifier=$i
  else
    aws cloudwatch put-metric-data --metric-name RdsInstanceAvailable --namespace Custom/RDS --value 1 --dimensions DBInstanceIdentifier=$i
  fi
done

View File

@ -0,0 +1,6 @@
#!/bin/bash
# Request a 20-day Glacier restore for every object in the given bucket.
# Usage: restore.sh <bucket-name>
BUCKET=$1
# NOTE(review): awk '{print $NF}' truncates keys containing spaces -- verify
# the bucket's key names before relying on this.
aws s3 ls s3://$BUCKET --recursive | awk '{print $NF}' | while read i; do
  aws s3api restore-object --bucket $BUCKET --key $i --restore-request Days=20
done

1
aws/sso-login-events.sh Normal file
View File

@ -0,0 +1 @@
# Dump CloudTrail events for user LEDP636 over the last 48 hours as TSV:
# eventTime, eventName, userName, sourceIP, credentialType, credentialVerification.
aws cloudtrail lookup-events --start-time $(date -d '48 hour ago' +%s) --lookup-attributes AttributeKey=Username,AttributeValue=LEDP636 | jq -cr '.Events[] | .CloudTrailEvent' | jq -cr '[.eventTime, .eventName, .userIdentity.userName, .sourceIPAddress, .additionalEventData.CredentialType, .serviceEventDetails.CredentialVerification] | @tsv'

45
azure/nsg-cli-example.sh Normal file
View File

@ -0,0 +1,45 @@
# Build an Azure NSG for the migrated IQTStream server in three rule bands:
#   1001+  full TCP access from individual trusted source IPs (/32)
#   2001+  public access to the streaming ports (7070, 8000, 8001, 80)
#   3001+  SSH (22) from trusted CIDR ranges (change ticket CHG0186180)
# The rule name doubles as the priority counter in the first two bands.
RGNAME=IQTStreamServer-Migrated
SGNAME=IQTStreamServer-SG
az network nsg create -n $SGNAME -g $RGNAME -l "east asia"
STARTP=1001
# Trusted source addresses, one rule per address.
cat <<EOF |
122.147.128.180
219.87.71.10
210.61.122.2
122.147.141.118
219.87.64.222
122.147.15.66
122.146.84.72
118.163.58.205
66.70.1.47
85.133.14.254
217.72.241.183
66.70.104.94
63.131.154.55
72.36.244.8
EOF
while read i; do
  az network nsg rule create -g $RGNAME --nsg-name $SGNAME -n $STARTP --protocol 'tcp' --source-address-prefixes "$i/32" --source-port-ranges '*' --destination-address-prefixes '*' --destination-port-ranges '*' --access Allow --priority $STARTP
  ((STARTP++))
done
STARTP=2001
# Publicly reachable application ports.
for p in 7070 8000 8001 80; do
  az network nsg rule create -g $RGNAME --nsg-name $SGNAME -n $STARTP --protocol 'tcp' --source-address-prefixes "*" --source-port-ranges '*' --destination-address-prefixes '*' --destination-port-ranges $p --access Allow --priority $STARTP
  ((STARTP++))
done
STARTP=3001
# SSH-allowed CIDR ranges.
cat <<EOF |
220.128.71.180/32
122.147.213.24/29
61.218.44.0/24
60.251.61.120/29
175.98.157.0/24
122.147.173.0/24
EOF
while read i; do
  az network nsg rule create -g $RGNAME --nsg-name $SGNAME -n "CHG0186180-$STARTP" --protocol 'tcp' --source-address-prefixes $i --source-port-ranges '*' --destination-address-prefixes '*' --destination-port-ranges '22' --access Allow --priority $STARTP
  ((STARTP++))
done

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Scrape instance-type specs (name, vCPU, memory) from AWS, GCP and Alibaba
# public docs and reload them into the local sqlite database vm-spec.db.
# Requires links2, pcre2grep and sqlite3. Fragile by nature: the regexes
# depend on the current layout of each vendor's documentation page.
# Get AWS EC2 instances
links2 -dump https://aws.amazon.com/ec2/instance-types/ | pcre2grep '.*(micro|small|large)\s+[0-9]\s+[0-9]' | awk '{print "insert into instances values(\""$1 "\"," $2 "," $3 ",\"aws\");"}' > insert.sql
# Get GCP instances
links2 -dump https://cloud.google.com/compute/docs/general-purpose-machines | pcre2grep '(standard|high).*\s+[0-9]\s+[0-9]' | awk '{print "insert into instances values(\""$1"\",", $2, ",", $3, ",\"gcp\");"}' >> insert.sql
# Get alicloud instances
links2 -dump https://www.alibabacloud.com/help/en/doc-detail/108490.html | pcre2grep 'ecs.*[0-9]' | awk '{print "insert into instances values(\""$1"\",",$2","$3,",\"ali\");"}' >> insert.sql
# Replace previous contents wholesale, then show a per-provider row count.
echo "delete from instances;" | sqlite3 vm-spec.db
sqlite3 vm-spec.db < insert.sql
rm -f insert.sql
echo 'select provider, count(1) from instances group by "provider";' | sqlite3 vm-spec.db

BIN
cloud-vm-spec/vm-spec.db Normal file

Binary file not shown.

View File

@ -0,0 +1,9 @@
#!/bin/bash
# this script requires httpie and cfcli
#
# For every Cloudflare zone, create an HTTPS health check (from template.json,
# substituting REPLACE_ME with the record name) for each A record in the zone.
# NOTE(review): the X-Auth-Key below is a placeholder -- supply a real API key,
# preferably from an environment variable rather than hard-coded here.
cfcli zones -f json | jq -cr '.[] | .name,.id' | paste - - | awk '{print $1,$2}' | while read d k; do
  cfcli ls -d $d -f json | jq -cr '.[] | select(.type == "A") | .name' | while read s; do
    cat template.json | sed s/REPLACE_ME/$s/g | http https://api.cloudflare.com/client/v4/zones/$k/healthchecks \
      X-Auth-Email:ken.fong@rackspace.com \
      X-Auth-Key:xxx
  done
done  # closing 'done' for the outer zones loop was missing; the script failed to parse

25
cloudflare/template.json Normal file
View File

@ -0,0 +1,25 @@
{
"name": "site-monitor",
"description": "Health check",
"check_regions": [
"SEAS"
],
"type": "HTTPS",
"consecutive_successes": 1,
"consecutive_fails": 2,
"http_config": {
"method": "GET",
"port": 443,
"path": "/",
"expected_codes": [
"200"
],
"follow_redirects": true,
"allow_insecure": false
},
"timeout": 5,
"retries": 2,
"interval": 60,
"address": "REPLACE_ME"
}

View File

@ -0,0 +1,89 @@
# CloudFormation template: provisions a single Acrolinx EC2 instance in a
# dedicated new subnet, attaches an Elastic IP, and registers public and
# origin Route53 A records under acrolinx.cloud.
Description: >-
  Cloud Formation template for Acrolinx instance.
Parameters:
  InputInstanceType:
    Description: EC2 Instance Type
    Default: t3a.medium
    Type: String
  InputSubdomain:
    Description: Subdomain of acrolinx instance
    Type: String
    Default: rstest
  InputTimezone:
    Description: Timezone of instance
    Type: String
    Default: UTC
  InputSubnet:
    Description: Subnet of instance
    Type: String
    Default: "Enter an unused CIDR"
Mappings:
  # Per-region VPC/AMI/SSH-key/SG lookups; add an entry here to support a new region.
  RegionMap:
    us-west-2:
      VpcId: vpc-7155ca14
      ImageId: ami-0a8d75344fec6c412
      SshKey: ACROLINX-OREGON
      SecGroups: ["sg-0c8c5cf4dca2147ee","sg-d444e4b2","sg-0b93187b","sg-859d16f5"]
    eu-west-1:
      VpcId: vpc-55a5a930
      ImageId: ami-0385d2ff9f9c3706d
      SshKey: ACROLINX-IRELAND
      SecGroups: ["sg-07ffa804cbff134ed","sg-5217ce2e","sg-620ad31e","sg-aa0ed7d6"]
Resources:
  # New subnet carved from the region's VPC using the caller-supplied CIDR.
  InstanceSubnet:
    Type: AWS::EC2::Subnet
    Properties:
      VpcId: !FindInMap [RegionMap, !Ref "AWS::Region", VpcId]
      CidrBlock: !Ref InputSubnet
  Ec2Instance:
    Type: AWS::EC2::Instance
    Properties:
      IamInstanceProfile: AmazonSSMRoleForInstancesQuickSetup
      KeyName: !FindInMap [RegionMap, !Ref "AWS::Region", SshKey]
      InstanceType: !Ref InputInstanceType
      ImageId: !FindInMap [RegionMap, !Ref "AWS::Region", ImageId]
      SubnetId: !Ref InstanceSubnet
      SecurityGroupIds: !FindInMap [RegionMap, !Ref "AWS::Region", SecGroups]
      Tags:
        - Key: "Name"
          Value: !Join ['', [!Ref 'InputSubdomain', ".acrolinx.cloud"]]
        - Key: "TZ"
          Value: !Ref InputTimezone
        - Key: "SSM-Enabled"
          Value: "yes"
      UserData:
        # First-boot script: install and enable the SSM agent.
        'Fn::Base64': !Sub |
          #!/bin/bash -ex
          yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
          systemctl enable amazon-ssm-agent
  Ec2EIP:
    Type: AWS::EC2::EIP
    Properties:
      Domain: vpc
      InstanceId: !Ref Ec2Instance
  # Both DNS records point at the same EIP; 'origin-' bypasses any CDN layer.
  PublicDnsRecord:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneName: "acrolinx.cloud."
      Comment: DNS name for my instance.
      Name: !Join ['', [!Ref 'InputSubdomain', ".acrolinx.cloud"]]
      Type: A
      TTL: '900'
      ResourceRecords:
        - !Ref Ec2EIP
  OriginDnsRecord:
    Type: AWS::Route53::RecordSet
    Properties:
      HostedZoneName: "acrolinx.cloud."
      Comment: DNS name for my instance.
      Name: !Join ['', ['origin-', !Ref 'InputSubdomain', ".acrolinx.cloud"]]
      Type: A
      TTL: '900'
      ResourceRecords:
        - !Ref Ec2EIP

View File

@ -0,0 +1,8 @@
# Alpine image bundling the AWS Elastic Beanstalk CLI plus awscli.
FROM alpine:latest
ENV container docker
# Toolchain and headers required by pyenv, which the EB CLI installer uses
# to build its own Python 3.7.2.
RUN apk add python openssl wget curl git bash gcc make musl-dev zlib-dev libffi-dev bzip2-dev openssl-dev readline-dev
RUN git clone https://github.com/aws/aws-elastic-beanstalk-cli-setup.git
RUN ./aws-elastic-beanstalk-cli-setup/scripts/bundled_installer
RUN /root/.pyenv/versions/3.7.2/bin/pip install awscli
# Put eb and the pyenv python/awscli first on PATH.
ENV PATH="/root/.ebcli-virtual-env/executables:/root/.pyenv/versions/3.7.2/bin:$PATH"
CMD ["/bin/bash"]

View File

@ -0,0 +1,18 @@
# Alpine image running the Jenkins war inside Tomcat 9 on OpenJDK 11.
FROM alpine:latest
LABEL description="Jenkins"
LABEL maintainer="racken@one27.cf"
LABEL notes="Get initial admin password by docker exec -it <container-id> and cat /root/.jenkins/secrets/initialAdminPassword"
LABEL java-version="openjdk11"
LABEL jenkins-version="2.222.3"
LABEL exposed-port="8080"
LABEL jenkins-path="/jenkins"
ENV container docker
RUN apk add openjdk11 wget openjdk11-jre-headless ttf-dejavu; mkdir /opt/tomcat
WORKDIR /opt/tomcat
# Unpack Tomcat straight into /opt/tomcat (strip the versioned top directory).
RUN wget -q -O- http://ftp.cuhk.edu.hk/pub/packages/apache.org/tomcat/tomcat-9/v9.0.34/bin/apache-tomcat-9.0.34.tar.gz | tar zxf - --strip-components=1
RUN rm -rf /opt/tomcat/webapps/ROOT
RUN wget -q -O /opt/tomcat/webapps/jenkins.war http://mirrors.jenkins.io/war-stable/latest/jenkins.war
ENV JAVA_HOME="/usr"
ENV JAVA_OPTS="-Djava.awt.headless=true -Xmx2g -Dhudson.DNSMultiCast.disabled=true -Duser.timezone=Asia/Hong_Kong"
EXPOSE 8080
CMD ["/opt/tomcat/bin/catalina.sh", "run"]

View File

@ -0,0 +1,9 @@
# CentOS 8 image bundling the AWS Elastic Beanstalk CLI plus awscli.
FROM centos:8
ENV container docker
# Toolchain and headers required by pyenv, which the EB CLI installer uses
# to build its own Python 3.7.2.
RUN yum -y install gcc make git python3 zlib-devel openssl-devel ncurses-devel libffi-devel sqlite-devel readline-devel bzip2-devel
RUN git clone https://github.com/aws/aws-elastic-beanstalk-cli-setup.git
RUN ./aws-elastic-beanstalk-cli-setup/scripts/bundled_installer
RUN /root/.pyenv/versions/3.7.2/bin/pip install awscli
# Put eb and the pyenv python/awscli first on PATH.
ENV PATH="/root/.ebcli-virtual-env/executables:/root/.pyenv/versions/3.7.2/bin:$PATH"
CMD ["/bin/bash"]

View File

@ -0,0 +1,26 @@
# CentOS 8 image running the Jenkins war inside Tomcat 9, with git, ansible
# and the docker CLI available to build jobs.
FROM centos:8
LABEL description="Jenkins on Tomcat, with git and ansible"
LABEL maintainer="racken@racker.tech"
LABEL notes="Get initial admin password by docker exec -it <container-id> and cat /root/.jenkins/secrets/initialAdminPassword"
LABEL java-version="java-11-openjdk"
LABEL jenkins-version="2.222.3"
LABEL exposed-port="8080"
LABEL jenkins-path="/jenkins"
ENV container docker
RUN yum -y install java-11-openjdk-headless wget dejavu-fonts-common dejavu-sans-fonts dejavu-serif-fonts dejavu-sans-mono-fonts java-11-openjdk-devel git python3-pip yum-utils; mkdir -p /opt/tomcat
RUN pip3 install ansible
WORKDIR /opt/tomcat
# Unpack Tomcat straight into /opt/tomcat (strip the versioned top directory).
RUN wget -q -O- http://ftp.cuhk.edu.hk/pub/packages/apache.org/tomcat/tomcat-9/v9.0.34/bin/apache-tomcat-9.0.34.tar.gz | tar zxf - --strip-components=1
RUN rm -rf /opt/tomcat/webapps/ROOT
RUN wget -q -O /opt/tomcat/webapps/jenkins.war http://mirrors.jenkins.io/war-stable/latest/jenkins.war
# docker CLI only -- the daemon is expected on the host via a mounted socket.
RUN yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
RUN yum -y install docker-ce-cli
# tini as PID 1 to reap zombies spawned by build jobs.
ENV TINI_VERSION v0.19.0
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
ENV JAVA_HOME="/usr"
ENV JAVA_OPTS="-Djava.awt.headless=true -Xmx2g -Dhudson.DNSMultiCast.disabled=true -Duser.timezone=Asia/Hong_Kong"
EXPOSE 8080
ENTRYPOINT ["/tini", "--"]
CMD ["/opt/tomcat/bin/catalina.sh", "run"]

33
hardening/apache24-php.sh Normal file
View File

@ -0,0 +1,33 @@
#!/usr/bin/env bash
# This script requires TLS1.2
# Harden an Apache 2.4 + PHP host: disable TRACE/TRACK, enable FIPS mode,
# hide version banners, comment out unused modules, force TLS1.2-only with a
# strong cipher list on every :443 vhost, and stop PHP exposing its version.
# Originals are preserved with -pre-hardening / .preHarden* suffixes.
APACHE_BASE=$( httpd -S 2>&1 | awk '/ServerRoot/ {print $2}' | tr -d \")
# Harden apache
cp -p $APACHE_BASE/conf/httpd.conf $APACHE_BASE/conf/httpd.conf-pre-hardening
cat <<EOF >> $APACHE_BASE/conf/httpd.conf
TraceEnable off
RewriteEngine On
RewriteCond %{REQUEST_METHOD} ^(TRACE|TRACK)
RewriteRule .* - [F]
SSLFIPS On
ServerTokens PROD
ServerSignature off
EOF
# Disable unused modules
if [ -d $APACHE_BASE/conf.modules.d ]; then
  sed -i.preHarden -e 's/^/###/g' $APACHE_BASE/conf.modules.d/00-dav.conf
  sed -i.preHarden -e 's/^/###/g' $APACHE_BASE/conf.modules.d/00-proxy.conf
  sed -i.preHarden -e 's/^/###/g' $APACHE_BASE/conf.modules.d/01-cgi.conf
  sed -i.preHarden -e 's/^Load.*\(authn_dbd\|authn_dbm\|authn_socache\|authz_dbd\|authz_dbm\|cache\|cache_disk\|cache_socache\|macro\|socache_dbm\|socache_memcache\)_module.*/###&/1' /etc/httpd/conf.modules.d/00-base.conf
fi
# For every config file serving :443, force TLS1.2 and the strong cipher suite.
httpd -S 2>/dev/null | grep ":443" | awk '{print $NF}' | tr -d '[()]' | awk -F: '{print $1}' | while read c; do
  sed -i.preHardening -e 's/SSLProtocol.*/SSLProtocol -ALL +TLSv1.2/g' $c
  sed -i.preHardening2 -e 's/SSLCipherSuite.*/SSLCipherSuite \"EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+AESGCM EECDH HIGH !MEDIUM !LOW !aNULL !eNULL !LOW !RC4 !MD5 !EXP !PSK !SRP !DSS !DH !3DES\"/g' $c
done
# Stop PHP advertising its version in response headers.
sed -i.preHardening -e 's/^expose_php.*/expose_php = Off/g' /etc/php.ini

54
idrac/idrac-dns.yml Normal file
View File

@ -0,0 +1,54 @@
---
# Configure Dell iDRAC DNS servers and SNMP trap destination over the racadm
# CLI. 'raw' is used throughout because the iDRAC shell has no Python, so
# normal Ansible modules cannot run; idempotence is approximated by keying
# 'changed_when' off racadm's "modified successfully" message.
- hosts: all
  become: false
  gather_facts: false
  tasks:
    - name: Update drac DNS1
      raw: set iDRAC.IPv4.DNS1 72.3.128.240
      register: dns1Result
      changed_when:
        - '"modified successfully" in dns1Result.stdout'
    - name: Update drac DNS2
      raw: set iDRAC.IPv4.DNS2 72.3.128.241
      register: dns2Result
      changed_when:
        - '"modified successfully" in dns2Result.stdout'
    - name: Change snmptrap version
      raw: set idrac.snmp.trapformat SNMPv1
      register: snmpVersionResult
      changed_when:
        - '"modified successfully" in snmpVersionResult.stdout'
    - name: Change trap destination
      raw: config -g cfgIpmiPet -o cfgIpmiPetAlertDestIPAddr -i 1 hardwarealerts.dfw3.rackspace.com
      register: trapDestResult
      changed_when:
        - '"modified successfully" in trapDestResult.stdout'
    - name: Enable trap destination
      raw: config -g cfgIpmiPet -o cfgIpmiPetAlertEnable -i 1 1
      register: trapDestOnResult
      changed_when:
        - '"modified successfully" in trapDestOnResult.stdout'
    # Read both DNS entries back to confirm the changes took effect.
    - name: Check changes
      raw: |
        get iDRAC.IPv4.DNS2
      register: checkOutput
    - name: Show check output
      debug:
        var: checkOutput.stdout_lines
    - name: Check changes
      raw: |
        get iDRAC.IPv4.DNS1
      register: checkOutput1
    - name: Show check output1
      debug:
        var: checkOutput1.stdout_lines

Binary file not shown.

View File

@ -0,0 +1,55 @@
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.util.ArrayList;
import java.util.List;
/**
 * Generates a memorable password from a word-list file: a fixed number of
 * randomly chosen dictionary words, each capitalised, joined with '+', and
 * with every 'o' replaced by '0'.
 *
 * Usage: java PasswordGenerator &lt;dictionary-file&gt;
 */
class PasswordGenerator {
    /** Number of dictionary WORDS in the password (not characters). */
    private static final int DEFAULT_PASSWORD_LENGTH = 3;

    public static void main(String[] args) {
        // Guard the argument (previously args[0] threw
        // ArrayIndexOutOfBoundsException when no file was supplied).
        if (args.length < 1) {
            System.err.println("Usage: PasswordGenerator <dictionary-file>");
            return;
        }
        try {
            List<String> dictionary = loadDictionary(args[0]);
            // An empty dictionary would make random.nextInt(0) throw.
            if (dictionary.isEmpty()) {
                System.err.println("Dictionary file is empty: " + args[0]);
                return;
            }
            String password = generatePassword(dictionary, DEFAULT_PASSWORD_LENGTH);
            // Swap 'o' for '0' to inject digits without hurting memorability.
            System.out.println(password.replaceAll("o", "0"));
        } catch (IOException e) {
            System.err.println("Failed to load dictionary file: " + e.getMessage());
        }
    }

    /** Read one word per line from the given file, trimming whitespace. */
    private static List<String> loadDictionary(String filename) throws IOException {
        List<String> dictionary = new ArrayList<>();
        try (BufferedReader br = new BufferedReader(new FileReader(filename))) {
            String line;
            while ((line = br.readLine()) != null) {
                dictionary.add(line.trim());
            }
        }
        return dictionary;
    }

    /**
     * Join {@code length} random dictionary words with '+', capitalising each.
     * Falls back to a default SecureRandom when the strong instance is
     * unavailable on this platform.
     */
    private static String generatePassword(List<String> dictionary, int length) {
        StringBuilder passwordBuilder = new StringBuilder();
        SecureRandom random;
        try {
            random = SecureRandom.getInstanceStrong();
        } catch (NoSuchAlgorithmException e) {
            random = new SecureRandom();
        }
        for (int i = 0; i < length; i++) {
            int randomIndex = random.nextInt(dictionary.size());
            String word = dictionary.get(randomIndex);
            passwordBuilder.append(toCamelCase(word));
            if (i < length - 1) {
                passwordBuilder.append("+");
            }
        }
        return passwordBuilder.toString();
    }

    /** Capitalise the first letter and lower-case the rest. */
    private static String toCamelCase(String inputString) {
        return inputString.substring(0, 1).toUpperCase() + inputString.substring(1).toLowerCase();
    }
}

View File

@ -0,0 +1,26 @@
apple
banana
carrot
dog
elephant
flower
green
happiness
icecream
jungle
kangaroo
lemon
monkey
notebook
orange
pineapple
queen
rabbit
sunshine
tiger
umbrella
victory
watermelon
xylophone
yellow
zebra

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -0,0 +1,5 @@
Useful with a CloudWatch alarm to redeploy an EMR cluster should the cluster
nodes fail — for instance, during an Availability Zone outage.
This Lambda function was developed by IBM.

View File

@ -0,0 +1,123 @@
import json
import boto3
import os
import logging
print('Loading function')
s3 = boto3.client('s3')
sm = boto3.client('secretsmanager')
emr = boto3.client('emr')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_emr_config():
    """Fetch and parse the JSON EMR cluster definition stored in S3.

    Bucket and key come from the S3_BUCKET / S3_OBJECT_KEY environment
    variables. Returns the parsed configuration dict.
    """
    obj = s3.get_object(
        Bucket=os.environ['S3_BUCKET'],
        Key=os.environ['S3_OBJECT_KEY']
    )
    # Renamed from 'input', which shadowed the builtin of the same name.
    emr_config = json.load(obj['Body'])
    return emr_config
def parse_emr_config(emr_config):
    """Build the kwargs dict for emr.run_job_flow() from the exported config.

    emr_config is the JSON document stored in S3 (a Terraform-exported
    cluster definition). The failover cluster keeps the source cluster's
    settings, with the name's last character replaced by '2'.
    """
    kdc_admin_password = get_kdc_admin_password(
        emr_config['kerberos_kdc_admin_secret'])
    config = {}
    # Failover name: drop the trailing character and append '2'
    # (assumes the source cluster name ends in '1' -- TODO confirm).
    config['Name'] = emr_config["name"][:-1]+'2'
    config['LogUri'] = emr_config["s3_log_uri"]
    config['ReleaseLabel'] = emr_config["release_label"]
    config['Instances'] = {}
    config['Instances']['KeepJobFlowAliveWhenNoSteps'] = emr_config["keep_job_flow_alive_when_no_steps"]
    # (The original assigned TerminationProtected twice; duplicate removed.)
    config['Instances']['TerminationProtected'] = emr_config["termination_protection"]
    master = {}
    master['Name'] = 'Master'
    master['Market'] = 'ON_DEMAND'
    master['InstanceRole'] = 'MASTER'
    master['InstanceType'] = emr_config["master_instance_type"]
    master['InstanceCount'] = emr_config["master_instance_count"]
    master['EbsConfiguration'] = {
        'EbsBlockDeviceConfigs': [
            {
                'VolumeSpecification': {
                    'VolumeType': emr_config["master_ebs_config_type"],
                    'SizeInGB': emr_config["master_ebs_config_size"],
                },
                'VolumesPerInstance': 1
            },
        ],
    }
    core = {}
    core['Name'] = 'Core'
    core['Market'] = 'ON_DEMAND'
    core['InstanceRole'] = 'CORE'
    core['InstanceType'] = emr_config["core_instance_type"]
    core['InstanceCount'] = emr_config["core_instance_count"]
    core['EbsConfiguration'] = {
        'EbsBlockDeviceConfigs': [
            {
                'VolumeSpecification': {
                    'VolumeType': emr_config["core_ebs_config_type"],
                    'SizeInGB': emr_config["core_ebs_config_size"],
                },
                'VolumesPerInstance': 1
            },
        ],
    }
    config['Instances']['InstanceGroups'] = [master, core]
    config['Instances']['Ec2KeyName'] = emr_config["key_name"]
    config['Instances']['Ec2SubnetIds'] = emr_config["subnet_ids"]
    config['Instances']['EmrManagedMasterSecurityGroup'] = emr_config["master_security_group"]
    config['Instances']['EmrManagedSlaveSecurityGroup'] = emr_config["slave_security_group"]
    config['Instances']['ServiceAccessSecurityGroup'] = emr_config["service_security_group"]
    config['Instances']['AdditionalMasterSecurityGroups'] = emr_config["additional_master_security_group"].split(",")
    # NOTE(review): slave additional SGs are sourced from the *master* key,
    # mirroring the original behaviour -- confirm this is intentional.
    config['Instances']['AdditionalSlaveSecurityGroups'] = emr_config["additional_master_security_group"].split(",")
    # Copy tags except Terraform's own marker, then tag the cluster as failover.
    config['Tags'] = [{'Key': key, 'Value': value} for key, value in emr_config["tags"].items() if key != 'Terraform']
    config['Tags'].append({'Key': 'emr-failover', 'Value': 'true'})
    config['Tags'].append({'Key': 'Name', 'Value': config['Name']})
    config['BootstrapActions'] = [
        {
            'Name': emr_config["bootstrap_action_name"],
            'ScriptBootstrapAction': {
                'Path': emr_config["bootstrap_action_path"],
            }
        }
    ]
    config['Applications'] = [{'Name': name}
                              for name in emr_config["applications"]]
    config['Configurations'] = json.loads(emr_config["configurations_json"])
    config['SecurityConfiguration'] = emr_config["security_configuration_name"]
    config['EbsRootVolumeSize'] = emr_config["ebs_root_volume_size"]
    config['CustomAmiId'] = emr_config["custom_ami_id"]
    # Kerberos is optional: only attach when both realm and password exist.
    if emr_config['kerberos_realm'] is not None and kdc_admin_password is not None:
        config['KerberosAttributes'] = {}
        config['KerberosAttributes']['Realm'] = emr_config['kerberos_realm']
        config['KerberosAttributes']['KdcAdminPassword'] = kdc_admin_password
    config['StepConcurrencyLevel'] = emr_config["step_concurrency_level"]
    config['VisibleToAllUsers'] = emr_config["visible_to_all_users"]
    config['ServiceRole'] = emr_config["iam_service_role"]
    config['JobFlowRole'] = emr_config["instance_profile"]
    if emr_config["log_encryption_kms_key_id"] is not None:
        config['LogEncryptionKmsKeyId'] = emr_config["log_encryption_kms_key_id"]
    return config

def get_kdc_admin_password(arn):
    """Return the KDC admin password from Secrets Manager, or None when no ARN is set."""
    if arn is None:
        return None
    return sm.get_secret_value(SecretId=arn, VersionStage="AWSCURRENT")['SecretString']
def lambda_handler(event, context):
    """SNS-triggered entry point: launch the failover EMR cluster.

    Reads the cluster definition from S3, converts it to run_job_flow kwargs,
    and starts the new cluster, logging the API response.
    """
    cluster_definition = get_emr_config()
    sns_message = event['Records'][0]['Sns']['Message']
    logger.info("From SNS: " + sns_message)
    run_kwargs = parse_emr_config(cluster_definition)
    response = emr.run_job_flow(**run_kwargs)
    logger.info("Creating failover cluster")
    logger.info(json.dumps(response, indent=2))
    return response

View File

@ -0,0 +1,5 @@
# Steps to create a lambda python package
pip install dnspython -t ./
touch lambda_function.py
zip -r dist.zip .

View File

@ -0,0 +1 @@
pip

View File

@ -0,0 +1,35 @@
ISC License
Copyright (C) Dnspython Contributors
Permission to use, copy, modify, and/or distribute this software for
any purpose with or without fee is hereby granted, provided that the
above copyright notice and this permission notice appear in all
copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
Copyright (C) 2001-2017 Nominum, Inc.
Copyright (C) Google Inc.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose with or without fee is hereby granted,
provided that the above copyright notice and this permission notice
appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@ -0,0 +1,126 @@
Metadata-Version: 2.1
Name: dnspython
Version: 2.3.0
Summary: DNS toolkit
Home-page: https://www.dnspython.org
License: ISC
Author: Bob Halley
Author-email: halley@dnspython.org
Requires-Python: >=3.7,<4.0
Classifier: License :: OSI Approved
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Provides-Extra: curio
Provides-Extra: dnssec
Provides-Extra: doh
Provides-Extra: doq
Provides-Extra: idna
Provides-Extra: trio
Provides-Extra: wmi
Requires-Dist: aioquic (>=0.9.20) ; extra == "doq"
Requires-Dist: cryptography (>=2.6,<40.0) ; extra == "dnssec"
Requires-Dist: curio (>=1.2,<2.0) ; extra == "curio"
Requires-Dist: h2 (>=4.1.0) ; (python_full_version >= "3.6.2") and (extra == "doh")
Requires-Dist: httpx (>=0.21.1) ; (python_full_version >= "3.6.2") and (extra == "doh")
Requires-Dist: idna (>=2.1,<4.0) ; extra == "idna"
Requires-Dist: requests (>=2.23.0,<3.0.0) ; extra == "doh"
Requires-Dist: requests-toolbelt (>=0.9.1,<0.11.0) ; extra == "doh"
Requires-Dist: sniffio (>=1.1,<2.0) ; extra == "curio"
Requires-Dist: trio (>=0.14,<0.23) ; extra == "trio"
Requires-Dist: wmi (>=1.5.1,<2.0.0) ; extra == "wmi"
Project-URL: Bug Tracker, https://github.com/rthalley/dnspython/issues
Project-URL: Documentation, https://dnspython.readthedocs.io/en/stable/
Project-URL: Repository, https://github.com/rthalley/dnspython.git
Description-Content-Type: text/markdown
# dnspython
[![Build Status](https://github.com/rthalley/dnspython/actions/workflows/python-package.yml/badge.svg)](https://github.com/rthalley/dnspython/actions/)
[![Documentation Status](https://readthedocs.org/projects/dnspython/badge/?version=latest)](https://dnspython.readthedocs.io/en/latest/?badge=latest)
[![PyPI version](https://badge.fury.io/py/dnspython.svg)](https://badge.fury.io/py/dnspython)
[![License: ISC](https://img.shields.io/badge/License-ISC-brightgreen.svg)](https://opensource.org/licenses/ISC)
[![Coverage](https://codecov.io/github/rthalley/dnspython/coverage.svg?branch=master)](https://codecov.io/github/rthalley/dnspython)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
## INTRODUCTION
dnspython is a DNS toolkit for Python. It supports almost all record types. It
can be used for queries, zone transfers, and dynamic updates. It supports TSIG
authenticated messages and EDNS0.
dnspython provides both high and low level access to DNS. The high level classes
perform queries for data of a given name, type, and class, and return an answer
set. The low level classes allow direct manipulation of DNS zones, messages,
names, and records.
To see a few of the ways dnspython can be used, look in the `examples/`
directory.
dnspython is a utility to work with DNS, `/etc/hosts` is thus not used. For
simple forward DNS lookups, it's better to use `socket.getaddrinfo()` or
`socket.gethostbyname()`.
dnspython originated at Nominum where it was developed
to facilitate the testing of DNS software.
## ABOUT THIS RELEASE
This is dnspython 2.3.0.
Please read
[What's New](https://dnspython.readthedocs.io/en/stable/whatsnew.html) for
information about the changes in this release.
## INSTALLATION
* Many distributions have dnspython packaged for you, so you should
check there first.
* If you have pip installed, you can do `pip install dnspython`
* If not just download the source file and unzip it, then run
`sudo python setup.py install`
* To install the latest from the master branch, run `pip install git+https://github.com/rthalley/dnspython.git`
Dnspython's default installation does not depend on any modules other than
those in the Python standard library. To use some features, additional modules
must be installed. For convenience, pip options are defined for the
requirements.
If you want to use DNS-over-HTTPS, run
`pip install dnspython[doh]`.
If you want to use DNSSEC functionality, run
`pip install dnspython[dnssec]`.
If you want to use internationalized domain names (IDNA)
functionality, you must run
`pip install dnspython[idna]`
If you want to use the Trio asynchronous I/O package, run
`pip install dnspython[trio]`.
If you want to use the Curio asynchronous I/O package, run
`pip install dnspython[curio]`.
If you want to use WMI on Windows to determine the active DNS settings
instead of the default registry scanning method, run
`pip install dnspython[wmi]`.
If you want to try the experimental DNS-over-QUIC code, run
`pip install dnspython[doq]`.
Note that you can install any combination of the above, e.g.:
`pip install dnspython[doh,dnssec,idna]`
### Notices
Python 2.x support ended with the release of 1.16.0. Dnspython 2.0.0 through
2.2.x support Python 3.6 and later. As of dnspython 2.3.0, the minimum
supported Python version will be 3.7. We plan to align future support with the
lifetime of the Python 3 versions.
Documentation has moved to
[dnspython.readthedocs.io](https://dnspython.readthedocs.io).

View File

@ -0,0 +1,273 @@
dns/__init__.py,sha256=5Zy6sqPFV7fliXABg6ltaiVJaoxjjdtu8GcvBucbbqA,1645
dns/__pycache__/__init__.cpython-310.pyc,,
dns/__pycache__/_asyncbackend.cpython-310.pyc,,
dns/__pycache__/_asyncio_backend.cpython-310.pyc,,
dns/__pycache__/_curio_backend.cpython-310.pyc,,
dns/__pycache__/_immutable_ctx.cpython-310.pyc,,
dns/__pycache__/_trio_backend.cpython-310.pyc,,
dns/__pycache__/asyncbackend.cpython-310.pyc,,
dns/__pycache__/asyncquery.cpython-310.pyc,,
dns/__pycache__/asyncresolver.cpython-310.pyc,,
dns/__pycache__/dnssec.cpython-310.pyc,,
dns/__pycache__/dnssectypes.cpython-310.pyc,,
dns/__pycache__/e164.cpython-310.pyc,,
dns/__pycache__/edns.cpython-310.pyc,,
dns/__pycache__/entropy.cpython-310.pyc,,
dns/__pycache__/enum.cpython-310.pyc,,
dns/__pycache__/exception.cpython-310.pyc,,
dns/__pycache__/flags.cpython-310.pyc,,
dns/__pycache__/grange.cpython-310.pyc,,
dns/__pycache__/immutable.cpython-310.pyc,,
dns/__pycache__/inet.cpython-310.pyc,,
dns/__pycache__/ipv4.cpython-310.pyc,,
dns/__pycache__/ipv6.cpython-310.pyc,,
dns/__pycache__/message.cpython-310.pyc,,
dns/__pycache__/name.cpython-310.pyc,,
dns/__pycache__/namedict.cpython-310.pyc,,
dns/__pycache__/node.cpython-310.pyc,,
dns/__pycache__/opcode.cpython-310.pyc,,
dns/__pycache__/query.cpython-310.pyc,,
dns/__pycache__/rcode.cpython-310.pyc,,
dns/__pycache__/rdata.cpython-310.pyc,,
dns/__pycache__/rdataclass.cpython-310.pyc,,
dns/__pycache__/rdataset.cpython-310.pyc,,
dns/__pycache__/rdatatype.cpython-310.pyc,,
dns/__pycache__/renderer.cpython-310.pyc,,
dns/__pycache__/resolver.cpython-310.pyc,,
dns/__pycache__/reversename.cpython-310.pyc,,
dns/__pycache__/rrset.cpython-310.pyc,,
dns/__pycache__/serial.cpython-310.pyc,,
dns/__pycache__/set.cpython-310.pyc,,
dns/__pycache__/tokenizer.cpython-310.pyc,,
dns/__pycache__/transaction.cpython-310.pyc,,
dns/__pycache__/tsig.cpython-310.pyc,,
dns/__pycache__/tsigkeyring.cpython-310.pyc,,
dns/__pycache__/ttl.cpython-310.pyc,,
dns/__pycache__/update.cpython-310.pyc,,
dns/__pycache__/version.cpython-310.pyc,,
dns/__pycache__/versioned.cpython-310.pyc,,
dns/__pycache__/win32util.cpython-310.pyc,,
dns/__pycache__/wire.cpython-310.pyc,,
dns/__pycache__/xfr.cpython-310.pyc,,
dns/__pycache__/zone.cpython-310.pyc,,
dns/__pycache__/zonefile.cpython-310.pyc,,
dns/__pycache__/zonetypes.cpython-310.pyc,,
dns/_asyncbackend.py,sha256=MElqnQ-n2jJh_S3WMfhV5iCBupaDPI-bI0fyXu1wxu0,2002
dns/_asyncio_backend.py,sha256=LXFopzcEdp5Dvo8femJys5-fu_fhtWPa5IqyzwO2sxI,5152
dns/_curio_backend.py,sha256=oS69XPE0hdDwUJ8BE50boHNkokZwzquKyRo8BlKsy_A,3561
dns/_immutable_ctx.py,sha256=cNpW_k8KIHcph3pTtSFy2amAo--nhjQJLG4PlyvQ6Es,2460
dns/_trio_backend.py,sha256=eHIFsQNxc8M4O5osIpuBkT2pW_0eFHNBRXUnGmS0d2k,3923
dns/asyncbackend.py,sha256=HJ-zCCMAy_xVtKevdY_0as2YWGAfj9Hj6J5i6RD5RS8,2948
dns/asyncquery.py,sha256=g_C35IGS3gNFvppAu5REgr_nqkbgD7TIVTrwDNFswBg,25273
dns/asyncresolver.py,sha256=h2p80ht5tEWw84bhJmGGo8a4_lgJNIo9JRDeWK-ItFI,10490
dns/dnssec.py,sha256=iAeM5OjgvW7QpH2vQRRCyv1rn4XvZCY4YqiZtaJrh14,43181
dns/dnssectypes.py,sha256=CyeuGTS_rM3zXr8wD9qMT9jkzvVfTY2JWckUcogG83E,1799
dns/e164.py,sha256=EsK8cnOtOx7kQ0DmSwibcwkzp6efMWjbRiTyHZO8Q-M,3978
dns/edns.py,sha256=78aJXpqEuLSxM9yhIwQMamDMgEJk5tiH4AW4nr_jlGc,14021
dns/entropy.py,sha256=5PsAUA_fK_EENaZwf54uu0_jl7nFj2ocIb6lp8Hc_BI,4244
dns/enum.py,sha256=4obucqiRmLeH-ufppWiNe1mNaqmXPpVIJpgZibHoTbE,3243
dns/exception.py,sha256=Bgb22xO-_nFy8EPXKjhFT0KIIiBE1-xLh0GvY1CvohE,5583
dns/flags.py,sha256=Mg8OX6697WpCTi17V5KkkNCofGudX5dZOfpB86GvZfU,2751
dns/grange.py,sha256=HA623Mv2mZDmOK_BZNDDakT0L6EHsMQU9lFFkE8dKr0,2148
dns/immutable.py,sha256=Mo2z0EQnrrMHuoWJyvUCIk-i1Hl7AqJmz5BJMx8BSXE,1837
dns/inet.py,sha256=VscG7j7OWB51X6pJN6pBQeJ4SD-Ff9ubqOodmv11Gps,5021
dns/ipv4.py,sha256=5Y8BJeByXZTfiFATNFY4KkE-h-ypivr2xX-hU1Lm2Ro,2065
dns/ipv6.py,sha256=tuCteE1vUIZWrZub2f0APudg7vhOLmJkkMCVhhMgBA0,6193
dns/message.py,sha256=e6tn5T2JZKHpjw76wn5YQOGh8fPMLsXvidW63c2f2fI,61832
dns/name.py,sha256=3CqHh162YDf1lx9p0k_8Ht2E5JRTcrLOtnfCnI-0zcM,34427
dns/namedict.py,sha256=hJRYpKeQv6Bd2LaUOPV0L_a0eXEIuqgggPXaH4c3Tow,4000
dns/node.py,sha256=zXIU6IAaglABTOfnPQaqySEXUvA658YQUER2TE61pLk,12666
dns/opcode.py,sha256=I6JyuFUL0msja_BYm6bzXHfbbfqUod_69Ss4xcv8xWQ,2730
dns/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
dns/query.py,sha256=R3fNabLsWyfBWP96cmdU7BpEsfPxYgQMDuYh1AahZCc,49177
dns/quic/__init__.py,sha256=8I9WP_z-mOGpC4-P5lGt0LKUbT3YkdeYqI4nrlP6Jx4,2162
dns/quic/__pycache__/__init__.cpython-310.pyc,,
dns/quic/__pycache__/_asyncio.cpython-310.pyc,,
dns/quic/__pycache__/_common.cpython-310.pyc,,
dns/quic/__pycache__/_sync.cpython-310.pyc,,
dns/quic/__pycache__/_trio.cpython-310.pyc,,
dns/quic/_asyncio.py,sha256=PfTAdPgpueojObypgHtLzK2UaOShBg5RylabIaME2pE,7249
dns/quic/_common.py,sha256=PD0ocrpqwNmACGZ9PJHsQgtQvqySg1eaKu-V5cKVVJk,5402
dns/quic/_sync.py,sha256=jVR5xt4cdXcHf-7962X0rPRFbLqYd5Rc9ScloRPwM2M,7179
dns/quic/_trio.py,sha256=wD0E-sb_8vwwf-nkYA3aC-e_co1m9Qpw25oSNnyn4xo,5932
dns/rcode.py,sha256=N6JjrIQjCdJy0boKIp8Hcky5tm__LSDscpDz3rE_sgU,4156
dns/rdata.py,sha256=WOADxsIumC2cA3vivaQcQzEbyW_qIKXthJj8rZarMjs,29766
dns/rdataclass.py,sha256=TK4W4ywB1L_X7EZqk2Gmwnu7vdQpolQF5DtQWyNk5xo,2984
dns/rdataset.py,sha256=nBzffUHIAWHTi1jG9zTc12lR1NED2-6FRBztMx7Ki1o,17066
dns/rdatatype.py,sha256=gIdYZ0iHRlgiTEO-ftobUANmaAmjTnNc4JljMaP1OnQ,7339
dns/rdtypes/ANY/AFSDB.py,sha256=SUdLEDU_H23BnN_kYsL5hDHn6lAvfkNg3oGXt6FnFVc,1662
dns/rdtypes/ANY/AMTRELAY.py,sha256=YO4zGhp8SBFibhNYu12Z3xZImvGicWfTcwS3ISiPgYc,3382
dns/rdtypes/ANY/AVC.py,sha256=LHF3DeIbJVQPHsHx1W5_0d2QB5u8wx3CCvbDcTzMAig,1025
dns/rdtypes/ANY/CAA.py,sha256=fqSrgAtNrWeJMbQTLiJuCKyUGuUMeyrI0dHxRcG0NYk,2512
dns/rdtypes/ANY/CDNSKEY.py,sha256=iCuVRT1NcPFvgLscWytLtfb5HmA1mJuBHEwfYqcAOhc,1226
dns/rdtypes/ANY/CDS.py,sha256=yLltG-Tv7BxZZgRTvVD4Cz-LneqRLIGSDaqe80VrEbs,1164
dns/rdtypes/ANY/CERT.py,sha256=aIXNbQf_74Ih5fRp1zjHeg2_-YZ99k6UgiNOajp-pQE,3534
dns/rdtypes/ANY/CNAME.py,sha256=OvSnWyZ6gomsicHCgxES9G3upRKbGLxLK5Vt2IhUXcc,1207
dns/rdtypes/ANY/CSYNC.py,sha256=Fp-jO7G-kIAgA1cJEUrxhdeqUCw8zIslS4I0XdeZxBg,2440
dns/rdtypes/ANY/DLV.py,sha256=9AQWoYOS9i-GdlsIn6Y-3x9MWKLeDddAyDXZWT8Hsik,987
dns/rdtypes/ANY/DNAME.py,sha256=Oil8B_mgVQ6_YvxDIVNO3AssA_RtsyeZmZJTUBXD_0g,1151
dns/rdtypes/ANY/DNSKEY.py,sha256=vOOKesQwrj9CVd4DOJ8YBD8xzNXLJP2AVG9KNyk4veA,1224
dns/rdtypes/ANY/DS.py,sha256=KWhwhK-mKuJvjb4jujnfeiRwHVjznsZ8MfBJ-fmz0WI,996
dns/rdtypes/ANY/EUI48.py,sha256=BvcEhncVjcRktBxZw0dWx_wYP5JGVBnpAzMypixTW7w,1152
dns/rdtypes/ANY/EUI64.py,sha256=qfqRmkc-wXmPpN4p559kpH4sc0JG5uBpQjsx0ZNaU30,1162
dns/rdtypes/ANY/GPOS.py,sha256=KO16H4VcRe7P2o67jD4eMV1MuVK8W_-xtJeZSUKer3E,4434
dns/rdtypes/ANY/HINFO.py,sha256=uzMjQoc286m7jFcZ7Bc388NIyG_6lrUM4GrskN6wUMM,2250
dns/rdtypes/ANY/HIP.py,sha256=4xXviQjTzlkKOi3LPbarZ3RuhjfktnUFj_udTQNdNY0,3229
dns/rdtypes/ANY/ISDN.py,sha256=mOI9rSD8BqkdwvNKWwS580Rp-yq13Z_Q3MVoI0aXn_k,2714
dns/rdtypes/ANY/L32.py,sha256=dF4DNMVOubJysF1YuoIDqpGhzEC7MDIK8r5MJS5WVLw,1287
dns/rdtypes/ANY/L64.py,sha256=BQC32j-ED1jgNJkZuQ6cP1qpmPQrJU_WwYN4wXAujoE,1593
dns/rdtypes/ANY/LOC.py,sha256=WgZnFQfYtm2ZpbwfPY45F9dLyydnWtz9PpqPPHVNwBM,12028
dns/rdtypes/ANY/LP.py,sha256=Gc9WDLUJDLCVBiBv9OHHsASELdd7IFJ0LXUN8Ud6a8A,1339
dns/rdtypes/ANY/MX.py,sha256=OWfJEANZXF1gKYgXXRAkpz_BYlxxQ8cy0QgKPc7K0bA,996
dns/rdtypes/ANY/NID.py,sha256=ezcvntoK3FQ_LbtzVDkUWP1bIqmqOq2COcQ1EEQf_Uc,1545
dns/rdtypes/ANY/NINFO.py,sha256=Fn7D8tXFbUIBRSEKlsd92T9M_O1xOGqKvsgX6aeRAco,1042
dns/rdtypes/ANY/NS.py,sha256=OdaHATafwdGDk5AABbW8E3DIUtr5zuItWNRB7AUg-Go,996
dns/rdtypes/ANY/NSEC.py,sha256=YsUupmBYhhwP2f_dueKHL-aTexKF_jIc9x1Ds3WfgwQ,2476
dns/rdtypes/ANY/NSEC3.py,sha256=HckDp5OAOvmzwfSbCqxk7oll_i4tj32N1bhZQMezcyU,3957
dns/rdtypes/ANY/NSEC3PARAM.py,sha256=wcI3mmhdo_0s7jJit1fg6li0mL0pVPJPUAoLOwhaClk,2636
dns/rdtypes/ANY/OPENPGPKEY.py,sha256=rhcB9knQVTcSoS3yOamXzNUzxNuSbYQP0WkEUNF3k_g,1852
dns/rdtypes/ANY/OPT.py,sha256=cySeMHbL4X9IDF7zGICIuO5iybhZowANdhUXQXzwUw8,2563
dns/rdtypes/ANY/PTR.py,sha256=GrTYECslYH8wafdIyNhdOqwOMHm-h8cO954DRw3VMNg,998
dns/rdtypes/ANY/RP.py,sha256=fjMSl6hAfCyaJ8AihCVH5lsiatRLWtQpIRsPXXvGbtE,2185
dns/rdtypes/ANY/RRSIG.py,sha256=OWWXA1-LkJ5GKl7xPNrqMKZ0fXgxQWJ6YrmgFSLRfG0,4924
dns/rdtypes/ANY/RT.py,sha256=9CHkE9dKz2n_RZpuG4zOOWOi4fW3tahxllRXF8043WI,1014
dns/rdtypes/ANY/SMIMEA.py,sha256=6yjHuVDfIEodBU9wxbCGCDZ5cWYwyY6FCk-aq2VNU0s,222
dns/rdtypes/ANY/SOA.py,sha256=XI_3R3gaiB3l_SC9VB8YAFby7kNwTIAzogudJrcdSJo,3146
dns/rdtypes/ANY/SPF.py,sha256=wHsKdQWUL0UcohQlbjkNkIfZUADtC-FZyIJNw8NFrIY,1023
dns/rdtypes/ANY/SSHFP.py,sha256=aUBsyKOUi8SpErKUv1y6BjpSb1M6k5Kn9MscaaeZv-I,2531
dns/rdtypes/ANY/TKEY.py,sha256=TznxiiL2E0rPXcvrl3zm2pDsMIGtyG-CAa6cNLhDWp8,4932
dns/rdtypes/ANY/TLSA.py,sha256=EYP7AXBh4zMtBgnfz828qfoLkFLCm5g8vR-6oX_DTbE,219
dns/rdtypes/ANY/TSIG.py,sha256=zrA3aWrgmL1Wf-svd1gHx4Us1QNA0qI1YcsVAybulSk,4751
dns/rdtypes/ANY/TXT.py,sha256=7IAIjgZ0hX_MIh_b0ApzKvxLXHTS0rteR-KXt4HLaV0,1001
dns/rdtypes/ANY/URI.py,sha256=LHnHMfF30-A_p6ID2nz4e228iJWidsw3HDBNOpcsWZk,2922
dns/rdtypes/ANY/X25.py,sha256=KLZT5BRBMor8GRlhqNSnjd5zVym0yihMt0MOuP97h2I,1945
dns/rdtypes/ANY/ZONEMD.py,sha256=3NqYelouTOxWqn2aNm8Qx8i7BTze_9KpH2zHN8UhXCM,2394
dns/rdtypes/ANY/__init__.py,sha256=Pox71HfsEnGGB1PGU44pwrrmjxPLQlA-IbX6nQRoA2M,1497
dns/rdtypes/ANY/__pycache__/AFSDB.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/AMTRELAY.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/AVC.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CAA.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CDNSKEY.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CDS.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CERT.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CNAME.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/CSYNC.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/DLV.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/DNAME.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/DNSKEY.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/DS.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/EUI48.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/EUI64.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/GPOS.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/HINFO.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/HIP.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/ISDN.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/L32.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/L64.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/LOC.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/LP.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/MX.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NID.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NINFO.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NS.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NSEC.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NSEC3.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/NSEC3PARAM.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/OPENPGPKEY.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/OPT.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/PTR.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/RP.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/RRSIG.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/RT.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/SMIMEA.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/SOA.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/SPF.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/SSHFP.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/TKEY.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/TLSA.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/TSIG.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/TXT.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/URI.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/X25.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/ZONEMD.cpython-310.pyc,,
dns/rdtypes/ANY/__pycache__/__init__.cpython-310.pyc,,
dns/rdtypes/CH/A.py,sha256=NmcDK5HqYNyWGpUMfywbh1kZWsO-1hQqLOC_2ahMOiY,2217
dns/rdtypes/CH/__init__.py,sha256=GD9YeDKb9VBDo-J5rrChX1MWEGyQXuR9Htnbhg_iYLc,923
dns/rdtypes/CH/__pycache__/A.cpython-310.pyc,,
dns/rdtypes/CH/__pycache__/__init__.cpython-310.pyc,,
dns/rdtypes/IN/A.py,sha256=pq9G7ZZrCCEBWOFWvLmivtu8b_9ZAFIJU0oQUMHHudE,1815
dns/rdtypes/IN/AAAA.py,sha256=GvQ1So05gExl1pw0HxO4ypfrT-EfnVCwBVVqgSvq5zk,1821
dns/rdtypes/IN/APL.py,sha256=RH8mxqv3cq7SrcuPT4leT9T86SbfieOJrlqYibTlQDU,5100
dns/rdtypes/IN/DHCID.py,sha256=x-JxOiEbaOUREw2QXBLKvGTrljn7mrs8fO2jjlmc4AI,1857
dns/rdtypes/IN/HTTPS.py,sha256=7ISjRVJv5Q36s-niKl3ic2ZQwP5ko-9_Vun0kW-tF4Y,220
dns/rdtypes/IN/IPSECKEY.py,sha256=pUBVwO0T4tR7cAMe1AyQq0Qgnar35ncvqDbDGsDiXbo,3291
dns/rdtypes/IN/KX.py,sha256=govh0YTer2-mpfhxUwc-fQb1WnqMkEkq5jIRo20nh1E,1014
dns/rdtypes/IN/NAPTR.py,sha256=MxuMrWCAR3p7vHPFddtWa2uQxvXTPKjkawbT_JxyZ0A,3751
dns/rdtypes/IN/NSAP.py,sha256=bHxNkjZYbq5MrkWQC8ILoTrqpoNzRfCrWtOFgSjoO60,2166
dns/rdtypes/IN/NSAP_PTR.py,sha256=hTnieARrAxO-yuFeMppj4wlRX6gv6RC089hLaurs-UQ,1016
dns/rdtypes/IN/PX.py,sha256=818J4ETvfVmy1NhseLGthZaJb9pd3oZg0OQbvYx4Z0I,2757
dns/rdtypes/IN/SRV.py,sha256=hgh10Ahyh-P6MDuFMCIcnBXmvB5tJXiNCfpCWaFBOss,2770
dns/rdtypes/IN/SVCB.py,sha256=GZbDKmNB48zKCuX6uvuSJRAIkvDq-Sbn27DLqzPUcG4,218
dns/rdtypes/IN/WKS.py,sha256=bKVL2hz_6UZ79M1gWJHyoufxxmD95-X0NAc7pciznrY,3653
dns/rdtypes/IN/__init__.py,sha256=HbI8aw9HWroI6SgEvl8Sx6FdkDswCCXMbSRuJy5o8LQ,1083
dns/rdtypes/IN/__pycache__/A.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/AAAA.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/APL.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/DHCID.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/HTTPS.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/IPSECKEY.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/KX.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/NAPTR.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/NSAP.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/NSAP_PTR.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/PX.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/SRV.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/SVCB.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/WKS.cpython-310.pyc,,
dns/rdtypes/IN/__pycache__/__init__.cpython-310.pyc,,
dns/rdtypes/__init__.py,sha256=NYizfGglJfhqt_GMtSSXf7YQXIEHHCiJ_Y_qaLVeiOI,1073
dns/rdtypes/__pycache__/__init__.cpython-310.pyc,,
dns/rdtypes/__pycache__/dnskeybase.cpython-310.pyc,,
dns/rdtypes/__pycache__/dsbase.cpython-310.pyc,,
dns/rdtypes/__pycache__/euibase.cpython-310.pyc,,
dns/rdtypes/__pycache__/mxbase.cpython-310.pyc,,
dns/rdtypes/__pycache__/nsbase.cpython-310.pyc,,
dns/rdtypes/__pycache__/svcbbase.cpython-310.pyc,,
dns/rdtypes/__pycache__/tlsabase.cpython-310.pyc,,
dns/rdtypes/__pycache__/txtbase.cpython-310.pyc,,
dns/rdtypes/__pycache__/util.cpython-310.pyc,,
dns/rdtypes/dnskeybase.py,sha256=ZOVr_LgqZUSDUetFE-L9jCpHr_f6ZBeEN_2e1s1tZBc,2851
dns/rdtypes/dsbase.py,sha256=E2nrdJJ9Sr2rsVcv6S3kavHNKPYKkAfl-8rqScVIPA4,3397
dns/rdtypes/euibase.py,sha256=kz8ObjJ61Mujrip4iLzSy8DA3l3zlirWP34qb6cpHQs,2631
dns/rdtypes/mxbase.py,sha256=mmyT0VD4o5yvF40E3NundIHJb6ABMNunZeyzLfnKkqw,3199
dns/rdtypes/nsbase.py,sha256=ye1JV1n3tTyzcFpm3smSCBf3vYmRW6c6VrELW7wqgNU,2325
dns/rdtypes/svcbbase.py,sha256=mV-08yZvNe-5Rk-OLLoCCc6rXB455g0xWQECdcg04ok,16936
dns/rdtypes/tlsabase.py,sha256=Y6SfucVqncdjL4t3s0O6_FiyNk0DeQ_6tK4l0m2tQfA,2597
dns/rdtypes/txtbase.py,sha256=anTxtjeu9idvAE_yooUSyYDnOxUW7r0Ado85XNJ8c-M,3631
dns/rdtypes/util.py,sha256=1n-l8WFg0cxi_wyRU60w7GnbFmm0FbG4HPOBj90qYjs,8745
dns/renderer.py,sha256=TaHsEDSVLMo-S3jbcdUCn34Vjc-wdR7Uw4U4c_pL4kc,10674
dns/resolver.py,sha256=vgkQ_CXpgydo2Gy8Q51dMoj0VEVini8voso4EMgi2XE,63405
dns/reversename.py,sha256=8q22s2g2dRYQLRd4GqU5sIPqXXpubbZrYBOg9hRDfMM,3828
dns/rrset.py,sha256=wirHviaC4-2pcv2qhBrkekSGZ9D0MNx7T8ZnCNGya4A,9184
dns/serial.py,sha256=-t5rPW-TcJwzBMfIJo7Tl-uDtaYtpqOfCVYx9dMaDCY,3606
dns/set.py,sha256=R8LN8_aUrvOfav5Za7VcSrdi0D10jJsSrNewdrc8Lwg,9089
dns/tokenizer.py,sha256=AcP4uOWcriLAmx_LptloiL9Mcb39FeHjXWLx0XfogJk,23584
dns/transaction.py,sha256=-aeZdpivp_5ZRH_oEPiag1dr9x-LRTLumzzaO8zfdfM,21629
dns/tsig.py,sha256=JZqBzmCCCxqTcFBQnFszTv2u3XFWwZrZSJhgX73E_HE,11457
dns/tsigkeyring.py,sha256=4G3NOtLDBUuyhGLeCY6AHwn4F19YBjn6Dx0KLfwJa4A,2638
dns/ttl.py,sha256=fWFkw8qfk6saTp7lAPxZOuD3U3TRxVRvIpljQnG-01I,2979
dns/update.py,sha256=UHoYWFNdjbrv9Dk8VPrxkFp7keh6alm957c0pNDb898,12252
dns/version.py,sha256=02ZCJolpTK-0bopKQEuWh4kjViB6at1iWYsOR3OTWDM,1926
dns/versioned.py,sha256=RmQgmsJr65KPKjNrFdRHlbmZZeznncACgzrPMjxTbho,11776
dns/win32util.py,sha256=NqPgxAShH37yqj5Yj7n743jtq-8dQ_dwBFvbFWadvgY,9057
dns/wire.py,sha256=TprNbU0iab4FhWWlqJKtrpgbKyTHcmjfF-_9er6wunQ,2831
dns/xfr.py,sha256=SUjumTpVSlDg2lz39J5xp3kj3oPueAGCzeA_ftzNLBs,13273
dns/zone.py,sha256=cgUXC1QmYYwjFSp00q9n4fPOJP58Ewz2zGKeDP9VyzE,51058
dns/zonefile.py,sha256=g5arZ1aza8yKHAaK8tqpF17iv58GnmcPk_39tOC0qX4,27484
dns/zonetypes.py,sha256=HrQNZxZ_gWLWI9dskix71msi9wkYK5pgrBBbPb1T74Y,690
dnspython-2.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
dnspython-2.3.0.dist-info/LICENSE,sha256=w-o_9WVLMpwZ07xfdIGvYjw93tSmFFWFSZ-EOtPXQc0,1526
dnspython-2.3.0.dist-info/METADATA,sha256=JWGx9abERf5Vf2m770UYN78sO7GqkaJQyK3ybo_00Ks,5180
dnspython-2.3.0.dist-info/RECORD,,
dnspython-2.3.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
dnspython-2.3.0.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88

View File

@ -0,0 +1,4 @@
Wheel-Version: 1.0
Generator: poetry-core 1.4.0
Root-Is-Purelib: true
Tag: py3-none-any

View File

@ -0,0 +1,20 @@
import dns.resolver
import dns.query
def lambda_handler(event, context):
    """Resolve AD-related SRV records against two internal nameservers.

    Prints each record set to CloudWatch logs and returns an empty dict.
    Uses TCP so large SRV answer sets are not truncated.
    """
    res = dns.resolver.Resolver()
    res.timeout = 5
    res.lifetime = 30
    # NOTE(review): hard-coded internal DNS servers -- confirm these are
    # reachable from the Lambda's VPC subnets.
    res.nameservers = ['10.135.6.95', '10.135.6.96']
    for name in ('_ldap._tcp.hkg.yourdomain.com',
                 '_kerberos._tcp.hkg.yourdomain.com'):
        # resolve() replaces query(), which is deprecated in dnspython 2.x.
        results = res.resolve(name, 'SRV', tcp=True)
        print(name + ':')
        for line in results:
            print(line.to_text())
    return {}

View File

@ -0,0 +1,16 @@
import boto3
import os
import json
# reference: https://aws.amazon.com/premiumsupport/knowledge-center/start-stop-lambda-eventbridge/
# Module-level client so the connection is reused across warm invocations.
ec2 = boto3.client('ec2', region_name=os.environ['region_name'])


def lambda_handler(event, context):
    """Start or stop the EC2 instances listed in the `instances` env var.

    ``event['action']`` selects the operation ('start' or 'stop'); any
    other value -- or a missing key -- returns an explanatory string.
    """
    # .get() instead of ['action']: a missing key previously raised
    # KeyError, making the else branch unreachable.
    action = event.get('action')
    if action == 'start':
        resp = ec2.start_instances(InstanceIds=json.loads(os.environ['instances']))
    elif action == 'stop':
        resp = ec2.stop_instances(InstanceIds=json.loads(os.environ['instances']))
    else:
        resp = "Event action not provided"
    return resp

View File

@ -0,0 +1,12 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -0,0 +1,22 @@
{ "Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"ec2:Start*",
"ec2:Stop*",
"kms:CreateGrant"
],
"Resource": "*"
}
]
}

2
nodejs/dnslookup.js Normal file
View File

@ -0,0 +1,2 @@
// Forward-resolve blog.headdesk.me via the OS resolver (getaddrinfo).
const dns = require('dns');

dns.lookup('blog.headdesk.me', (err, address, family) => {
  // Report failures explicitly instead of printing "results: undefined".
  if (err) {
    console.error('lookup failed: %s', err.message);
    return;
  }
  console.log('results: %s', address);
});

View File

@ -0,0 +1,43 @@
#!/bin/bash
# Weekly OpenStack capacity report: prints a usage summary to stdout and
# appends key metrics to a local sqlite database for trend tracking.
# Load OpenStack admin credentials into the environment.
. /root/openrc
echo "Openstack usage generated on $(date)"
echo -en "\nNumber of active VMs: "
mysql nova -Nse "select count(1) from instances where vm_state = 'active'"
echo -en "\n***Usage summary***\n"
openstack usage list -f csv --quote none | column -t -s,
echo -en "\n***Ceph node disk usage***\n"
# ceph df | grep -v "0 B"
# Per-host OSD utilisation: $NF is the host name, $(NF-4) the use% column.
ceph osd df tree | grep host | awk '{print $NF,$(NF-4)"%"}' | tee /tmp/ceph-node-df.txt
echo -en "Overall usage "
cat /tmp/ceph-node-df.txt | tr -d \\% | datamash -t ' ' mean 2 | awk '{print $1"%"}'
echo -en "\n***Allocation per host (flavor/total)***\n"
echo -en "Host CPU% Memory%\n" | column -t
# For every compute host: pair the "total"/"now" rows from `host show`
# and print flavor-allocated CPU and RAM as a percentage of the totals.
openstack host list -fvalue -c 'Host Name' | grep compute | sort | while read h; do openstack host show $h -fcsv | egrep '(total|now)' | tr -d \" | paste - - | awk -F, '{print $1, $7/$3*100"%", $8/$4*100"%"}'; done | column -t | tee /tmp/cluster-cpu-ram.txt
echo -en "Overall allocation "
cat /tmp/cluster-cpu-ram.txt | tr -d \\% | awk '{print $1,$2,$3}' | datamash -t ' ' mean 2 mean 3 | awk '{printf("%.1f %.1f\n", $1, $2)}'
echo -en "\n***Overcommit ratio***\n"
cat /etc/nova/nova.conf | grep allocation_ratio | grep -v ^#
# store key data into sqlite
# Table schema: create table usage(date date, active_vm number, ceph_usage float, alloc_cpu float, alloc_mem float);
#
TODAY=$(date +%Y-%m-%d)
ACTIVEVM=$(mysql nova -Nse "select count(1) from instances where vm_state = 'active'")
CEPHUSAGE=$(cat /tmp/ceph-node-df.txt | tr -d \\% | datamash -t ' ' mean 2)
ALLOCCPU=$(cat /tmp/cluster-cpu-ram.txt | tr -d \\% | awk '{print $1,$2,$3}' | datamash -t ' ' mean 2 mean 3 | awk '{print $1}')
ALLOCRAM=$(cat /tmp/cluster-cpu-ram.txt | tr -d \\% | awk '{print $1,$2,$3}' | datamash -t ' ' mean 2 mean 3 | awk '{print $2}')
# Values come from trusted local command output; quoting is still naive --
# NOTE(review): would break if any field ever contained a single quote.
echo "insert into usage values ('$TODAY', '$ACTIVEVM', '$CEPHUSAGE', '$ALLOCCPU', '$ALLOCRAM')" \
| sqlite3 /root/openstack-weekly.db
echo -en "\n***Historical records***\n"
echo "select * from usage;" | sqlite3 -header -column /root/openstack-weekly.db
echo -en "\n\nGenerated by /root/simple-report.sh from $(hostname)"

View File

@ -0,0 +1,15 @@
# RMAN archivelog maintenance: back up archived redo logs that have no
# disk backup yet, then purge logs older than two days that do.
connect target /
configure device type disk parallelism 3;
# Cap backup pieces at 5G so individual files stay manageable.
CONFIGURE CHANNEL DEVICE TYPE DISK MAXPIECESIZE 5G;
run {
# Crosscheck first so logs already deleted from disk are marked expired
# and do not fail the backup.
crosscheck archivelog all;
backup archivelog all not backed up 1 times format '/orabackup/arch/%d_%s_%p_%e_%t';
}
run {
# Keep two days of archived logs on disk; delete only logs that have at
# least one backup on the disk device.
delete noprompt archivelog until time = 'sysdate-2' backed up 1 times to device type disk;
}
exit;

View File

@ -0,0 +1,10 @@
#!/bin/bash
# Run the RMAN archivelog backup for each database SID listed below.
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=/u01/app/oracle/product/12.2.0.1/db_home
export PATH=$ORACLE_HOME/bin:$PATH
export LOG_DIR=/orabackup/logs
# Day of month (e.g. 07): log names roll over monthly, giving a rolling
# ~31-day log history per SID.
export DATE=$(date +%d)
for i in bvcprsu1 bvalfpr1 bvcprsw1; do
  # rman picks up the target database from the exported ORACLE_SID.
  export ORACLE_SID=$i
  rman cmdfile=/orabackup/scripts/rman-archivelog-backup.rcv log=$LOG_DIR/${DATE}_${ORACLE_SID}.log
done

25
php/mailapi.php Normal file
View File

@ -0,0 +1,25 @@
<?php
// Minimal mail-relay API: POST subject, sender, recipient and a
// base64-encoded message body; the body is rendered as monospace HTML.
if (empty($_POST)) {
    echo 'Usage: curl -d "subject=email subject&sender=SENDER@DOMAIN.TLD&recipient=RECIPIENT@DOMAIN.TLD&message=$(base64 /tmp/mailbody.txt)" -X POST https://racker.pro/mailapi/';
    exit();
} else {
    // Security: validate both addresses. Unvalidated POST fields placed
    // into mail() parameters/headers allow e-mail header injection
    // (CR/LF smuggling extra recipients or headers).
    $to = filter_var(isset($_POST['recipient']) ? $_POST['recipient'] : '', FILTER_VALIDATE_EMAIL);
    $from = filter_var(isset($_POST['sender']) ? $_POST['sender'] : '', FILTER_VALIDATE_EMAIL);
    if ($to === false || $from === false) {
        echo 'There was a problem sending the email.';
        exit();
    }
    // The subject is also a header line: strip CR/LF.
    $subject = str_replace(array("\r", "\n"), '', isset($_POST['subject']) ? $_POST['subject'] : '');
    $headers = "From: " . $from . "\r\n";
    $headers .= "MIME-Version: 1.0\r\n";
    $headers .= "Content-Type: text/html; charset=UTF-8\r\n";
    // Preserve whitespace and line breaks of the decoded plain-text body
    // when rendering it as HTML.
    $message = str_replace(" ", "&nbsp;", base64_decode($_POST['message']));
    $message2 = "<span style=\"font-family: monospace; font-size: small;\">" . str_replace("\n", "<br />\n", $message) . "</span>";
    if (mail($to, $subject, $message2, $headers)) {
        echo 'Your message has been sent.';
    } else {
        echo 'There was a problem sending the email.';
    }
}
?>

11
php/phpmail.php Normal file
View File

@ -0,0 +1,11 @@
<?php
// Send a short multi-line test message through the local MTA.
$lines = array("Line 1", "Line 2", "Line 3");
$message = implode("\n", $lines);

// Keep every line within 70 characters, as recommended for mail().
$message = wordwrap($message, 70);

mail('test@headdesk.me', 'Test from phpmail.php', $message);
?>

12
php/snsalert.json Normal file
View File

@ -0,0 +1,12 @@
{
"Type" : "Notification",
"MessageId" : "165545c9-2a5c-472c-8df2-7ff2be2b3b1b",
"Token" : "2336412f37f...",
"TopicArn" : "arn:aws:sns:us-west-2:123456789012:MyTopic",
"Message" : "Test message",
"SubscribeURL" : "https://sns.us-west-2.amazonaws.com/?Action=ConfirmSubscription&TopicArn=arn:aws:sns:us-west-2:123456789012:MyTopic&Token=2336412f37...",
"Timestamp" : "2012-04-26T20:45:04.751Z",
"SignatureVersion" : "1",
"Signature" : "EXAMPLEpH+...",
"SigningCertURL" : "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"
}

26
php/snsalert.php Normal file
View File

@ -0,0 +1,26 @@
<?php
// HTTP(S) endpoint for an AWS SNS subscription: echoes the confirmation
// URL for new subscriptions and forwards notification payloads by e-mail.
// Fetch the raw POST body containing the message
$postBody = file_get_contents('php://input');
// JSON decode the body to an array of message data
$message = json_decode($postBody, true);
// Guard against non-JSON posts and payloads without the expected keys,
// which previously produced undefined-index notices.
if (is_array($message) && isset($message['Type'])) {
    if ($message['Type'] == "SubscriptionConfirmation") {
        // NOTE(review): confirmation is manual (operator visits the URL);
        // no SNS signature validation is performed here.
        echo "Now needs to reply to " . (isset($message['SubscribeURL']) ? $message['SubscribeURL'] : '(missing SubscribeURL)');
    } else {
        $to = "sns@racker.pro";
        $subject = "SNS alert from AWS";
        $headers = "From: sns@racker.pro\r\n";
        $headers .= "MIME-Version: 1.0\r\n";
        $headers .= "Content-Type: text/html; charset=UTF-8\r\n";
        $body = isset($message['Message']) ? $message['Message'] : '';
        if (mail($to, $subject, $body, $headers)) {
            echo 'Your message has been sent.';
        } else {
            echo 'There was a problem sending the email.';
        }
    }
}

12
php/subrequest.json Normal file
View File

@ -0,0 +1,12 @@
{
"Type" : "SubscriptionConfirmation",
"MessageId" : "165545c9-2a5c-472c-8df2-7ff2be2b3b1b",
"Token" : "2336412f37f...",
"TopicArn" : "arn:aws:sns:us-west-2:123456789012:MyTopic",
"Message" : "You have chosen to subscribe to the topic arn:aws:sns:us-west-2:123456789012:MyTopic.\nTo confirm the subscription, visit the SubscribeURL included in this message.",
"SubscribeURL" : "https://sns.us-west-2.amazonaws.com/?Action=ConfirmSubscription&TopicArn=arn:aws:sns:us-west-2:123456789012:MyTopic&Token=2336412f37...",
"Timestamp" : "2012-04-26T20:45:04.751Z",
"SignatureVersion" : "1",
"Signature" : "EXAMPLEpH+...",
"SigningCertURL" : "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"
}

View File

@ -0,0 +1,26 @@
import json
import boto3
import base64
def lambda_handler(event, context):
    """Chain two sts:AssumeRole calls (Role1 -> Role2) and print export lines.

    The printed environment variables let an operator paste the layer-2
    temporary credentials into a shell session.
    """
    # Layer 1: assume Role1 using the Lambda execution role's credentials.
    l1client = boto3.client('sts')
    assumed_role_object = l1client.assume_role(
        RoleArn="arn:aws:iam::111122223333:role/Role1",
        RoleSessionName="lambda-assumeRoleL1",
    )
    creds = assumed_role_object['Credentials']
    # Layer 2: a client built from Role1's temporary credentials.
    # Bug fix: aws_session_token must be Role1's SessionToken; the original
    # passed the literal string "lambda-assumeRoleMs", which STS rejects.
    l2client = boto3.client(
        'sts',
        aws_access_key_id=creds['AccessKeyId'],
        aws_secret_access_key=creds['SecretAccessKey'],
        aws_session_token=creds['SessionToken'])
    l2_assumed_role_object = l2client.assume_role(
        RoleArn="arn:aws:iam::111122223333:role/Role2",
        RoleSessionName="lambda-assumeRoleL2",
    )
    l2_creds = l2_assumed_role_object['Credentials']
    print("export AWS_ACCESS_KEY_ID=" + l2_creds['AccessKeyId'])
    print("export AWS_SECRET_ACCESS_KEY=" + l2_creds['SecretAccessKey'])
    print("export AWS_SESSION_TOKEN=" + l2_creds['SessionToken'])
    print("export AWS_DEFAULT_REGION=ap-east-1")

16
py/aws-assume-role.py Normal file
View File

@ -0,0 +1,16 @@
from typing import NoReturn
import json
import boto3
import base64
def lambda_handler(event, context) -> None:
    """Assume SomeRole and print shell export lines for its temporary credentials.

    Annotated ``-> None`` (returns normally); the original ``NoReturn``
    incorrectly claimed the function never returns.
    """
    sts_client = boto3.client('sts')
    assumed_role_object = sts_client.assume_role(
        RoleArn="arn:aws:iam::111122223333:role/SomeRole",
        RoleSessionName="lambda-assumeRoleMs",
    )
    creds = assumed_role_object['Credentials']
    print("export AWS_ACCESS_KEY_ID=" + creds['AccessKeyId'])
    print("export AWS_SECRET_ACCESS_KEY=" + creds['SecretAccessKey'])
    print("export AWS_SESSION_TOKEN=" + creds['SessionToken'])
    print("export AWS_DEFAULT_REGION=ap-east-1")

5
py/dates-test.py Executable file
View File

@ -0,0 +1,5 @@
#!/usr/bin/python3
"""Print the previous calendar month as YYYY-MM."""
from datetime import date, timedelta


def previous_month(today):
    """Return 'YYYY-MM' for the month before *today*'s month.

    Stepping back from the first day of the current month avoids the two
    failure modes of the naive .replace(month=month - 1) approach:
    January (month 0 is invalid) and days missing from the previous
    month (e.g. March 31 -> nonexistent "February 31").
    """
    last_of_previous = today.replace(day=1) - timedelta(days=1)
    return last_of_previous.strftime('%Y-%m')


print(previous_month(date.today()))

13
py/dict.py Executable file
View File

@ -0,0 +1,13 @@
#!/usr/bin/env python3
# Python data types
# list = [value1, value2, value3,...valueN]
# set = {value1, value2, value3,...valueN}
# dict = { key1:value1, key2:value2,...keyN:valueN }

# Sample use of a list of dicts: append one more address record, then show
# the resulting list.
datagroup = [
    {'name': '203.60.15.113/32', 'data': ''},
    {'name': '222.186.30.174/32', 'data': ''},
    {'name': '120.136.32.106/32', 'data': ''},
]
newrecord = dict(name='1.2.3.4/32', data='')
datagroup.append(newrecord)
print(datagroup)

15
py/dns-lookup-dnspython.py Executable file
View File

@ -0,0 +1,15 @@
#!/usr/bin/python3
import dns.resolver
import dns.query

# Ask one specific nameserver (over TCP) for the host's A records.
resolver = dns.resolver.Resolver()
resolver.timeout = 10
resolver.lifetime = 25
resolver.nameservers = ['192.168.86.50']
answers = resolver.resolve('blog.headdesk.me', 'A', tcp=True)

print('Result:')
for answer in answers:
    print(answer.to_text())

3
py/dns-lookup.py Normal file
View File

@ -0,0 +1,3 @@
import socket

# Resolve the host's IPv4 address with the stdlib resolver and print it.
hostname = 'headdesk.me'
print(socket.gethostbyname(hostname))

13
py/dns-lookup2.py Normal file
View File

@ -0,0 +1,13 @@
import dns.resolver

# Look up the A records and remember them as strings for the diff below.
# FIX: dnspython >= 2.0 deprecates dns.resolver.query() in favour of
# resolve(); use the modern API.
results = dns.resolver.resolve('wordpress.com', 'A')
stringResults = []
for ip in results:
    print(ip.to_text())
    stringResults.append(ip.to_text())
# Canned comparison list: two expected addresses plus one deliberate extra.
dummyList = ['192.0.78.9', '192.0.78.17', '2.3.4.5']
def diff(li1, li2):
    """Return the elements present in exactly one of the two lists.

    Equivalent to the original (li1-li2) + (li2-li1) set arithmetic, written
    as the idiomatic symmetric difference. Order is unspecified (set-based),
    as it was originally; duplicates are collapsed.
    """
    return list(set(li1) ^ set(li2))
# Show which addresses differ between the live DNS answers and the canned list.
print('Diff results: ', diff(stringResults,dummyList))

1
py/f5-sdk/README.md Normal file
View File

@ -0,0 +1 @@
Sample code using [F5 python sdk](https://f5-sdk.readthedocs.io/en/latest/index.html)

View File

@ -0,0 +1,18 @@
#!/usr/bin/env python
from f5.bigip import ManagementRoot

# Open a management session against the BigIP device.
bigip = ManagementRoot("10.11.232.247", "username", "password")

# Load the whitelist data group from the Common partition.
whitelist_dg = bigip.tm.ltm.data_group.internals.internal.load(name='domain1-WHITELIST', partition='Common')

# Show the records before the change.
print(whitelist_dg.records)

# Replace the record list and push the change to the device.
whitelist_dg.records = [{'name': '203.60.15.113/32', 'data': ''}, {'name': '222.186.30.174/32', 'data': ''}]
whitelist_dg.update()

# Show the records after the change.
print(whitelist_dg.records)

View File

@ -0,0 +1,37 @@
#!/usr/bin/env python
from f5.bigip import ManagementRoot

# Connect to the BigIP
mgmt = ManagementRoot("10.11.232.247", "username", "password")

# Walk every LTM pool, printing the pool name followed by its member names.
for pool in mgmt.tm.ltm.pools.get_collection():
    print(pool.name)
    for member in pool.members_s.get_collection():
        print(member.name)
def showDatagroups():
    """Print every internal data group whose name contains 'WHITELIST',
    together with its records (or a note when it has none)."""
    print("\n\n**** Showing Datagroups")
    for idx, dg in enumerate(mgmt.tm.ltm.data_group.internals.get_collection()):
        # Guard clause: skip everything that is not a whitelist group.
        if "WHITELIST" not in dg.name:
            continue
        print("\n{}: {}".format(idx, dg.name))
        if hasattr(dg, 'records'):
            print("\n{}: {}".format(idx, dg.records))
        else:
            print("\nObject {} has no records".format(dg.name))
def showDatagroupFiles():
    """Dump the raw representation of every data-group file on the device."""
    print("\n\n**** Showing DatagroupFiles")
    for i, dg_file in enumerate(mgmt.tm.sys.file.data_groups.get_collection()):
        print('\n{}: {}'.format(i, dg_file.raw))
# Script entry point. NOTE(review): showDatagroupFiles() is defined above but
# never invoked here — presumably kept for ad-hoc use; confirm before removing.
if __name__ == "__main__":
    showDatagroups()

58
py/fsro.py Executable file
View File

@ -0,0 +1,58 @@
#!/usr/bin/env python
import sys, os, subprocess, socket, logging, logging.handlers, time

# Mount point to health-check.
checkPath = "/proc"

# Re-exec ourselves under sudo when not already root: some filesystems are
# only writable by root. The parent process then exits cleanly.
if os.geteuid() != 0:
    subprocess.call(['sudo', 'python', sys.argv[0]])
    sys.exit(0)

# Send log records to the local syslog daemon.
log = logging.getLogger(__file__)
log.setLevel(logging.DEBUG)
handler = logging.handlers.SysLogHandler(address='/dev/log')
handler.setFormatter(logging.Formatter('%(name)s %(levelname)s: %(message)s'))
log.addHandler(handler)
# print message and exit
def printExit(msg):
    """Echo *msg* to stdout, record it in syslog, then exit with status 0."""
    print(msg)
    log.info(msg)
    exit(0)
# program begins
# NOTE(review): the upstream extraction stripped indentation; the lock-file
# nesting below is reconstructed on the reading that any existing lock aborts
# this run (stale locks older than 1h are removed first so the next run can
# proceed) — confirm against the original file.
if os.path.isfile('/dev/shm/fsro-py.lock'):
    if time.time() - os.path.getmtime('/dev/shm/fsro-py.lock') > 3600:
        os.remove('/dev/shm/fsro-py.lock')
    printExit('OK Please health check ' + checkPath + ' and delete /dev/shm/fsro-py.lock')
else:
    # No previous run in flight: take the lock and continue.
    open('/dev/shm/fsro-py.lock', 'a').close()

# Fail fast if the path is not actually mounted.
mountExists = subprocess.call(["grep", "-q", checkPath, "/proc/mounts"])
if mountExists != 0:
    printExit('MAJOR Mount ' + checkPath + ' missing')

# Write, then read back, a 10 MiB random probe file to prove the mount works.
testFile = checkPath + "/." + socket.gethostname() + "-test.dat"
try:
    with open(testFile, 'wb') as fw:
        # the with-block closes the file; the original's explicit close()
        # inside the block was redundant and has been removed
        fw.write(os.urandom(10*1024*1024))
except Exception as e:
    printExit('MAJOR Mount ' + checkPath + ' not writable: ' + repr(e))
try:
    with open(testFile, 'r') as fr:
        fr.read()
except Exception as e:
    printExit('MAJOR Mount ' + checkPath + ' not readable: ' + repr(e))

# Clean up the probe file and release the lock before reporting success.
os.remove(testFile)
os.remove('/dev/shm/fsro-py.lock')
printExit('OK Mount ' + checkPath + ' healthy')

6
py/html2text.py Normal file
View File

@ -0,0 +1,6 @@
from urllib import request
import html2text

# Fetch the page, decode it as UTF-8, and print a plain-text rendering.
target = 'https://duckduckgo.com'
page = request.urlopen(target).read().decode('utf8')
print(html2text.html2text(page))

33
py/ldap-monitor.py Normal file
View File

@ -0,0 +1,33 @@
import json
import socket
import boto3
def lambda_handler(event, context):
    """Probe each LDAPS endpoint with a plain TCP connect.

    Returns a small success dict when every host accepts the connection on
    port 636; raises Exception otherwise so the Lambda invocation is marked
    failed (and any attached alarm fires).
    """
    hosts = ['10.129.72.63', '10.135.72.66', '10.129.72.64', '10.135.72.67']
    port = 636
    timeout_seconds = 1
    test_results = 0
    for host in hosts:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.settimeout(timeout_seconds)
            # connect_ex returns 0 on success instead of raising.
            if sock.connect_ex((host, int(port))) == 0:
                print("Host {}:{} - Up".format(host, port))
                test_results += 1
            else:
                print("Host {}:{} - Down".format(host, port))
        finally:
            # FIX: close on every path; the original only closed the socket
            # on the failure branch, leaking it when the host was up.
            sock.close()
    # FIX: compare against len(hosts) rather than the hard-coded 4, so
    # editing the host list cannot silently break the success check.
    if test_results == len(hosts):
        return {
            'message' : 'Successfully connected to all LDAP servers'
        }
    # The trailing statusCode-200 return in the original was unreachable
    # (both branches above already return or raise) and has been removed.
    raise Exception('Not all LDAP servers can be connected!')

13
py/ldaps.py Normal file
View File

@ -0,0 +1,13 @@
import ldap

# Connection settings for the test Active Directory domain.
LDAP_SERVER = 'ldap://192.168.86.87'
BASE_DN = 'dc=acme,dc=local' # base dn to search in
LDAP_LOGIN = 'Administrator'
# SECURITY NOTE(review): credential is hard-coded in source and the server
# URL is plain ldap:// — move the password to an environment variable or
# secrets store and prefer ldaps:// before any real use.
LDAP_PASSWORD = 'qwerty-asdf-1234'
OBJECT_TO_SEARCH = 'userPrincipalName=Administrator@acme.local'
ATTRIBUTES_TO_SEARCH = ['memberOf']

# Bind with the admin account and search the whole subtree for the user,
# requesting only the memberOf attribute.
connect = ldap.initialize(LDAP_SERVER)
connect.set_option(ldap.OPT_REFERRALS, 0) # to search the object and all its descendants
connect.simple_bind_s(LDAP_LOGIN, LDAP_PASSWORD)
# NOTE(review): the search result is assigned but never printed or returned.
result = connect.search_s(BASE_DN, ldap.SCOPE_SUBTREE, OBJECT_TO_SEARCH, ATTRIBUTES_TO_SEARCH)

11
py/log-test.py Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/python3
import logging

logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

# Emit one record at each of the five standard severity levels.
for emit, message in (
    (logging.debug, 'Debug message'),
    (logging.info, 'Info message'),
    (logging.warning, 'Warning message'),
    (logging.error, 'Error message'),
    (logging.critical, 'Critical message'),
):
    emit(message)

Some files were not shown because too many files have changed in this diff Show More