
feat: add default kms encrypted gp3 StorageClass and PersistentVolume… #15


Open · wants to merge 6 commits into base: integration
68 changes: 67 additions & 1 deletion config/eks/dev_eks_config.ts
@@ -23,7 +23,73 @@ export function deploy_dependencies(config: Easy_EKS_Config_Data, stack: cdk.Stack){
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

export function deploy_workload_dependencies(config: Easy_EKS_Config_Data, stack: cdk.Stack, cluster: eks.Cluster){

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    let name = "test-claim-gp3";
    let size = "10Gi";
    const volume_claim_gp3 = {
        "apiVersion": "v1",
        "kind": "PersistentVolumeClaim",
        "metadata": {
            "name": `${name}`,
            "namespace": "default"
        },
        "spec": {
            "accessModes": [ "ReadWriteOnce" ],
            "storageClassName": "kms-encrypted-gp3",
            "resources": {
                "requests": {
                    "storage": `${size}`
                }
            }
        }
    }
    const pod_using_volume_claim = {
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "app"
        },
        "spec": {
            "containers": [
                {
                    "name": "app",
                    "image": "ubuntu:latest",
                    "command": [ "/bin/sh" ],
                    "args": [ "-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done" ],
                    "volumeMounts": [
                        {
                            "name": "persistent-storage",
                            "mountPath": "/data"
                        }
                    ]
                }
            ],
            "volumes": [
                {
                    "name": "persistent-storage",
                    "persistentVolumeClaim": {
                        "claimName": `${name}`
                    }
                }
            ]
        }
    }
    new eks.KubernetesManifest(stack, "persistentVolumeClaimManifest", {
        cluster: cluster,
        manifest: [volume_claim_gp3, pod_using_volume_claim],
        overwrite: true,
        prune: true,
    });
}//end deploy_workload_dependencies()

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
1 change: 1 addition & 0 deletions config/eks/higher_envs_eks_config.ts
@@ -12,6 +12,7 @@ export function apply_config(config: Easy_EKS_Config_Data, stack: cdk.Stack){ //
config.setKmsKeyAlias("eks/higher-envs"); //kms key with this alias will be created or reused if pre-existing
config.setVpcByName("higher-envs-vpc", config, stack); //Name as in VPC's Name Tag
//config.setVpcById("vpc-0dbcacb511f9bac4e", config, stack); //Alternative pre-existing VPC deployment option
config.setKmsKey(stack);
config.setBaselineMNGSize(2);
config.setBaselineMNGType(eks.CapacityType.ON_DEMAND);
if(process.env.CDK_DEFAULT_ACCOUNT==="111122223333"){
1 change: 1 addition & 0 deletions config/eks/lower_envs_eks_config.ts
@@ -12,6 +12,7 @@ export function apply_config(config: Easy_EKS_Config_Data, stack: cdk.Stack){ //
config.setKmsKeyAlias("eks/lower-envs"); //kms key with this alias will be created or reused if pre-existing
config.setVpcByName("lower-envs-vpc", config, stack); //Name as in VPC's Name Tag
//config.setVpcById("vpc-0dbcacb511f9bac4e", config, stack); //Alternative pre-existing VPC deployment option
config.setKmsKey(stack);
config.setBaselineMNGSize(2);
config.setBaselineMNGType(eks.CapacityType.SPOT);
if(process.env.CDK_DEFAULT_ACCOUNT==="111122223333"){
41 changes: 40 additions & 1 deletion config/eks/my_orgs_baseline_eks_config.ts
@@ -6,6 +6,9 @@ import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import request from 'sync-request-curl'; //npm install sync-request-curl (cdk requires sync functions, async not allowed)
import cluster from 'cluster';

import { Storage_YAML_Generator, Apply_Storage_Class_YAMLs } from '../../lib/Storage_Class_Manifest';

//Intended Use:
//A baseline config file (to be applied to all EasyEKS Clusters in your organization)
//EasyEKS Admins would be expected to edit this file with defaults specific to their org. (that rarely change and are low risk to add)
@@ -268,6 +271,40 @@ export function deploy_workload_dependencies(config: Easy_EKS_Config_Data, stack: cdk.Stack, cluster: eks.Cluster){
},
}`, //end aws-ebs-csi-driver configurationValues override
});
// adding gp3 storage class
[Review comment · Collaborator]

I merged integration into your feature branch and then tested locally.
It doesn't deploy for me. On a fresh install I get:

4:26:52 PM | CREATE_FAILED        | Custom::AWSCDK-EKS-KubernetesResource | defaultEBSClassStorage41025CF0
Received response status [FAILED] from custom resource. Message returned: TooManyRequestsException: Rate Exceeded.

I suspect this is an order-of-operations issue: the logic tries to introduce your change too soon, and it needs to deploy your change at a later time.

A solution I think might work is to implement something like the existing pattern

nodeLocalDNSCache.node.addDependency(cluster.awsAuth);

but adapted to this change:

StorageClassManifests.node.addDependency(cluster.awsAuth);
//actually the following is probably even better
StorageClassManifests.node.addDependency(ebs_csi_addon);

^-- this should make it so your logic doesn't start executing until after cluster.awsAuth / the CSI addon is fully established/deployed.
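A minimal sketch of that wiring (assuming the manifest construct below is captured in a variable; the name storageClassManifest and the ebs_csi_addon reference are illustrative, not part of this PR as written):

    // Capture the construct so a deployment-order dependency can be attached.
    const storageClassManifest = new eks.KubernetesManifest(stack, "StorageClassManifest", {
        cluster: cluster,
        manifest: [storage_class_gp3],
        overwrite: true,
        prune: true,
    });
    // Delay applying the StorageClass until the cluster's aws-auth mapping
    // (or, likely better, the EBS CSI addon) has finished deploying:
    storageClassManifest.node.addDependency(cluster.awsAuth);
    // storageClassManifest.node.addDependency(ebs_csi_addon);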

    const storage_class_gp3 = {
        "apiVersion": "storage.k8s.io/v1",
        "kind": "StorageClass",
        "metadata": {
            "name": "kms-encrypted-gp3",
            "annotations": {
                "storageclass.kubernetes.io/is-default-class": "true"
            }
        },
        "provisioner": "ebs.csi.aws.com",
        "volumeBindingMode": "WaitForFirstConsumer",
        "allowVolumeExpansion": true,
        "reclaimPolicy": "Delete",
        "parameters": {
            "type": "gp3",
            "encrypted": "true",
            //"kmsKeyId": `${config.kmsKey.keyArn}` //commented out while we test the logic that adds permissions to the customer's KMS key
        }
    }
new eks.KubernetesManifest(stack, "StorageClassManifest",
{
cluster: cluster,
manifest: [storage_class_gp3],
overwrite: true,
prune: true,
}
);

    //test persistent volume claim: This code is meant to be included in the construction of your workload
    //  manifests; it is included here temporarily as an example and for testing purposes.
    // const volume_claim = storage_class_YAMLs.generate_volume_claim_manifests("test-claim", "10Gi");
    // Apply_Storage_Class_YAMLs(stack, cluster, config, "testClaim", volume_claim);

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// v-- most won't need this, disabling by default
@@ -294,4 +331,6 @@ export function deploy_workload_dependencies(config: Easy_EKS_Config_Data, stack: cdk.Stack, cluster: eks.Cluster){

export function deploy_workloads(config: Easy_EKS_Config_Data, stack: cdk.Stack, cluster: eks.Cluster){

}//end deploy_workloads()


69 changes: 62 additions & 7 deletions lib/Easy_EKS.ts
@@ -26,6 +26,7 @@ import * as prod_eks_config from '../config/eks/prod_eks_config';
import * as observability from './Frugal_GPL_Observability_Stack';
import { execSync } from 'child_process'; //work around for kms UX issue
import request from 'sync-request-curl'; //npm install sync-request-curl (cdk requires sync functions, async not allowed)
import { get } from 'http';



@@ -239,9 +240,9 @@ export class Easy_EKS{ //purposefully don't extend stack, to implement builder pattern
// this.config.kmsKeyAlias, {description: "Easy EKS generated kms key, used to encrypt etcd and ebs-csi-driver provisioned volumes"}
// ));}
// else { eksBlueprint.resourceProvider(blueprints.GlobalResources.KmsKey, new blueprints.LookupKmsKeyProvider(this.config.kmsKeyAlias)); }
-        ensure_existance_of_aliased_kms_key(this.config.kmsKeyAlias);
-        const kms_key = kms.Key.fromLookup(this.stack, 'pre-existing-kms-key', { aliasName: this.config.kmsKeyAlias });
+        ensure_existance_of_aliased_kms_key(this.config.kmsKeyAlias, this.stack.stackName, this.stack.region);
+        const kms_key = this.config.kmsKey;
this.cluster = new eks.Cluster(this.stack, this.config.id, {
clusterName: this.config.id,
version: this.config.kubernetesVersion,
@@ -632,7 +633,7 @@ const enhanced_viewer_cr = {
}
///////////////////////////////////////////////////////////////////////////////////////////////////

-function ensure_existance_of_aliased_kms_key(kmsKeyAlias: string){
+function ensure_existance_of_aliased_kms_key(kmsKeyAlias: string, stackName: string, region: string){
/*UX Improvement: By default EKS Blueprints will make a new KMS key every time you make a cluster.
  This logic checks for pre-existing keys and prefers to reuse them; else it creates one if needed and reuses it next time.
  The intent is to achieve the following EasyEKS default (which is overrideable):
@@ -641,15 +642,69 @@
  * prod envs share a kms key: "alias/eks/prod"
*/
let kms_key:kms.Key;
-    const cmd = `aws kms list-aliases | jq '.Aliases[] | select(.AliasName == "${kmsKeyAlias}") | .TargetKeyId'`
+    const cmd = `aws kms list-aliases --region ${region} | jq '.Aliases[] | select(.AliasName == "${kmsKeyAlias}") | .TargetKeyId'`
     const cmd_results = execSync(cmd).toString();
+    let key_id = "";
     if(cmd_results===""){ //if alias not found, then make a kms key with the alias
         const create_key_cmd = `aws kms create-key --description="Easy EKS generated kms key, used to encrypt etcd and ebs-csi-driver provisioned volumes"`
         const results = JSON.parse( execSync(create_key_cmd).toString() );
-        const key_id = results.KeyMetadata.KeyId;
-        const add_alias_cmd = `aws kms create-alias --alias-name ${kmsKeyAlias} --target-key-id ${key_id}`;
+        key_id = results.KeyMetadata.KeyId;
+        const add_alias_cmd = `aws kms create-alias --alias-name ${kmsKeyAlias} --target-key-id ${key_id} --region ${region}`;
execSync(add_alias_cmd);
//get the ebs csi role, so it can be used to add permissions to the new key
}
    // disabled for now, as we need to test that it assigns the permissions correctly
    // before enabling customer KMS encryption for EKS.
    //else { //if alias found, then get the key id
    //    key_id = cmd_results.replace(/"/g, ''); //remove quotes from string
    //}
    //give_kms_access_to_ebs_csi_role(stackName, region, key_id);

}


/*function give_kms_access_to_ebs_csi_role(stackName: string, region: string, KeyId: string){
    const roleName = stackName + '-awsebscsidriveriamrole';
    const cdm_list_ebs_csi_role = `aws iam list-roles --query "Roles[?contains(RoleName, '${roleName}')].Arn" --output text`;
    const list_roles = execSync(cdm_list_ebs_csi_role);
    if (list_roles.toString() !== '') {
        const policy = `{
            "Version": "2012-10-17",
            "Id": "key-default-1",
            "Statement": [
                {
                    "Sid": "Enable IAM User Permissions",
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": "arn:aws:iam::381492072749:root"
                    },
                    "Action": "kms:*",
                    "Resource": "*"
                },
                {
                    "Sid": "Allow EBS CSI Driver Role Use of the Key",
                    "Effect": "Allow",
                    "Principal": {
                        "AWS": "${list_roles.toString().trim()}"
                    },
                    "Action": [
                        "kms:Encrypt",
                        "kms:Decrypt",
                        "kms:ReEncrypt*",
                        "kms:GenerateDataKey*",
                        "kms:DescribeKey",
                        "kms:CreateGrant",
                        "kms:ListGrants",
                        "kms:RevokeGrant"
                    ],
                    "Resource": "*"
                }
            ]
        }`;
        const cmp_policy = `aws kms put-key-policy --policy-name default --key-id ${KeyId.trim()} --region ${region} --policy '${policy}'`;
        execSync(cmp_policy);
    } else {
        console.log(`EBS CSI Role with name: ${roleName} not found.`); //nothing to grant if the role doesn't exist yet
    }
}*/
///////////////////////////////////////////////////////////////////////////////////////////////////
5 changes: 5 additions & 0 deletions lib/Easy_EKS_Config_Data.ts
@@ -3,6 +3,7 @@ import * as cdk from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import * as ec2 from 'aws-cdk-lib/aws-ec2';
import * as iam from 'aws-cdk-lib/aws-iam';
import * as kms from 'aws-cdk-lib/aws-kms';
import { execSync } from 'child_process';
import { validateTag } from './Utilities';

@@ -23,6 +24,7 @@ export class Easy_EKS_Config_Data { //This object just holds config data.
clusterAdminAccessEksApiArns?: string[];
clusterViewerAccessAwsAuthConfigmapAccounts?: string[]; //only aws-auth configmap supports accounts
kmsKeyAlias: string; //kms key with this alias will be created or reused if pre-existing
kmsKey: kms.IKey; //optional, only used if you want to use a pre-existing KMS key
baselineNodesNumber: number;
baselineNodesType: eks.CapacityType; //enum eks.CapacityType.SPOT or eks.CapacityType.ON_DEMAND
workerNodeRole: iam.Role; //used by baselineMNG & Karpenter
@@ -119,5 +121,8 @@ export class Easy_EKS_Config_Data { //This object just holds config data.
        if(kms_key_alias.startsWith('alias/')){ this.kmsKeyAlias = kms_key_alias; }
        else{ this.kmsKeyAlias = `alias/${kms_key_alias}`; }
    }
    setKmsKey(stack: cdk.Stack){
        this.kmsKey = kms.Key.fromLookup(stack, 'pre-existing-kms-key', { aliasName: this.kmsKeyAlias });
    }

}//end of Easy_EKS_Config_Data
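A minimal usage sketch, mirroring the calls added to the config files above (the alias must be set before the lookup):

    // in an org config's apply_config(), e.g. higher_envs_eks_config.ts
    config.setKmsKeyAlias("eks/higher-envs"); // stored internally as "alias/eks/higher-envs"
    config.setKmsKey(stack);                  // resolves the aliased key via kms.Key.fromLookup()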
88 changes: 88 additions & 0 deletions lib/Storage_Class_Manifest.ts
@@ -0,0 +1,88 @@
import { Easy_EKS_Config_Data } from './Easy_EKS_Config_Data';
import * as cdk from 'aws-cdk-lib';
import * as eks from 'aws-cdk-lib/aws-eks';
import * as kms from 'aws-cdk-lib/aws-kms';
import console = require('console');
import { sign } from 'crypto';
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

export class Storage_YAML_Generator{

    config: Easy_EKS_Config_Data;
    cluster: eks.Cluster;
    constructor(input_parameters: Partial<Storage_YAML_Generator>){ Object.assign(this, input_parameters); }

    generate_storage_class_manifests(){
        let array_of_yaml_manifests_to_return: { [key:string]: any }[] = [];
        let config = this.config;
        const kms_key = config.kmsKey;
        const storage_class_gp3 = {
[Review comment · Collaborator]

request: can storage_class_gp3's YAML component be moved into config?

(reasoning behind the request)

  • I saw you split storage_class_gp3 into a lib
    (my guess is you did that based on guidance I shared that a good rule of thumb is to abstract complexity away into lib).
    I owe you an apology: I think you used this location based on my previous guidance, which was incomplete, so now I'm asking you to move something after you put it where I requested you put it. Forgive the incomplete guidelines.
  • Karpenter.sh (what you used as a reference) was split into lib for 2 reasons:
    1. It's sufficiently complex (250 lines).
    2. I kept the config a user may wish to review in config, and split config a user wouldn't care about into lib.
  • 2nd piece of guidance that I forgot to share originally (this ranks higher than the request to abstract away complexity): try to keep config that a user may want to edit or review within config.
    • I doubt an end user would want to edit the config of the storage class, but I do believe they'd want to review it.
    • Because it's not overly complex and is useful for end users to review, I think storageclass.yaml belongs in ./config/eks/my_orgs_baseline_eks_config.ts.

When it comes to the persistent volume claim, I think it makes more sense to put it in ./config/eks/dev_eks_config.ts's deploy_workload_dependencies(), because it functions like a demo manifest. Since it's a demo it wouldn't belong in my_orgs_baseline (that's intended to be applied to all environments: dev, test, stage, prod), and demo apps wouldn't be appropriate to deploy to prod, but a storage class would. (Which is why storage_class.yaml makes sense here in ./config/eks/my_orgs_baseline_eks_config.ts, but pvc.yaml should be moved to ./config/eks/dev_eks_config.ts.)

Further point of clarification:
It'd be fine to put the pvc.yaml inline in ./config/eks/dev_eks_config.ts,
or put the PVC's logic in lib/demo_manifests.ts and call the lib function from ./config/eks/dev_eks_config.ts (see the sketch at the end of this diff).
"apiVersion": "storage.k8s.io/v1",
"kind": "StorageClass",
"metadata": {
"name": "kms-encrypted-gp3",
"annotations": {
"storageclass.kubernetes.io/is-default-class": "true"
[Review comment · Collaborator] storageclass should be indented 4 spaces
                }
            },
            "provisioner": "ebs.csi.aws.com",
            "volumeBindingMode": "WaitForFirstConsumer",
            "allowVolumeExpansion": true,
            "reclaimPolicy": "Delete",
            "parameters": {
                "type": "gp3",
                "encrypted": "true",
                "kmsKeyId": `${kms_key.keyArn}`
            }
        }
        array_of_yaml_manifests_to_return.push(storage_class_gp3)
        return array_of_yaml_manifests_to_return;
    } //end generate_storage_class_manifests

    generate_volume_claim_manifests(name: string, size: string){
        let array_of_yaml_manifests_to_return: { [key:string]: any }[] = [];
        let cluster = this.cluster;

        const volume_claim_gp3 = {
            "apiVersion": "v1",
            "kind": "PersistentVolumeClaim",
            "metadata": {
                "name": `${name}`,
                "namespace": "default"
            },
            "spec": {
                "accessModes": [ "ReadWriteOnce" ],
                "storageClassName": "kms-encrypted-gp3",
                "resources": {
                    "requests": {
                        "storage": `${size}`
                    }
                }
            }
        }
        array_of_yaml_manifests_to_return.push(volume_claim_gp3)
        return array_of_yaml_manifests_to_return;
    } //end generate_volume_claim_manifests


} //end class Storage_YAML_Generator

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

export function Apply_Storage_Class_YAMLs(stack: cdk.Stack, cluster: eks.Cluster, config: Easy_EKS_Config_Data,
                                          manifestName: string, storage_class_YAMLs: {[key: string]: any;}[]){
    const apply_storage_class_YAML = new eks.KubernetesManifest(stack, manifestName, {
        cluster: cluster,
        manifest: storage_class_YAMLs,
        overwrite: true,
        prune: true,
    });

    // Test volume claim

} //end function Apply_Storage_Class_YAMLs
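A minimal sketch of the lib/demo_manifests.ts option the reviewer suggests above (the file name, function name, and call site are illustrative assumptions, not part of this PR):

    // lib/demo_manifests.ts (hypothetical)
    import * as cdk from 'aws-cdk-lib';
    import * as eks from 'aws-cdk-lib/aws-eks';

    // Applies demo manifests (e.g. a test PVC) to the cluster;
    // intended to be called only from dev-oriented config such as dev_eks_config.ts,
    // never from my_orgs_baseline (which is applied to all environments).
    export function apply_demo_manifests(stack: cdk.Stack, cluster: eks.Cluster,
                                         manifestName: string, manifests: {[key: string]: any;}[]){
        new eks.KubernetesManifest(stack, manifestName, {
            cluster: cluster,
            manifest: manifests,
            overwrite: true,
            prune: true,
        });
    }

    // ./config/eks/dev_eks_config.ts (hypothetical call site)
    // const pvc = new Storage_YAML_Generator({config, cluster}).generate_volume_claim_manifests("test-claim", "10Gi");
    // apply_demo_manifests(stack, cluster, "demoPvcManifest", pvc);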