diff --git a/awsconfiguration.sh b/awsconfiguration.sh index 8d32031..326e1be 100755 --- a/awsconfiguration.sh +++ b/awsconfiguration.sh @@ -1,6 +1,12 @@ #!/bin/bash AWSENV=$1 AWS_REGION=$2 + +CIRCLE_PROJECT_USERNAME=$TC_GIT_ORG +CIRCLE_PROJECT_REPONAME=$TC_REPONAME +CIRCLE_BUILD_NUM=$BUILD_NUMBER +CIRCLE_BRANCH=$BRANCH_NAME + BASE64_DECODER="base64 -d" # option -d for Linux base64 tool echo AAAA | base64 -d > /dev/null 2>&1 || BASE64_DECODER="base64 -D" # option -D on MacOS decode_base64_url() { @@ -16,6 +22,7 @@ if [ -z "$AWS_REGION" ]; then AWS_REGION="us-east-1" fi +# echo "curl -X POST $CI_AUTH0_URL -H 'Content-Type: application/json' -d '{ \"client_id\": \"$CI_AUTH0_CLIENTID\", \"client_secret\": \"$CI_AUTH0_CLIENTSECRET\", \"audience\": \"$CI_AUTH0_AUDIENCE\", \"grant_type\": \"client_credentials\" , \"environment\" : \"$AWSENV\" , \"username\" : \"$CIRCLE_PROJECT_USERNAME\" , \"reponame\" : \"$CIRCLE_PROJECT_REPONAME\", \"build_num\": \"$CIRCLE_BUILD_NUM\", \"branch\": \"$CIRCLE_BRANCH\"}'" auth0cmd=$(echo "curl -X POST $CI_AUTH0_URL -H 'Content-Type: application/json' -d '{ \"client_id\": \"$CI_AUTH0_CLIENTID\", \"client_secret\": \"$CI_AUTH0_CLIENTSECRET\", \"audience\": \"$CI_AUTH0_AUDIENCE\", \"grant_type\": \"client_credentials\" , \"environment\" : \"$AWSENV\" , \"username\" : \"$CIRCLE_PROJECT_USERNAME\" , \"reponame\" : \"$CIRCLE_PROJECT_REPONAME\", \"build_num\": \"$CIRCLE_BUILD_NUM\", \"branch\": \"$CIRCLE_BRANCH\"}'") token=$( eval $auth0cmd | jq -r .access_token ) tokenjsonformat=$( decode_base64_url $(echo -n $token | cut -d "." -f 2) ) @@ -38,6 +45,16 @@ echo "export AWS_ENVIRONMENT=\"$AWS_ENVIRONMENT\"">>awsenvconf echo "export AWS_SESSION_TOKEN=\"$AWS_SESSION_TOKEN\"">>awsenvconf echo "export AWS_ACCOUNT_ID=\"$AWS_ACCOUNT_ID\"">>awsenvconf +echo "">awsenvconfg +echo "env.AWS_REGION=\"$AWS_REGION\"">>awsenvconfg +echo "env.AWS_ACCESS_KEY_ID=\"$AWS_ACCESS_KEY_ID\"">>awsenvconfg +echo "env.AWS_SECRET_ACCESS_KEY=\"$AWS_SECRET_ACCESS_KEY\"">>awsenvconfg +echo "env.AWS_ENVIRONMENT=\"$AWS_ENVIRONMENT\"">>awsenvconfg +echo "env.AWS_SESSION_TOKEN=\"$AWS_SESSION_TOKEN\"">>awsenvconfg +echo "env.AWS_ACCOUNT_ID=\"$AWS_ACCOUNT_ID\"">>awsenvconfg + +chmod +x awsenvconf + if grep -Fxq "awsenvconf" .dockerignore then echo "awsenvconf exist in docker ignore file list" diff --git a/buildenv.sh b/buildenv.sh index 3d6b91f..1268114 100755 --- a/buildenv.sh +++ b/buildenv.sh @@ -4,11 +4,11 @@ usage() cat << EOF usage: $0 options -This script need to be executed with below option. +This script needs to be executed with below options. 
OPTIONS: -e environment - -b Security file location GIT|AWS + -b security file location GIT|AWS EOF } @@ -51,6 +51,7 @@ uploading_buildenvvar() varvalue=$(echo $s| jq -r ".value") echo $varname echo export "$varname"="'$varvalue'" >>"buildenvvar" + echo "env.$varname"="'''$varvalue'''" >>"buildenvvarg" #export "$varname"="$varvalue" #echo export "$varname"="$varvalue" >>"$BASH_ENV" #echo export "$varname"="\"$varvalue\"" >> ~/.circlerc @@ -104,6 +105,7 @@ done ENV_CONFIG=`echo "$ENV" | tr '[:upper:]' '[:lower:]'` download_buildenvfile uploading_buildenvvar +chmod +x buildenvvar if grep -Fxq "buildenvvar" .dockerignore then @@ -111,3 +113,9 @@ then else echo "buildenvvar" >> .dockerignore fi +if grep -Fxq "buildenvvarg" .dockerignore +then + log "buildenvvarg exist in docker ignore file list" +else + echo "buildenvvarg" >> .dockerignore +fi diff --git a/buildproperties.sh b/buildproperties.sh index 183d15f..2ff2037 100755 --- a/buildproperties.sh +++ b/buildproperties.sh @@ -5,10 +5,10 @@ usage() { cat << EOF usage: $0 options -This script need to be executed with below option. +This script needs to be executed with below options. OPTIONS: -e environment - -b Security file location GIT|AWS + -b security file location GIT|AWS -k key location EOF } diff --git a/master_deploy.sh b/master_deploy.sh index b373884..9122b57 100755 --- a/master_deploy.sh +++ b/master_deploy.sh @@ -1,6 +1,5 @@ #!/bin/bash - #Variable Declaration JQ="jq --raw-output --exit-status" DEPLOYMENT_TYPE="" @@ -8,8 +7,10 @@ ENV="" BUILD_VARIABLE_FILE_NAME="./buildvar.conf" SECRET_FILE_NAME="./buildsecvar.conf" SHARED_PROPERTY_FILENAME="" +CIRCLE_BUILD_NUM=$BUILD_NUMBER + +# Common variables -#Common Varibles #echo $AWS_ACCESS_KEY_ID # AWS_ACCESS_KEY_ID="" # AWS_SECRET_ACCESS_KEY="" @@ -24,7 +25,8 @@ if [ -z "$COUNTER_LIMIT" ]; then COUNTER_LIMIT=12 fi -#Varibles specific to ECS +# Variables specific to ECS + #AWS_REPOSITORY="" #AWS_ECS_CLUSTER="" #AWS_ECS_SERVICE="" @@ -40,12 +42,14 @@ envcount=0 psenvcount=0 volcount=0 template="" +TEMPLATE_DIR="$(dirname "$(pwd)")"/buildscript TEMPLATE_SKELETON_FILE="base_template_v2.json" APP_IMAGE_NAME="" DEPLOYCATEGORY="" ECSCLI_ENVFILE="api.env" -#variable specific to EBS +# Variables specific to EBS + DOCKERRUN="Dockerrun.aws.json" #EBS_EB_EXTENSTION_LOCATION="" IMG_WITH_EBS_TAG="" @@ -69,12 +73,12 @@ ebstemplate="" #AWS_S3_SOURCE_SYNC_PATH="" CFCACHE="true" -#variable for Lambda +# Variables for Lambda #AWS_LAMBDA_DEPLOY_TYPE="" #AWS_LAMBDA_STAGE="" -#FUNCTIONS -#usage Function - provides information like how to execute the script +# FUNCTIONS +# usage Function - provides information about how to execute the script usage() { cat << EOF @@ -86,21 +90,23 @@ OPTIONS: -h Show this message -d Deployment Type [ECS|EBS|CFRONT] -e Environment [DEV|QA|PROD] - -t ECS Tag Name [mandatatory if ECS ] - -v EBS version [mandatatory if EBS deployment] + -t ECS Tag Name [mandatory if ECS ] + -v EBS version [mandatory if EBS deployment] + -i ECS Image name -c cache option true [optional : value = true| false]i -s Security file location GIT|AWS -p ECS template type - -g Enter common property file which has uploaded in shared-properties folder + -g Common property file which is uploaded to shared-properties folder EOF } -#log Function - Used to provide information of execution information with date and time + +# log Function - Used to provide information of execution information with date and time log() { echo "`date +'%D %T'` : $1" } -#track_error function validates whether the application execute 
without any error +# track_error function - validates whether the application execute without any error track_error() { if [ $1 != "0" ]; then @@ -108,12 +114,9 @@ track_error() log "completed execution IN ERROR at `date`" exit $1 fi - } - -#Function for aws login - +# Function for AWS login configure_aws_cli() { aws --version aws configure set aws_access_key_id $AWS_ACCESS_KEY_ID @@ -122,63 +125,68 @@ configure_aws_cli() { aws configure set default.output json log "Configured AWS CLI." } -#Function for private dcoker login + +# Function for private dcoker login configure_docker_private_login() { aws s3 cp "s3://appirio-platform-$ENV_CONFIG/services/common/dockercfg" ~/.dockercfg } -#ECS Deployment Functions - +# ECS Deployment Functions ECS_push_ecr_image() { + echo "\n\n" if [ -z "$APP_IMAGE_NAME" ]; then - log "Image has followed standard format" + log "ECS image follows the standard format" else - log "Image does not follow stanard format. Modifying the image and updating the ECS_TAG" - docker tag $APP_IMAGE_NAME:$ECS_TAG $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:$CIRCLE_BUILD_NUM - ECS_TAG=$CIRCLE_BUILD_NUM + log "ECS Image does not follow the standard format. Modifying the image and updating the ECS_TAG" + docker tag $APP_IMAGE_NAME:$ECS_TAG $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:tc-jb-$CIRCLE_BUILD_NUM + ECS_TAG=tc-jb-$CIRCLE_BUILD_NUM fi CHECK_ECR_EXIST="" CHECK_ECR_EXIST=$(aws ecr describe-repositories --repository-names ${AWS_REPOSITORY} 2>&1) if [ $? -ne 0 ]; then if echo ${CHECK_ECR_EXIST} | grep -q RepositoryNotFoundException; then - echo "repo does not exist and creating repo" + echo "ECR repo does not exist -- creating repo" aws ecr create-repository --repository-name $AWS_REPOSITORY track_error $? "ECS ECR repo creation" - log "Repo created successfully." + log "ECR repo created successfully." else echo ${CHECK_ECR_EXIST} fi else - echo "$AWS_REPOSITORY repository already exist" + echo "$AWS_REPOSITORY ECR repository already exists" fi log "Pushing Docker Image..." + # aws ecr get-login --region $AWS_REGION --no-include-email eval $(aws ecr get-login --region $AWS_REGION --no-include-email) + # aws ecr get-login-password --region $AWS_REGION | docker -D login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:$ECS_TAG track_error $? "ECS ECR image push" - log "Docker Image published." + log "Docker Image published\n\n" } -#=============== + ECSCLI_push_ecr_image() { ECS_REPONAME=$1 IMAGE_NAME=$2 if [ -z "$IMAGE_NAME" ]; then - log "Image has followed standard format" + log "ECS image follows the standard format" else - log "Image does not follow stanard format. Modifying the image and updating the ECS_TAG" - docker tag $IMAGE_NAME:$ECS_TAG $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECS_REPONAME:$CIRCLE_BUILD_NUM - ECS_TAG=$CIRCLE_BUILD_NUM + log "ECS image does not follow the standard format. Modifying the image and updating the ECS_TAG" + docker tag $IMAGE_NAME:$ECS_TAG $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECS_REPONAME:tc-jb-$CIRCLE_BUILD_NUM + ECS_TAG=tc-jb-$CIRCLE_BUILD_NUM fi log "Pushing Docker Image..." 
+ # aws ecr get-login --region $AWS_REGION --no-include-email eval $(aws ecr get-login --region $AWS_REGION --no-include-email) + # aws ecr get-login-password --region $AWS_REGION | docker -D login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$ECS_REPONAME:$ECS_TAG track_error $? "ECS ECR image push" - log "Docker Image published." + log "Docker ECR Image published\n\n" } -#================ + ECSCLI_update_env() { Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g') @@ -187,7 +195,7 @@ ECSCLI_update_env() local o=$IFS IFS=$(echo -en "\n\b") envvars=$( cat $listname.json | jq -r ' . ' | jq ' . | to_entries[] | { "name": .key , "value": .value } ' | jq -s . ) - log "vars are fetched" + log "ECS env vars are fetched" for s in $(echo $envvars | jq -c ".[]" ); do #echo $envvars @@ -199,7 +207,7 @@ ECSCLI_update_env() IFS=$o done } -#================ + portmapping() { hostport=$1 containerport=$2 @@ -207,48 +215,42 @@ containerprotocol=$3 template=$(echo $template | jq --argjson hostPort $hostport --argjson containerPort $containerport --arg protocol $containerprotocol --arg portcount $portcount '.containerDefinitions[0].portMappings[$portcount |tonumber] |= .+ { hostPort: $hostPort, containerPort: $containerPort, protocol: $protocol }') let portcount=portcount+1 - } -#============================= - envaddition() { - #echo "envcount before " $envcount - -envname=$1 -envvalue=$2 -#echo "env value before" $envvalue -set -f -template=$(echo $template | jq --arg name "$envname" --arg value "$envvalue" --arg envcount $envcount '.containerDefinitions[0].environment[$envcount |tonumber] |= .+ { name: $name, value: $value }') -set +f -let envcount=envcount+1 -#echo "envcount after ---------" $envcount -#echo "envvalue after ---------" $envvalue + #echo "envcount before " $envcount + envname=$1 + envvalue=$2 + #echo "env value before" $envvalue + set -f + template=$(echo $template | jq --arg name "$envname" --arg value "$envvalue" --arg envcount $envcount '.containerDefinitions[0].environment[$envcount |tonumber] |= .+ { name: $name, value: $value }') + set +f + let envcount=envcount+1 + #echo "envcount after ---------" $envcount + #echo "envvalue after ---------" $envvalue } -#========================= + psenvaddition() { - #echo "psenvcount before " $psenvcount - -envname=$1 -envvalue=$2 -#echo "env value before" $envvalue -set -f -template=$(echo $template | jq --arg name "$envname" --arg value "$envvalue" --arg psenvcount $psenvcount '.containerDefinitions[0].secrets[$psenvcount |tonumber] |= .+ { name: $name, valueFrom: $value }') -set +f -let psenvcount=psenvcount+1 -#echo "psenvcount after ---------" $psenvcount -#echo "envvalue after ---------" $envvalue + #echo "psenvcount before " $psenvcount + envname=$1 + envvalue=$2 + #echo "env value before" $envvalue + set -f + template=$(echo $template | jq --arg name "$envname" --arg value "$envvalue" --arg psenvcount $psenvcount '.containerDefinitions[0].secrets[$psenvcount |tonumber] |= .+ { name: $name, valueFrom: $value }') + set +f + let psenvcount=psenvcount+1 + #echo "psenvcount after ---------" $psenvcount + #echo "envvalue after ---------" $envvalue } -#========================= logconfiguration() { -template=$(echo $template | jq --arg logDriver $CONTAINER_LOG_DRIVER '.containerDefinitions[0].logConfiguration.logDriver=$logDriver') -template=$(echo $template | jq --arg awslogsgroup "/aws/ecs/$AWS_ECS_CLUSTER" 
'.containerDefinitions[0].logConfiguration.options."awslogs-group"=$awslogsgroup') -template=$(echo $template | jq --arg awslogsregion $AWS_REGION '.containerDefinitions[0].logConfiguration.options."awslogs-region"=$awslogsregion') -template=$(echo $template | jq --arg awslogsstreamprefix $ENV '.containerDefinitions[0].logConfiguration.options."awslogs-stream-prefix"=$awslogsstreamprefix') -template=$(echo $template | jq 'del(.containerDefinitions[0].logConfiguration.options.KeyName)') + template=$(echo $template | jq --arg logDriver $CONTAINER_LOG_DRIVER '.containerDefinitions[0].logConfiguration.logDriver=$logDriver') + template=$(echo $template | jq --arg awslogsgroup "/aws/ecs/$AWS_ECS_CLUSTER" '.containerDefinitions[0].logConfiguration.options."awslogs-group"=$awslogsgroup') + template=$(echo $template | jq --arg awslogsregion $AWS_REGION '.containerDefinitions[0].logConfiguration.options."awslogs-region"=$awslogsregion') + template=$(echo $template | jq --arg awslogsstreamprefix $ENV '.containerDefinitions[0].logConfiguration.options."awslogs-stream-prefix"=$awslogsstreamprefix') + template=$(echo $template | jq 'del(.containerDefinitions[0].logConfiguration.options.KeyName)') } -#============================================= + volumeupdate() { volname=$1 sourcepath=$2 @@ -262,246 +264,259 @@ volumeupdate() { let volcount=volcount+1 } -#============================================ + ECS_Container_HealthCheck_integ() { -HealthCheckCmd="$1" + HealthCheckCmd="$1" -template=$(echo $template | jq '.containerDefinitions[0].healthCheck.retries=3') -template=$(echo $template | jq '.containerDefinitions[0].healthCheck.timeout=15') -template=$(echo $template | jq '.containerDefinitions[0].healthCheck.interval=60') -template=$(echo $template | jq '.containerDefinitions[0].healthCheck.startPeriod=120') -template=$(echo $template | jq --arg HealthCheckCmd "$HealthCheckCmd" '.containerDefinitions[0].healthCheck.command=["CMD-SHELL",$HealthCheckCmd]') + template=$(echo $template | jq '.containerDefinitions[0].healthCheck.retries=3') + template=$(echo $template | jq '.containerDefinitions[0].healthCheck.timeout=15') + template=$(echo $template | jq '.containerDefinitions[0].healthCheck.interval=60') + template=$(echo $template | jq '.containerDefinitions[0].healthCheck.startPeriod=120') + template=$(echo $template | jq --arg HealthCheckCmd "$HealthCheckCmd" '.containerDefinitions[0].healthCheck.command=["CMD-SHELL",$HealthCheckCmd]') } -#============================================ ECS_Container_cmd_integ() { -ContainerCmd="$1" -template=$(echo $template | jq --arg ContainerCmd "$ContainerCmd" '.containerDefinitions[0].command=[$ContainerCmd]') + ContainerCmd="$1" + template=$(echo $template | jq --arg ContainerCmd "$ContainerCmd" '.containerDefinitions[0].command=[$ContainerCmd]') } -#============================================ + ECS_template_create_register() { + #Getting Template skeleton + #template=`aws ecs register-task-definition --generate-cli-skeleton` + template=$(cat $TEMPLATE_SKELETON_FILE) -#Getting Template skeleton -#template=`aws ecs register-task-definition --generate-cli-skeleton` -template=$(cat $TEMPLATE_SKELETON_FILE) + #Updating ECS task def file + template=$(echo $template | jq --arg family $AWS_ECS_TASK_FAMILY '.family=$family') + log "ECS Task Family updated" -#Updating ECS task def file -template=$(echo $template | jq --arg family $AWS_ECS_TASK_FAMILY '.family=$family') -log "Family updated" + #taskrole and excution role has updated + if [ -z $AWS_ECS_TASK_ROLE_ARN ]; + 
then + log "No ECS Task Role defined" + else + template=$(echo $template | jq --arg taskRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_ECS_TASK_ROLE_ARN '.taskRoleArn=$taskRoleArn') + fi -#taskrole and excution role has updated -if [ -z $AWS_ECS_TASK_ROLE_ARN ]; -then - log "No Task Role defined" -else - template=$(echo $template | jq --arg taskRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_ECS_TASK_ROLE_ARN '.taskRoleArn=$taskRoleArn') -fi -if [ -z $AWS_ECS_TASK_EXECUTION_ROLE_ARN ]; -then - log "No Task Execution Role defined" -else - template=$(echo $template | jq --arg executionRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_ECS_TASK_EXECUTION_ROLE_ARN '.executionRoleArn=$executionRoleArn') -fi -#Container Name update -template=$(echo $template | jq --arg name $AWS_ECS_CONTAINER_NAME '.containerDefinitions[0].name=$name') -log "Container Name updated" - -#Container Image Name update -template=$(echo $template | jq --arg image $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:$ECS_TAG '.containerDefinitions[0].image=$image') -log "Image name updated" - -#Container Memory reservation -if [ -z $AWS_ECS_CONTAINER_MEMORY_RESERVATION ]; -then - log "No reseveed memory defined . Going with default value 500 MB" - AWS_ECS_CONTAINER_MEMORY_RESERVATION="1000" - template=$(echo $template | jq --argjson memoryReservation $AWS_ECS_CONTAINER_MEMORY_RESERVATION '.containerDefinitions[0].memoryReservation=$memoryReservation') -else - template=$(echo $template | jq --argjson memoryReservation $AWS_ECS_CONTAINER_MEMORY_RESERVATION '.containerDefinitions[0].memoryReservation=$memoryReservation') -fi -log "Memory reservation updated" - -#Container Memory reservation -if [ -z $AWS_ECS_CONTAINER_CPU ]; -then - echo "No cpu defined . Going with default value 100" - AWS_ECS_CONTAINER_CPU=100 - template=$(echo $template | jq --argjson cpu $AWS_ECS_CONTAINER_CPU '.containerDefinitions[0].cpu=$cpu') -else - template=$(echo $template | jq --argjson cpu $AWS_ECS_CONTAINER_CPU '.containerDefinitions[0].cpu=$cpu') -fi + if [ -z $AWS_ECS_TASK_EXECUTION_ROLE_ARN ]; + then + log "No ECS Task Execution Role defined" + else + template=$(echo $template | jq --arg executionRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_ECS_TASK_EXECUTION_ROLE_ARN '.executionRoleArn=$executionRoleArn') + fi + + #Container Name update + template=$(echo $template | jq --arg name $AWS_ECS_CONTAINER_NAME '.containerDefinitions[0].name=$name') + log "ECS Container Name updated" + + #Container Image Name update + template=$(echo $template | jq --arg image $AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY:$ECS_TAG '.containerDefinitions[0].image=$image') + log "ECR Image name updated" + + #Container Memory reservation + if [ -z $AWS_ECS_CONTAINER_MEMORY_RESERVATION ]; + then + log "No ECS reserved memory defined. Going with default value 1024 MB" + AWS_ECS_CONTAINER_MEMORY_RESERVATION="1000" + template=$(echo $template | jq --argjson memoryReservation $AWS_ECS_CONTAINER_MEMORY_RESERVATION '.containerDefinitions[0].memoryReservation=$memoryReservation') + else + template=$(echo $template | jq --argjson memoryReservation $AWS_ECS_CONTAINER_MEMORY_RESERVATION '.containerDefinitions[0].memoryReservation=$memoryReservation') + fi + log "ECS memory reservation updated." 
-#Port Mapping -Buffer_portmap=$(echo $AWS_ECS_PORTS | sed 's/,/ /g') -for b1 in $Buffer_portmap; -do - hostport=$( echo $b1 | cut -d ':' -f 1 ) - containerport=$( echo $b1 | cut -d ':' -f 2 ) - protocolmapped=$( echo $b1 | cut -d ':' -f 3 ) - portmapping $hostport $containerport $protocolmapped -done -log "port mapping updated" -# Environment addition -Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g') -for listname in $Buffer_seclist; -do - local o=$IFS - IFS=$(echo -en "\n\b") - envvars=$( cat $listname.json | jq -r ' . ' | jq ' . | to_entries[] | { "name": .key , "value": .value } ' | jq -s . ) - log "vars are fetched" - - for s in $(echo $envvars | jq -c ".[]" ); do - #echo $envvars - varname=$(echo $s| jq -r ".name") - varvalue=$(echo $s| jq -r ".value") - envaddition "$varname" "$varvalue" + #Container CPU reservation + if [ -z $AWS_ECS_CONTAINER_CPU ]; + then + echo "No ECS container CPU defined. Going with default value 100" + AWS_ECS_CONTAINER_CPU=100 + template=$(echo $template | jq --argjson cpu $AWS_ECS_CONTAINER_CPU '.containerDefinitions[0].cpu=$cpu') + else + template=$(echo $template | jq --argjson cpu $AWS_ECS_CONTAINER_CPU '.containerDefinitions[0].cpu=$cpu') + fi + log "ECS container CPU updated." + + #Port Mapping + Buffer_portmap=$(echo $AWS_ECS_PORTS | sed 's/,/ /g') + for b1 in $Buffer_portmap; + do + hostport=$( echo $b1 | cut -d ':' -f 1 ) + log "ECS host port: $hostport" + containerport=$( echo $b1 | cut -d ':' -f 2 ) + log "ECS container port: $containerport" + protocolmapped=$( echo $b1 | cut -d ':' -f 3 ) + log "ECS mapped protocol: $protocolmapped" + portmapping $hostport $containerport $protocolmapped done - IFS=$o -done -if [ -z $SECPS_LIST ]; -then - log "No ps file provided" -else - Buffer_seclist=$(echo $SECPS_LIST | sed 's/,/ /g') + log "ECS container port mapping updated" + + # Environment addition + Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g') for listname in $Buffer_seclist; do local o=$IFS IFS=$(echo -en "\n\b") - varpath=$( cat $listname.json | jq -r ' .ParmeterPathList[] ' ) - #log "vars are fetched" - for k in $varpath; - do - echo $k - aws ssm get-parameters-by-path --path $k --query "Parameters[*].{Name:Name}" > paramnames.json - ###paramnames=$(cat paramnames.json | jq -r .[].Name | rev | cut -d / -f 1 | rev) - for s in $(cat paramnames.json | jq -r .[].Name ) - do - varname=$(echo $s | rev | cut -d / -f 1 | rev) - varvalue="arn:aws:ssm:$AWS_REGION:$AWS_ACCOUNT_ID:parameter$s" - psenvaddition "$varname" "$varvalue" - #echo "$varname" "$varvalue" - done + envvars=$( cat $listname.json | jq -r ' . ' | jq ' . | to_entries[] | { "name": .key , "value": .value } ' | jq -s . 
) + log "vars are fetched" + + for s in $(echo $envvars | jq -c ".[]" ); do + #echo $envvars + varname=$(echo $s| jq -r ".name") + varvalue=$(echo $s| jq -r ".value") + envaddition "$varname" "$varvalue" done IFS=$o done -fi -log "environment has updated" -# Log Configuration -logconfiguration -log "log configuration has updated" - -#volume update -if [ -z $AWS_ECS_VOLUMES ]; -then - echo "No volume mapping defined" -else - Buffer_volumes=$(echo $AWS_ECS_VOLUMES | sed 's/,/ /g') - for v1 in $Buffer_volumes; - do - volname=$( echo $v1 | cut -d ':' -f 1 ) - sourcepath=$( echo $v1 | cut -d ':' -f 2 ) - mountpath=$( echo $v1 | cut -d ':' -f 3 ) - #mntpermission=$( echo $v1 | cut -d ':' -f 4 ) - #volumeupdate $volname $sourcepath $mountpath $mntpermission - volumeupdate $volname $sourcepath $mountpath - done - log "volumes are mapped" -fi -#Conteainer health check update -if [ -z "$AWS_ECS_CONTAINER_HEALTH_CMD" ]; -then - echo "No container Health check command defined" -else - ECS_Container_HealthCheck_integ "$AWS_ECS_CONTAINER_HEALTH_CMD" -fi -#Container command integration -if [ -z "$AWS_ECS_CONTAINER_CMD" ]; -then - echo "No container command not defined" -else - ECS_Container_cmd_integ "$AWS_ECS_CONTAINER_CMD" -fi -#updating data based on ECS deploy type -if [ "$ECS_TEMPLATE_TYPE" == "FARGATE" ] -then - #updating Network - ECS_NETWORKTYPE="awsvpc" - template=$(echo $template | jq --arg executionRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/ecsTaskExecutionRole '.executionRoleArn=$executionRoleArn') - template=$(echo $template | jq --arg networkMode $ECS_NETWORKTYPE '.networkMode=$networkMode') - # Updating the compatibiltiy - #template=$(echo $template | jq --arg requiresCompatibilities EC2 '.requiresCompatibilities[0] |= .+ $requiresCompatibilities') - template=$(echo $template | jq --arg requiresCompatibilities FARGATE '.requiresCompatibilities[.requiresCompatibilities| length] |= .+ $requiresCompatibilities') - # Updating Fargate CPU - if [ -z $AWS_ECS_FARGATE_CPU ]; + + if [ -z $SECPS_LIST ]; then - echo "No FARGATE cpu defined . Going with default value 1024" - AWS_ECS_FARGATE_CPU="1024" - template=$(echo $template | jq --arg cpu $AWS_ECS_FARGATE_CPU '.cpu=$cpu') + log "No ps file provided" else - template=$(echo $template | jq --arg cpu $AWS_ECS_FARGATE_CPU '.cpu=$cpu') + Buffer_seclist=$(echo $SECPS_LIST | sed 's/,/ /g') + for listname in $Buffer_seclist; + do + local o=$IFS + IFS=$(echo -en "\n\b") + varpath=$( cat $listname.json | jq -r ' .ParmeterPathList[] ' ) + #log "vars are fetched" + for k in $varpath; + do + echo $k + aws ssm get-parameters-by-path --path $k --query "Parameters[*].{Name:Name}" > paramnames.json + ###paramnames=$(cat paramnames.json | jq -r .[].Name | rev | cut -d / -f 1 | rev) + for s in $(cat paramnames.json | jq -r .[].Name ) + do + varname=$(echo $s | rev | cut -d / -f 1 | rev) + varvalue="arn:aws:ssm:$AWS_REGION:$AWS_ACCOUNT_ID:parameter$s" + psenvaddition "$varname" "$varvalue" + #echo "$varname" "$varvalue" + done + done + IFS=$o + done fi - # Updating Fargate Memory - if [ -z $AWS_ECS_FARGATE_MEMORY ]; + log "Environment has updated" + + # Log Configuration + logconfiguration + log "Log configuration has updated" + + #volume update + if [ -z $AWS_ECS_VOLUMES ]; then - echo "No FARGATE memory defined . 
Going with default value 2048" - AWS_ECS_FARGATE_MEMORY="2048" - template=$(echo $template | jq --arg memory $AWS_ECS_FARGATE_MEMORY '.memory=$memory') + echo "No ECS volume mapping defined" else - template=$(echo $template | jq --arg memory $AWS_ECS_FARGATE_MEMORY '.memory=$memory') + Buffer_volumes=$(echo $AWS_ECS_VOLUMES | sed 's/,/ /g') + for v1 in $Buffer_volumes; + do + volname=$( echo $v1 | cut -d ':' -f 1 ) + sourcepath=$( echo $v1 | cut -d ':' -f 2 ) + mountpath=$( echo $v1 | cut -d ':' -f 3 ) + #mntpermission=$( echo $v1 | cut -d ':' -f 4 ) + #volumeupdate $volname $sourcepath $mountpath $mntpermission + volumeupdate $volname $sourcepath $mountpath + done + log "ECS volumes are mapped" + fi + + #Container health check update + if [ -z "$AWS_ECS_CONTAINER_HEALTH_CMD" ]; + then + echo "No ECS container health check command defined" + else + ECS_Container_HealthCheck_integ "$AWS_ECS_CONTAINER_HEALTH_CMD" fi -else - #CONTAINER_CPU - ECS_NETWORKTYPE="bridge" - template=$(echo $template | jq --arg networkMode $ECS_NETWORKTYPE '.networkMode=$networkMode') - - # Updating the compatibiltiy - template=$(echo $template | jq --arg requiresCompatibilities EC2 '.requiresCompatibilities[0] = $requiresCompatibilities') -fi -if [ -z "$template" ]; - then - track_error 1 "Task Def has not set by template variable" - exit 1 - else - # echo "template values ------:" $template - if REVISION=$(aws ecs register-task-definition --cli-input-json "$template" | $JQ '.taskDefinition.taskDefinitionArn'); then - log "Revision: $REVISION" + + #Container command integration + if [ -z "$AWS_ECS_CONTAINER_CMD" ]; + then + echo "No ECS container start-up command defined" + else + ECS_Container_cmd_integ "$AWS_ECS_CONTAINER_CMD" + fi + + #updating data based on ECS deploy type + if [ "$ECS_TEMPLATE_TYPE" == "FARGATE" ] + then + #updating Network + ECS_NETWORKTYPE="awsvpc" + template=$(echo $template | jq --arg executionRoleArn arn:aws:iam::$AWS_ACCOUNT_ID:role/ecsTaskExecutionRole '.executionRoleArn=$executionRoleArn') + template=$(echo $template | jq --arg networkMode $ECS_NETWORKTYPE '.networkMode=$networkMode') + # Updating the compatibiltiy + #template=$(echo $template | jq --arg requiresCompatibilities EC2 '.requiresCompatibilities[0] |= .+ $requiresCompatibilities') + template=$(echo $template | jq --arg requiresCompatibilities FARGATE '.requiresCompatibilities[.requiresCompatibilities| length] |= .+ $requiresCompatibilities') + # Updating Fargate CPU + if [ -z $AWS_ECS_FARGATE_CPU ]; + then + echo "No FARGATE CPU defined. Going with default value 1024" + AWS_ECS_FARGATE_CPU="1024" + template=$(echo $template | jq --arg cpu $AWS_ECS_FARGATE_CPU '.cpu=$cpu') else - track_error 1 "Task Def registration" - log "Failed to register task definition" - return 1 + template=$(echo $template | jq --arg cpu $AWS_ECS_FARGATE_CPU '.cpu=$cpu') fi -fi + + # Updating Fargate Memory + if [ -z $AWS_ECS_FARGATE_MEMORY ]; + then + echo "No FARGATE memory defined. 
Going with default value 2048" + AWS_ECS_FARGATE_MEMORY="2048" + template=$(echo $template | jq --arg memory $AWS_ECS_FARGATE_MEMORY '.memory=$memory') + else + template=$(echo $template | jq --arg memory $AWS_ECS_FARGATE_MEMORY '.memory=$memory') + fi + else + #CONTAINER_CPU + ECS_NETWORKTYPE="bridge" + template=$(echo $template | jq --arg networkMode $ECS_NETWORKTYPE '.networkMode=$networkMode') + + # Updating the compatibiltiy + template=$(echo $template | jq --arg requiresCompatibilities EC2 '.requiresCompatibilities[0] = $requiresCompatibilities') + fi + + if [ -z "$template" ]; + then + track_error 1 "Task Definition was not set by template variable" + exit 1 + else + # echo "template values ------:" $template + if REVISION=$(aws ecs register-task-definition --cli-input-json "$template" | $JQ '.taskDefinition.taskDefinitionArn'); then + log "Revision: $REVISION" + else + track_error 1 "Task Def registration" + log "Failed to register task definition" + return 1 + fi + fi } ECS_deploy_cluster() { - AWS_ECS_SERVICE=$1 - #checking cluster exist + + #checking if cluster exists CHECK_CLUSTER_EXIST="" CHECK_CLUSTER_EXIST=$(aws ecs describe-clusters --cluster $AWS_ECS_CLUSTER | jq --raw-output 'select(.clusters[].clusterName != null ) | .clusters[].clusterName') if [ -z $CHECK_CLUSTER_EXIST ]; then - echo "$AWS_ECS_CLUSTER cluster does not exist. Kindly check with admin team" + echo "$AWS_ECS_CLUSTER cluster does not exist. Kindly check with DevOps team" exit 1 else - echo "$AWS_ECS_CLUSTER Cluster exist" + echo "$AWS_ECS_CLUSTER cluster exists" fi - #checking service exist + + #checking if service exists CHECK_SERVICE_EXIST="" CHECK_SERVICE_EXIST=$(aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | jq --raw-output 'select(.services[].status != null ) | .services[].status') if [ -z $CHECK_SERVICE_EXIST ]; then if [ "$ECS_TEMPLATE_TYPE" == "FARGATE" ]; then - echo "Fargate Service does not exist. Kindly check with admin team" + echo "Fargate Service does not exist. Kindly check with DevOps team" exit 1 else - echo "service does not exist. Creating service" + echo "Service does not exist. Creating service" aws ecs create-service --cluster $AWS_ECS_CLUSTER --service-name $AWS_ECS_SERVICE --task-definition $REVISION --desired-count 1 - echo "Kindly work with admin team for routing" + echo "Kindly work with DevOps team for routing" fi else - echo "service exist.Application updates the service" + echo "ECS Service exists. Updating the service..." update_result=$(aws ecs update-service --cluster $AWS_ECS_CLUSTER --service $AWS_ECS_SERVICE --task-definition $REVISION ) result=$(echo $update_result | $JQ '.service.taskDefinition' ) log $result @@ -511,30 +526,31 @@ ECS_deploy_cluster() { return 1 fi - echo "Update service intialised successfully for deployment" + echo "Updated service intialised successfully for deployment\n\n" fi return 0 } check_service_status() { - AWS_ECS_SERVICE=$1 - counter=0 - sleep 60 + AWS_ECS_SERVICE=$1 + + counter=0 + sleep 60 + servicestatus=`aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'` + while [[ $servicestatus != *"steady state"* ]] + do + echo "Current event message : $servicestatus" + echo "Waiting for 15 sec to check the service status..." 
+ sleep 15 servicestatus=`aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'` - while [[ $servicestatus != *"steady state"* ]] - do - echo "Current event message : $servicestatus" - echo "Waiting for 15 sec to check the service status...." - sleep 15 - servicestatus=`aws ecs describe-services --service $AWS_ECS_SERVICE --cluster $AWS_ECS_CLUSTER | $JQ '.services[].events[0].message'` - counter=`expr $counter + 1` - if [[ $counter -gt $COUNTER_LIMIT ]] ; then - echo "Service does not reach steady state with in 180 seconds. Please check" - exit 1 - fi - done - echo "$servicestatus" + counter=`expr $counter + 1` + if [[ $counter -gt $COUNTER_LIMIT ]] ; then + echo "Service did not reach steady state within $(($COUNTER_LIMIT*15+60)) seconds. Please check" + exit 1 + fi + done + echo "$servicestatus" } validate_update_loggroup @@ -543,43 +559,39 @@ validate_update_loggroup() #echo $log_group_fetch if [ -z $log_group_fetch ]; then - echo "log group does not exist" + echo -e "\nLog group does not exist\n" aws logs create-log-group --log-group-name /aws/ecs/$AWS_ECS_CLUSTER track_error $? "aws log group" else - echo "log group exist" + echo -e "\nLog group exists\n" fi } -# EBS integration +# EBS integration ebsportmapping() { -echo "port map called" -containerport=$1 -hostport=$2 - -if [ -z $hostport ] -then -ebstemplate=$(echo $ebstemplate | jq --arg containerPort $containerport --arg ebsportcount $ebsportcount '.Ports[$ebsportcount |tonumber] |= .+ { ContainerPort: $containerPort }') -else -ebstemplate=$(echo $ebstemplate | jq --arg hostPort $hostport --arg containerPort $containerport --arg ebsportcount $ebsportcount '.Ports[$ebsportcount |tonumber] |= .+ { HostPort: $hostPort, ContainerPort: $containerPort }') -fi + echo -e "Port map called\n" + containerport=$1 + hostport=$2 -let ebsportcount=ebsportcount+1 + if [ -z $hostport ] + then + ebstemplate=$(echo $ebstemplate | jq --arg containerPort $containerport --arg ebsportcount $ebsportcount '.Ports[$ebsportcount |tonumber] |= .+ { ContainerPort: $containerPort }') + else + ebstemplate=$(echo $ebstemplate | jq --arg hostPort $hostport --arg containerPort $containerport --arg ebsportcount $ebsportcount '.Ports[$ebsportcount |tonumber] |= .+ { HostPort: $hostPort, ContainerPort: $containerPort }') + fi + let ebsportcount=ebsportcount+1 } - EBS_push_docker_image() { - -echo "pushing docker image: ${IMAGE}" -IMAGE="${DOCKER_REGISTRY_NAME}/${IMG_WITH_EBS_TAG}" -docker push $IMAGE -track_error $? "docker push failed." - + IMAGE="${DOCKER_REGISTRY_NAME}/${IMG_WITH_EBS_TAG}" + echo "Pushing Docker image: ${IMAGE}" + docker push $IMAGE + track_error $? "Docker push failed." } creating_updating_ebs_docker_json() { - echo "updating auth bucket name" + echo "Updating S3 auth bucket name" sed -i.bak -e "s/@AWSS3AUTHBUCKET@/appirio-platform-$ENV_CONFIG/g" $EBS_TEMPLATE_SKELETON_FILE rm ${EBS_TEMPLATE_SKELETON_FILE}.bak @@ -587,7 +599,7 @@ creating_updating_ebs_docker_json() { ebstemplate=$(cat $EBS_TEMPLATE_SKELETON_FILE) if [ -z $AWS_EBS_PORTS ]; then - echo "No container port is defined. configuring default 8080 port" + echo "No container port is defined. 
Configuring default 8080 port" ebsportmapping 8080 else Buffer_portmap=$(echo $AWS_EBS_PORTS | sed 's/,/ /g') @@ -606,33 +618,31 @@ creating_updating_ebs_docker_json() { if [ -z "$EBS_EB_EXTENSTION_LOCATION" ]; then cat $EBS_TEMPLATE_SKELETON_FILE | sed -e "s/@IMAGE@/${IMG_WITH_EBS_TAG}/g" > $DOCKERRUN - echo "pushing $DOCKERRUN as ${IMG_WITH_EBS_TAG} to S3: ${AWS_S3_BUCKET}/${AWS_S3_KEY}" + echo "Pushing $DOCKERRUN as ${IMG_WITH_EBS_TAG} to S3: ${AWS_S3_BUCKET}/${AWS_S3_KEY}" aws s3api put-object --bucket "${AWS_S3_BUCKET}" --key "${AWS_S3_KEY}" --body $DOCKERRUN track_error $? "aws s3api put-object failed." else cat $EBS_TEMPLATE_SKELETON_FILE | sed -e "s/@IMAGE@/${IMG_WITH_EBS_TAG}/g" > $DOCKERRUN cp -rvf $EBS_EB_EXTENSTION_LOCATION/.ebextensions . jar cMf ${IMG_WITH_EBS_TAG}.zip $DOCKERRUN .ebextensions - echo "pushing ${IMG_WITH_EBS_TAG}.zip to S3: ${AWS_S3_BUCKET}/${AWS_S3_KEY}" + echo "Pushing ${IMG_WITH_EBS_TAG}.zip to S3: ${AWS_S3_BUCKET}/${AWS_S3_KEY}" aws s3api put-object --bucket "${AWS_S3_BUCKET}" --key "${AWS_S3_KEY}" --body ${IMG_WITH_EBS_TAG}.zip track_error $? "aws s3api put-object failed." fi } creating_updating_EBS_appversion() { - - echo "creating new application version $AWS_EBS_APPVER in ${AWS_EBS_APPLICATION_NAME} from s3:${AWS_S3_BUCKET}/${AWS_S3_KEY}" + echo "Creating new application version $AWS_EBS_APPVER in ${AWS_EBS_APPLICATION_NAME} from s3:${AWS_S3_BUCKET}/${AWS_S3_KEY}" aws elasticbeanstalk create-application-version --application-name $AWS_EBS_APPLICATION_NAME --version-label $AWS_EBS_APPVER --source-bundle S3Bucket="$AWS_S3_BUCKET",S3Key="$AWS_S3_KEY" track_error $? "aws elasticbeanstalk create-application-version failed." - echo "updating elastic beanstalk environment ${AWS_EB_ENV} with the version ${AWS_EBS_APPVER}." + echo "Updating elastic beanstalk environment ${AWS_EB_ENV} with the version ${AWS_EBS_APPVER}." # assumes beanstalk app for this service has already been created and configured aws elasticbeanstalk update-environment --environment-name $AWS_EBS_ENV_NAME --version-label $AWS_EBS_APPVER track_error $? "aws elasticbeanstalk update-environment failed." - } -#Cloud Front DEPLOYMENT +#CloudFront deployment deploy_s3bucket() { echo -e "application/font-woff\t\t\t\twoff2" >> /etc/mime.types @@ -679,17 +689,19 @@ deploy_s3bucket() { echo $getformatdetails S3_OPTIONS="--content-encoding gzip" fi + echo aws s3 cp --dryrun $syncfilepath s3://${AWS_S3_BUCKET}${uploadpath} ${S3_CACHE_OPTIONS} ${S3_OPTIONS} eval "aws s3 cp --dryrun $syncfilepath s3://${AWS_S3_BUCKET}${uploadpath} ${S3_CACHE_OPTIONS} ${S3_OPTIONS}" result=`eval "aws s3 cp $syncfilepath s3://${AWS_S3_BUCKET}${uploadpath} ${S3_CACHE_OPTIONS} ${S3_OPTIONS}"` if [ $? -eq 0 ]; then - echo "file Deployed!" + echo "File Deployed!" else echo "Deployment Failed - $result" exit 1 fi done; } + download_envfile() { Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g' ) @@ -704,6 +716,7 @@ download_envfile() #openssl enc -aes-256-cbc -d -md MD5 -in $listname.json.enc -out $listname.json -k $SECPASSWD done } + download_psfile() { Buffer_seclist=$(echo $SECPS_LIST | sed 's/,/ /g' ) @@ -715,6 +728,7 @@ download_psfile() track_error $? 
"$listname.json" done } + decrypt_fileenc() { Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g' ) @@ -747,21 +761,23 @@ uploading_envvar() IFS=$o done } + configure_Lambda_template() { - if [ "$AWS_LAMBDA_DEPLOY_TYPE" == "SLS" ] then mkdir -p /home/circleci/project/config Buffer_seclist=$(echo $SEC_LIST | sed 's/,/ /g') - #envvars=$( cat $listname.json | jq -c ' .app_var ') + #envvars=$( cat $listname.json | jq -c ' .app_var ') + for listname in $Buffer_seclist; do - o=$IFS - IFS=$(echo -en "\n\b") - envvars=$( cat $listname.json | jq -c ' . ') - echo "$envvars" > /home/circleci/project/config/$AWS_LAMBDA_STAGE.json - sed -i 's/\\n/\\\\n/g' /home/circleci/project/config/$AWS_LAMBDA_STAGE.json + o=$IFS + IFS=$(echo -en "\n\b") + envvars=$( cat $listname.json | jq -c ' . ') + echo "$envvars" > /home/circleci/project/config/$AWS_LAMBDA_STAGE.json + sed -i 's/\\n/\\\\n/g' /home/circleci/project/config/$AWS_LAMBDA_STAGE.json + #yq r $listname.json >$listname.yml #a=serverless.yml #b="$listname.json" @@ -770,12 +786,11 @@ configure_Lambda_template() #python -c "import sys , json , ruamel.yaml , cStringIO; jsondata = cStringIO.StringIO(); yaml = ruamel.yaml.YAML(); yaml.explicit_start = True; data = json.load(open('$b','r'), object_pairs_hook=ruamel.yaml.comments.CommentedMap) ; ruamel.yaml.scalarstring.walk_tree(data) ; yaml.dump(data, jsondata); cfg = yaml.load(open('$a','r')); cfg_env = yaml.load(jsondata.getvalue()); cfg['Resources']['tcdevhandler']['Properties']['Environment']['Variables']=cfg_env['app_var'] ; yaml.dump(cfg, open('appeneded.yaml', 'w'))" #python -c "import sys , json , ruamel.yaml , cStringIO; jsondata = cStringIO.StringIO(); yaml = ruamel.yaml.YAML(); yaml.explicit_start = True; data = json.load(open('$b','r'), object_pairs_hook=ruamel.yaml.comments.CommentedMap) ; ruamel.yaml.scalarstring.walk_tree(data) ; yaml.dump(data, jsondata); cfg = yaml.load(open('$a','r')); cfg_env = yaml.load(jsondata.getvalue()); cfg['provider']['environment']=cfg_env['app_var'] ; yaml.dump(cfg, open('appeneded.yaml', 'w'))" #python -c "import sys , json , ruamel.yaml ; from io import BytesIO as StringIO ; jsondata = StringIO(); yaml = ruamel.yaml.YAML(); yaml.explicit_start = True; data = json.load(open('$b','r'), object_pairs_hook=ruamel.yaml.comments.CommentedMap) ; ruamel.yaml.scalarstring.walk_tree(data) ; yaml.dump(data, jsondata); cfg = yaml.load(open('$a','r')); cfg_env= yaml.load(jsondata.getvalue()); cfg['provider']['environment']=cfg_env['app_var'] ; yaml.dump(cfg, open('appeneded.yaml','w'))" - #python -c "import sys , json , ruamel.yaml ; from io import BytesIO as StringIO ; jsondata = StringIO(); yaml = ruamel.yaml.YAML(); data = json.load(open('$b','r')) ; yaml.dump(data, jsondata); cfg = yaml.load(open('$a','r')); cfg_env= yaml.load(jsondata.getvalue()); cfg['provider']['environment']=cfg_env['app_var'] ; yaml.dump(cfg, open('appeneded.yaml','w'))" + #python -c "import sys , json , ruamel.yaml ; from io import BytesIO as StringIO ; jsondata = StringIO(); yaml = ruamel.yaml.YAML(); data = json.load(open('$b','r')) ; yaml.dump(data, jsondata); cfg = yaml.load(open('$a','r')); cfg_env= yaml.load(jsondata.getvalue()); cfg['provider']['environment']=cfg_env['app_var'] ; yaml.dump(cfg, open('appeneded.yaml','w'))" #mv -f appeneded.yaml serverless.yml done IFS=$o fi - } deploy_lambda_package() @@ -783,12 +798,11 @@ deploy_lambda_package() # sls deploy if [ "$AWS_LAMBDA_DEPLOY_TYPE" == "SLS" ] then - echo "welcome to lambda SLS deploy" + echo "Welcome to lambda SLS deploy" sls deploy 
--stage $AWS_LAMBDA_STAGE - fi - - + fi } + # decrypt_aws_sys_parameter() # { @@ -798,327 +812,334 @@ deploy_lambda_package() # Input Collection and validation input_parsing_validation() { -while getopts .d:h:i:e:l:t:v:s:p:g:c:m:. OPTION -do - case $OPTION in - d) - DEPLOYMENT_TYPE=$OPTARG - ;; - h) - usage - exit 1 - ;; - i) - APP_IMAGE_NAME=$OPTARG - ;; - e) - ENV=$OPTARG - ;; - l) - SECPS_LIST=$OPTARG - ;; - t) - TAG=$OPTARG - ;; - c) - CFCACHE=$OPTARG - ;; - v) - EBS_APPVER=$OPTARG - ;; - s) - SEC_LIST=$OPTARG - ;; - p) - ECS_TEMPLATE_TYPE=$OPTARG - ;; - g) - SHARED_PROPERTY_FILENAME=$OPTARG - ;; - m) - DEPLOYCATEGORY=$OPTARG - ;; - ?) - log "additional param required" - usage - exit - ;; - esac -done - -if [ -z $DEPLOYMENT_TYPE ] || [ -z $ENV ] ; -then - log "Param validation error" - usage - exit 1 -fi + while getopts .d:h:i:e:l:t:v:s:p:g:c:m:. OPTION + do + case $OPTION in + d) + DEPLOYMENT_TYPE=$OPTARG + ;; + h) + usage + exit 1 + ;; + i) + APP_IMAGE_NAME=$OPTARG + ;; + e) + ENV=$OPTARG + ;; + l) + SECPS_LIST=$OPTARG + ;; + t) + TAG=$OPTARG + ;; + c) + CFCACHE=$OPTARG + ;; + v) + EBS_APPVER=$OPTARG + ;; + s) + SEC_LIST=$OPTARG + ;; + p) + ECS_TEMPLATE_TYPE=$OPTARG + ;; + g) + SHARED_PROPERTY_FILENAME=$OPTARG + ;; + m) + DEPLOYCATEGORY=$OPTARG + ;; + ?) + log "additional param required" + usage + exit + ;; + esac + done -log "ENV : $ENV" -log "DEPLOYMENT_TYPE : $DEPLOYMENT_TYPE" -log "app variable list : $SEC_LIST" -ENV_CONFIG=`echo "$ENV" | tr '[:upper:]' '[:lower:]'` - -#Validating AWS configuration - - -#Getting Deployment varaible only - -# AWS_ACCESS_KEY_ID=$(eval "echo \$${ENV}_AWS_ACCESS_KEY_ID") -# AWS_SECRET_ACCESS_KEY=$(eval "echo \$${ENV}_AWS_SECRET_ACCESS_KEY") -# AWS_ACCOUNT_ID=$(eval "echo \$${ENV}_AWS_ACCOUNT_ID") -# AWS_REGION=$(eval "echo \$${ENV}_AWS_REGION") -# if [ -z $AWS_ACCESS_KEY_ID ] || [ -z $AWS_SECRET_ACCESS_KEY ] || [ -z $AWS_ACCOUNT_ID ] || [ -z $AWS_REGION ]; -# then -# log "AWS Secret Parameters are not configured in circleci/environment" -# usage -# exit 1 -# else -# configure_aws_cli -# #aws configure list -# fi - -download_envfile -if [ -z $SECPS_LIST ]; -then - log "No secret parameter file list provided" - -else - download_psfile -fi -#decrypt_fileenc -#uploading_envvar + if [ -z $DEPLOYMENT_TYPE ] || [ -z $ENV ] ; + then + log "Param validation error" + usage + exit 1 + fi + log "ENV : $ENV" + log "DEPLOYMENT_TYPE : $DEPLOYMENT_TYPE" + log "app variable list : $SEC_LIST" + ENV_CONFIG=`echo "$ENV" | tr '[:upper:]' '[:lower:]'` + + #Validating AWS configuration + + #Getting Deployment varaible only + + # AWS_ACCESS_KEY_ID=$(eval "echo \$${ENV}_AWS_ACCESS_KEY_ID") + # AWS_SECRET_ACCESS_KEY=$(eval "echo \$${ENV}_AWS_SECRET_ACCESS_KEY") + # AWS_ACCOUNT_ID=$(eval "echo \$${ENV}_AWS_ACCOUNT_ID") + # AWS_REGION=$(eval "echo \$${ENV}_AWS_REGION") + # if [ -z $AWS_ACCESS_KEY_ID ] || [ -z $AWS_SECRET_ACCESS_KEY ] || [ -z $AWS_ACCOUNT_ID ] || [ -z $AWS_REGION ]; + # then + # log "AWS Secret Parameters are not configured in circleci/environment" + # usage + # exit 1 + # else + # configure_aws_cli + # #aws configure list + # fi + + download_envfile + + if [ -z $SECPS_LIST ]; + then + log "No secret parameter file list provided" + else + download_psfile + fi + + #decrypt_fileenc + #uploading_envvar + + #Validating parameter based on Deployment type + #ECS parameter validation + if [ "$DEPLOYMENT_TYPE" == "ECS" ] + then + ECS_TAG=$TAG + if [ "$DEPLOYCATEGORY" == "CLI" ] + then + if [ -z $AWS_REPOSITORY ] || [ -z $AWS_ECS_CLUSTER ] || [ -z $AWS_ECS_SERVICE ] || [ 
-z $ECS_TAG ]; + then + log "Deployment varibale are not updated. Please check tag option was provided. Also ensure AWS_REPOSITORY, AWS_ECS_TASK_FAMILY,AWS_ECS_CONTAINER_NAME,AWS_ECS_PORTS,AWS_ECS_CLUSTER and AWS_ECS_SERVICE variables are configured on secret manager" + usage + exit 1 + fi + DEPLOYCATEGORYNAME="ECSCLI" + else + cp $TEMPLATE_DIR/$TEMPLATE_SKELETON_FILE . + + if [ -z $AWS_REPOSITORY ] || [ -z $AWS_ECS_CLUSTER ] || [ -z $AWS_ECS_SERVICE ] || [ -z $AWS_ECS_TASK_FAMILY ] || [ -z $AWS_ECS_CONTAINER_NAME ] || [ -z $AWS_ECS_PORTS ] || [ -z $ECS_TAG ]; + then + log "Deployment varibale are not updated. Please check tag option was provided. Also ensure AWS_REPOSITORY, AWS_ECS_TASK_FAMILY,AWS_ECS_CONTAINER_NAME,AWS_ECS_PORTS,AWS_ECS_CLUSTER and AWS_ECS_SERVICE variables are configured on secret manager" + usage + exit 1 + fi + DEPLOYCATEGORYNAME="AWSCLI" + fi + + log "AWS_REPOSITORY : $AWS_REPOSITORY" + log "AWS_ECS_CLUSTER : $AWS_ECS_CLUSTER" + log "AWS_ECS_SERVICE_NAMES : $AWS_ECS_SERVICE" + log "AWS_ECS_TASK_FAMILY : $AWS_ECS_TASK_FAMILY" + log "AWS_ECS_CONTAINER_NAME : $AWS_ECS_CONTAINER_NAME" + log "AWS_ECS_PORTS : $AWS_ECS_PORTS" + log "ECS_TAG : $ECS_TAG" + log "DEPLOY TYPE : $DEPLOYCATEGORYNAME" + fi + + #EBS parameter validation + if [ "$DEPLOYMENT_TYPE" == "EBS" ] + then + # EBS_TAG = the docker image tag for example dev.201807051535 + cp $TEMPLATE_DIR/$EBS_TEMPLATE_SKELETON_FILE . + EBS_TAG=$TAG + AWS_EBS_APPVER="${AWS_EBS_ENV_NAME}-${EBS_TAG}" + IMG_WITH_EBS_TAG="${DOCKER_IMAGE_NAME}:${EBS_TAG}" + # EBS_TAG="${IMAGE_NAME}:${ENV_CONFIG}.${EBS_APPVER}" + + if [ "$AWS_S3_KEY_LOCATION" = "" ] ; + then + AWS_S3_KEY="${IMG_WITH_EBS_TAG}" + else + AWS_S3_KEY="$AWS_S3_KEY_LOCATION/${IMG_WITH_EBS_TAG}" + fi + #AWS_EBS_EB_DOCKERRUN_TEMPLATE_LOCATION=$(eval "echo \$${ENV}_AWS_EBS_EB_DOCKERRUN_TEMPLATE_LOCATION") + #AWS_EBS_DOCKERRUN_TEMPLATE=$(eval "echo \$${ENV}_AWS_EBS_DOCKERRUN_TEMPLATE") + if [ -z $AWS_EBS_APPLICATION_NAME ] || [ -z $DOCKER_IMAGE_NAME ] || [ -z $AWS_EBS_ENV_NAME ] || [ -z $EBS_TAG ] || [ -z $AWS_EBS_APPVER ] || [ -z $AWS_S3_BUCKET ] ; + then + log "Build variables are not updated. Please update the Build variable file" + usage + exit 1 + fi + log "EBS_APPLICATION_NAME : $AWS_EBS_APPLICATION_NAME" + log "AWS_EBS_APPVER : $AWS_EBS_APPVER" + log "EBS_TAG : $EBS_TAG" + log "AWS_S3_BUCKET : $AWS_S3_BUCKET" + log "AWS_S3_KEY : $AWS_S3_KEY" + log "AWS_EB_ENV : $AWS_EBS_ENV_NAME" + fi -#Validating parameter based on Deployment type -#ECS parameter validation -if [ "$DEPLOYMENT_TYPE" == "ECS" ] -then - ECS_TAG=$TAG - if [ "$DEPLOYCATEGORY" == "CLI" ] + #CloudFront parameter validation + if [ "$DEPLOYMENT_TYPE" == "CFRONT" ] then - if [ -z $AWS_REPOSITORY ] || [ -z $AWS_ECS_CLUSTER ] || [ -z $AWS_ECS_SERVICE ] || [ -z $ECS_TAG ]; + if [ -z $AWS_S3_BUCKET ] || [ -z $AWS_S3_SOURCE_SYNC_PATH ]; then - log "Deployment varibale are not updated. Please check tag option has provided. also ensure AWS_REPOSITORY, AWS_ECS_TASK_FAMILY,AWS_ECS_CONTAINER_NAME,AWS_ECS_PORTS,AWS_ECS_CLUSTER and AWS_ECS_SERVICE ariables are configured on secret manager" + log "Build variables are not updated. Please update the Build variable file" usage exit 1 fi - DEPLOYCATEGORYNAME="ECSCLI" - else - cp $HOME/buildscript/$TEMPLATE_SKELETON_FILE . 
+ log "AWS_S3_BUCKET : $AWS_S3_BUCKET" + log "AWS_S3_SOURCE_SYNC_PATH : $AWS_S3_SOURCE_SYNC_PATH" + fi - if [ -z $AWS_REPOSITORY ] || [ -z $AWS_ECS_CLUSTER ] || [ -z $AWS_ECS_SERVICE ] || [ -z $AWS_ECS_TASK_FAMILY ] || [ -z $AWS_ECS_CONTAINER_NAME ] || [ -z $AWS_ECS_PORTS ] || [ -z $ECS_TAG ]; + #Lambda parameter validation + if [ "$DEPLOYMENT_TYPE" == "LAMBDA" ] + then + if [ -z $AWS_LAMBDA_DEPLOY_TYPE ] ; then - log "Deployment varibale are not updated. Please check tag option has provided. also ensure AWS_REPOSITORY, AWS_ECS_TASK_FAMILY,AWS_ECS_CONTAINER_NAME,AWS_ECS_PORTS,AWS_ECS_CLUSTER and AWS_ECS_SERVICE ariables are configured on secret manager" + log "Build variables are not updated. Please update the Build variable file" usage exit 1 fi - DEPLOYCATEGORYNAME="AWSCLI" + log "AWS_LAMBDA_DEPLOY_TYPE : $AWS_LAMBDA_DEPLOY_TYPE" + + if [ -z $AWS_LAMBDA_STAGE ] ; + then + log "Build variables are not updated. Please update the Build variable file" + usage + exit 1 + fi + log "AWS_LAMBDA_STAGE : $AWS_LAMBDA_STAGE" fi - log "AWS_REPOSITORY : $AWS_REPOSITORY" - log "AWS_ECS_CLUSTER : $AWS_ECS_CLUSTER" - log "AWS_ECS_SERVICE_NAMES : $AWS_ECS_SERVICE" - log "AWS_ECS_TASK_FAMILY : $AWS_ECS_TASK_FAMILY" - log "AWS_ECS_CONTAINER_NAME : $AWS_ECS_CONTAINER_NAME" - log "AWS_ECS_PORTS : $AWS_ECS_PORTS" - log "ECS_TAG : $ECS_TAG" - log "DEPLOY TYPE : $DEPLOYCATEGORYNAME" -fi -#EBS parameter validation -if [ "$DEPLOYMENT_TYPE" == "EBS" ] -then - # EBS_TAG = the docker image tag for example dev.201807051535 - cp $HOME/buildscript/$EBS_TEMPLATE_SKELETON_FILE . - EBS_TAG=$TAG - AWS_EBS_APPVER="${AWS_EBS_ENV_NAME}-${EBS_TAG}" - IMG_WITH_EBS_TAG="${DOCKER_IMAGE_NAME}:${EBS_TAG}" -# EBS_TAG="${IMAGE_NAME}:${ENV_CONFIG}.${EBS_APPVER}" - - - if [ "$AWS_S3_KEY_LOCATION" = "" ] ; - then - AWS_S3_KEY="${IMG_WITH_EBS_TAG}" - else - AWS_S3_KEY="$AWS_S3_KEY_LOCATION/${IMG_WITH_EBS_TAG}" - fi - #AWS_EBS_EB_DOCKERRUN_TEMPLATE_LOCATION=$(eval "echo \$${ENV}_AWS_EBS_EB_DOCKERRUN_TEMPLATE_LOCATION") - #AWS_EBS_DOCKERRUN_TEMPLATE=$(eval "echo \$${ENV}_AWS_EBS_DOCKERRUN_TEMPLATE") - if [ -z $AWS_EBS_APPLICATION_NAME ] || [ -z $DOCKER_IMAGE_NAME ] || [ -z $AWS_EBS_ENV_NAME ] || [ -z $EBS_TAG ] || [ -z $AWS_EBS_APPVER ] || [ -z $AWS_S3_BUCKET ] ; - then - log "Build varibale are not updated. Please update the Build variable file" - usage - exit 1 - fi - log "EBS_APPLICATION_NAME : $AWS_EBS_APPLICATION_NAME" - log "AWS_EBS_APPVER : $AWS_EBS_APPVER" - log "EBS_TAG : $EBS_TAG" - log "AWS_S3_BUCKET : $AWS_S3_BUCKET" - log "AWS_S3_KEY : $AWS_S3_KEY" - log "AWS_EB_ENV : $AWS_EBS_ENV_NAME" -fi -#CFRONT parameter validation -if [ "$DEPLOYMENT_TYPE" == "CFRONT" ] -then - - if [ -z $AWS_S3_BUCKET ] || [ -z $AWS_S3_SOURCE_SYNC_PATH ]; - then - log "Build varibale are not updated. Please update the Build variable file" - usage - exit 1 - fi - log "AWS_S3_BUCKET : $AWS_S3_BUCKET" - log "AWS_S3_SOURCE_SYNC_PATH : $AWS_S3_SOURCE_SYNC_PATH" -fi -#CFRONT parameter validation -if [ "$DEPLOYMENT_TYPE" == "LAMBDA" ] -then - - if [ -z $AWS_LAMBDA_DEPLOY_TYPE ] ; - then - log "Build varibale are not updated. Please update the Build variable file" - usage - exit 1 - fi - log "AWS_LAMBDA_DEPLOY_TYPE : $AWS_LAMBDA_DEPLOY_TYPE" - - if [ -z $AWS_LAMBDA_STAGE ] ; - then - log "Build varibale are not updated. 
Please update the Build variable file" - usage - exit 1 - fi - log "AWS_LAMBDA_STAGE : $AWS_LAMBDA_STAGE" -fi } # Main - main() { + input_parsing_validation $@ -input_parsing_validation $@ - -if [ "$DEPLOYMENT_TYPE" == "ECS" ] -then - if [ "$DEPLOYCATEGORY" == "CLI" ] + if [ "$DEPLOYMENT_TYPE" == "ECS" ] then - eval $(aws ecr get-login --region $AWS_REGION --no-include-email) - #Moving image to repository - if [ -z $APP_IMAGE_NAME ]; + if [ "$DEPLOYCATEGORY" == "CLI" ] then - echo "value of AWS_REPOSITORY " $AWS_REPOSITORY - AWS_REPOSITORY_NAMES=$(echo ${AWS_REPOSITORY} | sed 's/,/ /g') - echo "value of AWS_REPOSITORY_NAMES " $AWS_REPOSITORY_NAMES - IFS=' ' read -a AWS_REPOSITORY_NAMES_ARRAY <<< $AWS_REPOSITORY_NAMES - if [ ${#AWS_REPOSITORY_NAMES_ARRAY[@]} -gt 0 ]; then - echo "${#AWS_REPOSITORY_NAMES_ARRAY[@]} repo push initalisation" - for AWS_ECS_REPO_NAME in "${AWS_REPOSITORY_NAMES_ARRAY[@]}" + eval $(aws ecr get-login --region $AWS_REGION --no-include-email) + + # Moving image to repository + if [ -z $APP_IMAGE_NAME ]; + then + echo "Value of AWS_REPOSITORY: " $AWS_REPOSITORY + AWS_REPOSITORY_NAMES=$(echo ${AWS_REPOSITORY} | sed 's/,/ /g') + echo "Value of AWS_REPOSITORY_NAMES: " $AWS_REPOSITORY_NAMES + + IFS=' ' read -a AWS_REPOSITORY_NAMES_ARRAY <<< $AWS_REPOSITORY_NAMES + if [ ${#AWS_REPOSITORY_NAMES_ARRAY[@]} -gt 0 ]; then + echo "${#AWS_REPOSITORY_NAMES_ARRAY[@]} repo push initalisation" + for AWS_ECS_REPO_NAME in "${AWS_REPOSITORY_NAMES_ARRAY[@]}" + do + echo "updating reposioty - $AWS_ECS_REPO_NAME" + ECSCLI_push_ecr_image $AWS_ECS_REPO_NAME + #echo $REVISION + done + else + echo "Kindly check the Repository name has Parameter" + usage + exit 1 + fi + else + #if appp images details are provided + + echo "value of AWS_REPOSITORY " $AWS_REPOSITORY + AWS_REPOSITORY_NAMES=$(echo ${AWS_REPOSITORY} | sed 's/,/ /g') + echo "value of AWS_REPOSITORY_NAMES " $AWS_REPOSITORY_NAMES + echo "value of image name provided " $APP_IMAGE_NAME + APP_IMAGE_NAMES=$(echo ${APP_IMAGE_NAME} | sed 's/,/ /g') + + IFS=' ' read -a AWS_REPOSITORY_NAMES_ARRAY <<< $AWS_REPOSITORY_NAMES + IFS=' ' read -a APP_IMAGE_NAMES_ARRAY <<< $APP_IMAGE_NAMES + echo "AWS ECR repo count needs to be updated ${#AWS_REPOSITORY_NAMES_ARRAY[@]}, APP image count provided in option ${#APP_IMAGE_NAMES_ARRAY[@]} " + + if [ "${#AWS_REPOSITORY_NAMES_ARRAY[@]}" = "${#APP_IMAGE_NAMES_ARRAY[@]}" ]; + then + ecstempcount=0 + while [ $ecstempcount -lt ${#AWS_REPOSITORY_NAMES_ARRAY[@]} ] + do + echo "${AWS_REPOSITORY_NAMES_ARRAY[$count]} , ${APP_IMAGE_NAMES_ARRAY[$count]}" + ECSCLI_push_ecr_image "${AWS_REPOSITORY_NAMES_ARRAY[$count]}" "${APP_IMAGE_NAMES_ARRAY[$count]}" + ecstempcount=`expr $ecstempcount + 1` + done + else + echo "Kindly check the image name in Parameter" + usage + exit 1 + fi + fi + + #env file updation + ECSCLI_update_env + + # Configuring cluster + ecs-cli configure --region us-east-1 --cluster $AWS_ECS_CLUSTER + + # updating service + echo "Value of AWS_ECS_SERVICE: " $AWS_ECS_SERVICE + AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g') + #AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g' | sed 'N;s/\n//') + echo "Value of AWS_ECS_SERVICE_NAMES: " $AWS_ECS_SERVICE_NAMES + + IFS=' ' read -a AWS_ECS_SERVICES <<< $AWS_ECS_SERVICE_NAMES + if [ ${#AWS_ECS_SERVICES[@]} -gt 0 ]; then + echo "${#AWS_ECS_SERVICES[@]} service(s) are going to be updated" + for AWS_ECS_SERVICE_NAME in "${AWS_ECS_SERVICES[@]}" do - echo "updating reposioty - $AWS_ECS_REPO_NAME" - ECSCLI_push_ecr_image $AWS_ECS_REPO_NAME 
+ echo "updating ECS Cluster Service - $AWS_ECS_SERVICE_NAME" + ecs-cli compose --project-name "$AWS_ECS_SERVICE_NAME" service up #echo $REVISION done else - echo "Kindly check the Repository name has Parameter" + echo "Kindly check the service name in Parameter" usage exit 1 fi else - #if appp images details are provided - - echo "value of AWS_REPOSITORY " $AWS_REPOSITORY - AWS_REPOSITORY_NAMES=$(echo ${AWS_REPOSITORY} | sed 's/,/ /g') - echo "value of AWS_REPOSITORY_NAMES " $AWS_REPOSITORY_NAMES - echo "value of image name provided " $APP_IMAGE_NAME - APP_IMAGE_NAMES=$(echo ${APP_IMAGE_NAME} | sed 's/,/ /g') - IFS=' ' read -a AWS_REPOSITORY_NAMES_ARRAY <<< $AWS_REPOSITORY_NAMES - IFS=' ' read -a APP_IMAGE_NAMES_ARRAY <<< $APP_IMAGE_NAMES - echo "AWS REPO COUNT NEED TO BE UPDATE ${#AWS_REPOSITORY_NAMES_ARRAY[@]} , APP image count provided in option ${#APP_IMAGE_NAMES_ARRAY[@]} " - if [ "${#AWS_REPOSITORY_NAMES_ARRAY[@]}" = "${#APP_IMAGE_NAMES_ARRAY[@]}" ]; - then - ecstempcount=0 - while [ $ecstempcount -lt ${#AWS_REPOSITORY_NAMES_ARRAY[@]} ] + validate_update_loggroup + ECS_push_ecr_image + ECS_template_create_register + + echo "Value of AWS_ECS_SERVICE: " $AWS_ECS_SERVICE + AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g') + #AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g' | sed 'N;s/\n//') + echo "Value of AWS_ECS_SERVICE_NAMES: " $AWS_ECS_SERVICE_NAMES + + IFS=' ' read -a AWS_ECS_SERVICES <<< $AWS_ECS_SERVICE_NAMES + if [ ${#AWS_ECS_SERVICES[@]} -gt 0 ]; then + echo "${#AWS_ECS_SERVICES[@]} service are going to be updated" + for AWS_ECS_SERVICE_NAME in "${AWS_ECS_SERVICES[@]}" do - echo "${AWS_REPOSITORY_NAMES_ARRAY[$count]} , ${APP_IMAGE_NAMES_ARRAY[$count]}" - ECSCLI_push_ecr_image "${AWS_REPOSITORY_NAMES_ARRAY[$count]}" "${APP_IMAGE_NAMES_ARRAY[$count]}" - ecstempcount=`expr $ecstempcount + 1` + echo "Creating/updating ECS Cluster Service - $AWS_ECS_SERVICE_NAME" + ECS_deploy_cluster "$AWS_ECS_SERVICE_NAME" + check_service_status "$AWS_ECS_SERVICE_NAME" + #echo $REVISION done else - echo "Kindly check the image name in Parameter" + echo "Kindly check the service name parameter" usage exit 1 fi fi - #env file updation - ECSCLI_update_env - # Configurong cluster - ecs-cli configure --region us-east-1 --cluster $AWS_ECS_CLUSTER - # updating service - echo "value of AWS_ECS_SERVICE " $AWS_ECS_SERVICE - AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g') - #AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g' | sed 'N;s/\n//') - echo "value of AWS_ECS_SERVICE_NAMES " $AWS_ECS_SERVICE_NAMES - IFS=' ' read -a AWS_ECS_SERVICES <<< $AWS_ECS_SERVICE_NAMES - if [ ${#AWS_ECS_SERVICES[@]} -gt 0 ]; then - echo "${#AWS_ECS_SERVICES[@]} service are going to be updated" - for AWS_ECS_SERVICE_NAME in "${AWS_ECS_SERVICES[@]}" - do - echo "updating ECS Cluster Service - $AWS_ECS_SERVICE_NAME" - ecs-cli compose --project-name "$AWS_ECS_SERVICE_NAME" service up - #echo $REVISION - done - else - echo "Kindly check the service name in Parameter" - usage - exit 1 - fi - else - validate_update_loggroup - ECS_push_ecr_image - ECS_template_create_register - echo "value of AWS_ECS_SERVICE " $AWS_ECS_SERVICE - AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g') - #AWS_ECS_SERVICE_NAMES=$(echo ${AWS_ECS_SERVICE} | sed 's/,/ /g' | sed 'N;s/\n//') - echo "value of AWS_ECS_SERVICE_NAMES " $AWS_ECS_SERVICE_NAMES - IFS=' ' read -a AWS_ECS_SERVICES <<< $AWS_ECS_SERVICE_NAMES - if [ ${#AWS_ECS_SERVICES[@]} -gt 0 ]; then - echo "${#AWS_ECS_SERVICES[@]} 
service are going to be updated" - for AWS_ECS_SERVICE_NAME in "${AWS_ECS_SERVICES[@]}" - do - echo "creating/updating ECS Cluster Service - $AWS_ECS_SERVICE_NAME" - ECS_deploy_cluster "$AWS_ECS_SERVICE_NAME" - check_service_status "$AWS_ECS_SERVICE_NAME" - #echo $REVISION - done - else - echo "Kindly check the service name in Parameter" - usage - exit 1 - fi - fi -fi - + fi -if [ "$DEPLOYMENT_TYPE" == "EBS" ] -then - #configure_aws_cli - configure_docker_private_login - EBS_push_docker_image - creating_updating_ebs_docker_json - creating_updating_EBS_appversion -fi + if [ "$DEPLOYMENT_TYPE" == "EBS" ] + then + #configure_aws_cli + configure_docker_private_login + EBS_push_docker_image + creating_updating_ebs_docker_json + creating_updating_EBS_appversion + fi -if [ "$DEPLOYMENT_TYPE" == "CFRONT" ] -then - deploy_s3bucket -fi + if [ "$DEPLOYMENT_TYPE" == "CFRONT" ] + then + deploy_s3bucket + fi -if [ "$DEPLOYMENT_TYPE" == "LAMBDA" ] -then - configure_Lambda_template - deploy_lambda_package -fi + if [ "$DEPLOYMENT_TYPE" == "LAMBDA" ] + then + configure_Lambda_template + deploy_lambda_package + fi } -main $@ +main $@ diff --git a/user-reference-templates/ReadMe.md b/user-reference-templates/ReadMe.md new file mode 100644 index 0000000..8bb7780 --- /dev/null +++ b/user-reference-templates/ReadMe.md @@ -0,0 +1,143 @@ +Build Setup: + +The build setup involves 3 steps. + +Step 1: Files to be uploaded to S3: + +1) Build variable creation (Optional): + i) Create a file, in JSON format, with the variables that need to be exported at build time. + ii) It must follow the naming convention {ENV}-{APPNAME}-buildvar.json + iii) Please refer to the sample format in + +2) App variable creation (Mandatory for ECS): + i) Create a file, in JSON format, with the variables that need to be exported at run time. + ii) It must follow the naming convention {ENV}-{APPNAME}-appvar.json + iii) Please refer to the sample format in + +3) Deployvar file creation (Mandatory): + i) Create a deployvar file for the master script based on the environment. + ii) It must follow the naming convention {ENV}-{APPNAME}-deployvar.json + iii) Please refer to the deployvar file configuration template available in + + +Step 2: GitHub repo files update + +1) Build script creation (Mandatory): + i) Create a build script which builds the image, and give it execute permission. + ii) Please refer to the template scripts available in . Select and update the script based on your requirement. + +2) CircleCI config file creation (Mandatory): + i) Create a CircleCI config file which defines the set of steps to execute in CircleCI. + ii) Please refer to the CircleCI configuration template available in . Select and update the script based on your requirement. + +Step 3: Create the AWS resources and set up the CircleCI integration + + +Example: Building a Node.js application + +1) Assumptions: + Application Name: testapp + Deploying Environment: dev + Application Type: processor + Deployment Type: ECS + Buildvar file needed: no + Appvar file needed: yes + Cluster Name: testapp-cluster + +2) Creating the S3 files + i) Copy the sample appvar file from here + ii) Update the application var file with the variables used at run time. Here the appvar looks as below: + { + "APPNAME" : "testapp" + } + iii) Name the appvar file dev-testapp-appvar.json + iv) Copy the sample deloyvar-ecs-std.json + v) Update the file with the proper resource details.
It looks as below: + + { + "AWS_ECS_CLUSTER": "testapp-cluster", + "AWS_ECS_SERVICE": "testapp", + "AWS_ECS_TASK_FAMILY": "testapp", + "AWS_ECS_CONTAINER_NAME": "testapp", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_CONTAINER_HEALTH_CMD": "/usr/bin/curl -f http://localhost:3000/health || exit 1" + } + + vi) Rename the file as dev-testapp-deployvar.json + vii) Upload the above 2 files to the S3 bucket in the standard location +3) Adding GitHub files + i) Copy the build script file from to local + ii) Rename it as build.sh + iii) Copy
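For the testapp example above, a minimal build.sh along these lines is usually enough (a sketch only, assuming the repository keeps its Dockerfile at docker/Dockerfile; it mirrors the build-docker.sh template added below):

#!/bin/bash
set -eo pipefail

# Application/image name; "testapp" in the example above.
APP_NAME=testapp

# Build the application image locally. Tagging the image and pushing it to ECR
# is handled afterwards by master_deploy.sh during the deploy step.
docker build -f docker/Dockerfile -t $APP_NAME:latest .

Commit the script as build.sh in the repository root and give it execute permission (for example chmod +x build.sh), as noted in Step 2 above.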
diff --git a/user-reference-templates/build-scripts/build-docker-compose-withargs.sh b/user-reference-templates/build-scripts/build-docker-compose-withargs.sh new file mode 100755 index 0000000..d57ac70 --- /dev/null +++ b/user-reference-templates/build-scripts/build-docker-compose-withargs.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -eo pipefail + +TAG=$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/$AWS_REPOSITORY_APP:$BUILD_NUMBER + +sed -i='' "s|challenge-recommender:latest|$TAG|" docker-compose.yml +# docker-compose build +docker-compose build --build-arg AWS_DYNAMODB_URL=${AWS_DYNAMODB_URL} --build-arg AWS_DEFAULT_REGION=${AWS_DEFAULT_REGION} --build-arg ENV_MEMBER_SKILLS_EXTRACTOR_BASE_URL=${ENV_MEMBER_SKILLS_EXTRACTOR_BASE_URL} --build-arg ENV_CHALLENGE_TOOL_BASE_URL=${ENV_CHALLENGE_TOOL_BASE_URL} --build-arg ENV_TAGGING_API_BASE_URL=${ENV_TAGGING_API_BASE_URL} --build-arg ENV_CHALLENGE_BASE_URL=${ENV_CHALLENGE_BASE_URL} --build-arg ENV_AUTH0_URL=${ENV_AUTH0_URL} --build-arg ENV_AUTH0_AUDIENCE=${ENV_AUTH0_AUDIENCE} --build-arg ENV_AUTH0_CLIENT_ID=${ENV_AUTH0_CLIENT_ID} --build-arg ENV_AUTH0_CLIENT_SECRET=${ENV_AUTH0_CLIENT_SECRET} --build-arg ENV_AUTH0_PROXY_SERVER_URL=${ENV_AUTH0_PROXY_SERVER_URL} --build-arg ENV_BUSAPI_URL=${ENV_BUSAPI_URL} + +# eval $(aws ecr get-login --region $AWS_REGION --no-include-email) +# docker push $TAG + +# ecs-cli configure --region $AWS_REGION --cluster $AWS_ECS_CLUSTER +# ecs-cli compose --project-name $AWS_ECS_SERVICE service up diff --git a/user-reference-templates/build-scripts/build-docker-compose.sh b/user-reference-templates/build-scripts/build-docker-compose.sh new file mode 100755 index 0000000..d0daf51 --- /dev/null +++ b/user-reference-templates/build-scripts/build-docker-compose.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -eo pipefail +APP_NAME=$1 +UPDATE_CACHE="" +#echo "" > docker/api.env +docker-compose -f docker/docker-compose.yml build $APP_NAME +docker create --name app $APP_NAME:latest + + +if [ -d node_modules ] +then + mv package-lock.json old-package-lock.json + docker cp app:/$APP_NAME/package-lock.json package-lock.json + set +eo pipefail + UPDATE_CACHE=$(cmp package-lock.json old-package-lock.json) + set -eo pipefail +else + UPDATE_CACHE=1 +fi + +if [ "$UPDATE_CACHE" == 1 ] +then + docker cp app:/$APP_NAME/node_modules . +fi \ No newline at end of file diff --git a/user-reference-templates/build-scripts/build-docker.sh b/user-reference-templates/build-scripts/build-docker.sh new file mode 100644 index 0000000..f3b95b7 --- /dev/null +++ b/user-reference-templates/build-scripts/build-docker.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -eo pipefail +APP_NAME=$1 +UPDATE_CACHE="" +docker build -f docker/Dockerfile -t $APP_NAME:latest .
+docker create --name app $APP_NAME:latest + +if [ -d node_modules ] +then + mv package-lock.json old-package-lock.json + docker cp app:/$APP_NAME/package-lock.json package-lock.json + set +eo pipefail + UPDATE_CACHE=$(cmp package-lock.json old-package-lock.json) + set -eo pipefail +else + UPDATE_CACHE=1 +fi + +if [ "$UPDATE_CACHE" == 1 ] +then + docker cp app:/$APP_NAME/node_modules . +fi \ No newline at end of file diff --git a/user-reference-templates/build-scripts/build-with-dockerarguments.sh b/user-reference-templates/build-scripts/build-with-dockerarguments.sh new file mode 100644 index 0000000..e771354 --- /dev/null +++ b/user-reference-templates/build-scripts/build-with-dockerarguments.sh @@ -0,0 +1,72 @@ +#!/bin/bash +set -eo pipefail + +# Builds Docker image of Community App application. +# This script expects a single argument: NODE_CONFIG_ENV, which must be either +# "development" or "production". +# Builds Docker image of the app. +TAG="testapp:latest" + +docker build -t $TAG \ + --build-arg AUTH0_CLIENT_ID=$AUTH0_CLIENT_ID \ + --build-arg CDN_URL=$CDN_URL \ + --build-arg COGNITIVE_NEWSLETTER_SIGNUP_APIKEY=$COGNITIVE_NEWSLETTER_SIGNUP_APIKEY \ + --build-arg COGNITIVE_NEWSLETTER_SIGNUP_URL=$COGNITIVE_NEWSLETTER_SIGNUP_URL \ + --build-arg CONTENTFUL_CDN_API_KEY=$CONTENTFUL_CDN_API_KEY \ + --build-arg CONTENTFUL_PREVIEW_API_KEY=$CONTENTFUL_PREVIEW_API_KEY \ + --build-arg CONTENTFUL_SPACE_ID=$CONTENTFUL_SPACE_ID \ + --build-arg CONTENTFUL_ZURICH_SPACE_ID=$CONTENTFUL_ZURICH_SPACE_ID \ + --build-arg CONTENTFUL_ZURICH_CDN_API_KEY=$CONTENTFUL_ZURICH_CDN_API_KEY \ + --build-arg CONTENTFUL_ZURICH_PREVIEW_API_KEY=$CONTENTFUL_ZURICH_PREVIEW_API_KEY \ + --build-arg CONTENTFUL_TOPGEAR_CDN_API_KEY=$CONTENTFUL_TOPGEAR_CDN_API_KEY \ + --build-arg CONTENTFUL_TOPGEAR_PREVIEW_API_KEY=$CONTENTFUL_TOPGEAR_PREVIEW_API_KEY \ + --build-arg CONTENTFUL_TOPGEAR_SPACE_ID=$CONTENTFUL_TOPGEAR_SPACE_ID \ + --build-arg CONTENTFUL_MANAGEMENT_TOKEN=$CONTENTFUL_MANAGEMENT_TOKEN \ + --build-arg CONTENTFUL_EDU_SPACE_ID=$CONTENTFUL_EDU_SPACE_ID \ + --build-arg CONTENTFUL_EDU_CDN_API_KEY=$CONTENTFUL_EDU_CDN_API_KEY \ + --build-arg CONTENTFUL_EDU_PREVIEW_API_KEY=$CONTENTFUL_EDU_PREVIEW_API_KEY \ + --build-arg FILESTACK_API_KEY=$FILESTACK_API_KEY \ + --build-arg FILESTACK_SUBMISSION_CONTAINER=$FILESTACK_SUBMISSION_CONTAINER \ + --build-arg MAILCHIMP_API_KEY=$MAILCHIMP_API_KEY \ + --build-arg MAILCHIMP_BASE_URL=$MAILCHIMP_BASE_URL \ + --build-arg NODE_CONFIG_ENV=$NODE_CONFIG_ENV \ + --build-arg OPEN_EXCHANGE_RATES_KEY=$OPEN_EXCHANGE_RATES_KEY \ + --build-arg SEGMENT_IO_API_KEY=$SEGMENT_IO_API_KEY \ + --build-arg SERVER_API_KEY=$SERVER_API_KEY \ + --build-arg TC_M2M_CLIENT_ID=$TC_M2M_CLIENT_ID \ + --build-arg TC_M2M_CLIENT_SECRET=$TC_M2M_CLIENT_SECRET \ + --build-arg TC_M2M_AUDIENCE=$TC_M2M_AUDIENCE \ + --build-arg TC_M2M_AUTH0_PROXY_SERVER_URL=$TC_M2M_AUTH0_PROXY_SERVER_URL \ + --build-arg TC_M2M_AUTH0_URL=$TC_M2M_AUTH0_URL \ + --build-arg AUTH_SECRET=$AUTH_SECRET \ + --build-arg TC_M2M_GRANT_TYPE=$TC_M2M_GRANT_TYPE \ + --build-arg CONTENTFUL_COMCAST_SPACE_ID=$CONTENTFUL_COMCAST_SPACE_ID \ + --build-arg CONTENTFUL_COMCAST_CDN_API_KEY=$CONTENTFUL_COMCAST_CDN_API_KEY \ + --build-arg CONTENTFUL_COMCAST_PREVIEW_API_KEY=$CONTENTFUL_COMCAST_PREVIEW_API_KEY \ + --build-arg RECRUITCRM_API_KEY=$RECRUITCRM_API_KEY \ + --build-arg GSHEETS_API_KEY=$GSHEETS_API_KEY \ + --build-arg COMMUNITY_APP_URL=$COMMUNITY_APP_URL . + +# Copies "node_modules" from the created image, if necessary for caching. 
+docker create --name app $TAG + +if [ -d node_modules ] +then + # If "node_modules" directory already exists, we should compare + # "package-lock.json" from the code and from the container to decide, + # whether we need to re-cache, and thus to copy "node_modules" from + # the Docker container. + mv package-lock.json old-package-lock.json + docker cp app:/opt/app/package-lock.json package-lock.json + set +eo pipefail + UPDATE_CACHE=$(cmp package-lock.json old-package-lock.json) + set -eo pipefail +else + # If "node_modules" does not exist, then cache must be created. + UPDATE_CACHE=1 +fi + +if [ "$UPDATE_CACHE" == 1 ] +then + docker cp app:/opt/app/node_modules . +fi \ No newline at end of file diff --git a/user-reference-templates/buildvar-appvar-template/appvar.json b/user-reference-templates/buildvar-appvar-template/appvar.json new file mode 100644 index 0000000..dfb11a8 --- /dev/null +++ b/user-reference-templates/buildvar-appvar-template/appvar.json @@ -0,0 +1,29 @@ +{ + "API_CONTEXT_PATH": "/v5/somethings", + "AUTH0_AUDIENCE": "https://nm.something.com/", + "AUTH0_CLIENT_ID": "xxxxxx14fbsRx", + "AUTH0_CLIENT_SECRET": "xxxxxx-x-xx", + "AUTH0_URL": "https://something.com.com/oauth/token", + "authSecret": "xxxxxxxx+XCU+x", + "DATABASE_URL": "xxxxxxxxx", + "DEV_MODE_EMAIL": "xxxx.xxxx+devmode@something.com", + "ENABLE_DEV_MODE": "false", + "ENABLE_EMAILS": "true", + "ENV": "DEV", + "KAFKA_CLIENT_CERT": "-----BEGIN CERTIFICATE-----\nxxxxxx\nafSRp9F\n-----END CERTIFICATE-----", + "KAFKA_CLIENT_CERT_KEY": "-----BEGIN RSA PRIVATE KEY-----\nMIIB/8/eg==\n-----END RSA PRIVATE KEY-----", + "KAFKA_GROUP_ID": "tc-something-server", + "KAFKA_URL": "blahblah+ssl://vlab-vlak-01.srvs.blahblah.com:9093", + "LOG_LEVEL": "DEBUG", + "MENTION_EMAIL": "mentioned.connect@something.com", + "MESSAGE_API_BASE_URL": "https://something.com/v5", + "PORT": 4000, + "REPLY_EMAIL_DOMAIN": "connectemail.something.com", + "REPLY_EMAIL_PREFIX": "something", + "TC_API_BASE_URL": "https://something.com", + "TC_API_V3_BASE_URL": "https://something.com/v3", + "TC_API_V4_BASE_URL": "https://something.com/v4", + "TC_API_V5_BASE_URL": "https://something.com/v5\u007f", + "TOKEN_CACHE_TIME": 909090909, + "validIssuers": "[\"https://something.com.com/\",\"https://something.com\",\"https://auth.something.com/\",\"https://auth.something.com/\"]" + } diff --git a/user-reference-templates/buildvar-appvar-template/buildvar.json b/user-reference-templates/buildvar-appvar-template/buildvar.json new file mode 100644 index 0000000..dfb11a8 --- /dev/null +++ b/user-reference-templates/buildvar-appvar-template/buildvar.json @@ -0,0 +1,29 @@ +{ + "API_CONTEXT_PATH": "/v5/somethings", + "AUTH0_AUDIENCE": "https://nm.something.com/", + "AUTH0_CLIENT_ID": "xxxxxx14fbsRx", + "AUTH0_CLIENT_SECRET": "xxxxxx-x-xx", + "AUTH0_URL": "https://something.com.com/oauth/token", + "authSecret": "xxxxxxxx+XCU+x", + "DATABASE_URL": "xxxxxxxxx", + "DEV_MODE_EMAIL": "xxxx.xxxx+devmode@something.com", + "ENABLE_DEV_MODE": "false", + "ENABLE_EMAILS": "true", + "ENV": "DEV", + "KAFKA_CLIENT_CERT": "-----BEGIN CERTIFICATE-----\nxxxxxx\nafSRp9F\n-----END CERTIFICATE-----", + "KAFKA_CLIENT_CERT_KEY": "-----BEGIN RSA PRIVATE KEY-----\nMIIB/8/eg==\n-----END RSA PRIVATE KEY-----", + "KAFKA_GROUP_ID": "tc-something-server", + "KAFKA_URL": "blahblah+ssl://vlab-vlak-01.srvs.blahblah.com:9093", + "LOG_LEVEL": "DEBUG", + "MENTION_EMAIL": "mentioned.connect@something.com", + "MESSAGE_API_BASE_URL": "https://something.com/v5", + "PORT": 4000, + "REPLY_EMAIL_DOMAIN": 
"connectemail.something.com", + "REPLY_EMAIL_PREFIX": "something", + "TC_API_BASE_URL": "https://something.com", + "TC_API_V3_BASE_URL": "https://something.com/v3", + "TC_API_V4_BASE_URL": "https://something.com/v4", + "TC_API_V5_BASE_URL": "https://something.com/v5\u007f", + "TOKEN_CACHE_TIME": 909090909, + "validIssuers": "[\"https://something.com.com/\",\"https://something.com\",\"https://auth.something.com/\",\"https://auth.something.com/\"]" + } diff --git a/user-reference-templates/circleci-config-file-template/config-buildvar-ecscli-dockercompose.yml b/user-reference-templates/circleci-config-file-template/config-buildvar-ecscli-dockercompose.yml new file mode 100644 index 0000000..31e58a0 --- /dev/null +++ b/user-reference-templates/circleci-config-file-template/config-buildvar-ecscli-dockercompose.yml @@ -0,0 +1,89 @@ +version: 2 +defaults: &defaults + docker: + - image: circleci/python:2.7-stretch-browsers +install_dependency: &install_dependency + name: Installation of build and deployment dependencies. + command: | + sudo apt install jq + sudo pip install awscli --upgrade + sudo pip install docker-compose + sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-latest + sudo chmod +x /usr/local/bin/ecs-cli +install_deploysuite: &install_deploysuite + name: Installation of install_deploysuite. + command: | + git clone --branch dev_compose_bugfix https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . +restore_cache_settings_for_build: &restore_cache_settings_for_build + key: docker-node-modules-{{ checksum "package-lock.json" }} + +save_cache_settings: &save_cache_settings + key: docker-node-modules-{{ checksum "package-lock.json" }} + paths: + - node_modules + +builddeploy_steps: &builddeploy_steps + - checkout + - setup_remote_docker + - run: *install_dependency + - run: *install_deploysuite + - run: + name: Setting env details and running build + command: | + ./awsconfiguration.sh $DEPLOY_ENV + source awsenvconf + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-buildvar + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-deployvar + source buildenvvar + ./build.sh + - deploy: + name: Running MasterScript. + command: | + source awsenvconf + source buildenvvar + # ./build.sh + ./master_deploy.sh -d ECS -e $DEPLOY_ENV -m CLI -t $CIRCLE_BUILD_NUM + +jobs: + # Build & Deploy against development backend + "build-dev": + <<: *defaults + environment: + DEPLOY_ENV: "DEV" + LOGICAL_ENV: "dev" + APPNAME: "application-name" + steps: *builddeploy_steps + + "build-prod": + <<: *defaults + environment: + DEPLOY_ENV: "PROD" + LOGICAL_ENV: "prod" + APPNAME: "application-name" + steps: *builddeploy_steps + +workflows: + version: 2 + build: + jobs: + # Development builds are executed on "develop" branch only. + - "build-dev": + context : org-global + filters: + branches: + only: + - dev + - dynamodb + + # Production builds are exectuted only on tagged commits to the + # master branch. 
+ - "build-prod": + context : org-global + filters: + branches: + only: + - master + - masterv1 diff --git a/user-reference-templates/circleci-config-file-template/config-lambda-deploy.yaml b/user-reference-templates/circleci-config-file-template/config-lambda-deploy.yaml new file mode 100644 index 0000000..1fe40c9 --- /dev/null +++ b/user-reference-templates/circleci-config-file-template/config-lambda-deploy.yaml @@ -0,0 +1,90 @@ +version: 2 + +default: &default + docker: + - image: cimg/node:12.22.0 +installation_dependency: &installation_dependency + name: Install Serverless and AWS CLI + command: | + sudo apt-get update -y && sudo apt-get install -qq -y python-pip libpython-dev + curl -O https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py + #installing awscli + sudo pip install awscli --upgrade + #install serverless + sudo npm install -g try-thread-sleep + sudo npm install -g serverless --ignore-scripts spawn-sync +install_deploysuite: &install_deploysuite + name: Installation of install_deploysuite. + command: | + git clone --branch master https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . +restore_cache: &restore_cache + key: docker-node-{{ checksum "package-lock.json" }} +install_npm: &install_npm + name: Install node_modules + command: | + npm install +save_cache: &save_cache + key: docker-node-{{ checksum "package-lock.json" }} + paths: + - node_modules +build_steps: &build_steps + # Initialization. + - checkout + - run: *installation_dependency + - run: *install_deploysuite + # Restoration of node_modules from cache. + - restore_cache: *restore_cache + - run: *install_npm + # Caching node modules. + - save_cache: *save_cache + # deploy app + - run: + name: Deploy via Masterscript v2 + command: | + ./awsconfiguration.sh $DEPLOY_ENV + source awsenvconf + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-deployvar + source buildenvvar + ./master_deploy.sh -d LAMBDA -e $DEPLOY_ENV -s ${LOGICAL_ENV}-${APPNAME}-appvar +jobs: + # Build & Deploy against development backend rera212 + "build-dev": + <<: *default + environment: + DEPLOY_ENV: "DEV" + LOGICAL_ENV: "dev" + APPNAME: "application-name" + # deploy app + steps: *build_steps + + + "build-prod": + <<: *default + environment: + DEPLOY_ENV: "PROD" + LOGICAL_ENV: "prod" + APPNAME: "application-name" + # deploy app + steps: *build_steps + +workflows: + version: 2 + build: + jobs: + # Development builds are executed on "develop" branch only. + - "build-dev": + context : org-global + filters: + branches: + only: + - dev + # production builds are executed on "master" branch only. + - "build-prod": + context : org-global + filters: + branches: + only: + - master diff --git a/user-reference-templates/circleci-config-file-template/config-std.yaml b/user-reference-templates/circleci-config-file-template/config-std.yaml new file mode 100644 index 0000000..80c997d --- /dev/null +++ b/user-reference-templates/circleci-config-file-template/config-std.yaml @@ -0,0 +1,78 @@ +version: 2 +defaults: &defaults + docker: + - image: circleci/python:2.7-stretch-browsers +install_dependency: &install_dependency + name: Installation of build and deployment dependencies. + command: | + sudo apt install jq + sudo pip install awscli --upgrade + sudo pip install docker-compose +install_deploysuite: &install_deploysuite + name: Installation of install_deploysuite. 
+ command: | + git clone --branch v1.4 https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . +restore_cache_settings_for_build: &restore_cache_settings_for_build + key: docker-node-modules-{{ checksum "package-lock.json" }} + +save_cache_settings: &save_cache_settings + key: docker-node-modules-{{ checksum "package-lock.json" }} + paths: + - node_modules + +builddeploy_steps: &builddeploy_steps + - checkout + - setup_remote_docker + - run: *install_dependency + - run: *install_deploysuite + - restore_cache: *restore_cache_settings_for_build + - run: ./build.sh ${APPNAME} + - save_cache: *save_cache_settings + - deploy: + name: Running MasterScript. + command: | + ./awsconfiguration.sh $DEPLOY_ENV + source awsenvconf + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-deployvar + source buildenvvar + ./master_deploy.sh -d ECS -e $DEPLOY_ENV -t latest -s ${LOGICAL_ENV}-global-appvar,${LOGICAL_ENV}-${APPNAME}-appvar -i ${APPNAME} +jobs: + # Build & Deploy against development backend + "build-dev": + <<: *defaults + environment: + DEPLOY_ENV: "DEV" + LOGICAL_ENV: "dev" + APPNAME: "application-name" + steps: *builddeploy_steps + + "build-prod": + <<: *defaults + environment: + DEPLOY_ENV: "PROD" + LOGICAL_ENV: "prod" + APPNAME: "application-name" + steps: *builddeploy_steps + +workflows: + version: 2 + build: + jobs: + # Development builds are executed on "develop" branch only. + - "build-dev": + context : org-global + filters: + branches: + only: + - develop + + # Production builds are exectuted only on tagged commits to the + # master branch. + - "build-prod": + context : org-global + filters: + branches: + only: master \ No newline at end of file diff --git a/user-reference-templates/circleci-config-file-template/config-using-buildvar.yml b/user-reference-templates/circleci-config-file-template/config-using-buildvar.yml new file mode 100644 index 0000000..0eb8745 --- /dev/null +++ b/user-reference-templates/circleci-config-file-template/config-using-buildvar.yml @@ -0,0 +1,85 @@ +version: 2 +defaults: &defaults + docker: + - image: circleci/python:2.7-stretch-browsers +install_dependency: &install_dependency + name: Installation of build and deployment dependencies. + command: | + sudo apt install jq + sudo pip install awscli --upgrade + sudo pip install docker-compose + sudo curl -o /usr/local/bin/ecs-cli https://s3.amazonaws.com/amazon-ecs-cli/ecs-cli-linux-amd64-latest + sudo chmod +x /usr/local/bin/ecs-cli +install_deploysuite: &install_deploysuite + name: Installation of install_deploysuite. + command: | + git clone --branch dev_compose_bugfix https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . 
+restore_cache_settings_for_build: &restore_cache_settings_for_build + key: docker-node-modules-{{ checksum "package-lock.json" }} + +save_cache_settings: &save_cache_settings + key: docker-node-modules-{{ checksum "package-lock.json" }} + paths: + - node_modules + +builddeploy_steps: &builddeploy_steps + - checkout + - setup_remote_docker + - run: *install_dependency + - run: *install_deploysuite + - run: + name: Setting env details and running build + command: | + ./awsconfiguration.sh $DEPLOY_ENV + source awsenvconf + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-buildvar + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-deployvar + source buildenvvar + ./build.sh + - deploy: + name: Running MasterScript. + command: | + source awsenvconf + source buildenvvar + # ./build.sh + ./master_deploy.sh -d ECS -e $DEPLOY_ENV -m CLI -t $CIRCLE_BUILD_NUM + +jobs: + # Build & Deploy against development backend + "build-dev": + <<: *defaults + environment: + DEPLOY_ENV: "DEV" + LOGICAL_ENV: "dev" + APPNAME: "application-name" + steps: *builddeploy_steps + + "build-prod": + <<: *defaults + environment: + DEPLOY_ENV: "PROD" + LOGICAL_ENV: "prod" + APPNAME: "application-name" + steps: *builddeploy_steps + +workflows: + version: 2 + build: + jobs: + # Development builds are executed on "develop" branch only. + - "build-dev": + context : org-global + filters: + branches: + only: ['dev'] + + # Production builds are exectuted only on tagged commits to the + # master branch. + - "build-prod": + context : org-global + filters: + branches: + only: master diff --git a/user-reference-templates/circleci-config-file-template/config_testing_template.yaml b/user-reference-templates/circleci-config-file-template/config_testing_template.yaml new file mode 100644 index 0000000..6e135b3 --- /dev/null +++ b/user-reference-templates/circleci-config-file-template/config_testing_template.yaml @@ -0,0 +1,133 @@ +version: 2.1 +parameters: + run_automatedtesting: + default: false + type: boolean + run_basedeployment: + default: true + type: boolean +defaults: &defaults + docker: + - image: circleci/python:2.7-stretch-browsers +install_dependency: &install_dependency + name: Installation of build and deployment dependencies. + command: | + sudo apt install jq + sudo pip install awscli --upgrade + sudo pip install docker-compose +install_deploysuite: &install_deploysuite + name: Installation of install_deploysuite. + command: | + git clone --branch v1.4.3 https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . +restore_cache_settings_for_build: &restore_cache_settings_for_build + key: docker-node-modules-{{ checksum "package-lock.json" }} + +save_cache_settings: &save_cache_settings + key: docker-node-modules-{{ checksum "package-lock.json" }} + paths: + - node_modules + +builddeploy_steps: &builddeploy_steps + - checkout + - setup_remote_docker + - run: *install_dependency + - run: *install_deploysuite + - restore_cache: *restore_cache_settings_for_build + - run: ./build.sh ${APPNAME} + - save_cache: *save_cache_settings + - deploy: + name: Running MasterScript. 
+ command: | + ./awsconfiguration.sh $DEPLOY_ENV + source awsenvconf + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-${APPNAME}-deployvar + source buildenvvar + ./master_deploy.sh -d ECS -e $DEPLOY_ENV -t latest -s ${LOGICAL_ENV}-global-appvar,${LOGICAL_ENV}-${APPNAME}-appvar -i ${APPNAME} + #testing code changes + if [[ true ]]; then + ./buildenv.sh -e $DEPLOY_ENV -b ${LOGICAL_ENV}-qa-v1-appvar + source buildenvvar + curl --request POST \ + --url https://circleci.com/api/v2/project/github/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pipeline \ + --header "Circle-Token: ${QA_USER_TOKEN}" \ + --header 'content-type: application/json' \ + --data '{"branch":"'"$CIRCLE_BRANCH"'","parameters":{"run_automatedtesting":true , "run_basedeployment": false}}' + fi +jobs: + # Build & Deploy against development backend + "build-dev": + <<: *defaults + environment: + DEPLOY_ENV: "DEV" + LOGICAL_ENV: "dev" + APPNAME: "application-name" + steps: *builddeploy_steps + + "build-prod": + <<: *defaults + environment: + DEPLOY_ENV: "PROD" + LOGICAL_ENV: "prod" + APPNAME: "application-name" + steps: *builddeploy_steps + + "Run-Newman-Test": + docker: + - image: circleci/node:12 + - image: tray/dynamodb-local + command: "-inMemory -port 7777" + - image: elasticsearch:6.8.13 + environment: + discovery.type: "single-node" + steps: + - checkout + - setup_remote_docker + - run: + name: 'newman test' + command: | + npm i + cd local + cd .. + npm run create-tables:test + cd mock + (npm run mock-challenge-api&) + (NODE_ENV=test npm start&) + npm run test:newman +workflows: + version: 2 + build: + when: << pipeline.parameters.run_basedeployment >> + jobs: + # Development builds are executed on "develop" branch only. + - "build-dev": + context : org-global + filters: + branches: + only: + - develop + - dev-circleci + + # Production builds are exectuted only on tagged commits to the testing + # master branch. 
+ - "build-prod": + context : org-global + filters: + branches: + only: master + + testingflow: + when: << pipeline.parameters.run_automatedtesting >> + jobs: + - Hold [Performance-Testing]: + type: approval + - "Run-Newman-Test": + context : org-global + requires: + - Hold [Performance-Testing] + filters: + branches: + only: + - dev-circleci \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-containermemory.json b/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-containermemory.json new file mode 100644 index 0000000..7c32af0 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-containermemory.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_CONTAINER_MEMORY_RESERVATION": "2000" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-cpuchange.json b/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-cpuchange.json new file mode 100644 index 0000000..20041f0 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-ec2-cpuchange.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_CONTAINER_CPU": "300" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-cpuchange.json b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-cpuchange.json new file mode 100644 index 0000000..aef8e6f --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-cpuchange.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "3000:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_FARGATE_CPU": "2048" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-deployment-timeext.json b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-deployment-timeext.json new file mode 100644 index 0000000..9bed936 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-deployment-timeext.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "3000:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "COUNTER_LIMIT": "12" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-memorychange.json b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-memorychange.json new file mode 100644 index 0000000..d074bdb --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate-memorychange.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "3000:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_FARGATE_MEMORY": "3000" +} \ No newline at end of file diff --git 
a/user-reference-templates/deployvar-files/deloyvar-ecs-fargate.json b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate.json new file mode 100644 index 0000000..9706370 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-fargate.json @@ -0,0 +1,8 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "3000:3000:TCP", + "AWS_REPOSITORY": "repositoryname" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-std-Rolename.json b/user-reference-templates/deployvar-files/deloyvar-ecs-std-Rolename.json new file mode 100644 index 0000000..f145259 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-std-Rolename.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_TASK_ROLE_ARN": "IAMRoleName" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-std-deployment-timeext.json b/user-reference-templates/deployvar-files/deloyvar-ecs-std-deployment-timeext.json new file mode 100644 index 0000000..31d71ef --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-std-deployment-timeext.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "COUNTER_LIMIT": "12" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-std-processorhealth.json b/user-reference-templates/deployvar-files/deloyvar-ecs-std-processorhealth.json new file mode 100644 index 0000000..0b290ab --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-std-processorhealth.json @@ -0,0 +1,9 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname", + "AWS_ECS_CONTAINER_HEALTH_CMD": "/usr/bin/curl -f http://localhost:3000/health || exit 1" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deloyvar-ecs-std.json b/user-reference-templates/deployvar-files/deloyvar-ecs-std.json new file mode 100644 index 0000000..4e49874 --- /dev/null +++ b/user-reference-templates/deployvar-files/deloyvar-ecs-std.json @@ -0,0 +1,8 @@ +{ + "AWS_ECS_CLUSTER": "clustername", + "AWS_ECS_SERVICE": "servicename", + "AWS_ECS_TASK_FAMILY": "taskfamilyname", + "AWS_ECS_CONTAINER_NAME": "containername", + "AWS_ECS_PORTS": "0:3000:TCP", + "AWS_REPOSITORY": "repositoryname" +} \ No newline at end of file diff --git a/user-reference-templates/deployvar-files/deployvar-lambda-sls.json b/user-reference-templates/deployvar-files/deployvar-lambda-sls.json new file mode 100644 index 0000000..6c873ca --- /dev/null +++ b/user-reference-templates/deployvar-files/deployvar-lambda-sls.json @@ -0,0 +1,4 @@ +{ + "AWS_LAMBDA_DEPLOY_TYPE": "SLS", + "AWS_LAMBDA_STAGE": "dev" + } \ No newline at end of file diff --git a/user-reference-templates/jenkins-config-file-template/Jenkinsfile-multi-stage-multi-agent-with-approval 
b/user-reference-templates/jenkins-config-file-template/Jenkinsfile-multi-stage-multi-agent-with-approval new file mode 100644 index 0000000..e92c118 --- /dev/null +++ b/user-reference-templates/jenkins-config-file-template/Jenkinsfile-multi-stage-multi-agent-with-approval @@ -0,0 +1,130 @@ +//Define Application name +def APPNAME = "application-name" + +// Define which branch build and deploy need to run in the below array +def branchfilter = ['dev-jenkins'] + +if (!branchfilter.contains(env.BRANCH_NAME)) { + println 'Now is not the time to run the pipeline.' + println 'Exiting without running subsequent stages.' + println env.BRANCH_NAME + currentBuild.result = 'SUCCESS' + return +} + +//Define branch specific var +if (env.BRANCH_NAME == 'dev-jenkins' || env.BRANCH_NAME == 'dev-env') { + DEPLOY_ENV = 'DEV' + LOGICAL_ENV = 'dev' + IS_BUILD = true + IS_DEPLOY = true +} +if (env.BRANCH_NAME == 'master-jenkins') { + DEPLOY_ENV = 'PROD' + LOGICAL_ENV = 'prod' + IS_BUILD = true + IS_DEPLOY = false +} + +pipeline { + agent none + environment { + CI_AUTH0_URL = credentials('CI_AUTH0_URL') + CI_AUTH0_CLIENTID = credentials('CI_AUTH0_CLIENTID') + CI_AUTH0_CLIENTSECRET = credentials('CI_AUTH0_CLIENTSECRET') + CI_AUTH0_AUDIENCE = credentials('CI_AUTH0_AUDIENCE') + } + stages { + stage('build and deploy') { + agent { + label 'tc-jenkins-ecs-agent' + } + options { skipDefaultCheckout() } + stages + { + stage('checkout') + { + steps { + //cheking out code + checkout scm + script { + giturltoken = scm.getUserRemoteConfigs()[0].getUrl().tokenize('/') + env.TC_GIT_ORG = giturltoken.get(giturltoken.size()-2) + env.TC_REPONAME = scm.getUserRemoteConfigs()[0].getUrl().tokenize('/').last().split("\\.")[0] + env.TC_GIT_COMMIT_MSG = sh (script: 'git log -1 --pretty=%B ${GIT_COMMIT}', returnStdout: true).trim() + env.TC_GIT_AUTHOR = sh (script: 'git log -1 --pretty=%an ${GIT_COMMIT}', returnStdout: true).trim() + env.TC_GIT_HASH = sh (script: 'git log -1 --pretty=%h ${GIT_COMMIT}', returnStdout: true).trim() + } + } + + } + stage('pre-req install') + { + steps { + //Installing required pre-req software + sh ''' + #!/bin/bash + apt update -y + apt install awscli jq curl -y + curl https://get.docker.com/ > dockerinstall && chmod 777 dockerinstall && ./dockerinstall + git clone --branch dev-jenkins https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . 
+ ''' + } + } + stage('fetching configuration') + { + steps { + //fetching pre-req + sh """ + #!/bin/bash + ./awsconfiguration.sh ${DEPLOY_ENV} + ./buildenv.sh -e ${DEPLOY_ENV} -b ${LOGICAL_ENV}_${APPNAME}_buildvar,${LOGICAL_ENV}_${APPNAME}_deployvar + """ + load 'awsenvconfg' + load 'buildenvvarg' + } + } + stage('build application') + { + //Building Application + when { expression { IS_BUILD } } + steps { + // Doing Build + sh """ + #!/bin/bash + ./build.sh + """ + } + } + stage('deploy') + { + //Deploying app + when { expression { IS_DEPLOY } } + steps { + //Doing Deployment + echo "Deploying application" + //input(message: 'Hello World!', ok: 'Submit') + sh """ + #!/bin/bash + ./master_deploy.sh -d ECS -e ${DEPLOY_ENV} -t latest -s ${LOGICAL_ENV}_${APPNAME}_taskvar -i communityapp + """ + } + } + } + + } + stage('Kindly provide approval for testing') + { + agent none + steps { + input(message: 'Hello World!', ok: 'Submit') + } + } + + } + +} + diff --git a/user-reference-templates/jenkins-config-file-template/Jenkinsfile-std-SingleAgent b/user-reference-templates/jenkins-config-file-template/Jenkinsfile-std-SingleAgent new file mode 100644 index 0000000..e898626 --- /dev/null +++ b/user-reference-templates/jenkins-config-file-template/Jenkinsfile-std-SingleAgent @@ -0,0 +1,160 @@ +// Define Application name +def APPNAME = "application-name" + +// Define which branch build and deploy need to run in the below array + +def branchfilter = ['dev'] + +if (!branchfilter.contains(env.BRANCH_NAME)) { + println 'Now is not the time to run the pipeline.' + println 'Exiting without running subsequent stages.' + println env.BRANCH_NAME + currentBuild.result = 'SUCCESS' + return +} + +//Define branch specific var +if (env.BRANCH_NAME == 'dev') { + DEPLOY_ENV = 'DEV' + LOGICAL_ENV = 'dev' + IS_BUILD = true + IS_DEPLOY = true + IS_APP_DEPLOY = true + IS_API_DEPLOY = true + ENABLE_CACHE = false +} + +// NOTE: main/prod is not supported yet +if (env.BRANCH_NAME == 'main') { + DEPLOY_ENV = 'PROD' + LOGICAL_ENV = 'prod' + IS_BUILD = true + IS_DEPLOY = true + IS_APP_DEPLOY = true + IS_API_DEPLOY = true + ENABLE_CACHE = true +} + +pipeline { + agent { + label 'tc-ecs-agent-large' + } + + environment { + CI_AUTH0_URL = credentials('CI_AUTH0_URL') + CI_AUTH0_CLIENTID = credentials('CI_AUTH0_CLIENTID') + CI_AUTH0_CLIENTSECRET = credentials('CI_AUTH0_CLIENTSECRET') + CI_AUTH0_AUDIENCE = credentials('CI_AUTH0_AUDIENCE') + } + + options { skipDefaultCheckout() } + + stages + { + stage('checkout') + { + steps { + //cheking out code + checkout scm + script { + giturltoken = scm.getUserRemoteConfigs()[0].getUrl().tokenize('/') + env.TC_GIT_ORG = giturltoken.get(giturltoken.size()-2) + env.TC_REPONAME = scm.getUserRemoteConfigs()[0].getUrl().tokenize('/').last().split("\\.")[0] + env.TC_GIT_COMMIT_MSG = sh (script: 'git log -1 --pretty=%B ${GIT_COMMIT}', returnStdout: true).trim() + env.TC_GIT_AUTHOR = sh (script: 'git log -1 --pretty=%an ${GIT_COMMIT}', returnStdout: true).trim() + env.TC_GIT_HASH = sh (script: 'git log -1 --pretty=%h ${GIT_COMMIT}', returnStdout: true).trim() + if (sh(script: "git log -1 --pretty=%B | fgrep -ie '[skip ci]' -e '[ci skip]'", returnStatus: true) == 0) { + currentBuild.result = 'NOT_BUILT' + error 'Aborting because commit message contains [skip ci]' + } + } + } + + } + stage('pre-req install') + { + steps { + //Installing required pre-req software + sh ''' + #!/bin/bash + apt update -y + apt install awscli jq curl sudo -y + curl https://get.docker.com/ > dockerinstall && chmod 777 
dockerinstall && ./dockerinstall + git clone --branch dev-jenkins https://github.com/topcoder-platform/tc-deploy-scripts ../buildscript + cp ./../buildscript/master_deploy.sh . + cp ./../buildscript/buildenv.sh . + cp ./../buildscript/awsconfiguration.sh . + curl -sL https://deb.nodesource.com/setup_16.x | sudo bash - + apt -y install nodejs + ''' + } + } + stage('fetching configuration') + { + steps { + //fetching pre-req + sh """ + #!/bin/bash + ./awsconfiguration.sh ${DEPLOY_ENV} + echo "Fetching deployvar" + ./buildenv.sh -e ${DEPLOY_ENV} -b ${LOGICAL_ENV}-${APPNAME}-deployvarj + mv buildenvvar deployenvvar + mv buildenvvarg deployenvvarg + echo "Fetching Buildvar" + ./buildenv.sh -e ${DEPLOY_ENV} -b ${LOGICAL_ENV}-${APPNAME}-buildvar + """ + load 'awsenvconfg' + load 'deployenvvarg' + load 'buildenvvarg' + } + } + stage('build application') + { + //Building Application + when { expression { IS_BUILD } } + steps { + // Doing Build + sh """ + #!/bin/bash + node --version + npm --version + git config --global url."https://git@".insteadOf git:// + npm ci + npm run build + ls -lath + """ + } + } + stage('appdeploy') + { + //Deploying app + when { expression { IS_APP_DEPLOY } } + steps { + //Doing Deployment + echo "Deploying application" + //input(message: 'Hello World!', ok: 'Submit') + sh """ + #!/bin/bash + ./master_deploy.sh -d CFRONT -e $DEPLOY_ENV -c $ENABLE_CACHE + """ + } + } + stage('apideploy') + { + //Deploying app + when { expression { IS_API_DEPLOY } } + steps { + //Doing Deployment + echo "Deploying Api" + // input(message: 'Hello World!', ok: 'Submit') + sh """ + #!/bin/bash + sed -i '/node_modules/d' ./.dockerignore + docker build -f docker/api/ECSDockerfile -t $APPNAME-api:latest . + ./master_deploy.sh -d ECS -e $DEPLOY_ENV -t latest -s ${LOGICAL_ENV}-${APPNAME}-appvar -i ${APPNAME}-api + """ + } + } + } + +} \ No newline at end of file