#!/bin/bash
# ==================================================================================
# Utility to help with Kubernetes/GCE operations when working with OVERCAST project.
# ==================================================================================

SCRIPTDIR=$(dirname "$0")
DEBUG=0
ARGS="$@"

# Workaround for initial 'env-set': the marker files consumed by _environment
# do not exist yet, so create them before sourcing it below.
if [ ! -f "$HOME/.overcast" ] && [ "$1" == "env-set" ]; then
    echo "$2" > "$HOME/.overcast"
    echo prod > "$HOME/.overcast-prod"
fi

# Shared settings and helpers (die, list-env, set-env, wait-confirmation, ...)
. "$SCRIPTDIR/_environment"
. "$SCRIPTDIR/_functions"
# Per-environment overrides from the cluster's Git configuration clone, if present
[ -f "$STAGE_DIR/git/$CLUSTER_ENV/env.conf" ] && . "$STAGE_DIR/git/$CLUSTER_ENV/env.conf"

# Quoted so an unset/empty CLUSTER_TYPE fails the test cleanly instead of
# emitting a '[: unary operator expected' error before dying.
[ "$CLUSTER_TYPE" == k8s ] || die 4 "This script will work only for Kubernetes/GCE based environments!"

export QUIET=0
COMMAND=$1
SELECTED_MODULE_DIR=$2      # second positional argument; also used as savepoint name
NOCERT=0
LOGFILE="$LOG_DIR/overcast-deployer.log"

# Generic REGISTRY_* variables, when set, take precedence for the VAE registry
if [ -n "$REGISTRY_HOST" ]; then
    VAE_REGISTRY_HOST=$REGISTRY_HOST
    VAE_REGISTRY_USERNAME=$REGISTRY_USERNAME
    VAE_REGISTRY_PASSWORD=$REGISTRY_PASSWORD
fi

export VAE_REGISTRY_HOST
export VAE_REGISTRY_USERNAME
export VAE_REGISTRY_PASSWORD

# Parse option-style arguments (they may appear anywhere on the command line)
for ARG in "$@"; do
    case $ARG in
        --debug)
            export DEBUG=1
            ;;
        -s=*)
            export SCALE="${ARG#*=}"        # scale factor for scale-pods
            ;;
        --upstream=*)
            export UPSTREAM="${ARG#*=}"     # upstream branch for conf-setup-repo
            ;;
        --nocert)
            # Fixed: was '$NOCERT=1', which expanded to the bogus command '0=1'
            # ("command not found") and never actually set the flag.
            export NOCERT=1
            ;;
        --module-version=*)
            export MODULE_VERSION="${ARG#*=}"
            ;;
        --module-dir=*)
            export MODULE_DIR="${ARG#*=}"
            ;;
        -i=*)
            export MODULE="${ARG#*=}"
            ;;
        -q|--quiet)
            export QUIET=1
            ;;
        -v=*|--version=*)
            VERSION="${ARG#*=}"
            # Most commands require a staged image list file for the version
            if [[ "$COMMAND" != "ver-set" && "$COMMAND" != "push-vae-image" && "$COMMAND" != "deploy-module" ]]; then
                [ -f "$STAGE_DIR/$VERSION/overcast.$VERSION" ] || die 20 "Missing product versioned images list file for requested version=$VERSION"
            fi
            ;;
        --force)
            FORCE=1
            ;;
    esac
done

# Duplicate output to a log
#echo -e "\n$(date) ---------------------------------------------------------------------" >> $LOGFILE
#echo "$(date) overcast-deployer started. ARGS: $ARGS" >> $LOGFILE
#exec > >(tee -a $LOGFILE) 2>&1

# ------------------------------------- USAGE ----------
# ------------------------------------- USAGE ----------
# Print the command reference to stdout.
function USAGE {
cat<<ENDUSAGE
Utility to help with Kubernetes/GCE operations when working with OVERCAST project.

USAGE: overcast-deployer COMMAND [(-v|--version)=V.E.R] [--force]

    env-list                        list configured/registered clusters/environments
    env-set                         set current working cluster/environment

    create-cluster                  create an empty Kubernetes cluster
    upgrade-cluster                 upgrade Kubernetes cluster to the latest version
    destroy-cluster                 destroy Kubernetes cluster

    conf-setup-repo [--upstream=..] setup local cluster git (config) repository clone and set
                                    upstream remote (one of 'prod'|'beta'|'dev', default is 'prod')
    conf-pull-upstream              update environment git (configuration) repository from upstream
    conf-commit                     commit local changes and push them to origin
    conf-sync                       update configuration files inside containers

    deploy-status                   show deployment status
    deploy -v=V.E.R                 deploy/upgrade Overcast' "skeleton" objects and dependencies
    destroy --force                 destroy Overcast related objects in cluster
    pull-images                     pull images from production registry
    deploy-module                   deploy single module to K8S

    ver-list                        display Overcast version and versions of all modules
    ver-set -v=V.E.R -i=module      Change currently used image version for module

    cluster-ip                      Print current cluster IP

    scale-pods -s=x.xx              scale pod autoscalers by factor of x.xx (example: 0.25 to reduce by 4 / 2 to double)

    set-node-selectors              Set node-selectors for pods listed in k8s-patches/node-selectors.list (Git)
    set-resources                   Set resources limits for pods described in k8s-patches/resources.list (Git)
    patch-deployments               Apply generic json patches listed in k8s-patches/deployment-patches.list (Git)
    
    list-savepoints                 List available savepoints
    create-savepoint <name>         Create savepoint
    delete-savepoint <name>         Delete specified savepoint
    restore-savepoint <name>        Restore image versions from specified savepoint

ENDUSAGE
}


# ------------------------------------- conf-setup-repo --------------
# ------------------------------------- conf-setup-repo --------------
# Clone the per-cluster Git configuration repository into $HOME, record the
# tracked upstream branch in the 'upstream-branch' marker file, register the
# shared base repository as the 'upstream' remote and pull from it.
function conf-setup-repo {

    # Map requested upstream name to a branch ('dev' and anything else -> master)
    case $UPSTREAM in
        prod)
            BRANCH=prod
            ;;
        beta)
            BRANCH=beta
            ;;
        *)
            BRANCH=master
    esac

    echo Using upstream: $BRANCH

    local OLD_PWD=$(pwd)
    cd      # the clone lives directly in $HOME

    # Repository directory name = basename of the clone URL without '.git'
    local REPO_DIR=${GIT_CONFIG_REPO##*/}; REPO_DIR=${REPO_DIR%%.git}

    if [ -d "$REPO_DIR" ]; then
        echo Local clone already exists, cowardly refusing to create a new one!
        cd "$REPO_DIR"
    else
        echo Making a local clone, setting remote to upstream branch: $BRANCH
        git clone --depth 1 $(git-config-url)
        cd "$REPO_DIR"
    fi

    # The marker file pins the upstream branch this clone was established with;
    # refuse to silently switch branches on an existing clone.
    if [ -f upstream-branch ]; then
        SAVED_BRANCH=$(cat upstream-branch)
        if [ "$SAVED_BRANCH" != "$BRANCH" ]; then
            echo ERROR: Repository was already established with an upstream branch: $SAVED_BRANCH
            echo Try to repeat this command using correct branch!
            exit 1
        fi
    else
        echo $BRANCH > upstream-branch
        git add upstream-branch
    fi

    # Idempotent: re-running used to print a 'remote upstream already exists' error
    git remote get-url upstream &>/dev/null || git remote add upstream $GIT_CONFIG_BASE_REPO

    conf-pull-upstream

    cd "$OLD_PWD"
}

# -------------------------------------- conf-pull-upstream --------------
# -------------------------------------- conf-pull-upstream --------------
# Merge the tracked upstream branch (recorded in the 'upstream-branch'
# marker file) into the local configuration repository clone.
function conf-pull-upstream {
    local prev_dir
    prev_dir=$(pwd)

    local repo_dir=${GIT_CONFIG_REPO##*/}
    repo_dir=${repo_dir%%.git}

    if [ ! -d ~/$repo_dir ]; then
        echo "ERROR: No local clone found. Use 'conf-setup-repo' first!"
        exit 1
    fi

    cd ~/$repo_dir

    if [ ! -f upstream-branch ]; then
        echo "ERROR: Repository has not 'upstream-branch' marker saved!"
        exit 1
    fi

    BRANCH=$(cat upstream-branch)
    echo Pulling updates from upstream branch: $BRANCH

    git fetch upstream
    git merge upstream/$BRANCH

    cd "$prev_dir"
}


# --------------------------------------- conf-commit -----------------
# --------------------------------------- conf-commit -----------------
# Commit all local changes in the configuration repository clone and push
# them to origin.
function conf-commit {
    local OLD_PWD=$(pwd)
    local REPO_DIR=${GIT_CONFIG_REPO##*/}; REPO_DIR=${REPO_DIR%%.git}

    if [ ! -d ~/$REPO_DIR ]; then
        echo ERROR: No local clone found. Use \'conf-setup-repo\' first!
        exit 1
    fi

    # Fixed: the function verified the clone exists but never entered it, so
    # 'git commit'/'git push' ran in whatever directory the caller was in.
    cd ~/$REPO_DIR

    git commit -a
    git push

    cd "$OLD_PWD"
}


# ------------------------------------- conf-check-version -------------
# ------------------------------------- conf-check-version -------------
# Ensure the configuration repository's 'version' file (default 'latest')
# matches the requested $VERSION, then copy base.conf and env.conf into the
# staging area (needed there for DB-related variables).
function conf-check-version {

    local saved_pwd
    saved_pwd=$(pwd)
    cd

    local repo_dir=${GIT_CONFIG_REPO##*/}
    repo_dir=${repo_dir%%.git}

    # Refresh an existing clone, or create one on first use
    if [ -d "$repo_dir" ]; then
        cd "$repo_dir"
        git pull
    else
        git clone --depth 1 $(git-config-url)
        cd "$repo_dir"
    fi

    local git_version=latest
    if [ -f version ]; then
        git_version=$(cat version)
    else
        echo version file missing in repository. Considering 'latest' by default
    fi
    if [ "$git_version" != "$VERSION" ]; then
        echo ERROR: Repository was already established with another version: $git_version
        echo Try to repeat this command using correct version!
        exit 1
    fi

    # Copy base.conf and env.conf to stage. We need them for DB-related variables
    cp -f base.conf env.conf $STAGE_DIR/$VERSION/etc/
    cd "$saved_pwd"
}

# ------------------------------------- conf-sync --------------------
# ------------------------------------- conf-sync --------------------
# Run 'git pull' inside /opt/sarch/etc of every pod in the 'overcast'
# namespace so containers pick up the latest configuration files.
function conf-sync {
    local pod
    for pod in $(kubectl get pods -n overcast | grep -v ^NAME | awk '{print $1}'); do
        echo "Processing pod $pod"
        kubectl exec "$pod" -- sh -c "cd /opt/sarch/etc && git pull 1>/dev/null"
    done
}

# ----------------------------------------- deploy-conf --------------
# ----------------------------------------- deploy-conf --------------
# Recreate the 'overcast-env' ConfigMap from the staged environment file.
function deploy-conf {
    local stage=$STAGE_DIR/$VERSION
    [ -d "$stage" ] || die 99 "No directory found: stage/$VERSION"
    [ -d "$stage/etc" ] || die 99 "No directory found: stage/$VERSION/etc"

    # Delete-then-create because 'kubectl create' refuses to overwrite
    echo "Recreate ConfigMap \"overcast-env\""
    kubectl delete configmap overcast-env 2>/dev/null
    kubectl create configmap overcast-env --from-file="$stage/etc/staging-env.conf"
}

# ----------------------------------------- deploy-cert --------------
# ----------------------------------------- deploy-cert --------------
# Install the staged TLS key/certificate pair into the Ingress.
# Skipped entirely when --nocert was given. Missing cert files are fatal on a
# fresh deploy; on an upgrade they are tolerated and the step is skipped
# (previously the code fell through and invoked tls-helper with nonexistent
# files).
function deploy-cert {
    CERT_DIR=$STAGE_DIR/$VERSION/cert
    [ $NOCERT -eq 1 ] && return
    if [ ! -d $CERT_DIR ] || [ ! -f $CERT_DIR/tls.key ] || [ ! -f $CERT_DIR/tls.crt ]; then
        if [ $OVERCAST_UPGRADE -eq 0 ]; then
            die 56 "Missing certificate/key files. Specify '--nocert' if you want to skip certificate deployment stage"
        fi
        # Upgrade: keep whatever certificate is already deployed
        return 0
    fi
    echo Found private key and certificate. Installing into Ingress
    tls-helper deploy-cert --key=$CERT_DIR/tls.key --cert=$CERT_DIR/tls.crt
}

# -------------------------------------- register-with-camelot -------
# -------------------------------------- register-with-camelot -------
# Generate a customer RSA key pair, submit a CSR to the Camelot service for
# signing, and store the resulting key + certificate in the 'customer-keys'
# Kubernetes secret.
# NOTE(review): pushd below has no matching popd in this function — the caller
# is left inside 'camelot' (deploy-overcast's later popd compensates); confirm
# this is intended.
function register-with-camelot {
    # Fresh working directory for the key material
    mkdir -p camelot
    rm -rf camelot/*
    pushd camelot

    # 4096-bit private key + certificate signing request
    # NOTE(review): 'openssl req' without -subj/-batch prompts interactively
    openssl genrsa -out private.key 4096
    openssl req -new -key private.key -out private.csr

    # POST the CSR using digest auth; signed certificate lands in cert.crt,
    # the HTTP status code is captured for the success check below
    STATUSCODE=$(curl --silent --stderr err.log --output cert.crt --digest -u "$CUSTOMER_ID:$CUSTOMER_PASSWORD" -X POST \
        --data-binary @private.csr -H "Content-Type: text/plain;charset=\"utf-8\"" \
        --cookie-jar ck.jar --anyauth https://camelot.videonext.com/key/$CUSTOMER_ID -v --write-out "%{http_code}")
    if [ $STATUSCODE -ne 200 ]; then
        die 18 "CustomerId registration failed!"
    fi

    # Persist the key pair in-cluster for pods to mount
    kubectl create secret generic customer-keys --from-file=private.key --from-file=cert.crt
    [ $? != 0 ] && die 19 "Error storing camelot keys into secret"

    echo
    echo CustomerId registered successfully
}

# ----------------------------------------- deploy-overcast ----------
# ----------------------------------------- deploy-overcast ----------
# Top-level 'deploy' entry point: validates environment, images and version,
# performs first-time initialization (namespace, databases, deployment
# ConfigMap, Camelot registration) when the 'overcast' namespace does not
# exist yet, then hands off to perform-deployment for the actual rollout.
function deploy-overcast {
    # Check if environment is complete
    [ -f $STAGE_DIR/git/$CLUSTER_ENV/env.conf ] || die 54 "Missing Git environment files. Do 'env-set' first"
    # check images are in place
    local MISSING=$(image-service product-status -v=$VERSION | grep MISSING)
    if [ -n "$MISSING" ]; then
        image-service product-status -v=$VERSION
        die 91 'Can not deploy product with missing images'
    fi
    # Check if version in Git repo matches current one
    conf-check-version
    [ -d $STAGE_DIR/$VERSION ] || die 99 "No directory found: stage/$VERSION"
    pushd $STAGE_DIR/$VERSION
    # Check if stage environment is created for current cluster
    local LOCAL_ENV=$(cat ./cluster-env)
    if [ $CLUSTER_ENV != $LOCAL_ENV ]; then
        die 99 "This staging environment was created for cluster $LOCAL_ENV"
    fi
    # presence of "overcast" namespace is an indicator if this is a fresh install or update
    # ($NS captures the STATUS column, e.g. "Active"/"Terminating"; empty if absent)
    NS=$(kubectl get namespace overcast 2>/dev/null | grep -v ^NAME | awk '{print $2}')
    if [ -z "$NS" ]; then
        echo Namespace \"overcast\" does not yet exist, performing \"fresh deploy\"
        export OVERCAST_UPGRADE=0

        # create and set namespace for OVERCAST
        kubectl create namespace overcast
        kubectl config set-context $(kubectl config view | grep current-context | awk '{print $2}') --namespace=overcast

        # Create database
        echo Creating database
        db-conf create -v=$VERSION -d=apl
        db-conf create -v=$VERSION -d=audit

        # ConfigMap to store migration pointer, deployment checksums, status, etc...
        kubectl create configmap overcast-deployment --from-literal=deploy.pointer=000 --from-literal=deploy.version=0.0.0

        # Register cluster with camelot
        register-with-camelot

        # NOTE(review): register-with-camelot leaves us in its 'camelot'
        # subdirectory, so this popd returns to $STAGE_DIR/$VERSION. The
        # upgrade path below never pops the pushd above — confirm intended.
        popd
    elif [ $NS == "Terminating" ]; then
        die 99 "Previous Overcast deployment is being destroyed"
    else
        echo Namespace \"overcast\" already exists, performing \"upgrade\"
        export OVERCAST_UPGRADE=1
    fi
    #
    perform-deployment
}

# ------------------------------- perform-deployment ----------
# ------------------------------- perform-deployment ----------
# Roll the deployment forward to $VERSION: run DB migrations, refresh config
# and certificate, push & apply all images, then execute the numbered
# deploy-NNN-* scripts whose level is above the saved migration pointer,
# recording a checksum for each so reruns can detect altered scripts.
function perform-deployment {

    # Roll database migrations forward
    db-conf update -v=$VERSION -d=apl
    db-conf update -v=$VERSION -d=audit

    # Deploy configuration files and templates
    deploy-conf

    # Deploy key and certificate if exists
    deploy-cert
    
    # Images are processed first, so that deployments are created and the rest of deployment scripts
    # have a chance to work with configurations and APIs (pay attention to "wait-for-xxx" calls there)
    process-images

    # First check if version change is possible in automated mode:
    # the saved migration pointer must not be ahead of the target level
    TRG_LEVEL=$(grep BASE_DEPLOY_LEVEL $STAGE_DIR/$VERSION/overcast.$VERSION | cut -d: -f2)
    POINTER=$(kubectl get configmap overcast-deployment -o yaml|grep deploy.pointer|grep -v overcast|cut -d: -f2|tr -d '" ')
    echo Current Overcast migration pointer=$POINTER, target deployment level=$TRG_LEVEL
    if (( POINTER > TRG_LEVEL )); then
        die 99 'Trying to roll deployment back in "base deployment level" is not supported for automated mode.'
    fi

    # Now, apply K8S updates; status stays "inconsistent" until everything succeeds
    kubectl patch configmap overcast-deployment -p '{"data": {"deploy.status": "inconsistent"}}'

    pushd $STAGE_DIR/$VERSION
    # NOTE(review): 'ls | while' runs the loop body in a subshell, so 'die'
    # inside it likely terminates only that subshell, not the whole script —
    # execution would continue after 'done'. Confirm against _functions' die.
    ls deploy-* | \
    while read SCRIPT_FULL; do
        SCRIPT_SHORT=$(basename $SCRIPT_FULL)
        # Only scripts named deploy-<level>-... participate; <level> orders them
        if [[ $SCRIPT_SHORT =~ ^deploy-([0-9]+)- ]]; then
            LEVEL=${BASH_REMATCH[1]}
            #echo $LEVEL
        else
            continue
        fi

        CHECKSUM=$(cat $SCRIPT_SHORT | md5sum | tr -d '" -')

        (( DEBUG == 0 )) || echo CHECKSUM=$CHECKSUM SCRIPT_FULL:$SCRIPT_FULL SCRIPT_SHORT:$SCRIPT_SHORT LEVEL:$LEVEL DEBUG:$DEBUG
        if (( LEVEL > POINTER )); then
            # Not yet applied: execute, advance the pointer, save the checksum
            echo EXECUTING: $SCRIPT_SHORT
            ./$SCRIPT_FULL \
                && kubectl patch configmap overcast-deployment -p '{"data": {"deploy.pointer": "'$LEVEL'"}}' \
                || die 1 "Errors detected in $SCRIPT_SHORT, STOPPING"
            kubectl patch configmap overcast-deployment -p '{"data": {"'$SCRIPT_SHORT'": "'$CHECKSUM'"}}'
        else
            # still, let's confirm script was not altered
            SAVED_CHECKSUM=$(kubectl get configmap overcast-deployment -o yaml|grep $SCRIPT_SHORT | cut -d: -f2|tr -d '" ')
            if [ "$SAVED_CHECKSUM" == "$CHECKSUM" ]; then
                echo SKIPPING \(already installed\): $SCRIPT_SHORT, checksum OK
            else
                die 13 "Script: $SCRIPT_SHORT, checksum mismatch (found:$CHECKSUM / expected:$SAVED_CHECKSUM)"
            fi
        fi
    done
    popd
    
    # Apply deployment patches: resources, replicas, node-labels
    set-resources
    set-replicas
    
    kubectl patch configmap overcast-deployment -p '{"data": {"deploy.version": "'$VERSION'", "deploy.status": "consistent"}}'

    echo Deployment/migration status:
    kubectl get configmap overcast-deployment -o yaml|grep deploy|egrep -v '(name:|selfLink:)'|sort

    # Interactive pod monitor; the user exits with Ctrl-C
    watch "echo 'Monitoring application PODs (hit [Ctrl-C] at any time to exit)'; echo; echo; kubectl get pod"
}

# ----------------------------------- process-images ----------
# ----------------------------------- process-images ----------
# Push every image from the staged image list and apply its K8S manifests,
# with 'overcast/api-php' first and 'overcast/kubectl' second since other
# modules depend on the API/configuration they provide. Per-module manifest
# overrides may come from the Git clone's k8s-patches/deploy directory.
function process-images {
    local REPO_DIR=${GIT_CONFIG_REPO##*/}; REPO_DIR=${REPO_DIR%%.git}
    local PATCH_DIR=~/$REPO_DIR/k8s-patches/deploy
    LIST_FILE=$(readlink -f $STAGE_DIR/$VERSION/image-list)
    [ -f  "$LIST_FILE" ] || die 8 "Missing image list file: $LIST_FILE"
    # most of other images depend on configuration provided by overcast-shared deployment
    # move "overcast-shared" / "api-php" to the front
    KUBECTL_VER=$(cat $LIST_FILE | perl -ne 'print $1 if /overcast\/kubectl:(\S+)\s/')
    KUBECTL=$(cat $LIST_FILE | perl -ne 'print $1 if /(overcast\/kubectl\S+)\s/')
    API_PHP=$(cat $LIST_FILE | perl -ne 'print $1 if /(overcast\/api-php\S+)\s/')
    LIST_REST=$(cat $LIST_FILE | sed -E 's%overcast/api-php\S+\s%%' | sed -E 's%overcast/kubectl\S+\s%%' )
    #
    for IMG_VER in $API_PHP $KUBECTL $LIST_REST; do
        # IMG_VER=overcast/api-php:1.0
        # note, that 'VER' will be used in 'substitute' script when patching YAML (!!!)
        export VER=${IMG_VER##*:}
        export IMAGE=${IMG_VER%%:*}
        export SHORTNAME=${IMAGE##*/} # ex.: ptz
        export IVER=:$VER
        export KUBECTL_VER
        
        # Push VAE images to updates server
        if [[ $SHORTNAME =~ ^vae- ]]; then
            push-vae-image
        fi

        # Push the image to the registry via the version's helper script
        $STAGE_DIR/$VERSION/docker-push

        # Apply all deployment scripts found in 'deploy'
        export K8S_DOCKER_REGISTRY K8S_PROJECT_ID SHORTNAME VER
        # If we have replacement deploy scripts for this image in 
        # $PATCH_DIR then deploy from there
        if [ -d "$PATCH_DIR/$SHORTNAME" ]; then
            pushd $PATCH_DIR/$SHORTNAME &>/dev/null
        else
            pushd $STAGE_DIR/$VERSION/$SHORTNAME &>/dev/null
        fi
        for script in $(ls *.yaml 2>/dev/null); do
            echo Applying $SHORTNAME/$script
            cat $script | substitute | kubectl apply -f -
        done
        popd &>/dev/null

        # Some images call PHP API methods so they depend on api-php pod => WAIT
        [[ $IMG_VER == *api-php* ]] && wait-for-pod api-php
    done
}

# --------------------------------------------- push-images -----
# Push every image from the staged image list to the registry via the
# version's docker-push helper. 'overcast/api-php' and 'overcast/kubectl'
# go first since other images depend on them.
function push-images {
    LIST_FILE=$(readlink -f $STAGE_DIR/$VERSION/image-list)
    [ -f "$LIST_FILE" ] || die 8 "Missing image list file: $LIST_FILE"

    # Pull the two priority images out of the list; keep the remainder
    KUBECTL_VER=$(perl -ne 'print $1 if /overcast\/kubectl:(\S+)\s/' $LIST_FILE)
    KUBECTL=$(perl -ne 'print $1 if /(overcast\/kubectl\S+)\s/' $LIST_FILE)
    API_PHP=$(perl -ne 'print $1 if /(overcast\/api-php\S+)\s/' $LIST_FILE)
    LIST_REST=$(sed -E 's%overcast/api-php\S+\s%%' $LIST_FILE | sed -E 's%overcast/kubectl\S+\s%%')

    local img_ver
    for img_ver in $API_PHP $KUBECTL $LIST_REST; do
        # img_ver looks like overcast/api-php:1.0; the exported variables
        # below are consumed by the docker-push helper script
        export VER=${img_ver##*:}
        export IMAGE=${img_ver%%:*}
        export SHORTNAME=${IMAGE##*/} # ex.: ptz
        export IVER=:$VER
        export KUBECTL_VER

        $STAGE_DIR/$VERSION/docker-push
    done
}

# --------------------------------------------- wait-for-pod ----
# Block until a pod whose name matches $1 reports at least one ready
# container (no "0/" in the READY column), polling every 30 seconds.
# For api-php, additionally give the API a moment to start and then
# synchronize the database configuration.
function wait-for-pod {
    pod_name=$1
    until kubectl get pod | grep "$pod_name" | grep -vqF "0/"; do
        echo Waiting for \'$pod_name\' to become operational
        sleep 30
    done

    if [ "$pod_name" == "api-php" ]; then
        sleep 15    # the API needs a few seconds after the pod turns ready
        if [ "$OVERCAST_ENV" == prod ]; then
            db-conf sync -v=$VERSION
        else
            db-conf sync
        fi
    fi
}

# ---------------------------------- destroy-overcast ----------
# ---------------------------------- destroy-overcast ----------
# Tear down all OVERCAST Kubernetes objects labelled app=overcast, flush
# Redis, drop the databases and delete the namespace.
function destroy-overcast {
    echo Destroying OVERCAST objects

    echo Flushing Redis
    db-conf flush-redis

    local kind
    for kind in deployment endpoints svc; do
        case $kind in
            deployment) echo Deleting deployments ;;
            endpoints)  echo Deleting endpoints ;;
            svc)        echo Deleting services ;;
        esac
        kubectl delete $kind -l"app=overcast"
    done

    kubectl delete namespace overcast

    db-conf drop

    echo Destroying OVERCAST objects: FINISHED
}

# ---------------------------------- destroy-module ------------
# ---------------------------------- deploy-module ------------
# Deploy a single module's YAML manifests (from the staged module directory
# or the --module-dir override) to K8S, substituting template variables.
# Uses: MODULE (-i), MODULE_VERSION (--module-version, default 'latest'),
# MODULE_DIR (--module-dir). Returns the last failing apply's exit code.
function deploy-module {
    local module_dir=$STAGE_DIR/$VERSION/$MODULE
    if [ -n "$MODULE_DIR" ]; then
        module_dir=$MODULE_DIR
    fi
    [ -d "$module_dir" ] || die 56 "Module dir for $MODULE is missing"
    VER=$MODULE_VERSION
    if [ -z "$VER" ]; then
        VER=latest
    fi
    export VER      # consumed by the 'substitute' templating helper
    echo Deploying module $MODULE:$VER to K8S
    pushd $module_dir &>/dev/null
    local ERR_CODE=0
    for script in $(ls *.yaml 2>/dev/null); do
        # Fixed: apply failures used to be ignored — ERR_CODE was never
        # updated, so the function always returned 0.
        cat "$script" | substitute | kubectl apply -f - || ERR_CODE=$?
    done
    popd &>/dev/null
    return $ERR_CODE
}

# --------------------------------------- exec-if-exists -------
# --------------------------------------- exec-if-exists -------
# Run the given command if its first argument is an executable file;
# otherwise just report it as missing. Arguments are forwarded verbatim.
function exec-if-exists {
    if [ -x "$1" ]; then
        echo Executing: $1
        # Fixed: unquoted $@ re-split arguments containing whitespace
        "$@"
    else
        echo MISSING: $1
    fi
}

# ----------------------------------------- exec-or-fail -------
# ----------------------------------------- exec-or-fail -------
# Run the given command, dying if its first argument is not an executable
# file. Arguments are forwarded verbatim.
function exec-or-fail {
    SCRIPT=$1
    [ -x "$SCRIPT" ] || die 13 "Could not execute: $SCRIPT"
    # Fixed: unquoted $@ re-split arguments containing whitespace
    "$@"
}

# --------------------------------------------- scale-pods ------
# --------------------------------------------- scale-pods ------
# Multiply maxReplicas of every HorizontalPodAutoscaler by $SCALE (the
# -s=x.xx option): e.g. 0.25 quarters capacity, 2 doubles it.
function scale-pods {
    for HPA in $(kubectl get hpa|awk '{print $1}'|grep -v NAME); do
        echo Scaling HPA: $HPA
        # Rewrite only 'maxReplicas:' lines, skipping values of 1, rounding up
        # via int(0.99 + SCALE*old). $SCALE is spliced by the shell into the
        # perl expression — assumes it is a plain number; TODO confirm input
        # is validated upstream.
        kubectl get hpa $HPA -o yaml| perl -n -e 'print /\smaxReplicas:\s(\d+)$/ && $1!=1? "  maxReplicas: ".int(0.99+'$SCALE'*$1)."\n" : $_' | kubectl replace -f -
    done
}

# --------------------------------------------- pull-images -----
# --------------------------------------------- pull-images -----
# Pull all images for $VERSION from the production GCR registry and re-tag
# them locally under the 'overcast/' prefix.
function pull-images {
    LIST_FILE=$(readlink -f $STAGE_DIR/$VERSION/image-list)
    [ -f  "$LIST_FILE" ] || die 8 "Missing image list file: $LIST_FILE"
    # NOTE(review): passing the key via -p exposes it in 'ps' output;
    # consider --password-stdin
    docker login -u _json_key -p "$PROD_REGISTRY_KEY" https://us.gcr.io
    [ $? == 0 ] || die 67 "Docker login to production registry failed"
    for IMG_VER in $(cat $LIST_FILE); do
        # IMG_VER=overcast/api-php:1.0
        # note, that 'VER' will be used in 'substitute' script when patching YAML (!!!)
        export VER=${IMG_VER##*:}
        export IMAGE=${IMG_VER%%:*}
        export SHORTNAME=${IMAGE##*/} # ex.: ptz

        # NOTE(review): the production GCR project id is hard-coded here —
        # confirm it should not come from configuration
        docker pull us.gcr.io/test1-141119/$SHORTNAME:$VER
        docker tag  us.gcr.io/test1-141119/$SHORTNAME:$VER overcast/$SHORTNAME:$VER
    done

    docker logout https://us.gcr.io
}

# Populated by read-deployment-patches: object name -> patch payload
declare -A PATCH_MAP
# ------------------------------------------patch-deployments ---
# Parse ~/<repo>/k8s-patches/$1 ('name=payload' lines, '#' comments) into the
# global PATCH_MAP associative array. Dies if the file is missing or a line
# is malformed.
# NOTE(review): the payload is taken after the LAST '=' — payloads containing
# '=' would be truncated; confirm against the list-file format.
function read-deployment-patches {
    local PATCHFILE=$1
    local REPO_DIR=${GIT_CONFIG_REPO##*/}; REPO_DIR=${REPO_DIR%%.git}

    if [ ! -d ~/$REPO_DIR ]; then
        echo ERROR: No local clone found. Use \'conf-setup-repo\' first!
        exit 1
    fi

    PATCH_MAP=()
    local PATCH_LIST=~/$REPO_DIR/k8s-patches/$PATCHFILE
    if [ -f $PATCH_LIST ]; then
        # fd 10 keeps stdin free for commands run inside the loop;
        # -r preserves backslashes that may occur in patch payloads
        while read -r -u 10 line; do
            # Skip comments and blank lines (blank lines used to die as a
            # syntax error)
            if [[ $line =~ ^# || -z $line ]]; then
                continue
            fi
            local PATCH=${line##*=}
            local DEPLOY_NAME=${line%%=*}
            if [ -z "$PATCH" -o -z "$DEPLOY_NAME" ]; then
                # Fixed: the message used to hard-code deployment-patches.list
                # regardless of which file was being parsed
                die 71 "Invalid syntax of file $PATCHFILE"
            fi
            PATCH_MAP[$DEPLOY_NAME]=$PATCH
        done 10<$PATCH_LIST
    else
        die 56 "Missing $PATCHFILE file"
    fi
}

# Map a module name to the kind of Kubernetes object it is deployed as:
# everything is a 'deployment' except mediaproxy (daemonset) and
# metastorage (statefulset).
function get-object-kind {
    case $1 in
        mediaproxy)  echo daemonset ;;
        metastorage) echo statefulset ;;
        *)           echo deployment ;;
    esac
}

# Pin workloads to labelled nodes: for each entry in
# k8s-patches/node-selectors.list, patch the object's nodeSelector to
# {"overcast.<selector>": "true"}.
function set-node-selectors {
    echo =============== Patching node selectors ================
    read-deployment-patches node-selectors.list
    local name sel
    for name in ${!PATCH_MAP[@]}; do
        sel="${PATCH_MAP[$name]}"
        kubectl patch $(get-object-kind $name) $name --type json \
            --patch "[{\"op\":\"replace\",\"path\":\"/spec/template/spec/nodeSelector\",\"value\":{\"overcast.${sel}\": \"true\"}}]"
    done
}

# Apply container resource limits/requests from k8s-patches/resources.list
# (payload is a JSON resources object) to container 0 of each listed object.
function set-resources {
    echo =============== Patching resources =====================
    read-deployment-patches resources.list
    local name res
    for name in ${!PATCH_MAP[@]}; do
        res="${PATCH_MAP[$name]}"
        kubectl patch $(get-object-kind $name) $name --type json \
            --patch "[{\"op\":\"add\",\"path\":\"/spec/template/spec/containers/0/resources\",\"value\":$res}]"
    done
}

# Recreate HPA autoscalers from k8s-patches/replicas.list entries of the
# form name=min:max:cpu-percent. Only plain deployments are handled.
function set-replicas {
    echo =============== Patching replicas ======================
    read-deployment-patches replicas.list
    local name spec hpa
    for name in ${!PATCH_MAP[@]}; do
        [ $(get-object-kind $name) != deployment ] && continue
        spec="${PATCH_MAP[$name]}"
        IFS=: read -ra hpa <<< "$spec"      # min:max:cpu-percent
        kubectl delete hpa $name &>/dev/null
        kubectl autoscale deployment $name --min=${hpa[0]} --max=${hpa[1]} --cpu-percent=${hpa[2]}
    done
}

# Apply the generic JSON patches listed in k8s-patches/deployment-patches.list.
function patch-deployments {
    read-deployment-patches deployment-patches.list
    for deploy in ${!PATCH_MAP[@]}; do
        patch="${PATCH_MAP[$deploy]}"
        # Fixed: the payload was referenced as "$PATCH" — a different (and
        # empty) variable — so every object was patched with an empty patch.
        kubectl patch $(get-object-kind $deploy) $deploy --type json --patch "$patch"
    done
}

# --------------------------------------------- deploy-addons ---
# Deploy optional addons listed in k8s-patches/addons.list: for each addon
# name, apply all YAML manifests under images/addons/<addon>/deploy.
function deploy-addons {
    local REPO_DIR=${GIT_CONFIG_REPO##*/}; REPO_DIR=${REPO_DIR%%.git}
    local ADDONS_DIR=$BASE_DIR/images/addons

    if [ ! -d ~/$REPO_DIR ]; then
        echo ERROR: No local clone found. Use \'conf-setup-repo\' first!
        exit 1
    fi

    local ADDONS_LIST=~/$REPO_DIR/k8s-patches/addons.list
    if [ -f $ADDONS_LIST ]; then
        # fd 10 keeps stdin free for commands run inside the loop
        while read -u 10 addon; do
            # Skip comments and blank lines
            if [[ $addon =~ ^# ]]; then
                continue
            fi
            if [ -z "$addon" ]; then
                continue
            fi
            if [ ! -d $ADDONS_DIR/$addon ]; then
                echo "No such addon: $addon"
                continue
            fi
            pushd $ADDONS_DIR/$addon/deploy &>/dev/null
            for script in $(ls *.yaml 2>/dev/null); do
                echo Applying $addon/deploy/$script
                cat $script | substitute | kubectl apply -f -
            done
            popd &>/dev/null
        done 10<$ADDONS_LIST
    else
        # Fixed: message referenced the undefined variable $ADDONS_FILE
        echo "Missing $ADDONS_LIST file. Nothing to do"
    fi
}

# --------------------------------------------- ver-list --------
# --------------------------------------------- ver-list --------
# Print the deployed product version (from the overcast-deployment
# ConfigMap) followed by a sorted 'module=version' line for every container
# image found in deployments, statefulsets and daemonsets.
function ver-list {
    VER=$(kubectl get configmap overcast-deployment -o yaml|grep deploy.version|grep -v overcast|cut -d: -f2|tr -d '" ')
    echo PRODUCT_VERSION=$VER
    # Each 'image:' line is reduced to 'module=version'
    kubectl get deployment,statefulset,daemonset -o yaml \
        | grep image: \
        | perl -p -e '$_=m{/([\w\-]+):(.+)$}?"$1=$2\n":""' \
        | sort
}

# --------------------------------------------- ver-set ---------
# Point container 0 of $MODULE's object at image version $VERSION, keeping
# the currently configured image repository URL.
function ver-set {
    local kind img_url
    kind=$(get-object-kind $MODULE)
    # Current image URL without its tag (kubectl sidecar lines are excluded)
    img_url=$(kubectl get $kind $MODULE -o yaml | grep image: | grep -v kubectl | perl -p -e 's/.*image:\s*(\S+):\S+$/$1/')
    [ -n "$img_url" ] || die 115 "Unable to get image URL"
    kubectl patch $kind $MODULE --type json \
        --patch "[{\"op\":\"replace\",\"path\":\"/spec/template/spec/containers/0/image\",\"value\":\"$img_url:$VERSION\"}]"
}

# Print the external address of the overcast-ingress Ingress.
function cluster-ip {
    kubectl describe ingress overcast-ingress | awk '/Address/ {print $2}'
}

# ==================================================================================
#
# Savepoints management
#

# Succeed iff a savepoint ConfigMap named overcast.savepoint.<name> exists.
function check-savepoint-exists {
    kubectl get configmap "overcast.savepoint.$1" &>/dev/null
}

# --------------------------------------------- create-savepoint -
# Snapshot the current module=version map (first container image of every
# deployment/statefulset/daemonset) into ConfigMap overcast.savepoint.<name>.
# The savepoint name comes from the second CLI argument.
function create-savepoint {
    local name=$SELECTED_MODULE_DIR
    [ -z "$name" ] && die 231 "Savepoint name must be specified"
    if check-savepoint-exists $name; then
        die 232 "Savepoint '$name' already exists"
    fi

    # Collect module=version pairs from the running objects
    declare -a kv
    local line
    for line in $(kubectl get deploy,sts,ds -o json | jq -r .items[].spec.template.spec.containers[0].image | sort); do
        kv+=("$(echo $line | perl -p -e '$_=m{/([\w\-]+):(.+)$}?"$1=$2\n":""')")
    done

    # Turn each pair into a --from-literal argument
    local args item
    for item in ${kv[*]}; do
        args="$args --from-literal=$item"
    done

    kubectl create configmap "overcast.savepoint.$name" $args
}

# List savepoint names (ConfigMaps prefixed with overcast.savepoint.)
function list-savepoints {
    kubectl get configmap | awk '/overcast.savepoint/ {print $1}' | perl -pe 's/^overcast\.savepoint\.//'
}

# Delete the ConfigMap backing the named savepoint (second CLI argument).
function delete-savepoint {
    local name=$SELECTED_MODULE_DIR
    [ -n "$name" ] || die 231 "Savepoint name must be specified"
    check-savepoint-exists $name || die 232 "Savepoint '$name' doesn't exist"
    kubectl delete configmap "overcast.savepoint.$name"
}

# --------------------------------------------- restore-savepoint -
# Roll every module back to the image versions recorded in the named
# savepoint (second CLI argument) by invoking ver-set once per module.
function restore-savepoint {
    local name=$SELECTED_MODULE_DIR
    [ -z "$name" ] && die 231 "Savepoint name must be specified"
    if ! check-savepoint-exists $name; then
        die 232 "Savepoint '$name' doesn't exist"
    fi
    echo ========== Restoring savepoint $name ==========
    # Each ConfigMap data entry is 'module: "version"'; rebuild
    # 'module:version' pairs and feed them to ver-set through the
    # MODULE/VERSION globals it reads.
    for imgver in $(kubectl get configmap "overcast.savepoint.$name" -o json | jq -r '.data' | perl -ne 'print "$1:$2\n" if /"(.+?)":\s+"(.+?)"/'); do
        VERSION=${imgver##*:}
        MODULE=${imgver%%:*}
        ver-set
    done
}

# ==================================================================================
# ==================================================================================
# ==================================================================================

# A COMMAND is mandatory; show usage otherwise
[ -z "$COMMAND" ] && USAGE && die 1 "Need at least one argument"

# Sanity-check the staging environment with its bundled test-cli before any
# command that relies on it (ver-set/push-vae-image/deploy-module are exempt).
# Without an explicit -v, the newest staged version's test-cli is used.
if [ "$COMMAND" != "ver-set" -a "$COMMAND" != "push-vae-image" -a "$COMMAND" != "deploy-module" ]; then
    if [ -n "$VERSION" ]; then
        exec-or-fail $STAGE_DIR/$VERSION/test-cli
    else
        exec-or-fail $(ls $STAGE_DIR/*/test-cli|tail -n1)
    fi
fi

# Main command dispatch
case $COMMAND in
    create-cluster) # -------------------------------------------- create-cluster -----
        exec-or-fail $BASE_DIR/deploy/iaas/$CLUSTER_IAAS/cluster-create
        ;;
    upgrade-cluster) # ------------------------------------------ upgrade-cluster -----
        exec-or-fail $BASE_DIR/deploy/iaas/$CLUSTER_IAAS/cluster-upgrade
        ;;
    destroy-cluster) # ------------------------------------------ destroy-cluster -----
        [ -z "$FORCE" ] && die 88 'Must use --force option'
        wait-confirmation Are you sure you want to destroy Kubernetes cluster?
        exec-or-fail $BASE_DIR/deploy/iaas/$CLUSTER_IAAS/cluster-destroy
        ;;
    deploy) # ------------------------------------------------------------ deploy -----
        [ -z "$VERSION" ] && die 11 'Must provide version'
        deploy-overcast
        ;;
    push-images)
        [ -z "$VERSION" ] && die 11 'Must provide version'
        push-images
        ;;
    push-vae-image)
        # Exported VER/IMAGE/SHORTNAME/IVER are consumed by push-vae-image
        [ -z "$MODULE" ] && die 14 "Must set module name with -i option"
        [ -z "$VERSION" ] && die 11 'Must provide version'
        export VER=$VERSION
        export IMAGE=overcast/$MODULE
        export SHORTNAME=${IMAGE##*/} # ex.: ptz
        export IVER=:$VER
        push-vae-image
        ;;
    deploy-conf) # --------------------------------------------------- deploy-conf -----
        [ -z "$VERSION" ] && die 11 'Must provide version'
        deploy-conf
        ;;
    deploy-status)
        kubectl get configmap overcast-deployment -o yaml|grep deploy|egrep -v '(name:|selfLink:)'|sort
        ;;
    destroy) # ------------------------------------------------- destroy-overcast -----
        [ -z "$VERSION" ] && die 11 'Must provide version'
        [ -z "$FORCE" ] && die 12 'Must add --force option'
        wait-confirmation Are you sure you want to destroy Overcast deployment?
        destroy-overcast
        ;;
    scale-pods) # ---------------------------------------------------- scale-pods -----
        scale-pods
        ;;
    env-list)  # ------------------------------------------------------- env-list -----
        echo "Environments registered ($SCRIPTDIR/env.*):"
        list-env
        ;;
    env-set) # ---------------------------------------------------------- env-set -----
        # NOTE(review): passes $VERSION (from -v=) rather than the second
        # positional argument used by the bootstrap workaround at the top of
        # this script — confirm which one set-env expects.
        set-env $VERSION
        ;;
    conf-setup-repo)
        conf-setup-repo
        ;;
    conf-pull-upstream)
        conf-pull-upstream
        ;;
    conf-commit)
        conf-commit
        ;;
    conf-sync)
        conf-sync
        ;;
    ver-list)
        ver-list
        ;;
    ver-set)
        [ -z "$MODULE" ] && die 14 "Must set module name with -i option"
        [ -z "$VERSION" ] && die 11 'Must provide version'
        ver-set
        ;;
    cluster-ip)
        cluster-ip
        ;;
    pull-images)
        [ -z "$VERSION" ] && die 11 'Must provide version'
        pull-images
        ;;
    set-node-selectors)
        set-node-selectors
        ;;
    set-resources)
        set-resources
        ;;
    set-replicas)
        set-replicas
        ;;
    patch-deployments)
        patch-deployments
        ;;
    deploy-addons)
        deploy-addons
        ;;
    deploy-module)
        [ -z "$MODULE" ] && die 14 "Must set module name with -i option"
        deploy-module
        ;;
    create-savepoint)
        create-savepoint
        ;;
    list-savepoints)
        list-savepoints
        ;;
    delete-savepoint)
        delete-savepoint
        ;;
    restore-savepoint)
        restore-savepoint
        ;;
    *) # ------------------------------------------------------------------------------
        USAGE
        die 1 "COMMAND not recognized"
esac

echo