    - OLD_ID=`openstack server show production-deployment -f value -c id`
    # TODO rename the old instance, so that we can find it in the cleanup task (a hedged sketch follows)
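    # A possible rename, as a sketch (hedged: `openstack server set --name` is a standard
    # OpenStackClient command, but the `-old` suffix convention is our assumption and must match
    # whatever the cleanup job searches for):
    # openstack server set --name production-deployment-old $OLD_ID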
    # TODO get and locally store a zip of the old certificate-docker-volume (a hedged sketch follows)
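    # Sketch for fetching the volume contents while the old instance is still reachable (hedged:
    # the /certificate-docker-volume mount point is an assumption, nothing in this file defines it):
    # ssh apiserver@$PRODUCTION_DOMAIN "tar czf - /certificate-docker-volume" > certificate-docker-volume.tar.gz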
    # don't create a snapshot copy of the old instance; we keep the old instance alive as long as possible
    # add should work without removing first: openstack server remove floating ip $OLD_ID $PRODUCTION_IP
    - openstack server remove volume $OLD_ID $VOLUME_ID
    - INSTANCE_ID=`openstack server create -f value -c id --prefix IMAGE_ --flavor s2 --image 149a65b5-aeb8-499f-aaa6-ec966bd28dd6 --user-data deploy_scripts/cloudinit.yml --security-group ssh --security-group www --security-group https production-deployment`
-while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done# wait until an address is available to attach the floating ip
-while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done# wait until an address is available to attach the floating ip
    - openstack server add floating ip $INSTANCE_ID $PRODUCTION_IP
    # TODO move the local zip of the certificate-docker-volume to the server once startup is complete (see the sketch after the cloud-init wait below)
    - openstack server add volume $INSTANCE_ID $VOLUME_ID
    # wait until the cloud-init script is complete - this should also mean that the server has started; TODO check this
    - ssh -oStrictHostKeyChecking=accept-new apiserver@$PRODUCTION_DOMAIN "until [ -e /finished_cloudinit ]; do sleep 5; done"
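    # Sketch for the upload deferred from the TODO above (hedged: same /certificate-docker-volume
    # mount-point assumption as the download sketch, and assumes scp traffic is allowed by the ssh security group):
    # scp certificate-docker-volume.tar.gz apiserver@$PRODUCTION_DOMAIN:/tmp/
    # ssh apiserver@$PRODUCTION_DOMAIN "tar xzf /tmp/certificate-docker-volume.tar.gz -C /"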
    # do this in the cleanup job, depending on the state of the server: openstack server delete $OLD_ID
    - OLD_ID=`openstack server show testing-deployment -f value -c id`
    # TODO rename the old instance, so that we can find it in the cleanup task (see the rename sketch in the production job above)
    # TODO get and locally store a zip of the old certificate-docker-volume (see the download sketch in the production job above)
    # add should work without removing first: openstack server remove floating ip $OLD_ID $TESTING_IP
    - INSTANCE_ID=`openstack server create -f value -c id --prefix IMAGE_ --flavor s1 --image 149a65b5-aeb8-499f-aaa6-ec966bd28dd6 --user-data deploy_scripts/cloudinit.yml --security-group ssh --security-group www --security-group https testing-deployment`
-while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done# wait until an address is available to attach the floating ip
-while [ "`openstack server show $INSTANCE_ID -c addresses -f value`" = "{}" ]; do sleep 5; done# wait until an address is available to attach the floating ip
    - openstack server add floating ip $INSTANCE_ID $TESTING_IP
    # TODO move the local zip of the certificate-docker-volume to the server once startup is complete (see the sketch in the production job above)
    # wait until the cloud-init script is complete - this should also mean that the server has started; TODO check this (a hedged check sketch follows)
    - ssh -oStrictHostKeyChecking=accept-new apiserver@$TESTING_DOMAIN "until [ -e /finished_cloudinit ]; do sleep 5; done"
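    # For the "TODO check this": /finished_cloudinit only proves cloud-init ran to the end, not that
    # the application is serving. A hedged extra probe against a hypothetical /health endpoint
    # (the endpoint path is our assumption):
    # curl --fail --retry 10 --retry-delay 5 https://$TESTING_DOMAIN/health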
    # do this in the cleanup job, depending on the state of the server: openstack server delete $OLD_ID
cleanup-failed-full-deployment:
  # check whether there is an old prod or test instance; if so, assign the respective IP to it,
  # re-attach the volume, delete the new instance, and rename the old instance back
  # if there is none, this is a failed light deployment, which is handled by another job
  # this does not guarantee a successful rollback, but unless the old instance was faulty it should work
  stage: cleanup
  when: on_failure
  only:
    - web
  script:
    - echo "This is the cleanup for the full redeployment of the testing or production servers"
    - echo "If this job is reached, some earlier job must have failed; this will return to the previous instance (if available)"
    - echo "A successful cleanup cannot be guaranteed, depending on the failure reason"
    # TODO check which old instance is present (either test-old or production-old); store the instance IDs in TEST_ID and PROD_ID
    # TODO if TEST_ID is set, roll back the test IP address, rename the test instance, and delete the new instance
    # TODO if PROD_ID is set, roll back the prod IP, remove the new instance, attach the volume to the old one, and rename the prod instance
    # a hedged sketch of the detection step follows
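    # Detection sketch (hedged: assumes the deploy jobs rename old instances to *-old as in the
    # TODOs above; `|| true` keeps the job alive when no old instance exists):
    # TEST_ID=`openstack server show testing-deployment-old -f value -c id 2>/dev/null || true`
    # PROD_ID=`openstack server show production-deployment-old -f value -c id 2>/dev/null || true`
    # if [ -n "$PROD_ID" ]; then openstack server add floating ip $PROD_ID $PRODUCTION_IP; fi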
    # GitLab should automatically alert the devs about this failure
cleanup-failed-light-test-deployment:
  # if there is a failure in the light deployments, this tries to check out an earlier version via git and roll back to it
  stage: cleanup
  when: on_failure
  only:
    - master
  except:
    - tags
    - web
  script:
    - echo "This is the cleanup for the light redeployment of the testing servers"
    - echo "If this job is reached, some earlier job must have failed; this will return to the previous instance (if available)"
    - echo "A successful cleanup cannot be guaranteed, depending on the failure reason"
    # TODO somehow find out which commit to roll back to (a hedged sketch follows)
    - COMMIT_TAG="" # TODO set some stable base version here, update regularly?