# rds_dbdump_play.yml
#
# when using rds, we have to move the db dumps to another instance;
# this playbook is meant to be applied to that other instance.
#
# as of nov23, catchpy and rhetoric.catchpy are using a bak instance to do the db
# backups; this is due to a rise in traffic during the pandemic, when we were trying
# not to overload catchpy-db with backup traffic. By now, the load has stabilized and
# we can revert to using one catchpy db for all courses (merging both catchpy's is
# not implemented yet).
#
# ALWAYS ADD -e project_name=<project_name> to ansible-playbook extra args,
# e.g. -e project_name=catchpy to configure backups for the catchpy db
#
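# A minimal invocation sketch (the inventory path and the catchpy_bak host
# pattern are assumptions for illustration, not names fixed by this playbook):
#
#   ansible-playbook -i <inventory> rds_dbdump_play.yml \
#     -e project_name=catchpy -e target_hosts=catchpy_bak
#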
- import_playbook: common_play.yml

- hosts: "{{ target_hosts | default('all', true) }}"
  remote_user: "{{ my_remote_user }}"
  become: yes
  become_user: root
  vars_files:
    - vars/common_vars.yml
    - vars/postgres_vars.yml
  handlers:
    - import_tasks: handlers/main.yml
  tasks:
    - include_role:
        name: external/Stouts.users
      vars:
        users_users: "{{ [db_backup_local_user] }}"
    - include_role:
        name: external/nmaekawa.extra-ebs
      when: use_aws
    - name: install packages jq, awscli
      apt:
        name: ['jq', 'awscli']
        state: present
    - name: install pg client
      include_tasks: roles/external/nmaekawa.postgres/tasks/pg_install_client.yml
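# note: jq and awscli installed above are presumably consumed by the templated
# backup script and the s3 sync cronjob set up in the next play, and the pg
# client presumably provides the pg_dump binary the dump script calls.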
- hosts: "{{ target_hosts | default('all', true) }}"
  remote_user: "{{ my_remote_user }}"
  become: yes
  become_user: root
  vars_files:
    - vars/common_vars.yml
    - vars/postgres_vars.yml
    - vars/{{ project_name }}_vars.yml
  tasks:
    - name: gather ec2 metadata (provides ansible_ec2_instance_id for the s3 sync path)
      ec2_metadata_facts:
      when: ec2_tag_cluster == 'prod'
    - name: create dirs
      file:
        path: "{{ item }}"
        owner: "{{ db_backup_user }}"
        group: "{{ db_backup_group }}"
        mode: "0755"
        state: directory
      with_items: ["{{ db_backup_dir }}/{{ service_name }}", "{{ db_bin_dir }}"]
    - name: drop script to perform data backups
      template:
        src: roles/webapp_install/templates/cronjob_db_backup.sh.j2
        dest: "{{ db_bin_dir }}/pg_datadump_{{ service_db_name }}.sh"
        owner: "{{ db_backup_user }}"
        group: "{{ db_backup_group }}"
        mode: "0754"
      when: ec2_tag_cluster == "prod"
    - name: cronjob for data backup
      cron:
        name: "daily postgres data dump for {{ service_db_name }}"
        user: "{{ db_backup_user }}"
        hour: "{{ lookup('env', 'DBBAK_DATA_CRON_HOUR') | default('6', true) }}"  # this is UTC, so ~2am EDT or ~1am EST
        minute: "{{ lookup('env', 'DBBAK_DATA_CRON_MIN') | default('0', true) }}"
        state: present
        job: >
          {{ db_bin_dir }}/pg_datadump_{{ service_db_name }}.sh --db "{{ service_db_name }}"
          --outdir "{{ db_backup_dir }}/{{ service_name }}"
          --days {{ service_db_backup_local_retention_in_days }} 2>&1 | logger -t "[db backup]"
      when: ec2_tag_cluster == "prod"
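    # For reference, the cron task above renders roughly this crontab entry
    # (paths and the 7-day retention are illustrative assumptions; hour/minute
    # come from the env lookups, defaulting to 06:00 UTC):
    #   0 6 * * * <db_bin_dir>/pg_datadump_<db>.sh --db "<db>" --outdir "<backup_dir>" --days 7 2>&1 | logger -t "[db backup]"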
    - name: install s3 cronjob for db dumps
      cron:
        name: "s3 sync for db dumps: {{ service_db_name }}"
        user: "{{ db_backup_user }}"
        hour: "{{ lookup('env', 'DBBAK_SYNC_CRON_HOUR') | default('6', true) }}"  # this is UTC, so ~2am EDT or ~1am EST
        minute: "{{ lookup('env', 'DBBAK_SYNC_CRON_MIN') | default('45', true) }}"  # check when dumps finish and schedule after that
        state: present
        job: >
          /usr/bin/aws s3 sync
          {{ db_backup_dir }}/{{ service_name }}
          s3://{{ s3_backup_bucket_name }}/{{ ec2_tag_cluster }}/{{ project_name }}/pgdump_taken_from_{{ ansible_ec2_instance_id }}
      when: ec2_tag_cluster == "prod"
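    # For reference, dumps land in s3 under a key layout like
    #   s3://<s3_backup_bucket_name>/prod/<project_name>/pgdump_taken_from_<instance_id>/
    # and both cron schedules can be shifted at deploy time via env vars on the
    # ansible controller (the lookups above read the controller environment), e.g.:
    #   DBBAK_SYNC_CRON_HOUR=7 DBBAK_SYNC_CRON_MIN=15 ansible-playbook rds_dbdump_play.yml -e project_name=catchpy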