Skip to content

Commit

Permalink
HC-490 - Supporting Opensearch Dashboards (#104)
Browse files Browse the repository at this point in the history
* migrating from kibana -> opensearch dashboards

* distinguishing between ILM/ISM

removing hardcoded ILM in templates
using python scripts to install ILM/ISM instead of raw curl requests
commented out ingest pipeline for consolidation of databases

* removed warm_migration action bc returning 403

* removed cold state b/c AWS opensearch uses cold_delete and non managed uses delete only

* copied kibana_dashboard_import into opensearch_dashboards_import

* updated import_dashboards for opensearch

added mozart_redis_host in fabfile.py
separating the kibana/opensearch logic in the update kibana function

* flipped logic around to make elasticsearch the default

* fixed opensearch_dashboards.yml

fixed sdswatch shell script to use logstash-oss-7.16.3

* logstash 7.9.3 -> logstash-oss 7.16.3

* forgot to add sleep in import opensearch dashboard

* remove field in ism

* edited jvm.options

* removed trailing comma

* no kibana in opensearch

* added opensearch support for sds status

* added base path /metrics for dashboards

* fixed opensearch dashboards endpoint

* removed /metrics

* uncommented jvm.options

* removed cold and addtl support for multiple hosts

* small changes

* separated sdswatch for opensearch vs elasticsearch

* fixed clean_indices_from_alias.py

* move ism/ilm policies inside if/else

* use hysds es_util to get ES status

* call ES core to call ping function

* don't use host when calling es functions

---------

Co-authored-by: dustinlo <[email protected]>
Co-authored-by: Mike Cayanan <[email protected]>
  • Loading branch information
3 people authored Oct 31, 2023
1 parent fc18a74 commit 5af3097
Show file tree
Hide file tree
Showing 20 changed files with 4,821 additions and 1,049 deletions.
2 changes: 1 addition & 1 deletion sdscli/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,6 @@
from __future__ import division
from __future__ import absolute_import

__version__ = "1.2.3"
__version__ = "1.3.0"
__url__ = "https://github.com/sdskit/sdscli"
__description__ = "Command line interface for SDSKit"
83 changes: 50 additions & 33 deletions sdscli/adapters/hysds/fabfile.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,29 +32,32 @@
repo_re = re.compile(r'.+//.*?/(.*?)/(.*?)(?:\.git)?$')

# define private EC2 IP addresses for infrastructure hosts
context = {}
this_dir = os.path.dirname(os.path.abspath(__file__))
sds_cfg = get_user_config_path()
if not os.path.isfile(sds_cfg):
raise RuntimeError(
"SDS configuration file doesn't exist. Run 'sds configure'.")

with open(sds_cfg) as f:
context = yaml.load(f, Loader=yaml.FullLoader)

# define and build groups to reduce redundancy in defining roles

# mozart hosts
mozart_es_engine = context.get("MOZART_ES_ENGINE", "elasticsearch")
mozart_host = '%s' % context['MOZART_PVT_IP']
mozart_rabbit_host = '%s' % context['MOZART_RABBIT_PVT_IP']
mozart_redis_host = '%s' % context['MOZART_REDIS_PVT_IP']
mozart_es_host = '%s' % context['MOZART_ES_PVT_IP']

# metrics host
metrics_es_engine = context.get("METRICS_ES_ENGINE", "elasticsearch")
metrics_host = '%s' % context['METRICS_PVT_IP']
metrics_redis_host = '%s' % context['METRICS_REDIS_PVT_IP']
metrics_es_host = '%s' % context['METRICS_ES_PVT_IP']

# grq host
grq_es_engine = context.get("GRQ_ES_ENGINE", "elasticsearch")
grq_host = '%s' % context['GRQ_PVT_IP']
grq_es_host = '%s' % context['GRQ_ES_PVT_IP']

Expand All @@ -77,12 +80,12 @@
'mozart': [mozart_host],
'mozart-rabbit': [mozart_rabbit_host],
'mozart-redis': [mozart_redis_host],
'mozart-es': [mozart_es_host],
'mozart-es': mozart_es_host if type(mozart_es_host) is list else [mozart_es_host],
'metrics': [metrics_host],
'metrics-redis': [metrics_redis_host],
'metrics-es': [metrics_es_host],
'metrics-es': metrics_es_host if type(metrics_es_host) is list else [metrics_es_host],
'grq': [grq_host],
'grq-es': [grq_es_host],
'grq-es': grq_es_host if type(grq_es_host) is list else [grq_es_host],
'factotum': [factotum_host],
'ci': [ci_host],
'verdi': verdi_hosts,
Expand Down Expand Up @@ -484,17 +487,21 @@ def install_base_es_template():


def install_es_policy():
role, hysds_dir, hostname = resolve_role()

policy_file_name = "es_ilm_policy_mozart.json"
target_file = f"{ops_dir}/mozart/etc/{policy_file_name}"
send_template(
policy_file_name,
target_file
)
# run(f"curl -XPUT 'localhost:9200/_ilm/policy/ilm_policy_mozart?pretty' -H 'Content-Type: application/json' -d@{target_file}")
if mozart_es_engine == "opensearch":
ism_policy_file_name = "opensearch_ism_policy_mozart.json"
ism_target_file = f"{ops_dir}/mozart/etc/{ism_policy_file_name}"
send_template(ism_policy_file_name, ism_target_file)

with cd('~/mozart/ops/hysds/scripts'):
run(f"python install_ilm_policy.py --ism-policy {ism_target_file}")
else:
ilm_policy_file_name = "es_ilm_policy_mozart.json"
ilm_target_file = f"{ops_dir}/mozart/etc/{ilm_policy_file_name}"
send_template(ilm_policy_file_name, ilm_target_file)

with prefix('source %s/bin/activate' % hysds_dir):
run(f'{hysds_dir}/ops/{role}/scripts/install_ilm_policy.sh --policy_file {target_file}')
with cd('~/mozart/ops/hysds/scripts'):
run(f"python install_ilm_policy.py --ilm-policy {ilm_target_file}")


def install_mozart_es_templates():
Expand All @@ -513,13 +520,14 @@ def install_mozart_es_templates():
target_dir = f"{ops_dir}/mozart/etc"
for template in templates:
# Copy templates to etc/ directory
target_path = f"{target_dir}/{template}"
send_template(
template,
target_path
)
with prefix('source %s/bin/activate' % hysds_dir):
run(f"{hysds_dir}/ops/mozart/scripts/install_es_template.sh --install_job_templates --template_dir {target_dir}")
target_path = f"{ops_dir}/mozart/etc/{template}"
send_template(template, target_path)
template_doc_name = template.replace(".template", '')
print(f"Creating ES index template for {template}")
# run(f"curl -XPUT 'localhost:9200/_index_template/{template_doc_name}?pretty' "
# f"-H 'Content-Type: application/json' -d@{target_path}")
with cd('~/mozart/ops/hysds/scripts'):
run(f"python install_job_status_template.py {template_doc_name} {target_path}")


##########################
Expand Down Expand Up @@ -630,14 +638,10 @@ def rabbitmq_queues_flush():
def mozart_es_flush():
ctx = get_context()
#run('curl -XDELETE http://{MOZART_ES_PVT_IP}:9200/_index_template/*_status'.format(**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py http://{MOZART_ES_PVT_IP}:9200 job_status-current'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py http://{MOZART_ES_PVT_IP}:9200 task_status-current'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py http://{MOZART_ES_PVT_IP}:9200 event_status-current'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py http://{MOZART_ES_PVT_IP}:9200 worker_status-current'.format(
**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py job_status-current'.format(**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py task_status-current'.format(**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py event_status-current'.format(**ctx))
run('~/mozart/ops/hysds/scripts/clean_indices_from_alias.py worker_status-current'.format(**ctx))
#run('~/mozart/ops/hysds/scripts/clean_job_spec_container_indexes.sh http://{MOZART_ES_PVT_IP}:9200'.format(**ctx))


Expand Down Expand Up @@ -764,6 +768,7 @@ def python_setup_develop(node_type, dest):
# ci functions
##########################


def get_ci_job_info(repo, branch=None):
ctx = get_context()
match = repo_re.search(repo)
Expand Down Expand Up @@ -859,7 +864,10 @@ def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('sdswatch_client.conf', '~/mozart/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/mozart/bin/run_sdswatch_client.sh")
if mozart_es_engine == "opensearch":
send_template("run_sdswatch_client_opensearch.sh", "~/mozart/bin/run_sdswatch_client.sh")
else:
send_template("run_sdswatch_client.sh", "~/mozart/bin/run_sdswatch_client.sh")
run("chmod 755 ~/mozart/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/mozart/bin/watch_supervisord_services.py")
run("chmod 755 ~/mozart/bin/watch_supervisord_services.py")
Expand All @@ -870,7 +878,10 @@ def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
upload_template('sdswatch_client.conf', '~/metrics/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/metrics/bin/run_sdswatch_client.sh")
if metrics_es_engine == "opensearch":
send_template("run_sdswatch_client_opensearch.sh", "~/metrics/bin/run_sdswatch_client.sh")
else:
send_template("run_sdswatch_client.sh", "~/metrics/bin/run_sdswatch_client.sh")
run("chmod 755 ~/metrics/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/metrics/bin/watch_supervisord_services.py")
run("chmod 755 ~/metrics/bin/watch_supervisord_services.py")
Expand All @@ -879,7 +890,10 @@ def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
elif node_type == 'grq':
upload_template('sdswatch_client.conf', '~/sciflo/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/sciflo/bin/run_sdswatch_client.sh")
if grq_es_engine == "opensearch":
send_template("run_sdswatch_client_opensearch.sh", "~/sciflo/bin/run_sdswatch_client.sh")
else:
send_template("run_sdswatch_client.sh", "~/sciflo/bin/run_sdswatch_client.sh")
run("chmod 755 ~/sciflo/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/sciflo/bin/watch_supervisord_services.py")
run("chmod 755 ~/sciflo/bin/watch_supervisord_services.py")
Expand All @@ -888,7 +902,10 @@ def send_shipper_conf(node_type, log_dir, cluster_jobs, redis_ip_job_status,
elif node_type in ('verdi', 'verdi-asg', 'factotum'):
upload_template('sdswatch_client.conf', '~/verdi/etc/sdswatch_client.conf', use_jinja=True,
context=ctx, template_dir=os.path.join(ops_dir, 'mozart/ops/hysds/configs/logstash'))
send_template("run_sdswatch_client.sh", "~/verdi/bin/run_sdswatch_client.sh")
if metrics_es_engine == "opensearch":
send_template("run_sdswatch_client_opensearch.sh", "~/verdi/bin/run_sdswatch_client.sh")
else:
send_template("run_sdswatch_client.sh", "~/verdi/bin/run_sdswatch_client.sh")
run("chmod 755 ~/verdi/bin/run_sdswatch_client.sh")
send_template("watch_supervisord_services.py", "~/verdi/bin/watch_supervisord_services.py")
run("chmod 755 ~/verdi/bin/watch_supervisord_services.py")
Expand Down
14 changes: 1 addition & 13 deletions sdscli/adapters/hysds/files/es_ilm_policy_mozart.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
}
},
"warm": {
"min_age": "90d",
"min_age": "97d",
"actions": {
"migrate": {
"enabled": false
Expand All @@ -20,18 +20,6 @@
}
}
},
"cold": {
"min_age": "97d",
"actions": {
"set_priority" : {
"priority": 0
},
"migrate": {
"enabled": false
},
"freeze": {}
}
},
"delete": {
"min_age": "104d",
"actions": {
Expand Down
5 changes: 1 addition & 4 deletions sdscli/adapters/hysds/files/event_status.template
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,7 @@
"settings": {
"number_of_shards": 8,
"index": {
"refresh_interval": "5s",
"lifecycle": {
"name": "ilm_policy_mozart"
}
"refresh_interval": "5s"
},
"analysis": {
"analyzer": {
Expand Down
8 changes: 4 additions & 4 deletions sdscli/adapters/hysds/files/install.sh
Original file line number Diff line number Diff line change
Expand Up @@ -76,10 +76,10 @@ if [ ! -z "$CONTAINER_REGISTRY" -a ! -z "$CONTAINER_REGISTRY_BUCKET" ]; then
fi

# Start up SDSWatch client
export LOGSTASH_IMAGE="s3://{{ CODE_BUCKET }}/logstash-7.9.3.tar.gz"
export LOGSTASH_IMAGE="s3://{{ CODE_BUCKET }}/logstash-oss-7.16.3.tar.gz"
export LOGSTASH_IMAGE_BASENAME="$(basename $LOGSTASH_IMAGE 2>/dev/null)"
if [ -z "$(docker images -q logstash:7.9.3)" ]; then
rm -rf /tmp/logstash-7.9.3.tar.gz
if [ -z "$(docker images -q logstash-oss:7.16.3)" ]; then
rm -rf /tmp/logstash-oss-7.16.3.tar.gz
aws s3 cp ${LOGSTASH_IMAGE} /tmp/${LOGSTASH_IMAGE_BASENAME}
docker load -i /tmp/${LOGSTASH_IMAGE_BASENAME}
else
Expand All @@ -89,7 +89,7 @@ docker run -e HOST=${FQDN} -v /data/work/jobs:/sdswatch/jobs \
-v $HOME/verdi/log:/sdswatch/log \
-v sdswatch_data:/usr/share/logstash/data \
-v $HOME/verdi/etc/sdswatch_client.conf:/usr/share/logstash/config/conf/logstash.conf \
--name=sdswatch-client -d logstash:7.9.3 \
--name=sdswatch-client -d logstash-oss:7.16.3 \
logstash -f /usr/share/logstash/config/conf/logstash.conf --config.reload.automatic

# Load verdi docker image
Expand Down
Loading

0 comments on commit 5af3097

Please sign in to comment.