diff --git a/awx/api/serializers.py b/awx/api/serializers.py index 32d75f36975b..33f9e79db991 100644 --- a/awx/api/serializers.py +++ b/awx/api/serializers.py @@ -3008,7 +3008,7 @@ class Meta: fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode', - 'allow_simultaneous', 'custom_virtualenv', 'job_split_count') + 'allow_simultaneous', 'custom_virtualenv', 'job_slice_count') def get_related(self, obj): res = super(JobTemplateSerializer, self).get_related(obj) @@ -3025,7 +3025,7 @@ def get_related(self, obj): labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}), object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}), instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}), - split_jobs = self.reverse('api:job_template_split_jobs_list', kwargs={'pk': obj.pk}), + slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}), )) if self.version > 1: res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk}) @@ -3121,7 +3121,7 @@ class Meta: 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch', 'allow_simultaneous', 'artifacts', 'scm_revision', - 'instance_group', 'diff_mode') + 'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count') def get_related(self, obj): res = super(JobSerializer, self).get_related(obj) @@ -3199,13 +3199,6 @@ def to_representation(self, obj): def get_summary_fields(self, obj): summary_fields = super(JobSerializer, self).get_summary_fields(obj) - if obj.internal_limit: - summary_fields['internal_limit'] = {} - if obj.internal_limit.startswith('split'): - offset, step = Inventory.parse_split_params(obj.internal_limit) - summary_fields['internal_limit']['split'] = {'offset': offset, 'step': step} - else: - summary_fields['internal_limit']['unknown'] = self.internal_limit all_creds = [] # Organize credential data into multitude of deprecated fields # TODO: remove most of this as v1 is removed diff --git a/awx/api/templates/api/inventory_script_view.md b/awx/api/templates/api/inventory_script_view.md index dbbe5eb0c0fd..28126dcbbb6c 100644 --- a/awx/api/templates/api/inventory_script_view.md +++ b/awx/api/templates/api/inventory_script_view.md @@ -26,8 +26,8 @@ string of `?all=1` to return all hosts, including disabled ones. Specify a query string of `?towervars=1` to add variables to the hostvars of each host that specifies its enabled state and database ID. -Specify a query string of `?subset=split2of5` to produce an inventory that -has a restricted number of hosts according to the rules of job splitting. +Specify a query string of `?subset=slice2of5` to produce an inventory that +has a restricted number of hosts according to the rules of job slicing. To apply multiple query strings, join them with the `&` character, like `?hostvars=1&all=1`. 
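The `?subset=sliceNofM` behavior documented above can be sketched in a few lines of standalone Python. This is an illustration only, assuming the parsing and stride rules that `Inventory.parse_slice_params` and `get_script_data` implement later in this diff; the host names are invented, and `ValueError` stands in for DRF's `ParseError`:

```python
import re

def parse_slice_params(slice_str):
    # "slice2of5" -> (2, 5); mirrors Inventory.parse_slice_params below
    m = re.match(r"slice(?P<number>\d+)of(?P<step>\d+)", slice_str)
    if not m:
        raise ValueError('Could not parse subset as slice specification.')
    number, step = int(m.group('number')), int(m.group('step'))
    if number > step or number < 1:
        raise ValueError('Slice number must be between 1 and the total number of slices.')
    return number, step

# Slice N of M takes every M-th host from the name-sorted host list, starting at offset N - 1.
hosts = sorted(['web1', 'web2', 'db1', 'db2', 'lb1'])
number, step = parse_slice_params('slice2of5')
print(hosts[number - 1::step])  # ['db2']
```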
diff --git a/awx/api/urls/job_template.py b/awx/api/urls/job_template.py index 4d6ca79137ea..0b43575ba4a8 100644 --- a/awx/api/urls/job_template.py +++ b/awx/api/urls/job_template.py @@ -8,7 +8,7 @@ JobTemplateDetail, JobTemplateLaunch, JobTemplateJobsList, - JobTemplateSplitJobsList, + JobTemplateSliceWorkflowJobsList, JobTemplateCallback, JobTemplateSchedulesList, JobTemplateSurveySpec, @@ -29,7 +29,7 @@ url(r'^(?P<pk>[0-9]+)/$', JobTemplateDetail.as_view(), name='job_template_detail'), url(r'^(?P<pk>[0-9]+)/launch/$', JobTemplateLaunch.as_view(), name='job_template_launch'), url(r'^(?P<pk>[0-9]+)/jobs/$', JobTemplateJobsList.as_view(), name='job_template_jobs_list'), - url(r'^(?P<pk>[0-9]+)/split_jobs/$', JobTemplateSplitJobsList.as_view(), name='job_template_split_jobs_list'), + url(r'^(?P<pk>[0-9]+)/slice_workflow_jobs/$', JobTemplateSliceWorkflowJobsList.as_view(), name='job_template_slice_workflow_jobs_list'), url(r'^(?P<pk>[0-9]+)/callback/$', JobTemplateCallback.as_view(), name='job_template_callback'), url(r'^(?P<pk>[0-9]+)/schedules/$', JobTemplateSchedulesList.as_view(), name='job_template_schedules_list'), url(r'^(?P<pk>[0-9]+)/survey_spec/$', JobTemplateSurveySpec.as_view(), name='job_template_survey_spec'), diff --git a/awx/api/views/__init__.py b/awx/api/views/__init__.py index 0a7cc8146d0a..71250e6a0bff 100644 --- a/awx/api/views/__init__.py +++ b/awx/api/views/__init__.py @@ -2453,6 +2453,15 @@ def retrieve(self, request, *args, **kwargs): towervars = bool(request.query_params.get('towervars', '')) show_all = bool(request.query_params.get('all', '')) subset = request.query_params.get('subset', '') + if subset: + if not isinstance(subset, six.string_types): + raise ParseError(_('Inventory subset argument must be a string.')) + if subset.startswith('slice'): + slice_number, slice_count = Inventory.parse_slice_params(subset) + else: + raise ParseError(_('Subset does not use any supported syntax.')) + else: + slice_number, slice_count = 1, 1 if hostname: hosts_q = dict(name=hostname) if not show_all: @@ -2463,7 +2472,7 @@ hostvars=hostvars, towervars=towervars, show_all=show_all, - subset=subset + slice_number=slice_number, slice_count=slice_count )) @@ -3369,7 +3378,7 @@ def post(self, request, *args, **kwargs): if extra_vars is not None and job_template.ask_variables_on_launch: extra_vars_redacted, removed = extract_ansible_vars(extra_vars) kv['extra_vars'] = extra_vars_redacted - kv['_prevent_splitting'] = True # will only run against 1 host, so no point + kv['_prevent_slicing'] = True # will only run against 1 host, so no point with transaction.atomic(): job = job_template.create_job(**kv) @@ -3401,12 +3410,12 @@ def allowed_methods(self): return methods -class JobTemplateSplitJobsList(SubListCreateAPIView): +class JobTemplateSliceWorkflowJobsList(SubListCreateAPIView): model = WorkflowJob serializer_class = WorkflowJobListSerializer parent_model = JobTemplate - relationship = 'split_jobs' + relationship = 'slice_workflow_jobs' parent_key = 'job_template' @@ -3702,6 +3711,8 @@ def get(self, request, *args, **kwargs): def post(self, request, *args, **kwargs): obj = self.get_object() + if obj.is_sliced_job and not obj.job_template_id: + raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.')) new_workflow_job = obj.create_relaunch_workflow_job() new_workflow_job.signal_start() diff --git a/awx/main/migrations/0050_v330_job_slicing.py b/awx/main/migrations/0050_v330_job_slicing.py new file mode 100644 index
000000000000..c786f455db6b --- /dev/null +++ b/awx/main/migrations/0050_v330_job_slicing.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.11 on 2018-10-15 16:21 +from __future__ import unicode_literals + +import awx.main.utils.polymorphic +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('main', '0049_v330_validate_instance_capacity_adjustment'), + ] + + operations = [ + migrations.AddField( + model_name='job', + name='job_slice_count', + field=models.PositiveIntegerField(blank=True, default=1, help_text='If run as part of sliced jobs, the total number of slices. If 1, job is not part of a sliced job.'), + ), + migrations.AddField( + model_name='job', + name='job_slice_number', + field=models.PositiveIntegerField(blank=True, default=0, help_text='If part of a sliced job, the ID of the inventory slice operated on. If not part of a sliced job, this parameter is not used.'), + ), + migrations.AddField( + model_name='jobtemplate', + name='job_slice_count', + field=models.PositiveIntegerField(blank=True, default=1, help_text='The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.'), + ), + migrations.AddField( + model_name='workflowjob', + name='is_sliced_job', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='workflowjob', + name='job_template', + field=models.ForeignKey(blank=True, default=None, help_text='If automatically created for a sliced job run, the job template the workflow job was created from.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='slice_workflow_jobs', to='main.JobTemplate'), + ), + migrations.AlterField( + model_name='unifiedjob', + name='unified_job_template', + field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjob_unified_jobs', to='main.UnifiedJobTemplate'), + ), + ] diff --git a/awx/main/migrations/0050_v340_split_jobs.py b/awx/main/migrations/0050_v340_split_jobs.py deleted file mode 100644 index d7b80ee50cb3..000000000000 --- a/awx/main/migrations/0050_v340_split_jobs.py +++ /dev/null @@ -1,37 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by Django 1.11.11 on 2018-09-13 15:55 -from __future__ import unicode_literals - -import awx.main.utils.polymorphic -from django.db import migrations, models -import django.db.models.deletion - - -class Migration(migrations.Migration): - - dependencies = [ - ('main', '0049_v330_validate_instance_capacity_adjustment'), - ] - - operations = [ - migrations.AddField( - model_name='jobtemplate', - name='job_split_count', - field=models.IntegerField(blank=True, default=0, help_text='The number of jobs to split into at runtime.
Will cause the Job Template to launch a workflow if value is non-zero.'), - ), - migrations.AddField( - model_name='workflowjob', - name='job_template', - field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='split_jobs', to='main.JobTemplate'), - ), - migrations.AlterField( - model_name='unifiedjob', - name='unified_job_template', - field=models.ForeignKey(default=None, editable=False, null=True, on_delete=awx.main.utils.polymorphic.SET_NULL, related_name='unifiedjob_unified_jobs', to='main.UnifiedJobTemplate'), - ), - migrations.AddField( - model_name='job', - name='internal_limit', - field=models.CharField(default=b'', editable=False, max_length=1024), - ), - ] diff --git a/awx/main/models/inventory.py b/awx/main/models/inventory.py index 5c8cdebfa819..805e4eb1f454 100644 --- a/awx/main/models/inventory.py +++ b/awx/main/models/inventory.py @@ -221,17 +221,19 @@ def get_group_children_map(self): return group_children_map @staticmethod - def parse_split_params(split_str): - m = re.match(r"split(?P<offset>\d+)of(?P<step>\d+)", split_str) + def parse_slice_params(slice_str): + m = re.match(r"slice(?P<number>\d+)of(?P<step>\d+)", slice_str) if not m: - raise ParseError(_('Could not parse subset as split specification.')) - offset = int(m.group('offset')) + raise ParseError(_('Could not parse subset as slice specification.')) + number = int(m.group('number')) step = int(m.group('step')) - if offset > step: - raise ParseError(_('Split offset must be greater than total number of splits.')) - return (offset, step) + if number > step: + raise ParseError(_('Slice number must not be greater than the total number of slices.')) + elif number < 1: + raise ParseError(_('Slice number must be 1 or higher.')) + return (number, step) - def get_script_data(self, hostvars=False, towervars=False, show_all=False, subset=None): + def get_script_data(self, hostvars=False, towervars=False, show_all=False, slice_number=1, slice_count=1): hosts_kw = dict() if not show_all: hosts_kw['enabled'] = True @@ -239,14 +241,9 @@ def get_script_data(self, hostvars=False, towervars=False, show_all=False, subse if towervars: fetch_fields.append('enabled') hosts = self.hosts.filter(**hosts_kw).order_by('name').only(*fetch_fields) - if subset: - if not isinstance(subset, six.string_types): - raise ParseError(_('Inventory subset argument must be a string.')) - if subset.startswith('split'): - offset, step = Inventory.parse_split_params(subset) - hosts = hosts[offset::step] - else: - raise ParseError(_('Subset does not use any supported syntax.')) + if slice_count > 1: + offset = slice_number - 1 + hosts = hosts[offset::slice_count] data = dict() all_group = data.setdefault('all', dict()) diff --git a/awx/main/models/jobs.py b/awx/main/models/jobs.py index dd2197c573fe..78b5ead7b212 100644 --- a/awx/main/models/jobs.py +++ b/awx/main/models/jobs.py @@ -277,11 +277,11 @@ class Meta: default=False, allows_field='credentials' ) - job_split_count = models.IntegerField( blank=True, - default=0, - help_text=_("The number of jobs to split into at runtime. " - "Will cause the Job Template to launch a workflow if value is non-zero."), + job_slice_count = models.PositiveIntegerField( + blank=True, + default=1, + help_text=_("The number of jobs to slice into at runtime.
" + "Will cause the Job Template to launch a workflow if value is greater than 1."), ) admin_role = ImplicitRoleField( @@ -302,7 +302,8 @@ def _get_unified_job_class(cls): @classmethod def _get_unified_job_field_names(cls): return set(f.name for f in JobOptions._meta.fields) | set( - ['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials', 'internal_limit'] + ['name', 'description', 'schedule', 'survey_passwords', 'labels', 'credentials', + 'job_slice_number', 'job_slice_count'] ) @property @@ -328,13 +329,15 @@ def create_job(self, **kwargs): return self.create_unified_job(**kwargs) def create_unified_job(self, **kwargs): - prevent_splitting = kwargs.pop('_prevent_splitting', False) - split_event = bool(self.job_split_count > 1 and (not prevent_splitting)) + prevent_splitting = kwargs.pop('_prevent_slicing', False) + split_event = bool(self.job_slice_count > 1 and (not prevent_splitting)) if split_event: # A Split Job Template will generate a WorkflowJob rather than a Job from awx.main.models.workflow import WorkflowJobTemplate, WorkflowJobNode kwargs['_unified_job_class'] = WorkflowJobTemplate._get_unified_job_class() kwargs['_parent_field_name'] = "job_template" + kwargs.setdefault('_eager_fields', {}) + kwargs['_eager_fields']['is_sliced_job'] = True job = super(JobTemplate, self).create_unified_job(**kwargs) if split_event: try: @@ -342,11 +345,11 @@ def create_unified_job(self, **kwargs): except JobLaunchConfig.DoesNotExist: wj_config = JobLaunchConfig() actual_inventory = wj_config.inventory if wj_config.inventory else self.inventory - for idx in xrange(min(self.job_split_count, + for idx in xrange(min(self.job_slice_count, actual_inventory.hosts.count())): create_kwargs = dict(workflow_job=job, unified_job_template=self, - ancestor_artifacts=dict(job_split=idx)) + ancestor_artifacts=dict(job_split=idx + 1)) WorkflowJobNode.objects.create(**create_kwargs) return job @@ -531,10 +534,17 @@ class Meta: on_delete=models.SET_NULL, help_text=_('The SCM Refresh task used to make sure the playbooks were available for the job run'), ) - internal_limit = models.CharField( - max_length=1024, - default='', - editable=False, + job_slice_number = models.PositiveIntegerField( + blank=True, + default=0, + help_text=_("If part of a sliced job, the ID of the inventory slice operated on. " + "If not part of sliced job, parameter is not used."), + ) + job_slice_count = models.PositiveIntegerField( + blank=True, + default=1, + help_text=_("If ran as part of sliced jobs, the total number of slices. 
" + "If 1, job is not part of a sliced job."), ) @@ -580,10 +590,11 @@ def event_class(self): return JobEvent def copy_unified_job(self, **new_prompts): - new_prompts['_prevent_splitting'] = True - if self.internal_limit: - new_prompts.setdefault('_eager_fields', {}) - new_prompts['_eager_fields']['internal_limit'] = self.internal_limit # oddball, not from JT or prompts + # Needed for job slice relaunch consistency, do no re-spawn workflow job + # target same slice as original job + new_prompts['_prevent_slicing'] = True + new_prompts.setdefault('_eager_fields', {}) + new_prompts['_eager_fields']['job_slice_number'] = self.job_slice_number return super(Job, self).copy_unified_job(**new_prompts) @property diff --git a/awx/main/models/workflow.py b/awx/main/models/workflow.py index 7aa2753c6d99..e5e816d64ee9 100644 --- a/awx/main/models/workflow.py +++ b/awx/main/models/workflow.py @@ -219,11 +219,13 @@ def get_job_kwargs(self): data.update(accepted_fields) # build ancestor artifacts, save them to node model for later aa_dict = {} + is_root_node = True for parent_node in self.get_parent_nodes(): + is_root_node = False aa_dict.update(parent_node.ancestor_artifacts) if parent_node.job and hasattr(parent_node.job, 'artifacts'): aa_dict.update(parent_node.job.artifacts) - if aa_dict: + if aa_dict and not is_root_node: self.ancestor_artifacts = aa_dict self.save(update_fields=['ancestor_artifacts']) # process password list @@ -252,18 +254,12 @@ def get_job_kwargs(self): # ensure that unified jobs created by WorkflowJobs are marked data['_eager_fields'] = {'launch_type': 'workflow'} # Extra processing in the case that this is a split job - if 'job_split' in self.ancestor_artifacts: + if 'job_split' in self.ancestor_artifacts and is_root_node: split_str = six.text_type(self.ancestor_artifacts['job_split'] + 1) - data['_eager_fields']['name'] = six.text_type("{} - {}").format( - self.unified_job_template.name[:512 - len(split_str) - len(' - ')], - split_str - ) data['_eager_fields']['allow_simultaneous'] = True - data['_eager_fields']['internal_limit'] = 'split{0}of{1}'.format( - self.ancestor_artifacts['job_split'], - self.workflow_job.workflow_job_nodes.count() - ) - data['_prevent_splitting'] = True + data['_eager_fields']['job_slice_number'] = self.ancestor_artifacts['job_split'] + data['_eager_fields']['job_slice_count'] = self.workflow_job.workflow_job_nodes.count() + data['_prevent_slicing'] = True return data @@ -459,11 +455,16 @@ class Meta: ) job_template = models.ForeignKey( 'JobTemplate', - related_name='split_jobs', + related_name='slice_workflow_jobs', blank=True, null=True, default=None, on_delete=models.SET_NULL, + help_text=_("If automatically created for a sliced job run, the job template " + "the workflow job was created from."), + ) + is_sliced_job = models.BooleanField( + default=False ) @property diff --git a/awx/main/tasks.py b/awx/main/tasks.py index 2c543d88d3c7..f45bcd176b90 100644 --- a/awx/main/tasks.py +++ b/awx/main/tasks.py @@ -826,7 +826,8 @@ def should_use_proot(self, instance, **kwargs): def build_inventory(self, instance, **kwargs): script_data = instance.inventory.get_script_data( - hostvars=True, subset=getattr(instance, 'internal_limit', '') + hostvars=True, + slice_number=instance.job_slice_number, slice_count=instance.job_slice_count ) json_data = json.dumps(script_data) handle, path = tempfile.mkstemp(dir=kwargs.get('private_data_dir', None)) diff --git a/awx/main/tests/functional/api/test_job.py b/awx/main/tests/functional/api/test_job.py index 
40b17bc3685a..0e735eccb019 100644 --- a/awx/main/tests/functional/api/test_job.py +++ b/awx/main/tests/functional/api/test_job.py @@ -123,11 +123,11 @@ def test_job_relaunch_on_failed_hosts(post, inventory, project, machine_credenti @pytest.mark.django_db -def test_split_jt_recent_jobs(split_job_factory, admin_user, get): - workflow_job = split_job_factory(3, spawn=True) - split_jt = workflow_job.job_template +def test_slice_jt_recent_jobs(slice_job_factory, admin_user, get): + workflow_job = slice_job_factory(3, spawn=True) + slice_jt = workflow_job.job_template r = get( - url=split_jt.get_absolute_url(), + url=slice_jt.get_absolute_url(), user=admin_user, expect=200 ) diff --git a/awx/main/tests/functional/conftest.py b/awx/main/tests/functional/conftest.py index 6e0aa11f6cad..1b90225adb45 100644 --- a/awx/main/tests/functional/conftest.py +++ b/awx/main/tests/functional/conftest.py @@ -779,15 +779,15 @@ def disable_database_settings(mocker): @pytest.fixture -def split_jt_factory(inventory): +def slice_jt_factory(inventory): def r(N, jt_kwargs=None): for i in range(N): inventory.hosts.create(name='foo{}'.format(i)) if not jt_kwargs: jt_kwargs = {} return JobTemplate.objects.create( - name='split-jt-from-factory', - job_split_count=N, + name='slice-jt-from-factory', + job_slice_count=N, inventory=inventory, **jt_kwargs ) @@ -795,18 +795,18 @@ def r(N, jt_kwargs=None): @pytest.fixture -def split_job_factory(split_jt_factory): +def slice_job_factory(slice_jt_factory): def r(N, jt_kwargs=None, prompts=None, spawn=False): - split_jt = split_jt_factory(N, jt_kwargs=jt_kwargs) + slice_jt = slice_jt_factory(N, jt_kwargs=jt_kwargs) if not prompts: prompts = {} - split_job = split_jt.create_unified_job(**prompts) + slice_job = slice_jt.create_unified_job(**prompts) if spawn: - for node in split_job.workflow_nodes.all(): + for node in slice_job.workflow_nodes.all(): # does what the task manager does for spawning workflow jobs kv = node.get_job_kwargs() job = node.unified_job_template.create_unified_job(**kv) node.job = job node.save() - return split_job + return slice_job return r diff --git a/awx/main/tests/functional/models/test_inventory.py b/awx/main/tests/functional/models/test_inventory.py index 7b1574a56d24..e11a4f926cdb 100644 --- a/awx/main/tests/functional/models/test_inventory.py +++ b/awx/main/tests/functional/models/test_inventory.py @@ -38,11 +38,11 @@ def test_towervars(self, inventory): 'remote_tower_id': host.id } - def test_split_subset(self, inventory): + def test_slice_subset(self, inventory): for i in range(3): inventory.hosts.create(name='host{}'.format(i)) for i in range(3): - assert inventory.get_script_data(subset='split{}of3'.format(i)) == { + assert inventory.get_script_data(slice_number=i + 1, slice_count=3) == { 'all': {'hosts': ['host{}'.format(i)]} } diff --git a/awx/main/tests/functional/models/test_job.py b/awx/main/tests/functional/models/test_job.py index 16098d6fd12e..e64acafd2b0c 100644 --- a/awx/main/tests/functional/models/test_job.py +++ b/awx/main/tests/functional/models/test_job.py @@ -84,18 +84,18 @@ def test_job_host_summary_representation(host): @pytest.mark.django_db -class TestSplittingModels: +class TestSlicingModels: - def test_split_workflow_spawn(self, split_jt_factory): - split_jt = split_jt_factory(3) - job = split_jt.create_unified_job() + def test_slice_workflow_spawn(self, slice_jt_factory): + slice_jt = slice_jt_factory(3) + job = slice_jt.create_unified_job() assert isinstance(job, WorkflowJob) - assert job.job_template == split_jt - assert 
job.unified_job_template == split_jt + assert job.job_template == slice_jt + assert job.unified_job_template == slice_jt assert job.workflow_nodes.count() == 3 - def test_splits_with_JT_and_prompts(self, split_job_factory): - job = split_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True) + def test_slices_with_JT_and_prompts(self, slice_job_factory): + job = slice_job_factory(3, jt_kwargs={'ask_limit_on_launch': True}, prompts={'limit': 'foobar'}, spawn=True) assert job.launch_config.prompts_dict() == {'limit': 'foobar'} for node in job.workflow_nodes.all(): assert node.limit is None # data not saved in node prompts diff --git a/awx/main/tests/functional/test_rbac_job_start.py b/awx/main/tests/functional/test_rbac_job_start.py index f20611da14a8..6748b3df5de5 100644 --- a/awx/main/tests/functional/test_rbac_job_start.py +++ b/awx/main/tests/functional/test_rbac_job_start.py @@ -49,18 +49,18 @@ def test_inventory_use_access(inventory, user): @pytest.mark.django_db -def test_split_job(split_job_factory, rando): - workflow_job = split_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True) +def test_slice_job(slice_job_factory, rando): + workflow_job = slice_job_factory(2, jt_kwargs={'created_by': rando}, spawn=True) workflow_job.job_template.execute_role.members.add(rando) - # Abilities of user with execute_role for split workflow job container + # Abilities of user with execute_role for slice workflow job container assert WorkflowJobAccess(rando).can_start(workflow_job) # relaunch allowed for access_cls in (UnifiedJobAccess, WorkflowJobAccess): access = access_cls(rando) assert access.can_read(workflow_job) assert workflow_job in access.get_queryset() - # Abilities of user with execute_role for all the splits of the job + # Abilities of user with execute_role for all the slices of the job for node in workflow_job.workflow_nodes.all(): access = WorkflowJobNodeAccess(rando) assert access.can_read(node) diff --git a/awx/ui/client/features/jobs/jobsList.controller.js b/awx/ui/client/features/jobs/jobsList.controller.js index ac4bdfa1a9a1..cd1b9e1b9637 100644 --- a/awx/ui/client/features/jobs/jobsList.controller.js +++ b/awx/ui/client/features/jobs/jobsList.controller.js @@ -76,20 +76,12 @@ function ListJobsController ( return { icon, link, value }; }); - vm.getSplitJobDetails = (details) => { - const internalLimitDetails = Object.assign({}, details); - - if (!internalLimitDetails) { - return null; - } - - const splitJobDetails = internalLimitDetails.split; - - if (!splitJobDetails) { + vm.getSliceJobDetails = (job) => { + if (job.job_slice_count === 1) { return null; } - return `Split Job ${splitJobDetails.offset + 1}/${splitJobDetails.step}`; + return `Slice Job ${job.job_slice_number}/${job.job_slice_count}`; }; vm.getSref = ({ type, id }) => { diff --git a/awx/ui/client/features/jobs/jobsList.view.html b/awx/ui/client/features/jobs/jobsList.view.html index 8124b6ccf1ab..783a734024ef 100644 --- a/awx/ui/client/features/jobs/jobsList.view.html +++ b/awx/ui/client/features/jobs/jobsList.view.html @@ -24,7 +24,7 @@ header-value="{{ job.id }} - {{ job.name }}" header-state="{{ vm.getSref(job) }}" header-tag="{{ vm.jobTypes[job.type] }}" - secondary-tag="{{ vm.getSplitJobDetails(job.summary_fields.internal_limit) }}"> + secondary-tag="{{ vm.getSliceJobDetails(job) }}">
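For reference, the controller change above reduces the slice tag to a simple rule on the two serializer fields added earlier. Below is a rough Python rendering of `vm.getSliceJobDetails` (illustrative only; the real implementation is the AngularJS arrow function in jobsList.controller.js):

```python
def get_slice_job_details(job):
    # A job with a slice count of 1 is not part of a sliced run, so no tag is shown.
    if job['job_slice_count'] == 1:
        return None
    return 'Slice Job {}/{}'.format(job['job_slice_number'], job['job_slice_count'])

assert get_slice_job_details({'job_slice_number': 2, 'job_slice_count': 5}) == 'Slice Job 2/5'
assert get_slice_job_details({'job_slice_number': 0, 'job_slice_count': 1}) is None
```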
{{ vm.jobType.value }} - {{ vm.splitJobDetails.offset }} + {{ vm.sliceJobDetails.offset }}
diff --git a/awx/ui/client/features/output/output.strings.js b/awx/ui/client/features/output/output.strings.js index 9e2da6d98379..53430984b851 100644 --- a/awx/ui/client/features/output/output.strings.js +++ b/awx/ui/client/features/output/output.strings.js @@ -23,7 +23,7 @@ function OutputStrings (BaseString) { EXTRA_VARS: t.s('Read-only view of extra variables added to the job template'), INVENTORY: t.s('View the Inventory'), JOB_TEMPLATE: t.s('View the Job Template'), - SPLIT_JOB_DETAILS: t.s('Job is one of several from a JT that splits on inventory'), + SLICE_JOB_DETAILS: t.s('Job is one of several from a JT that slices on inventory'), PROJECT: t.s('View the Project'), PROJECT_UPDATE: t.s('View Project checkout results'), SCHEDULE: t.s('View the Schedule'), @@ -56,7 +56,7 @@ function OutputStrings (BaseString) { JOB_EXPLANATION: t.s('Explanation'), JOB_TAGS: t.s('Job Tags'), JOB_TEMPLATE: t.s('Job Template'), - SPLIT_JOB: t.s('Split Job'), + SLICE_JOB: t.s('Slice Job'), JOB_TYPE: t.s('Job Type'), LABELS: t.s('Labels'), LAUNCHED_BY: t.s('Launched By'), diff --git a/awx/ui/client/lib/components/launchTemplateButton/launchTemplateButton.component.js b/awx/ui/client/lib/components/launchTemplateButton/launchTemplateButton.component.js index 7a24be1711d9..5ff065c0c35b 100644 --- a/awx/ui/client/lib/components/launchTemplateButton/launchTemplateButton.component.js +++ b/awx/ui/client/lib/components/launchTemplateButton/launchTemplateButton.component.js @@ -41,7 +41,7 @@ function atLaunchTemplateCtrl ( selectedJobTemplate .postLaunch({ id: vm.template.id }) .then(({ data }) => { - /* Split Jobs: Redirect to WF Details page if returned + /* Slice Jobs: Redirect to WF Details page if returned job type is a WF job */ if (data.type === 'workflow_job' && data.workflow_job !== null) { $state.go('workflowResults', { id: data.workflow_job }, { reload: true }); diff --git a/awx/ui/client/src/templates/job_templates/job-template.form.js b/awx/ui/client/src/templates/job_templates/job-template.form.js index b807bd169d08..da3454ef609b 100644 --- a/awx/ui/client/src/templates/job_templates/job-template.form.js +++ b/awx/ui/client/src/templates/job_templates/job-template.form.js @@ -257,17 +257,17 @@ function(NotificationsList, i18n) { dataPlacement: 'right', control: '', }, - job_split_count: { - label: i18n._('Job Splitting'), + job_slice_count: { + label: i18n._('Job Slicing'), type: 'number', integer: true, min: 1, default: 1, spinner: true, - dataTitle: i18n._('Split Job Count'), + dataTitle: i18n._('Slice Job Count'), dataPlacement: 'right', dataContainer: 'body', - awPopOver: "
<p>" + i18n._("The number of jobs to split into at runtime. Will cause the Job Template to launch a workflow if value is non-zero.") + "</p>", + awPopOver: "<p>" + i18n._("The number of jobs to slice into at runtime. Will cause the Job Template to launch a workflow if value is greater than 1.") + "</p>
", ngDisabled: '!(job_template_obj.summary_fields.user_capabilities.edit || canAddJobTemplate)' }, diff_mode: { diff --git a/awx/ui/client/src/workflow-results/workflow-results.controller.js b/awx/ui/client/src/workflow-results/workflow-results.controller.js index b6556767cc6c..0477f4ca7031 100644 --- a/awx/ui/client/src/workflow-results/workflow-results.controller.js +++ b/awx/ui/client/src/workflow-results/workflow-results.controller.js @@ -39,7 +39,7 @@ export default ['workflowData', 'workflowResultsService', 'workflowDataOptions', DELETE: i18n._('Delete'), EDIT_USER: i18n._('Edit the user'), EDIT_WORKFLOW: i18n._('Edit the workflow job template'), - EDIT_SPLIT_TEMPLATE: i18n._('Edit the split job template'), + EDIT_SLICE_TEMPLATE: i18n._('Edit the slice job template'), EDIT_SCHEDULE: i18n._('Edit the schedule'), TOGGLE_STDOUT_FULLSCREEN: i18n._('Expand Output'), STATUS: '' // re-assigned elsewhere @@ -50,7 +50,7 @@ export default ['workflowData', 'workflowResultsService', 'workflowDataOptions', STARTED: i18n._('Started'), FINISHED: i18n._('Finished'), LABELS: i18n._('Labels'), - SPLIT_TEMPLATE: i18n._('Split Template'), + SLICE_TEMPLATE: i18n._('Slice Job Template'), STATUS: i18n._('Status') }, details: { diff --git a/awx/ui/client/src/workflow-results/workflow-results.partial.html b/awx/ui/client/src/workflow-results/workflow-results.partial.html index c88aa6a00d16..474fac1d6e1a 100644 --- a/awx/ui/client/src/workflow-results/workflow-results.partial.html +++ b/awx/ui/client/src/workflow-results/workflow-results.partial.html @@ -144,16 +144,16 @@
- {{ workflow.summary_fields.job_template.name }} diff --git a/docs/job_slicing.md b/docs/job_slicing.md new file mode 100644 index 000000000000..d9234a978e9a --- /dev/null +++ b/docs/job_slicing.md @@ -0,0 +1,13 @@ +# Job Slicing Overview + +Ansible, by default, runs jobs from a single control instance. At best, a single Ansible job can be parallelized on a single system via forks, but this doesn't fully take advantage of AWX's ability to distribute work to multiple nodes in a cluster. + +Job Slicing solves this by adding a Job Template field `job_slice_count`. This field specifies the number of **Jobs** to slice the Ansible run into. When this number is greater than 1, ``AWX`` will generate a **Workflow** from a **JobTemplate** instead of a **Job**. The **Inventory** will be distributed evenly amongst the slice jobs. The workflow job is then started and proceeds as though it were a normal workflow. The API will return either a **Job** resource (if `job_slice_count` < 2) or a **WorkflowJob** resource otherwise. Likewise, the UI will redirect to the appropriate screen to display the status of the run. + +## Implications for Job execution + +When jobs are sliced, they can run on any Tower node, and some may not run at the same time. Because of this, anything that relies on state shared across slices (set using modules such as ``set_fact``) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (if there is not enough capacity in the system, for example). + +## Simultaneous Execution Behavior + +Job Templates are not normally configured to execute simultaneously (``allow_simultaneous`` must be checked). Slicing overrides this behavior: it implies ``allow_simultaneous`` even if that setting is unchecked. diff --git a/docs/job_splitting.md b/docs/job_splitting.md deleted file mode 100644 index a8eb83b05863..000000000000 --- a/docs/job_splitting.md +++ /dev/null @@ -1,13 +0,0 @@ -# Job Splitting Overview - -Ansible, by default, runs jobs from a single control instance. At best a single Ansible job can be split up on a single system via forks but this doesn't fully take advantage of AWX's ability to distribute work to multiple nodes in a cluster. - -Job Splitting solves this by adding a Job Template field `job_split_count`. This field specifies the number of **Jobs** to split the Ansible run into. When this number is greater than 1 ``AWX`` will generate a **Workflow** from a **JobTemplate** instead of a **Job**. The **Inventory** will be split evenly amongst the split jobs. The workflow job is then started and proceeds as though it were a normal workflow. The API will return either a **Job** resource (if `job_split_count` < 2) or a **WorkflowJob** resource otherwise. Likewise, the UI will redirect to the appropriate screen to display the status of the run. - -## Implications for Job execution - -When jobs are split they can run on any Tower node and some may not run at the same time. Because of this, anything that relies on setting/split state (using modules such as ``set_fact``) will not work as expected. It's reasonable to expect that not all jobs will actually run at the same time (if there is not enough capacity in the system for example) - -## Simultaneous Execution Behavior - -By default Job Templates aren't normally configured to execute simultaneously (``allow_simultaneous`` must be checked). Splitting overrides this behavior and implies ``allow_simultaneous`` even if that setting is unchecked.
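To make the even-distribution rule concrete: each slice takes a stride through the name-sorted host list, per the `hosts[slice_number - 1::slice_count]` logic in `Inventory.get_script_data`. A minimal sketch follows (host names invented; note that `JobTemplate.create_unified_job` also caps the number of workflow nodes at the smaller of `job_slice_count` and the host count):

```python
def slice_inventory(host_names, slice_count):
    # Slice n of m takes hosts[n - 1::m] from the name-sorted list.
    ordered = sorted(host_names)
    return {n: ordered[n - 1::slice_count] for n in range(1, slice_count + 1)}

print(slice_inventory(['host%d' % i for i in range(8)], 3))
# {1: ['host0', 'host3', 'host6'], 2: ['host1', 'host4', 'host7'], 3: ['host2', 'host5']}
```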