From 31df0b73e0421ca6819ef0aca99496587c06e402 Mon Sep 17 00:00:00 2001
From: Reza Fazeli
Date: Mon, 27 Jan 2025 12:40:19 -0500
Subject: [PATCH] fix: pass environment_id when calling assistant
 message_stateless (#70)

* fix: pass environment id when calling assistant message_stateless endpoint

* fix: add ci step to install nltk punkt data

* fix: download nltk data another way

* fix: small bug

* fix: add a step to output pytest error message

* fix: more nltk data installs

* fix: remove temp error message step
---
 .github/workflows/ci.yaml                            |  4 ++++
 assistant_skill_analysis/highlighting/highlighter.py |  6 ++++++
 assistant_skill_analysis/inferencing/inferencer.py   | 11 +++++++++++
 assistant_skill_analysis/utils/skills_util.py        |  3 +++
 new_experience_skill_analysis.ipynb                  | 10 ++++++++--
 5 files changed, 32 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index e9114d2..ac150d0 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -23,6 +23,10 @@ jobs:
         pip install pytest
         pip install -r requirements.txt
         export PYTHONPATH=`pwd`
+    - name: Download NLTK data
+      run: |
+        python -m nltk.downloader punkt
+        python -m nltk.downloader punkt_tab
     - name: style-check
       run: |
         black --check assistant_skill_analysis
diff --git a/assistant_skill_analysis/highlighting/highlighter.py b/assistant_skill_analysis/highlighting/highlighter.py
index e471461..243ded1 100644
--- a/assistant_skill_analysis/highlighting/highlighter.py
+++ b/assistant_skill_analysis/highlighting/highlighter.py
@@ -26,6 +26,7 @@ def get_highlights_in_batch_multi_thread(
     skill_id=None,
     assistant_id=None,
     intent_to_action_mapping=None,
+    environment_id=None,
 ):
     """
     Given the prediction result, rank prediction results from worst to best
@@ -39,6 +40,7 @@ def get_highlights_in_batch_multi_thread(
     :param show_worst_k: the top worst k results based on heuristics
     :param assistant_id:
     :param intent_to_action_mapping:
+    :param environment_id: the environment id
     :return:
     """
     if isinstance(conversation, ibm_watson.AssistantV1):
@@ -68,6 +70,7 @@ def get_highlights_in_batch_multi_thread(
             skill_id=skill_id,
             assistant_id=assistant_id,
             intent_to_action_mapping=intent_to_action_mapping,
+            environment_id=environment_id,
         )
 
     if not adversarial_results.empty:
@@ -253,6 +256,7 @@ def _adversarial_examples_multi_thread_inference(
     skill_id=None,
     assistant_id=None,
     intent_to_action_mapping=None,
+    environment_id=None,
 ):
     """
     Perform multi threaded inference on all the adversarial examples
@@ -261,6 +265,7 @@ def _adversarial_examples_multi_thread_inference(
     :param skill_id:
     :param assistant_id:
     :param intent_to_action_mapping:
+    :param environment_id:
     """
     if isinstance(conversation, ibm_watson.AssistantV1):
         assert skill_id is not None
@@ -299,6 +304,7 @@ def _adversarial_examples_multi_thread_inference(
         skill_id=skill_id,
         assistant_id=assistant_id,
         intent_to_action_mapping=intent_to_action_mapping,
+        environment_id=environment_id,
     )
     display(Markdown(" "))
     return adversarial_results, adversarial_span_dict
diff --git a/assistant_skill_analysis/inferencing/inferencer.py b/assistant_skill_analysis/inferencing/inferencer.py
index 2d19200..a2173b6 100644
--- a/assistant_skill_analysis/inferencing/inferencer.py
+++ b/assistant_skill_analysis/inferencing/inferencer.py
@@ -18,6 +18,7 @@ def inference(
     skill_id=None,
     intent_to_action_mapping=None,
     timeout=1,
+    environment_id=None,
 ):
     """
     query the message api to generate results on the test data
@@ -29,6 +30,7 @@ def inference(
     :parameter: assistant_id:
     :parameter: intent_to_action_mapping:
    :parameter: timeout: integer or float that specifies number of seconds each thread should wait for inference result
+    :parameter: environment_id: environment id
     :return result_df: results dataframe
     """
     skd_version = "V1"
@@ -54,6 +56,7 @@ def inference(
                 user_id=user_id,
                 assistant_id=assistant_id,
                 skill_id=skill_id,
+                environment_id=environment_id,
             )
             time.sleep(0.3)
 
@@ -118,6 +121,7 @@ def inference(
             assistant_id=assistant_id,
             intent_to_action_mapping=intent_to_action_mapping,
             timeout=timeout,
+            environment_id=environment_id,
         )
 
     return result_df
@@ -131,6 +135,7 @@ def thread_inference(
     skill_id=None,
     intent_to_action_mapping=None,
     timeout=1,
+    environment_id=None,
 ):
     """
     Perform multi thread inference for faster inference time
@@ -143,6 +148,7 @@ def thread_inference(
     :param assistant_id:
     :parameter: intent_to_action_mapping:
    :parameter: timeout: integer or float that specifies number of seconds each thread should wait for inference result
+    :parameter: environment_id: environment id
     :return result_df: results dataframe
     """
     if isinstance(conversation, ibm_watson.AssistantV1):
@@ -162,6 +168,7 @@ def thread_inference(
                 user_id=user_id,
                 assistant_id=assistant_id,
                 skill_id=skill_id,
+                environment_id=environment_id,
             )
         except Exception:
             count += 1
@@ -179,6 +186,7 @@ def thread_inference(
                 user_id=user_id,
                 assistant_id=assistant_id,
                 skill_id=skill_id,
+                environment_id=environment_id,
                 retry=0,
             )
             futures[future] = (test_example, ground_truth)
@@ -248,6 +256,7 @@ def get_intent_confidence_retry(
     user_id,
     assistant_id,
     skill_id,
+    environment_id,
     retry=0,
 ):
     try:
@@ -258,6 +267,7 @@ def get_intent_confidence_retry(
             user_id=user_id,
             assistant_id=assistant_id,
             skill_id=skill_id,
+            environment_id=environment_id,
         )
     except Exception as e:
         if retry < MAX_RETRY:
@@ -268,6 +278,7 @@ def get_intent_confidence_retry(
                 user_id,
                 assistant_id,
                 skill_id,
+                environment_id=environment_id,
                 retry=retry + 1,
             )
         else:
diff --git a/assistant_skill_analysis/utils/skills_util.py b/assistant_skill_analysis/utils/skills_util.py
index f0df92d..e8d913b 100644
--- a/assistant_skill_analysis/utils/skills_util.py
+++ b/assistant_skill_analysis/utils/skills_util.py
@@ -453,6 +453,7 @@ def retrieve_classifier_response(
     user_id="256",
     assistant_id=None,
     skill_id=None,
+    environment_id=None,
 ):
     """
     retrieve classifier response
@@ -462,6 +463,7 @@ def retrieve_classifier_response(
     :param alternate_intents:
     :param user_id:
     :param assistant_id:
+    :param environment_id: environment id
     :return response:
     """
     if isinstance(conversation, ibm_watson.AssistantV1):
@@ -482,5 +484,6 @@ def retrieve_classifier_response(
             },
             context={"metadata": {"user_id": user_id}},
             assistant_id=assistant_id,
+            environment_id=environment_id,
         ).get_result()
     return response
diff --git a/new_experience_skill_analysis.ipynb b/new_experience_skill_analysis.ipynb
index 904dbae..f34fdb0 100644
--- a/new_experience_skill_analysis.ipynb
+++ b/new_experience_skill_analysis.ipynb
@@ -114,6 +114,8 @@
     "\n",
     "iam_apikey, _, ASSISTANT_ID = skills_util.input_credentials(input_apikey=True,input_skill_id=False,input_assistant_id=True)\n",
     "\n",
+    "ENVIRONMENT_ID = ASSISTANT_ID\n",
+    "\n",
     "# If you do not have IAM based API Keys\n",
     "# but have access to a Username, Password\n",
     "# You can use username and password for authentication purpose and comment out iam_apikey\n",
@@ -614,7 +616,8 @@
     " max_thread=THREAD_NUM, \n",
     " assistant_id=ASSISTANT_ID,\n",
     " intent_to_action_mapping=intent_to_action_mapping,\n",
-    " timeout=TIMEOUT\n",
+    " timeout=TIMEOUT,\n",
+    " environment_id=ENVIRONMENT_ID,\n",
     " )"
    ]
   },
@@ -794,7 +797,8 @@
     " max_thread = 1, \n",
     " assistant_id=ASSISTANT_ID,\n",
     " intent_to_action_mapping=intent_to_action_mapping,\n",
-    " timeout=TIMEOUT\n",
+    " timeout=TIMEOUT,\n",
+    " environment_id=ENVIRONMENT_ID,\n",
     " )\n",
     "\n",
     "highlighter.get_highlights_in_batch_multi_thread(conversation=conversation, \n",
@@ -805,6 +809,7 @@
     " lang_util=lang_util,\n",
     " assistant_id=ASSISTANT_ID,\n",
     " intent_to_action_mapping=intent_to_action_mapping,\n",
+    " environment_id=ENVIRONMENT_ID,\n",
     " )"
    ]
   },
@@ -847,6 +852,7 @@
     " lang_util=lang_util,\n",
     " assistant_id=ASSISTANT_ID,\n",
     " intent_to_action_mapping=intent_to_action_mapping,\n",
+    " environment_id=ENVIRONMENT_ID,\n",
     " )"
    ]
   },