diff --git a/autogpt_platform/backend/backend/data/graph.py b/autogpt_platform/backend/backend/data/graph.py
index 77108f8d190b..f2c05de0dab9 100644
--- a/autogpt_platform/backend/backend/data/graph.py
+++ b/autogpt_platform/backend/backend/data/graph.py
@@ -6,7 +6,13 @@ from typing import Any, Literal, Optional, Type
 
 import prisma
-from prisma.models import AgentGraph, AgentGraphExecution, AgentNode, AgentNodeLink
+from prisma.models import (
+    AgentGraph,
+    AgentGraphExecution,
+    AgentNode,
+    AgentNodeLink,
+    StoreListingVersion,
+)
 from prisma.types import AgentGraphWhereInput
 from pydantic.fields import computed_field
@@ -529,7 +535,6 @@ async def get_execution(user_id: str, execution_id: str) -> GraphExecution | Non
 async def get_graph(
     graph_id: str,
     version: int | None = None,
-    template: bool = False,
     user_id: str | None = None,
     for_export: bool = False,
 ) -> GraphModel | None:
@@ -543,21 +548,36 @@ async def get_graph(
     where_clause: AgentGraphWhereInput = {
         "id": graph_id,
     }
+
     if version is not None:
         where_clause["version"] = version
-    elif not template:
+    else:
         where_clause["isActive"] = True
 
-    # TODO: Fix hack workaround to get adding store agents to work
-    if user_id is not None and not template:
-        where_clause["userId"] = user_id
-
     graph = await AgentGraph.prisma().find_first(
         where=where_clause,
         include=AGENT_GRAPH_INCLUDE,
         order={"version": "desc"},
     )
-    return GraphModel.from_db(graph, for_export) if graph else None
+
+    # The Graph has to be owned by the user or a store listing.
+    if (
+        graph is None
+        or graph.userId != user_id
+        and not (
+            await StoreListingVersion.prisma().find_first(
+                where=prisma.types.StoreListingVersionWhereInput(
+                    agentId=graph_id,
+                    agentVersion=version or graph.version,
+                    isDeleted=False,
+                    StoreListing={"is": {"isApproved": True}},
+                )
+            )
+        )
+    ):
+        return None
+
+    return GraphModel.from_db(graph, for_export)
 
 
 async def set_graph_active_version(graph_id: str, version: int, user_id: str) -> None:
@@ -611,9 +631,7 @@ async def create_graph(graph: Graph, user_id: str) -> GraphModel:
     async with transaction() as tx:
         await __create_graph(tx, graph, user_id)
 
-    if created_graph := await get_graph(
-        graph.id, graph.version, graph.is_template, user_id=user_id
-    ):
+    if created_graph := await get_graph(graph.id, graph.version, user_id=user_id):
         return created_graph
 
     raise ValueError(f"Created graph {graph.id} v{graph.version} is not in DB")
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index 046da905a11c..e58f8013380d 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -780,7 +780,7 @@ def add_execution(
         graph_id: str,
         data: BlockInput,
         user_id: str,
-        graph_version: int | None = None,
+        graph_version: int,
     ) -> GraphExecutionEntry:
         graph: GraphModel | None = self.db_client.get_graph(
             graph_id=graph_id, user_id=user_id, version=graph_version
diff --git a/autogpt_platform/backend/backend/executor/scheduler.py b/autogpt_platform/backend/backend/executor/scheduler.py
index edf126f6919a..eee45819c5fd 100644
--- a/autogpt_platform/backend/backend/executor/scheduler.py
+++ b/autogpt_platform/backend/backend/executor/scheduler.py
@@ -63,7 +63,10 @@ def execute_graph(**kwargs):
     try:
         log(f"Executing recurring job for graph #{args.graph_id}")
         get_execution_client().add_execution(
-            args.graph_id, args.input_data, args.user_id
+            graph_id=args.graph_id,
+            data=args.input_data,
+            user_id=args.user_id,
+            graph_version=args.graph_version,
         )
     except Exception as e:
         logger.exception(f"Error executing graph {args.graph_id}: {e}")
diff --git a/autogpt_platform/backend/backend/server/integrations/router.py b/autogpt_platform/backend/backend/server/integrations/router.py
index 6a8c274dd733..ffc460ab8bf7 100644
--- a/autogpt_platform/backend/backend/server/integrations/router.py
+++ b/autogpt_platform/backend/backend/server/integrations/router.py
@@ -320,7 +320,8 @@ async def webhook_ingress_generic(
             continue
         logger.debug(f"Executing graph #{node.graph_id} node #{node.id}")
         executor.add_execution(
-            node.graph_id,
+            graph_id=node.graph_id,
+            graph_version=node.graph_version,
             data={f"webhook_{webhook_id}_payload": payload},
             user_id=webhook.user_id,
         )
diff --git a/autogpt_platform/backend/backend/server/rest_api.py b/autogpt_platform/backend/backend/server/rest_api.py
index c5be1c179260..762cf48fe57c 100644
--- a/autogpt_platform/backend/backend/server/rest_api.py
+++ b/autogpt_platform/backend/backend/server/rest_api.py
@@ -2,6 +2,7 @@ import logging
 import typing
 
+import autogpt_libs.auth.models
 import fastapi
 import fastapi.responses
 import starlette.middleware.cors
@@ -17,6 +18,7 @@ import backend.data.user
 import backend.server.routers.v1
 import backend.server.v2.library.routes
+import backend.server.v2.store.model
 import backend.server.v2.store.routes
 import backend.util.service
 import backend.util.settings
@@ -117,9 +119,24 @@ def run(self):
 
     @staticmethod
     async def test_execute_graph(
-        graph_id: str, node_input: dict[typing.Any, typing.Any], user_id: str
+        graph_id: str,
+        graph_version: int,
+        node_input: dict[typing.Any, typing.Any],
+        user_id: str,
     ):
-        return backend.server.routers.v1.execute_graph(graph_id, node_input, user_id)
+        return backend.server.routers.v1.execute_graph(
+            graph_id, graph_version, node_input, user_id
+        )
+
+    @staticmethod
+    async def test_get_graph(
+        graph_id: str,
+        graph_version: int,
+        user_id: str,
+    ):
+        return await backend.server.routers.v1.get_graph(
+            graph_id, user_id, graph_version
+        )
 
     @staticmethod
     async def test_create_graph(
@@ -149,5 +166,18 @@ async def test_get_graph_run_node_execution_results(
     async def test_delete_graph(graph_id: str, user_id: str):
         return await backend.server.routers.v1.delete_graph(graph_id, user_id)
 
+    @staticmethod
+    async def test_create_store_listing(
+        request: backend.server.v2.store.model.StoreSubmissionRequest, user_id: str
+    ):
+        return await backend.server.v2.store.routes.create_submission(request, user_id)
+
+    @staticmethod
+    async def test_review_store_listing(
+        request: backend.server.v2.store.model.ReviewSubmissionRequest,
+        user: autogpt_libs.auth.models.User,
+    ):
+        return await backend.server.v2.store.routes.review_submission(request, user)
+
     def set_test_dependency_overrides(self, overrides: dict):
         app.dependency_overrides.update(overrides)
diff --git a/autogpt_platform/backend/backend/server/routers/v1.py b/autogpt_platform/backend/backend/server/routers/v1.py
index 9e8bf50d6d9d..7bfd41b502d9 100644
--- a/autogpt_platform/backend/backend/server/routers/v1.py
+++ b/autogpt_platform/backend/backend/server/routers/v1.py
@@ -200,12 +200,11 @@ async def get_graph_all_versions(
 async def create_new_graph(
     create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)]
 ) -> graph_db.GraphModel:
-    return await do_create_graph(create_graph, is_template=False, user_id=user_id)
+    return await do_create_graph(create_graph, user_id=user_id)
 
 
 async def do_create_graph(
     create_graph: CreateGraph,
-    is_template: bool,
     # user_id doesn't have to be annotated like on other endpoints,
     # because create_graph isn't used directly as an endpoint
     user_id: str,
@@ -217,7 +216,6 @@ async def do_create_graph(
         graph = await graph_db.get_graph(
             create_graph.template_id,
             create_graph.template_version,
-            template=True,
             user_id=user_id,
         )
         if not graph:
@@ -230,8 +228,6 @@ async def do_create_graph(
             status_code=400, detail="Either graph or template_id must be provided."
         )
 
-    graph.is_template = is_template
-    graph.is_active = not is_template
     graph.reassign_ids(user_id=user_id, reassign_graph_id=True)
 
     graph = await graph_db.create_graph(graph, user_id=user_id)
@@ -368,12 +364,13 @@ def get_credentials(credentials_id: str) -> "Credentials | None":
 )
 def execute_graph(
     graph_id: str,
+    graph_version: int,
     node_input: dict[Any, Any],
     user_id: Annotated[str, Depends(get_user_id)],
 ) -> dict[str, Any]:  # FIXME: add proper return type
     try:
         graph_exec = execution_manager_client().add_execution(
-            graph_id, node_input, user_id=user_id
+            graph_id, node_input, user_id=user_id, graph_version=graph_version
         )
         return {"id": graph_exec.graph_exec_id}
     except Exception as e:
@@ -452,7 +449,7 @@ async def get_templates(
 async def get_template(
     graph_id: str, version: int | None = None
 ) -> graph_db.GraphModel:
-    graph = await graph_db.get_graph(graph_id, version, template=True)
+    graph = await graph_db.get_graph(graph_id, version)
     if not graph:
         raise HTTPException(status_code=404, detail=f"Template #{graph_id} not found.")
     return graph
@@ -466,7 +463,7 @@ async def get_template(
 async def create_new_template(
     create_graph: CreateGraph, user_id: Annotated[str, Depends(get_user_id)]
 ) -> graph_db.GraphModel:
-    return await do_create_graph(create_graph, is_template=True, user_id=user_id)
+    return await do_create_graph(create_graph, user_id=user_id)
 
 
 ########################################################
diff --git a/autogpt_platform/backend/backend/server/v2/library/routes.py b/autogpt_platform/backend/backend/server/v2/library/routes.py
index 0c3b1a77ec93..3ee8680254e2 100644
--- a/autogpt_platform/backend/backend/server/v2/library/routes.py
+++ b/autogpt_platform/backend/backend/server/v2/library/routes.py
@@ -91,7 +91,7 @@ async def add_agent_to_library(
         # Create a new graph from the template
         graph = await backend.data.graph.get_graph(
-            agent.id, agent.version, template=True, user_id=user_id
+            agent.id, agent.version, user_id=user_id
         )
 
         if not graph:
diff --git a/autogpt_platform/backend/backend/server/v2/store/db.py b/autogpt_platform/backend/backend/server/v2/store/db.py
index deed4e158734..6b5ed733c749 100644
--- a/autogpt_platform/backend/backend/server/v2/store/db.py
+++ b/autogpt_platform/backend/backend/server/v2/store/db.py
@@ -325,7 +325,10 @@ async def get_store_submissions(
         where = prisma.types.StoreSubmissionWhereInput(user_id=user_id)
         # Query submissions from database
         submissions = await prisma.models.StoreSubmission.prisma().find_many(
-            where=where, skip=skip, take=page_size, order=[{"date_submitted": "desc"}]
+            where=where,
+            skip=skip,
+            take=page_size,
+            order=[{"date_submitted": "desc"}],
         )
 
         # Get total count for pagination
@@ -504,7 +507,15 @@ async def create_store_submission(
                         "subHeading": sub_heading,
                     }
                 },
-            }
+            },
+            include={"StoreListingVersions": True},
+        )
+
+        slv_id = (
+            listing.StoreListingVersions[0].id
+            if listing.StoreListingVersions is not None
+            and len(listing.StoreListingVersions) > 0
+            else None
         )
 
         logger.debug(f"Created store listing for agent {agent_id}")
@@ -521,6 +532,7 @@ async def create_store_submission(
             status=prisma.enums.SubmissionStatus.PENDING,
             runs=0,
             rating=0.0,
+            store_listing_version_id=slv_id,
         )
 
     except (
@@ -811,9 +823,7 @@ async def get_agent(
         agent = store_listing_version.Agent
 
-        graph = await backend.data.graph.get_graph(
-            agent.id, agent.version, template=True
-        )
+        graph = await backend.data.graph.get_graph(agent.id, agent.version)
 
         if not graph:
             raise fastapi.HTTPException(
@@ -832,3 +842,74 @@ async def get_agent(
         raise backend.server.v2.store.exceptions.DatabaseError(
             "Failed to fetch agent"
         ) from e
+
+
+async def review_store_submission(
+    store_listing_version_id: str, is_approved: bool, comments: str, reviewer_id: str
+) -> prisma.models.StoreListingSubmission:
+    """Review a store listing submission."""
+    try:
+        store_listing_version = (
+            await prisma.models.StoreListingVersion.prisma().find_unique(
+                where={"id": store_listing_version_id},
+                include={"StoreListing": True},
+            )
+        )
+
+        if not store_listing_version or not store_listing_version.StoreListing:
+            raise fastapi.HTTPException(
+                status_code=404,
+                detail=f"Store listing version {store_listing_version_id} not found",
+            )
+
+        status = (
+            prisma.enums.SubmissionStatus.APPROVED
+            if is_approved
+            else prisma.enums.SubmissionStatus.REJECTED
+        )
+
+        create_data = prisma.types.StoreListingSubmissionCreateInput(
+            StoreListingVersion={"connect": {"id": store_listing_version_id}},
+            Status=status,
+            reviewComments=comments,
+            Reviewer={"connect": {"id": reviewer_id}},
+            StoreListing={"connect": {"id": store_listing_version.StoreListing.id}},
+            createdAt=datetime.now(),
+            updatedAt=datetime.now(),
+        )
+
+        update_data = prisma.types.StoreListingSubmissionUpdateInput(
+            Status=status,
+            reviewComments=comments,
+            Reviewer={"connect": {"id": reviewer_id}},
+            StoreListing={"connect": {"id": store_listing_version.StoreListing.id}},
+            updatedAt=datetime.now(),
+        )
+
+        if is_approved:
+            await prisma.models.StoreListing.prisma().update(
+                where={"id": store_listing_version.StoreListing.id},
+                data={"isApproved": True},
+            )
+
+        submission = await prisma.models.StoreListingSubmission.prisma().upsert(
+            where={"storeListingVersionId": store_listing_version_id},
+            data=prisma.types.StoreListingSubmissionUpsertInput(
+                create=create_data,
+                update=update_data,
+            ),
+        )
+
+        if not submission:
+            raise fastapi.HTTPException(
+                status_code=404,
+                detail=f"Store listing submission {store_listing_version_id} not found",
+            )
+
+        return submission
+
+    except Exception as e:
+        logger.error(f"Error reviewing store submission: {str(e)}")
+        raise backend.server.v2.store.exceptions.DatabaseError(
+            "Failed to review store submission"
+        ) from e
diff --git a/autogpt_platform/backend/backend/server/v2/store/model.py b/autogpt_platform/backend/backend/server/v2/store/model.py
index 78c595000cb8..700f8d56ff34 100644
--- a/autogpt_platform/backend/backend/server/v2/store/model.py
+++ b/autogpt_platform/backend/backend/server/v2/store/model.py
@@ -115,6 +115,7 @@ class StoreSubmission(pydantic.BaseModel):
     status: prisma.enums.SubmissionStatus
     runs: int
     rating: float
+    store_listing_version_id: str | None = None
 
 
 class StoreSubmissionsResponse(pydantic.BaseModel):
@@ -151,3 +152,9 @@ class StoreReviewCreate(pydantic.BaseModel):
     store_listing_version_id: str
     score: int
     comments: str | None = None
+
+
+class ReviewSubmissionRequest(pydantic.BaseModel):
+    store_listing_version_id: str
+    isApproved: bool
+    comments: str
diff --git a/autogpt_platform/backend/backend/server/v2/store/routes.py b/autogpt_platform/backend/backend/server/v2/store/routes.py
index 6dc9d7594963..f421f76d618e 100644
--- a/autogpt_platform/backend/backend/server/v2/store/routes.py
+++ b/autogpt_platform/backend/backend/server/v2/store/routes.py
@@ -642,3 +642,33 @@ def remove_credentials(obj):
     return fastapi.responses.FileResponse(
         tmp_file.name, filename=file_name, media_type="application/json"
     )
+
+
+@router.post(
+    "/submissions/review/{store_listing_version_id}",
+    tags=["store", "private"],
+)
+async def review_submission(
+    request: backend.server.v2.store.model.ReviewSubmissionRequest,
+    user: typing.Annotated[
+        autogpt_libs.auth.models.User,
+        fastapi.Depends(autogpt_libs.auth.depends.requires_admin_user),
+    ],
+):
+    # Proceed with the review submission logic
+    try:
+        submission = await backend.server.v2.store.db.review_store_submission(
+            store_listing_version_id=request.store_listing_version_id,
+            is_approved=request.isApproved,
+            comments=request.comments,
+            reviewer_id=user.user_id,
+        )
+        return submission
+    except Exception:
+        logger.exception("Exception occurred whilst reviewing store submission")
+        return fastapi.responses.JSONResponse(
+            status_code=500,
+            content={
+                "detail": "An error occurred while reviewing the store submission"
+            },
+        )
diff --git a/autogpt_platform/backend/backend/usecases/block_autogen.py b/autogpt_platform/backend/backend/usecases/block_autogen.py
index dc34b79f3278..3c5ab7719c7e 100644
--- a/autogpt_platform/backend/backend/usecases/block_autogen.py
+++ b/autogpt_platform/backend/backend/usecases/block_autogen.py
@@ -253,7 +253,7 @@ async def block_autogen_agent():
         test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
         input_data = {"input": "Write me a block that writes a string into a file."}
         response = await server.agent_server.test_execute_graph(
-            test_graph.id, input_data, test_user.id
+            test_graph.id, test_graph.version, input_data, test_user.id
         )
         print(response)
         result = await wait_execution(
diff --git a/autogpt_platform/backend/backend/usecases/reddit_marketing.py b/autogpt_platform/backend/backend/usecases/reddit_marketing.py
index 8ea2f651f30e..1960e97a69a6 100644
--- a/autogpt_platform/backend/backend/usecases/reddit_marketing.py
+++ b/autogpt_platform/backend/backend/usecases/reddit_marketing.py
@@ -157,7 +157,7 @@ async def reddit_marketing_agent():
         test_graph = await create_graph(create_test_graph(), user_id=test_user.id)
         input_data = {"subreddit": "AutoGPT"}
         response = await server.agent_server.test_execute_graph(
-            test_graph.id, input_data, test_user.id
+            test_graph.id, test_graph.version, input_data, test_user.id
         )
         print(response)
         result = await wait_execution(test_user.id, test_graph.id, response["id"], 120)
diff --git a/autogpt_platform/backend/backend/usecases/sample.py b/autogpt_platform/backend/backend/usecases/sample.py
index eb6ab6211f01..4548b75233f3 100644
--- a/autogpt_platform/backend/backend/usecases/sample.py
+++ b/autogpt_platform/backend/backend/usecases/sample.py
@@ -8,12 +8,19 @@ from backend.util.test import SpinTestServer, wait_execution
 
 
-async def create_test_user() -> User:
-    test_user_data = {
-        "sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
-        "email": "testuser#example.com",
-        "name": "Test User",
-    }
+async def create_test_user(alt_user: bool = False) -> User:
+    if alt_user:
+        test_user_data = {
+            "sub": "3e53486c-cf57-477e-ba2a-cb02dc828e1b",
+            "email": "testuser2#example.com",
+            "name": "Test User 2",
+        }
+    else:
+        test_user_data = {
+            "sub": "ef3b97d7-1161-4eb4-92b2-10c24fb154c1",
+            "email": "testuser#example.com",
+            "name": "Test User",
+        }
     user = await get_or_create_user(test_user_data)
     return user
@@ -79,7 +86,7 @@ async def sample_agent():
         test_graph = await create_graph(create_test_graph(), test_user.id)
         input_data = {"input_1": "Hello", "input_2": "World"}
         response = await server.agent_server.test_execute_graph(
-            test_graph.id, input_data, test_user.id
+            test_graph.id, test_graph.version, input_data, test_user.id
         )
         print(response)
         result = await wait_execution(test_user.id, test_graph.id, response["id"], 10)
diff --git a/autogpt_platform/backend/schema.prisma b/autogpt_platform/backend/schema.prisma
index ea1865791be0..81784b8556c8 100644
--- a/autogpt_platform/backend/schema.prisma
+++ b/autogpt_platform/backend/schema.prisma
@@ -235,7 +235,7 @@ model AgentGraphExecution {
   AgentNodeExecutions AgentNodeExecution[]
 
-  // Link to User model
+  // Link to User model -- Executed by this user
   userId String
   user   User   @relation(fields: [userId], references: [id], onDelete: Cascade)
diff --git a/autogpt_platform/backend/test/data/test_graph.py b/autogpt_platform/backend/test/data/test_graph.py
index ddff6f3ad817..00e9531caa3a 100644
--- a/autogpt_platform/backend/test/data/test_graph.py
+++ b/autogpt_platform/backend/test/data/test_graph.py
@@ -1,8 +1,11 @@
 from typing import Any
 from uuid import UUID
 
+import autogpt_libs.auth.models
+import fastapi.exceptions
 import pytest
 
+import backend.server.v2.store.model
 from backend.blocks.basic import AgentInputBlock, AgentOutputBlock, StoreValueBlock
 from backend.data.block import BlockSchema
 from backend.data.graph import Graph, Link, Node
@@ -202,3 +205,92 @@ async def test_clean_graph(server: SpinTestServer):
         n for n in created_graph.nodes if n.block_id == AgentInputBlock().id
     )
     assert input_node.input_default["value"] == ""
+
+
+@pytest.mark.asyncio(scope="session")
+async def test_access_store_listing_graph(server: SpinTestServer):
+    """
+    Test the access of a store listing graph.
+ """ + graph = Graph( + id="test_clean_graph", + name="Test Clean Graph", + description="Test graph cleaning", + nodes=[ + Node( + id="input_node", + block_id=AgentInputBlock().id, + input_default={ + "name": "test_input", + "value": "test value", + "description": "Test input description", + }, + ), + ], + links=[], + ) + + # Create graph and get model + create_graph = CreateGraph(graph=graph) + created_graph = await server.agent_server.test_create_graph( + create_graph, DEFAULT_USER_ID + ) + + store_submission_request = backend.server.v2.store.model.StoreSubmissionRequest( + agent_id=created_graph.id, + agent_version=created_graph.version, + slug="test-slug", + name="Test name", + sub_heading="Test sub heading", + video_url=None, + image_urls=[], + description="Test description", + categories=[], + ) + + # First we check the graph an not be accessed by a different user + with pytest.raises(fastapi.exceptions.HTTPException) as exc_info: + await server.agent_server.test_get_graph( + created_graph.id, + created_graph.version, + "3e53486c-cf57-477e-ba2a-cb02dc828e1b", + ) + assert exc_info.value.status_code == 404 + assert "Graph" in str(exc_info.value.detail) + + # Now we create a store listing + store_listing = await server.agent_server.test_create_store_listing( + store_submission_request, DEFAULT_USER_ID + ) + + if isinstance(store_listing, fastapi.responses.JSONResponse): + assert False, "Failed to create store listing" + + slv_id = ( + store_listing.store_listing_version_id + if store_listing.store_listing_version_id is not None + else None + ) + + assert slv_id is not None + + admin = autogpt_libs.auth.models.User( + user_id="3e53486c-cf57-477e-ba2a-cb02dc828e1b", + role="admin", + email="admin@example.com", + phone_number="1234567890", + ) + await server.agent_server.test_review_store_listing( + backend.server.v2.store.model.ReviewSubmissionRequest( + store_listing_version_id=slv_id, + isApproved=True, + comments="Test comments", + ), + admin, + ) + + # Now we check the graph can be accessed by a user that does not own the graph + got_graph = await server.agent_server.test_get_graph( + created_graph.id, created_graph.version, "3e53486c-cf57-477e-ba2a-cb02dc828e1b" + ) + assert got_graph is not None diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py index b9dad8895469..bdd9eaaccc1a 100644 --- a/autogpt_platform/backend/test/executor/test_manager.py +++ b/autogpt_platform/backend/test/executor/test_manager.py @@ -1,8 +1,11 @@ import logging +import autogpt_libs.auth.models +import fastapi.responses import pytest from prisma.models import User +import backend.server.v2.store.model from backend.blocks.basic import FindInDictionaryBlock, StoreValueBlock from backend.blocks.maths import CalculatorBlock, Operation from backend.data import execution, graph @@ -31,7 +34,7 @@ async def execute_graph( # --- Test adding new executions --- # response = await agent_server.test_execute_graph( - test_graph.id, input_data, test_user.id + test_graph.id, test_graph.version, input_data, test_user.id ) graph_exec_id = response["id"] logger.info(f"Created execution with ID: {graph_exec_id}") @@ -287,3 +290,68 @@ async def test_static_input_link_on_graph(server: SpinTestServer): assert exec_data.status == execution.ExecutionStatus.COMPLETED assert exec_data.output_data == {"result": [9]} logger.info("Completed test_static_input_link_on_graph") + + +@pytest.mark.asyncio(scope="session") +async def test_store_listing_graph(server: 
SpinTestServer): + logger.info("Starting test_agent_execution") + test_user = await create_test_user() + test_graph = await create_graph(server, create_test_graph(), test_user) + + store_submission_request = backend.server.v2.store.model.StoreSubmissionRequest( + agent_id=test_graph.id, + agent_version=test_graph.version, + slug="test-slug", + name="Test name", + sub_heading="Test sub heading", + video_url=None, + image_urls=[], + description="Test description", + categories=[], + ) + + store_listing = await server.agent_server.test_create_store_listing( + store_submission_request, test_user.id + ) + + if isinstance(store_listing, fastapi.responses.JSONResponse): + assert False, "Failed to create store listing" + + slv_id = ( + store_listing.store_listing_version_id + if store_listing.store_listing_version_id is not None + else None + ) + + assert slv_id is not None + + admin = autogpt_libs.auth.models.User( + user_id="3e53486c-cf57-477e-ba2a-cb02dc828e1b", + role="admin", + email="admin@example.com", + phone_number="1234567890", + ) + await server.agent_server.test_review_store_listing( + backend.server.v2.store.model.ReviewSubmissionRequest( + store_listing_version_id=slv_id, + isApproved=True, + comments="Test comments", + ), + admin, + ) + + alt_test_user = await create_test_user(alt_user=True) + + data = {"input_1": "Hello", "input_2": "World"} + graph_exec_id = await execute_graph( + server.agent_server, + test_graph, + alt_test_user, + data, + 4, + ) + + await assert_sample_graph_executions( + server.agent_server, test_graph, alt_test_user, graph_exec_id + ) + logger.info("Completed test_agent_execution")
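
Reviewer note: the central behavioural change in this patch is the new visibility rule in backend/data/graph.py — get_graph() drops the template flag and only returns a graph when the requesting user owns it, or when the graph has a non-deleted StoreListingVersion whose StoreListing is approved. The sketch below is a minimal, illustrative restatement of that rule in isolation, reusing the same Prisma query the diff introduces; the helper name graph_is_visible_to is hypothetical and is not part of the patch.

# Illustrative sketch only -- not part of the patch.
import prisma
from prisma.models import AgentGraph, StoreListingVersion


async def graph_is_visible_to(graph: AgentGraph, user_id: str | None) -> bool:
    # Owners can always load their own graphs, active or not.
    if graph.userId == user_id:
        return True
    # Everyone else only sees graphs published as an approved, non-deleted
    # store listing (same query as the new check in get_graph()).
    listing = await StoreListingVersion.prisma().find_first(
        where=prisma.types.StoreListingVersionWhereInput(
            agentId=graph.id,
            agentVersion=graph.version,
            isDeleted=False,
            StoreListing={"is": {"isApproved": True}},
        )
    )
    return listing is not None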