diff --git a/source/darwin/dispatch_queue_event_loop.c b/source/darwin/dispatch_queue_event_loop.c
index 10a6e4e56..84ceb83e0 100644
--- a/source/darwin/dispatch_queue_event_loop.c
+++ b/source/darwin/dispatch_queue_event_loop.c
@@ -441,9 +441,11 @@ static int s_run(struct aws_event_loop *event_loop) {
         dispatch_resume(dispatch_loop->dispatch_queue);
         dispatch_loop->synced_data.suspended = false;
         s_rlock_dispatch_loop_context(dispatch_loop->context);
-        s_lock_service_entries(dispatch_loop->context);
-        s_try_schedule_new_iteration(dispatch_loop->context, 0);
-        s_unlock_service_entries(dispatch_loop->context);
+        if (dispatch_loop->context->io_dispatch_loop) {
+            s_lock_service_entries(dispatch_loop->context);
+            s_try_schedule_new_iteration(dispatch_loop->context, 0);
+            s_unlock_service_entries(dispatch_loop->context);
+        }
         s_runlock_dispatch_loop_context(dispatch_loop->context);
     }
     s_unlock_cross_thread_data(dispatch_loop);
@@ -618,6 +620,9 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws
     struct dispatch_loop *dispatch_loop = event_loop->impl_data;

     s_rlock_dispatch_loop_context(dispatch_loop->context);
+    if (dispatch_loop->context->io_dispatch_loop == NULL) {
+        goto schedule_task_common_cleanup;
+    }
     s_lock_cross_thread_data(dispatch_loop);

     task->timestamp = run_at_nanos;
@@ -653,6 +658,7 @@ static void s_schedule_task_common(struct aws_event_loop *event_loop, struct aws
     }

     s_unlock_cross_thread_data(dispatch_loop);
+schedule_task_common_cleanup:
     s_runlock_dispatch_loop_context(dispatch_loop->context);
 }

diff --git a/source/darwin/nw_socket.c b/source/darwin/nw_socket.c
index 11dba7c2e..2fb09c03e 100644
--- a/source/darwin/nw_socket.c
+++ b/source/darwin/nw_socket.c
@@ -1388,22 +1388,28 @@ static int s_socket_start_accept_fn(
                 (void *)socket,
                 (void *)nw_socket->nw_connection);

-            struct aws_task *task = aws_mem_calloc(socket->allocator, 1, sizeof(struct aws_task));
-
-            struct nw_socket_scheduled_task_args *args =
-                aws_mem_calloc(socket->allocator, 1, sizeof(struct nw_socket_scheduled_task_args));
-
-            args->nw_socket = nw_socket;
-            args->allocator = nw_socket->allocator;
-            // acquire ref count for the task
-            nw_socket_acquire_internal_ref(nw_socket);
-            AWS_LOGF_DEBUG(
-                AWS_LS_IO_SOCKET,
-                "id=%p: nw_socket_acquire_internal_ref: s_process_set_listener_endpoint_task",
-                (void *)nw_socket);
+            // TODO: revisit
+            aws_mutex_lock(&nw_socket->synced_data.lock);
+            if (nw_socket->synced_data.event_loop) {
+                struct aws_task *task = aws_mem_calloc(socket->allocator, 1, sizeof(struct aws_task));
+
+                struct nw_socket_scheduled_task_args *args =
+                    aws_mem_calloc(socket->allocator, 1, sizeof(struct nw_socket_scheduled_task_args));
+
+                args->nw_socket = nw_socket;
+                args->allocator = nw_socket->allocator;
+                // acquire ref count for the task
+                nw_socket_acquire_internal_ref(nw_socket);
+                AWS_LOGF_DEBUG(
+                    AWS_LS_IO_SOCKET,
+                    "id=%p: nw_socket_acquire_internal_ref: s_process_set_listener_endpoint_task",
+                    (void *)nw_socket);

-            aws_task_init(task, s_process_set_listener_endpoint_task, args, "listenerSuccessTask");
-            aws_event_loop_schedule_task_now(socket->event_loop, task);
+                aws_task_init(task, s_process_set_listener_endpoint_task, args, "listenerSuccessTask");
+                // TODO: what if the event loop is shutting down, and what happens if we schedule the task here?
+                aws_event_loop_schedule_task_now(socket->event_loop, task);
+            }
+            aws_mutex_unlock(&nw_socket->synced_data.lock);
         } else if (state == nw_listener_state_cancelled) {
             AWS_LOGF_DEBUG(
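
Both files apply the same shutdown-race guard: before scheduling work onto the loop, re-check a synced "loop is still alive" field (context->io_dispatch_loop in the event loop, synced_data.event_loop in the socket) while holding the lock that shutdown takes when it clears that field, so a late callback racing with shutdown becomes a no-op instead of touching a dead loop. Below is a minimal standalone sketch of that pattern using raw pthreads; every name in it (alive_loop, try_schedule, shut_down) is illustrative rather than the aws-c-io API, and the real code additionally allocates the task and takes a ref while holding the lock.

/* Sketch only: check the alive flag under the same lock shutdown uses. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct alive_loop {
    pthread_mutex_t lock;
    bool alive; /* cleared exactly once by shut_down(), under lock */
};

/* Returns true if work was scheduled, false if the loop is already gone. */
static bool try_schedule(struct alive_loop *loop, void (*fn)(void *), void *arg) {
    bool scheduled = false;
    pthread_mutex_lock(&loop->lock);
    if (loop->alive) {
        fn(arg); /* stand-in for aws_event_loop_schedule_task_now() */
        scheduled = true;
    }
    pthread_mutex_unlock(&loop->lock);
    return scheduled;
}

static void shut_down(struct alive_loop *loop) {
    pthread_mutex_lock(&loop->lock);
    loop->alive = false; /* later try_schedule() calls become no-ops */
    pthread_mutex_unlock(&loop->lock);
}

static void say_hi(void *arg) {
    (void)arg;
    printf("task ran\n");
}

int main(void) {
    struct alive_loop loop = {.lock = PTHREAD_MUTEX_INITIALIZER, .alive = true};
    try_schedule(&loop, say_hi, NULL); /* runs */
    shut_down(&loop);
    if (!try_schedule(&loop, say_hi, NULL)) {
        printf("dropped: loop already shut down\n"); /* this path is taken */
    }
    return 0;
}

The goto schedule_task_common_cleanup label in the second hunk serves the same purpose as the unlock at the end of try_schedule above: the early-out path must still release the read lock acquired before the check.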