[YUNIKORN-2504] Support canonical labels for queue/applicationId in scheduler #54
application.go
@@ -395,7 +395,11 @@
 	for _, task := range app.GetNewTasks() {
 		if taskScheduleCondition(task) {
 			// for each new task, we do a sanity check before moving the state to Pending_Schedule
-			if err := task.sanityCheckBeforeScheduling(); err == nil {
+			// if the task is not ready for scheduling, we keep it in New state
+			// if the task pod is bounded and have conflicting metadata, we move the task to Rejected state
+			err, rejectTask := task.sanityCheckBeforeScheduling()
+
+			if err == nil {
 				// note, if we directly trigger submit task event, it may spawn too many duplicate
 				// events, because a task might be submitted multiple times before its state transits to PENDING.
 				if handleErr := task.handle(
@@ -406,11 +410,20 @@
 					log.Log(log.ShimCacheApplication).Warn("init task failed", zap.Error(err))
 				}
 			} else {
-				events.GetRecorder().Eventf(task.GetTaskPod().DeepCopy(), nil, v1.EventTypeWarning, "FailedScheduling", "FailedScheduling", err.Error())
-				log.Log(log.ShimCacheApplication).Debug("task is not ready for scheduling",
-					zap.String("appID", task.applicationID),
-					zap.String("taskID", task.taskID),
-					zap.Error(err))
+				if !rejectTask {
+					// no state transition
+					events.GetRecorder().Eventf(task.GetTaskPod().DeepCopy(), nil, v1.EventTypeWarning, "FailedScheduling", "FailedScheduling", err.Error())
+					log.Log(log.ShimCacheApplication).Debug("task is not ready for scheduling",
+						zap.String("appID", task.applicationID),
+						zap.String("taskID", task.taskID),
+						zap.Error(err))
+				} else {
+					// task transits to Rejected state
+					if handleErr := task.handle(
+						NewRejectTaskEvent(task.applicationID, task.taskID, err.Error())); handleErr != nil {
+						log.Log(log.ShimCacheApplication).Warn("reject task failed", zap.Error(err))
+					}
+				}
 			}
 		}
 	}
@@ -568,22 +581,6 @@
 	}()
 }

-func failTaskPodWithReasonAndMsg(task *Task, reason string, msg string) {
-	podCopy := task.GetTaskPod().DeepCopy()
-	podCopy.Status = v1.PodStatus{
-		Phase: v1.PodFailed,
-		Reason: reason,
-		Message: msg,
-	}
-	log.Log(log.ShimCacheApplication).Info("setting pod to failed", zap.String("podName", task.GetTaskPod().Name))
-	pod, err := task.UpdateTaskPodStatus(podCopy)
-	if err != nil {
-		log.Log(log.ShimCacheApplication).Error("failed to update task pod status", zap.Error(err))
-	} else {
-		log.Log(log.ShimCacheApplication).Info("new pod status", zap.String("status", string(pod.Status.Phase)))
-	}
-}
-
 func (app *Application) handleFailApplicationEvent(errMsg string) {
 	go func() {
 		getPlaceholderManager().cleanUp(app)

Review comment: Moved failTaskPodWithReasonAndMsg() to task.go to prevent a deadlock when the task state machine is handling the TaskRejected event.
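The diff does not spell out the deadlock mechanics. A plausible reading, offered here only as an assumption, is that the old application.go helper went through a lock-taking getter such as GetTaskPod() while the task state machine callback already held the task's lock, and Go's sync.RWMutex is not reentrant. The sketch below illustrates that generic pattern with hypothetical names (demoTask, handleEvent, getPod); it is not code from the PR.

```go
// Minimal, self-contained illustration of a non-reentrant lock deadlock.
// All names here are hypothetical; this is not YuniKorn code.
package main

import "sync"

type demoTask struct {
	lock sync.RWMutex
	pod  string
}

// getPod mirrors a getter that takes the read lock before returning state.
func (t *demoTask) getPod() string {
	t.lock.RLock()
	defer t.lock.RUnlock()
	return t.pod
}

// handleEvent mirrors a state-machine handler that holds the write lock
// while its callback runs.
func (t *demoTask) handleEvent(callback func()) {
	t.lock.Lock()
	defer t.lock.Unlock()
	callback()
}

func main() {
	t := &demoTask{pod: "pod-1"}
	// Deadlocks: the callback tries to RLock a mutex that handleEvent has
	// already write-locked on the same goroutine. Reading t.pod directly
	// inside the callback would avoid the second lock acquisition.
	t.handleEvent(func() {
		_ = t.getPod()
	})
}
```

That reading is consistent with the new task.go helper using task.pod directly instead of task.GetTaskPod().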
@@ -598,10 +595,10 @@
 		for _, task := range unalloc {
 			// Only need to fail the non-placeholder pod(s)
 			if strings.Contains(errMsg, constants.ApplicationInsufficientResourcesFailure) {
-				failTaskPodWithReasonAndMsg(task, constants.ApplicationInsufficientResourcesFailure, "Scheduling has timed out due to insufficient resources")
+				task.failTaskPodWithReasonAndMsg(constants.ApplicationInsufficientResourcesFailure, "Scheduling has timed out due to insufficient resources")
 			} else if strings.Contains(errMsg, constants.ApplicationRejectedFailure) {
 				errMsgArr := strings.Split(errMsg, ":")
-				failTaskPodWithReasonAndMsg(task, constants.ApplicationRejectedFailure, errMsgArr[1])
+				task.failTaskPodWithReasonAndMsg(constants.ApplicationRejectedFailure, errMsgArr[1])
 			}
 			events.GetRecorder().Eventf(task.GetTaskPod().DeepCopy(), nil, v1.EventTypeWarning, "ApplicationFailed", "ApplicationFailed",
 				"Application %s scheduling failed, reason: %s", app.applicationID, errMsg)
placeholder.go
@@ -90,8 +90,8 @@ func newPlaceholder(placeholderName string, app *Application, taskGroup TaskGroup
 			Name:      placeholderName,
 			Namespace: app.tags[constants.AppTagNamespace],
 			Labels: utils.MergeMaps(taskGroup.Labels, map[string]string{
-				constants.LabelApplicationID: app.GetApplicationID(),
-				constants.LabelQueueName:     app.GetQueue(),
+				constants.CanonicalLabelApplicationID: app.GetApplicationID(),
+				constants.CanonicalLabelQueueName:     app.GetQueue(),
 			}),
 			Annotations:     annotations,
 			OwnerReferences: ownerRefs,
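The exact canonical key strings are defined in the constants package and do not appear in this diff. As an illustration only, assuming constants.CanonicalLabelApplicationID and constants.CanonicalLabelQueueName resolve to yunikorn.apache.org-prefixed keys, the merged placeholder labels would come out along these lines (all keys and values below are assumptions):

```go
// Illustrative sketch only: the key strings and values are assumptions about
// what the canonical constants resolve to; the task-group label is made up.
package main

import "fmt"

func main() {
	taskGroupLabels := map[string]string{"placeholder": "true"} // hypothetical task-group label
	canonical := map[string]string{
		"yunikorn.apache.org/app-id": "application-0001", // assumed canonical application ID key
		"yunikorn.apache.org/queue":  "root.default",     // assumed canonical queue key
	}
	// Mirrors the effect of utils.MergeMaps(taskGroup.Labels, map[string]string{...}).
	merged := map[string]string{}
	for k, v := range taskGroupLabels {
		merged[k] = v
	}
	for k, v := range canonical {
		merged[k] = v
	}
	fmt.Println(merged)
}
```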
task.go
@@ -22,6 +22,7 @@
 	"context"
 	"fmt"
 	"strconv"
+	"strings"
 	"time"

 	"github.com/looplab/fsm"
@@ -187,6 +188,22 @@
 	return task.context.apiProvider.GetAPIs().KubeClient.UpdatePod(pod, podMutator)
 }

+func (task *Task) failTaskPodWithReasonAndMsg(reason string, msg string) {
+	podCopy := task.pod.DeepCopy()
+	podCopy.Status = v1.PodStatus{
+		Phase: v1.PodFailed,
+		Reason: reason,
+		Message: msg,
+	}
+	log.Log(log.ShimCacheTask).Info("setting pod to failed", zap.String("podName", podCopy.Name))
+	pod, err := task.UpdateTaskPodStatus(podCopy)
+	if err != nil {
+		log.Log(log.ShimCacheTask).Error("failed to update task pod status", zap.Error(err))
+	} else {
+		log.Log(log.ShimCacheTask).Info("new pod status", zap.String("status", string(pod.Status.Phase)))
+	}
+}
+
 func (task *Task) isTerminated() bool {
 	for _, states := range TaskStates().Terminated {
 		if task.GetTaskState() == states {
@@ -457,16 +474,19 @@
 	}
 }

-func (task *Task) postTaskRejected() {
-	// currently, once task is rejected by scheduler, we directly move task to failed state.
-	// so this function simply triggers the state transition when it is rejected.
-	// but further, we can introduce retry mechanism if necessary.
+func (task *Task) postTaskRejected(reason string) {
+	// if task is rejected because of conflicting metadata, we should fail the pod with reason
+	if strings.Contains(reason, constants.TaskPodInconsistMetadataFailure) {
+		task.failTaskPodWithReasonAndMsg(constants.TaskRejectedFailure, reason)
+	}
+
+	// move task to failed state.
 	dispatcher.Dispatch(NewFailTaskEvent(task.applicationID, task.taskID,
-		fmt.Sprintf("task %s failed because it is rejected by scheduler", task.alias)))
+		fmt.Sprintf("task %s failed because it is rejected", task.alias)))

 	events.GetRecorder().Eventf(task.pod.DeepCopy(), nil,
 		v1.EventTypeWarning, "TaskRejected", "TaskRejected",
-		"Task %s is rejected by the scheduler", task.alias)
+		"Task %s is rejected", task.alias)
 }

 // beforeTaskFail releases the allocation or ask from scheduler core

Review comment: Fail the pod if the task's reject reason is inconsistent metadata.
@@ -543,7 +563,56 @@
 // some sanity checks before sending task for scheduling,
 // this reduces the scheduling overhead by blocking such
 // request away from the core scheduler.
-func (task *Task) sanityCheckBeforeScheduling() error {
+func (task *Task) sanityCheckBeforeScheduling() (error, bool) {
+	rejectTask := false
+
+	if err := task.checkPodPVCs(); err != nil {
+		return err, rejectTask
+	}
+
+	// only check pod labels and annotations consistency if pod is not already bound
+	// reject the task if pod metadata is conflicting
+	if !utils.PodAlreadyBound(task.pod) {
+		if err := task.checkPodMetadata(); err != nil {
+			rejectTask = true
+			return err, rejectTask
+		}
+	}
+
+	return nil, rejectTask
+}
+
+func (task *Task) checkPodMetadata() error {
+	// check application ID
+	appIdLabelKeys := []string{
+		constants.CanonicalLabelApplicationID,
+		constants.SparkLabelAppID,
+		constants.LabelApplicationID,
+	}
+	appIdAnnotationKeys := []string{
+		constants.AnnotationApplicationID,
+	}
+	if !utils.ValidatePodLabelAnnotationConsistency(task.pod, appIdLabelKeys, appIdAnnotationKeys) {
+		return fmt.Errorf("application ID is not consistently set in pod's labels and annotations. [%s]", constants.TaskPodInconsistMetadataFailure)
+	}
+
+	// check queue name
+	queueLabelKeys := []string{
+		constants.CanonicalLabelQueueName,
+		constants.LabelQueueName,
+	}
+
+	queueAnnotationKeys := []string{
+		constants.AnnotationQueueName,
+	}
+
+	if !utils.ValidatePodLabelAnnotationConsistency(task.pod, queueLabelKeys, queueAnnotationKeys) {
+		return fmt.Errorf("queue is not consistently set in pod's labels and annotations. [%s]", constants.TaskPodInconsistMetadataFailure)
+	}
+	return nil
+}
+
+func (task *Task) checkPodPVCs() error {
 	// Check PVCs used by the pod
 	namespace := task.pod.Namespace
 	manifest := &(task.pod.Spec)
Review comment: Perform a sanity check before moving the task to the Pending state. Before this PR, the sanity check only verified PVC readiness; after this PR, it checks both PVCs and pod metadata. Design decision: only unbound pods are rejected, because we don't want to fail pods that are already running after a YuniKorn restart.
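utils.ValidatePodLabelAnnotationConsistency itself is not part of this diff, so the sketch below only illustrates the kind of check described above: collect the values found under a set of label and annotation keys and require them to agree. The helper name checkConsistent and the key strings are assumptions, not the project's API.

```go
// Illustrative sketch of a label/annotation consistency check; not the actual
// utils.ValidatePodLabelAnnotationConsistency implementation.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// checkConsistent returns false when the values found under the given label
// and annotation keys do not all agree with each other.
func checkConsistent(pod *v1.Pod, labelKeys, annotationKeys []string) bool {
	seen := ""
	agree := func(m map[string]string, keys []string) bool {
		for _, k := range keys {
			if v, ok := m[k]; ok && v != "" {
				if seen == "" {
					seen = v
				} else if seen != v {
					return false
				}
			}
		}
		return true
	}
	return agree(pod.Labels, labelKeys) && agree(pod.Annotations, annotationKeys)
}

func main() {
	// Assumed keys for illustration: a canonical-style label conflicting with a legacy one.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		Labels: map[string]string{
			"yunikorn.apache.org/app-id": "app-0001",
			"applicationId":              "app-0002",
		},
	}}
	ok := checkConsistent(pod, []string{"yunikorn.apache.org/app-id", "applicationId"}, nil)
	fmt.Println(ok) // false: the metadata is conflicting
}
```

Under that reading, a pod carrying a canonical key and a legacy key with different application IDs fails the check; if the pod is not yet bound, the task is rejected instead of being sent to the core scheduler, matching the design decision above.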