
Commit

Merge pull request #81 from gabemontero/br-log-follow
add -F / --follow options to shp buildrun logs
openshift-merge-robot authored Dec 14, 2021
2 parents d5cab79 + bfcda08 commit 4f763c3
Showing 8 changed files with 634 additions and 199 deletions.
164 changes: 8 additions & 156 deletions pkg/shp/cmd/build/run.go
@@ -3,21 +3,15 @@ package build
import (
"errors"
"fmt"
"sync"
"time"

buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
buildclientset "github.com/shipwright-io/build/pkg/client/clientset/versioned"

"github.com/shipwright-io/cli/pkg/shp/cmd/follower"
"github.com/shipwright-io/cli/pkg/shp/cmd/runner"
"github.com/shipwright-io/cli/pkg/shp/flags"
"github.com/shipwright-io/cli/pkg/shp/params"
"github.com/shipwright-io/cli/pkg/shp/reactor"
"github.com/shipwright-io/cli/pkg/shp/tail"

"github.com/spf13/cobra"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
)
@@ -27,19 +21,12 @@ import (
type RunCommand struct {
cmd *cobra.Command // cobra command instance

ioStreams *genericclioptions.IOStreams // io-streams instance
pw *reactor.PodWatcher // pod-watcher instance
logTail *tail.Tail // follow container logs
tailLogsStarted map[string]bool // controls tail instance per container

logLock sync.Mutex

buildName string
buildRunName string
namespace string
buildRunSpec *buildv1alpha1.BuildRunSpec // stores command-line flags
shpClientset buildclientset.Interface
follow bool // flag to tail pod logs
follower *follower.Follower
}

const buildRunLongDesc = `
@@ -63,35 +50,13 @@ func (r *RunCommand) Complete(params *params.Params, io *genericclioptions.IOStr
return errors.New("build name is not informed")
}

clientset, err := params.ClientSet()
if err != nil {
return err
}
r.logTail = tail.NewTail(r.Cmd().Context(), clientset)
r.ioStreams = io
r.namespace = params.Namespace()
if r.follow {
if r.shpClientset, err = params.ShipwrightClientSet(); err != nil {
return err
}

kclientset, err := params.ClientSet()
if err != nil {
return err
}
to, err := params.RequestTimeout()
if err != nil {
return err
}
r.pw, err = reactor.NewPodWatcher(r.Cmd().Context(), to, kclientset, params.Namespace())
var err error
r.follower, err = follower.NewFollower(r.Cmd().Context(), r.buildRunName, io, params)
if err != nil {
return err
}

r.pw.WithOnPodModifiedFn(r.onEvent)
r.pw.WithTimeoutPodFn(r.onTimeout)
r.pw.WithNoPodEventsYetFn(r.onNoPodEventsYet)

}

// overwriting build-ref name to use what's on arguments
@@ -106,109 +71,6 @@ func (r *RunCommand) Validate() error {
return nil
}

// tailLogs start tailing logs for each container name in init-containers and containers, if not
// started already.
func (r *RunCommand) tailLogs(pod *corev1.Pod) {
containers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
for _, container := range containers {
if _, exists := r.tailLogsStarted[container.Name]; exists {
continue
}
r.tailLogsStarted[container.Name] = true
r.logTail.Start(pod.GetNamespace(), pod.GetName(), container.Name)
}
}

// onNoPodEventsYet reacts to the pod watcher telling us it has not received any pod events for our build run
func (r *RunCommand) onNoPodEventsYet() {
r.Log(fmt.Sprintf("BuildRun %q log following has not observed any pod events yet.", r.buildRunName))
br, err := r.shpClientset.ShipwrightV1alpha1().BuildRuns(r.namespace).Get(r.cmd.Context(), r.buildRunName, metav1.GetOptions{})
if err != nil {
r.Log(fmt.Sprintf("error accessing BuildRun %q: %s", r.buildRunName, err.Error()))
return
}

c := br.Status.GetCondition(buildv1alpha1.Succeeded)
giveUp := false
msg := ""
switch {
case c != nil && c.Status == corev1.ConditionTrue:
giveUp = true
msg = fmt.Sprintf("BuildRun '%s' has been marked as successful.\n", br.Name)
case c != nil && c.Status == corev1.ConditionFalse:
giveUp = true
msg = fmt.Sprintf("BuildRun '%s' has been marked as failed.\n", br.Name)
case br.IsCanceled():
giveUp = true
msg = fmt.Sprintf("BuildRun '%s' has been canceled.\n", br.Name)
case br.DeletionTimestamp != nil:
giveUp = true
msg = fmt.Sprintf("BuildRun '%s' has been deleted.\n", br.Name)
case !br.HasStarted():
r.Log(fmt.Sprintf("BuildRun '%s' has been marked as failed.\n", br.Name))
}
if giveUp {
r.Log(msg)
r.Log(fmt.Sprintf("exiting 'ship build run --follow' for BuildRun %q", br.Name))
r.stop()
}

}

// onTimeout reacts to either the context or request timeout causing the pod watcher to exit
func (r *RunCommand) onTimeout(msg string) {
r.Log(fmt.Sprintf("BuildRun %q log following has stopped because: %q\n", r.buildRunName, msg))
}

// onEvent reacts on pod state changes, to start and stop tailing container logs.
func (r *RunCommand) onEvent(pod *corev1.Pod) error {
switch pod.Status.Phase {
case corev1.PodRunning:
// graceful time to wait for container start
time.Sleep(3 * time.Second)
// start tailing container logs
r.tailLogs(pod)
case corev1.PodFailed:
msg := ""
br, err := r.shpClientset.ShipwrightV1alpha1().BuildRuns(pod.Namespace).Get(r.cmd.Context(), r.buildRunName, metav1.GetOptions{})
switch {
case err == nil && br.IsCanceled():
msg = fmt.Sprintf("BuildRun '%s' has been canceled.\n", br.Name)
case err == nil && br.DeletionTimestamp != nil:
msg = fmt.Sprintf("BuildRun '%s' has been deleted.\n", br.Name)
case pod.DeletionTimestamp != nil:
msg = fmt.Sprintf("Pod '%s' has been deleted.\n", pod.GetName())
default:
msg = fmt.Sprintf("Pod '%s' has failed!\n", pod.GetName())
err = fmt.Errorf("build pod '%s' has failed", pod.GetName())
}
// see if because of deletion or cancelation
r.Log(msg)
r.stop()
return err
case corev1.PodSucceeded:
r.Log(fmt.Sprintf("Pod '%s' has succeeded!\n", pod.GetName()))
r.stop()
default:
r.Log(fmt.Sprintf("Pod '%s' is in state %q...\n", pod.GetName(), string(pod.Status.Phase)))
// handle any issues with pulling images that may fail
for _, c := range pod.Status.Conditions {
if c.Type == corev1.PodInitialized || c.Type == corev1.ContainersReady {
if c.Status == corev1.ConditionUnknown {
return fmt.Errorf(c.Message)
}
}
}
}
return nil
}

// stop invoke stop on streaming components.
func (r *RunCommand) stop() {
r.logTail.Stop()
r.pw.Stop()
}

// Run creates a BuildRun resource based on Build's name informed on arguments.
func (r *RunCommand) Run(params *params.Params, ioStreams *genericclioptions.IOStreams) error {
// resource using GenerateName, which will provide a unique instance
@@ -244,18 +106,10 @@ func (r *RunCommand) Run(params *params.Params, ioStreams *genericclioptions.IOS
r.buildName,
br.GetName(),
)}
_, err = r.pw.Start(listOpts)
_, err = r.follower.Start(listOpts)
return err
}

// Log prints a message
func (r *RunCommand) Log(msg string) {
// concurrent fmt.Fprintf(r.ioStream.Out...) calls need locking to avoid data races, as we 'write' to the stream
r.logLock.Lock()
defer r.logLock.Unlock()
fmt.Fprint(r.ioStreams.Out, msg)
}

// runCmd instantiate the "build run" sub-command using common BuildRun flags.
func runCmd() runner.SubCommand {
cmd := &cobra.Command{
@@ -264,10 +118,8 @@ func runCmd() runner.SubCommand {
Long: buildRunLongDesc,
}
runCommand := &RunCommand{
cmd: cmd,
buildRunSpec: flags.BuildRunSpecFromFlags(cmd.Flags()),
tailLogsStarted: make(map[string]bool),
logLock: sync.Mutex{},
cmd: cmd,
buildRunSpec: flags.BuildRunSpecFromFlags(cmd.Flags()),
}
cmd.Flags().BoolVarP(&runCommand.follow, "follow", "F", runCommand.follow, "Start a build and watch its log until it completes or fails.")
return runCommand
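In short, run.go drops its inline pod-watcher, log-tailing, and locking plumbing: RunCommand keeps only the --follow flag and a *follower.Follower, and delegates the rest to the new shared follower package, so `shp build run <name> --follow` tails the BuildRun's pod logs as before. Below is a minimal sketch of that wiring, assuming only the calls visible in this diff (follower.NewFollower and Follower.Start); the helper name and the label-selector keys are illustrative, and error handling is trimmed:

```go
package build // illustrative sketch only, not part of this commit

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/cli-runtime/pkg/genericclioptions"

	"github.com/shipwright-io/cli/pkg/shp/cmd/follower"
	"github.com/shipwright-io/cli/pkg/shp/params"
)

// followBuildRun is a hypothetical helper condensing what RunCommand now does
// when --follow is set: build one follower.Follower up front, then point it
// at the pod(s) belonging to the BuildRun and let it tail their logs.
func followBuildRun(ctx context.Context, p *params.Params, io *genericclioptions.IOStreams, buildName, buildRunName string) error {
	f, err := follower.NewFollower(ctx, buildRunName, io, p)
	if err != nil {
		return err
	}
	// Select the BuildRun's pod(s). The label keys below are assumptions; in
	// the command itself the selector is built in Run() from the Build's name
	// and the created BuildRun's name.
	listOpts := metav1.ListOptions{
		LabelSelector: fmt.Sprintf(
			"build.shipwright.io/name=%s,buildrun.shipwright.io/name=%s",
			buildName, buildRunName,
		),
	}
	_, err = f.Start(listOpts)
	return err
}
```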
54 changes: 35 additions & 19 deletions pkg/shp/cmd/build/run_test.go
@@ -3,11 +3,11 @@ package build
import (
"bytes"
"strings"
"sync"
"testing"

buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
shpfake "github.com/shipwright-io/build/pkg/client/clientset/versioned/fake"
"github.com/shipwright-io/cli/pkg/shp/cmd/follower"
"github.com/shipwright-io/cli/pkg/shp/flags"
"github.com/shipwright-io/cli/pkg/shp/params"
"github.com/shipwright-io/cli/pkg/shp/reactor"
@@ -35,40 +35,40 @@ func TestStartBuildRunFollowLog(t *testing.T) {
{
name: "succeeded",
phase: corev1.PodSucceeded,
logText: "Pod 'testpod' has succeeded!",
logText: "Pod \"testpod\" has succeeded!",
},
{
name: "pending",
phase: corev1.PodPending,
logText: "Pod 'testpod' is in state \"Pending\"",
logText: "Pod \"testpod\" is in state \"Pending\"",
},
{
name: "unknown",
phase: corev1.PodUnknown,
logText: "Pod 'testpod' is in state \"Unknown\"",
logText: "Pod \"testpod\" is in state \"Unknown\"",
},
{
name: "failed-cancelled",
phase: corev1.PodFailed,
cancelled: true,
logText: "BuildRun 'testpod' has been canceled.",
logText: "BuildRun \"testpod\" has been canceled.",
},
{
name: "failed-br-deleted",
phase: corev1.PodFailed,
brDeleted: true,
logText: "BuildRun 'testpod' has been deleted.",
logText: "BuildRun \"testpod\" has been deleted.",
},
{
name: "failed-pod-deleted",
phase: corev1.PodFailed,
podDeleted: true,
logText: "Pod 'testpod' has been deleted.",
logText: "Pod \"testpod\" has been deleted.",
},
{
name: "failed-something-else",
phase: corev1.PodFailed,
logText: "Pod 'testpod' has failed!",
logText: "Pod \"testpod\" has failed!",
},
{
name: "running",
@@ -135,13 +135,10 @@ func TestStartBuildRunFollowLog(t *testing.T) {
}
ccmd := &cobra.Command{}
cmd := &RunCommand{
cmd: ccmd,
buildRunName: name,
buildRunSpec: flags.BuildRunSpecFromFlags(ccmd.Flags()),
follow: true,
shpClientset: shpclientset,
tailLogsStarted: make(map[string]bool),
logLock: sync.Mutex{},
cmd: ccmd,
buildRunName: name,
buildRunSpec: flags.BuildRunSpecFromFlags(ccmd.Flags()),
follow: true,
}

// set up context
@@ -153,14 +150,33 @@ func TestStartBuildRunFollowLog(t *testing.T) {
param := params.NewParamsForTest(kclientset, shpclientset, pm, metav1.NamespaceDefault)

ioStreams, _, out, _ := genericclioptions.NewTestIOStreams()
cmd.follower, _ = follower.NewFollower(cmd.Cmd().Context(), br.Name, &ioStreams, param)

switch {
case test.cancelled:
br.Spec.State = buildv1alpha1.BuildRunStateCancel
br.Status.Conditions = []buildv1alpha1.Condition{
{
Type: buildv1alpha1.Succeeded,
Status: corev1.ConditionFalse,
},
}
case test.brDeleted:
br.DeletionTimestamp = &metav1.Time{}
br.Status.Conditions = []buildv1alpha1.Condition{
{
Type: buildv1alpha1.Succeeded,
Status: corev1.ConditionFalse,
},
}
case test.podDeleted:
pod.DeletionTimestamp = &metav1.Time{}
br.Status.Conditions = []buildv1alpha1.Condition{
{
Type: buildv1alpha1.Succeeded,
Status: corev1.ConditionFalse,
},
}
}

cmd.Complete(param, &ioStreams, []string{name})
Expand All @@ -181,18 +197,18 @@ func TestStartBuildRunFollowLog(t *testing.T) {
if !test.noPodYet {
// mimic watch events, bypassing k8s fake client watch hoopla whose plug points are not always useful;
pod.Status.Phase = test.phase
cmd.onEvent(pod)
cmd.follower.OnEvent(pod)
} else {
cmd.onNoPodEventsYet()
cmd.follower.OnNoPodEventsYet()
}
checkLog(test.name, test.logText, cmd, out, t)
}
}

func checkLog(name, text string, cmd *RunCommand, out *bytes.Buffer, t *testing.T) {
// need to employ log lock since accessing same iostream out used by Run cmd
cmd.logLock.Lock()
defer cmd.logLock.Unlock()
cmd.follower.GetLogLock().Lock()
defer cmd.follower.GetLogLock().Unlock()
if !strings.Contains(out.String(), text) {
t.Errorf("test %s: unexpected output: %s", name, out.String())
}
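On the test side, run_test.go stops calling RunCommand's private callbacks (onEvent, onNoPodEventsYet, logLock) and drives the shared follower directly. A hypothetical helper condensing that interaction, assuming only the Follower methods used in this diff (OnEvent, OnNoPodEventsYet, GetLogLock); the helper's name and signature are not part of the commit:

```go
package build // illustrative sketch only, not part of this commit

import (
	"bytes"
	"strings"
	"testing"

	corev1 "k8s.io/api/core/v1"

	"github.com/shipwright-io/cli/pkg/shp/cmd/follower"
)

// driveFollowerAndCheck is a hypothetical test helper mirroring the updated
// test flow: feed the follower a pod event (or the "no pod events yet"
// callback) and read the captured output under the follower's log lock.
func driveFollowerAndCheck(t *testing.T, f *follower.Follower, pod *corev1.Pod, out *bytes.Buffer, want string, noPodYet bool) {
	if noPodYet {
		f.OnNoPodEventsYet() // mimic the pod watcher reporting no pod events yet
	} else {
		f.OnEvent(pod) // mimic a pod watch event reaching the follower
	}
	// The follower writes to the same IOStreams concurrently, so reads of the
	// captured output are guarded by its log lock, just as checkLog does.
	f.GetLogLock().Lock()
	defer f.GetLogLock().Unlock()
	if !strings.Contains(out.String(), want) {
		t.Errorf("unexpected output: %s", out.String())
	}
}
```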
