Fix issue with timeout on buildruns
With changes to the Shipwright internals, especially the removal of the Status
field, the tool failed to correctly identify the completion of a buildrun.

Bump to the latest available version of https://github.com/shipwright-io/build.

Update function and interface names to match the new upstream names.

Rework the polling and error functions to work with conditions.
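
In essence, the reworked completion check keys off the buildrun's Succeeded
condition instead of the removed Status.Succeeded field. Below is a minimal
sketch of that polling logic, condensed from the kubeops.go diff in this
commit; the standalone function name waitForCompletion and the plain clientset
parameter (assuming the generated clientset's Interface type) are illustrative
stand-ins for the tool's KubeAccess helper, while the ShipwrightV1alpha1()
client call, the Status.GetCondition accessor, and the CompletionTime check are
taken from the diff itself.

package load

import (
	"context"
	"time"

	buildv1alpha1 "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
	buildclient "github.com/shipwright-io/build/pkg/client/clientset/versioned"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCompletion polls a buildrun until its Succeeded condition settles,
// rather than reading the old Status.Succeeded field. (Illustrative sketch.)
func waitForCompletion(ctx context.Context, client buildclient.Interface, namespace string, name string, interval time.Duration, timeout time.Duration) (*buildv1alpha1.BuildRun, error) {
	var buildRun *buildv1alpha1.BuildRun

	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		var err error
		buildRun, err = client.ShipwrightV1alpha1().BuildRuns(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		// No Succeeded condition yet: the buildrun has not progressed far
		// enough, so keep polling.
		condition := buildRun.Status.GetCondition(buildv1alpha1.Succeeded)
		if condition == nil {
			return false, nil
		}

		switch condition.Status {
		case corev1.ConditionTrue:
			// Consider the buildrun done once its completion timestamp is set.
			return buildRun.Status.CompletionTime != nil, nil
		case corev1.ConditionFalse:
			// The buildrun failed; stop polling and let the caller derive an
			// error from condition.Reason.
			return true, nil
		default:
			return false, nil
		}
	})

	return buildRun, err
}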
HeavyWombat committed May 20, 2021
1 parent f8e58c2 commit c6affe5
Showing 7 changed files with 219 additions and 681 deletions.
go.mod (24 changes: 11 additions & 13 deletions)
@@ -5,22 +5,20 @@ go 1.15
 require (
 github.com/dgrijalva/jwt-go v3.2.0+incompatible
 github.com/gonvenience/bunt v1.3.2
-github.com/gonvenience/neat v1.3.5
+github.com/gonvenience/neat v1.3.6
 github.com/gonvenience/text v1.0.6
 github.com/gonvenience/wrap v1.1.0
 github.com/lucasb-eyer/go-colorful v1.2.0
-github.com/onsi/ginkgo v1.15.2
-github.com/onsi/gomega v1.11.0
-github.com/shipwright-io/build v0.3.1-0.20210305111301-3e3bf18672a3
+github.com/onsi/ginkgo v1.16.2
+github.com/onsi/gomega v1.12.0
+github.com/shipwright-io/build v0.4.1-0.20210520103818-100d81d98775
 github.com/spf13/cobra v1.1.3
 github.com/spf13/viper v1.7.1
-github.com/tektoncd/pipeline v0.20.1
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
-k8s.io/api v0.19.8
-k8s.io/apimachinery v0.19.8
-k8s.io/client-go v12.0.0+incompatible
-k8s.io/utils v0.0.0-20200729134348-d5654de09c73
-knative.dev/pkg v0.0.0-20210107022335-51c72e24c179
+github.com/tektoncd/pipeline v0.23.0
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+k8s.io/api v0.20.2
+k8s.io/apimachinery v0.20.2
+k8s.io/client-go v0.20.2
+k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
+knative.dev/pkg v0.0.0-20210127163530-0d31134d5f4e
 )
-
-replace k8s.io/client-go => k8s.io/client-go v0.19.8
go.sum (799 changes: 164 additions & 635 deletions)

Large diffs are not rendered by default.

internal/load/build.go (2 changes: 1 addition & 1 deletion)
@@ -97,7 +97,7 @@ func waitForBuildRegistered(kubeAccess KubeAccess, build *buildv1alpha1.Build) (
 
 debug("Polling every %v to wait for registration of build %s", interval, build.Name)
 err := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
-build, err = kubeAccess.BuildClient.BuildV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
+build, err = kubeAccess.BuildClient.ShipwrightV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
internal/load/buildrun.go (18 changes: 9 additions & 9 deletions)
@@ -45,15 +45,15 @@ type BuildRunOption func(*buildRunOptions)
 // would put onto the system
 func CheckSystemAndConfig(kubeAccess KubeAccess, buildCfg BuildConfig, parallel int) error {
 // Check whether the configured cluster build strategy is available
-clusterBuildStrategy, err := kubeAccess.BuildClient.BuildV1alpha1().ClusterBuildStrategies().Get(kubeAccess.Context, buildCfg.ClusterBuildStrategy, metav1.GetOptions{})
+clusterBuildStrategy, err := kubeAccess.BuildClient.ShipwrightV1alpha1().ClusterBuildStrategies().Get(kubeAccess.Context, buildCfg.ClusterBuildStrategy, metav1.GetOptions{})
 if err != nil {
 clusterBuildStrategy = nil
 
 switch terr := err.(type) {
 case *errors.StatusError:
 switch terr.ErrStatus.Code {
 case http.StatusNotFound:
-if list, _ := kubeAccess.BuildClient.BuildV1alpha1().ClusterBuildStrategies().List(kubeAccess.Context, metav1.ListOptions{}); list != nil {
+if list, _ := kubeAccess.BuildClient.ShipwrightV1alpha1().ClusterBuildStrategies().List(kubeAccess.Context, metav1.ListOptions{}); list != nil {
 var names = make([]string, len(list.Items))
 for i, entry := range list.Items {
 names[i] = entry.GetName()
@@ -77,7 +77,7 @@ func CheckSystemAndConfig(kubeAccess KubeAccess, buildCfg BuildConfig, parallel
 
 // Given that the permissions allow it, check how many buildruns are
 // currently in the system already
-if buildRunsResults, err := kubeAccess.BuildClient.BuildV1alpha1().BuildRuns("").List(kubeAccess.Context, metav1.ListOptions{}); err == nil {
+if buildRunsResults, err := kubeAccess.BuildClient.ShipwrightV1alpha1().BuildRuns("").List(kubeAccess.Context, metav1.ListOptions{}); err == nil {
 var (
 totalBuildRuns int
 completedBuildRuns int
@@ -204,9 +204,9 @@ func ExecuteSingleBuildRun(kubeAccess KubeAccess, namespace string, name string,
 
 if !buildRunOptions.skipDelete {
 defer func() {
-debug("Delete container image %s", buildRun.Status.BuildSpec.Output.ImageURL)
-if err := deleteContainerImage(kubeAccess, buildRun.Namespace, build.Spec.Output.SecretRef, buildRun.Status.BuildSpec.Output.ImageURL); err != nil {
-warn("failed to delete image %s, %v\n", buildRun.Status.BuildSpec.Output.ImageURL, err)
+debug("Delete container image %s", buildRun.Status.BuildSpec.Output.Image)
+if err := deleteContainerImage(kubeAccess, buildRun.Namespace, build.Spec.Output.Credentials, buildRun.Status.BuildSpec.Output.Image); err != nil {
+warn("failed to delete image %s, %v\n", buildRun.Status.BuildSpec.Output.Image, err)
 }
 }()
 }
@@ -344,18 +344,18 @@ func ExecuteTestPlan(kubeAccess KubeAccess, testplan TestPlan) error {
 i+1,
 len(testplan.Steps),
 step.Name,
-step.BuildSpec.StrategyRef.Name,
+step.BuildSpec.Strategy.Name,
 step.BuildSpec.Source.URL,
 )
 
 name := fmt.Sprintf("test-plan-step-%s", step.Name)
 
-outputImageURL, err := getOutputImageURL(name, step.BuildSpec.Output.ImageURL)
+outputImageURL, err := getOutputImageURL(name, step.BuildSpec.Output.Image)
 if err != nil {
 return err
 }
 
-step.BuildSpec.Output.ImageURL = outputImageURL
+step.BuildSpec.Output.Image = outputImageURL
 
 if _, err := ExecuteSingleBuildRun(kubeAccess, testplan.Namespace, name, step.BuildSpec, step.BuildAnnotations, GenerateServiceAccount(testplan.GenerateServiceAccount)); err != nil {
 return err
internal/load/common.go (2 changes: 1 addition & 1 deletion)
@@ -21,7 +21,7 @@ import (
 "os"
 "path/filepath"
 
-buildclient "github.com/shipwright-io/build/pkg/client/build/clientset/versioned"
+buildclient "github.com/shipwright-io/build/pkg/client/clientset/versioned"
 tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 
 "github.com/gonvenience/bunt"
internal/load/kubeops.go (37 changes: 24 additions & 13 deletions)
@@ -94,7 +94,7 @@ func applyBuild(kubeAccess KubeAccess, build buildv1alpha1.Build) (*buildv1alpha
 
 debug("Create build %s", build.Name)
 return kubeAccess.BuildClient.
-BuildV1alpha1().
+ShipwrightV1alpha1().
 Builds(build.Namespace).
 Create(kubeAccess.Context, &build, metav1.CreateOptions{})
 }
@@ -106,43 +106,43 @@ func applyBuildRun(kubeAccess KubeAccess, buildRun buildv1alpha1.BuildRun) (*bui
 
 debug("Create buildrun %s", buildRun.Name)
 return kubeAccess.BuildClient.
-BuildV1alpha1().
+ShipwrightV1alpha1().
 BuildRuns(buildRun.Namespace).
 Create(kubeAccess.Context, &buildRun, metav1.CreateOptions{})
 }
 
 func deleteBuild(kubeAccess KubeAccess, namespace string, name string, deleteOptions *metav1.DeleteOptions) error {
-_, err := kubeAccess.BuildClient.BuildV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
+_, err := kubeAccess.BuildClient.ShipwrightV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
 if errors.IsNotFound(err) {
 return nil
 }
 
 debug("Delete build %s", name)
-if err := kubeAccess.BuildClient.BuildV1alpha1().Builds(namespace).Delete(kubeAccess.Context, name, *deleteOptions); err != nil {
+if err := kubeAccess.BuildClient.ShipwrightV1alpha1().Builds(namespace).Delete(kubeAccess.Context, name, *deleteOptions); err != nil {
 return err
 }
 
 return wait.PollImmediate(1*time.Second, 10*time.Second, func() (done bool, err error) {
-_, err = kubeAccess.BuildClient.BuildV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
+_, err = kubeAccess.BuildClient.ShipwrightV1alpha1().Builds(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
 return errors.IsNotFound(err), nil
 })
 }
 
 func deleteBuildRun(kubeAccess KubeAccess, namespace string, name string, deleteOptions *metav1.DeleteOptions) error {
-buildRun, err := kubeAccess.BuildClient.BuildV1alpha1().BuildRuns(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
+buildRun, err := kubeAccess.BuildClient.ShipwrightV1alpha1().BuildRuns(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
 if errors.IsNotFound(err) {
 return nil
 }
 
 _, pod := lookUpTaskRunAndPod(kubeAccess, *buildRun)
 
 debug("Delete buildrun %s", name)
-if err := kubeAccess.BuildClient.BuildV1alpha1().BuildRuns(namespace).Delete(kubeAccess.Context, name, *deleteOptions); err != nil {
+if err := kubeAccess.BuildClient.ShipwrightV1alpha1().BuildRuns(namespace).Delete(kubeAccess.Context, name, *deleteOptions); err != nil {
 return err
 }
 
 err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (done bool, err error) {
-_, err = kubeAccess.BuildClient.BuildV1alpha1().BuildRuns(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
+_, err = kubeAccess.BuildClient.ShipwrightV1alpha1().BuildRuns(namespace).Get(kubeAccess.Context, name, metav1.GetOptions{})
 return errors.IsNotFound(err), nil
 })
 
@@ -167,7 +167,7 @@ func lookUpTimeout(kubeAccess KubeAccess, buildRun *buildv1alpha1.BuildRun) time
 }
 
 if buildRun.Spec.BuildRef != nil {
-build, err := kubeAccess.BuildClient.BuildV1alpha1().Builds(buildRun.Namespace).Get(kubeAccess.Context, buildRun.Spec.BuildRef.Name, metav1.GetOptions{})
+build, err := kubeAccess.BuildClient.ShipwrightV1alpha1().Builds(buildRun.Namespace).Get(kubeAccess.Context, buildRun.Spec.BuildRef.Name, metav1.GetOptions{})
 if err == nil {
 if build.Spec.Timeout != nil {
 debug("Using Build specified timeout of %v", build.Spec.Timeout.Duration)
@@ -190,12 +190,17 @@ func waitForBuildRunCompletion(kubeAccess KubeAccess, buildRun *buildv1alpha1.Bu
 
 debug("Polling every %v to wait for completion of buildrun %s within %v", interval, buildRun.Name, timeout)
 err := wait.PollImmediate(interval, timeout, func() (done bool, err error) {
-buildRun, err = kubeAccess.BuildClient.BuildV1alpha1().BuildRuns(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+buildRun, err = kubeAccess.BuildClient.ShipwrightV1alpha1().BuildRuns(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 if err != nil {
 return false, err
 }
 
-switch buildRun.Status.Succeeded {
+var condition = buildRun.Status.GetCondition(buildv1alpha1.Succeeded)
+if condition == nil {
+return false, nil
+}
+
+switch condition.Status {
 case corev1.ConditionTrue:
 if buildRun.Status.CompletionTime != nil {
 return true, nil
@@ -285,7 +290,13 @@ func lookUpDockerCredentialsFromSecret(kubeAccess KubeAccess, namespace string,
 }
 
 func buildRunError(kubeAccess KubeAccess, buildRun buildv1alpha1.BuildRun) error {
-if buildRun.Status.Succeeded == corev1.ConditionTrue {
+var condition = buildRun.Status.GetCondition(buildv1alpha1.Succeeded)
+
+if condition == nil {
+return nil
+}
+
+if condition.Status == corev1.ConditionTrue {
 return nil
 }
 
@@ -348,7 +359,7 @@ func buildRunError(kubeAccess KubeAccess, buildRun buildv1alpha1.BuildRun) error
 
 // default error with not much more details other than the status reason
 return wrap.Errorf(
-fmt.Errorf(buildRun.Status.Reason),
+fmt.Errorf(condition.Reason),
 "buildRun %s failed",
 buildRun.Name,
 )
internal/load/models.go (18 changes: 9 additions & 9 deletions)
@@ -26,7 +26,7 @@ import (
 "time"
 
 buildv1alpha "github.com/shipwright-io/build/pkg/apis/build/v1alpha1"
-buildclient "github.com/shipwright-io/build/pkg/client/build/clientset/versioned"
+buildclient "github.com/shipwright-io/build/pkg/client/clientset/versioned"
 tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -212,23 +212,23 @@ func createBuildSpec(name string, buildCfg BuildConfig) (*buildv1alpha.BuildSpec
 }
 
 return &buildv1alpha.BuildSpec{
-StrategyRef: &buildv1alpha.StrategyRef{
+Strategy: &buildv1alpha.Strategy{
 Name: buildCfg.ClusterBuildStrategy,
 Kind: strategyRefKind(buildv1alpha.ClusterBuildStrategyKind),
 },
 
-Source: buildv1alpha.GitSource{
-URL: buildCfg.SourceURL,
-Revision: pointer.StringPtr(buildCfg.SourceRevision),
-ContextDir: pointer.StringPtr(buildCfg.SourceContextDir),
-SecretRef: secrefRef(buildCfg.SourceSecretRef),
+Source: buildv1alpha.Source{
+URL: buildCfg.SourceURL,
+Revision: pointer.StringPtr(buildCfg.SourceRevision),
+ContextDir: pointer.StringPtr(buildCfg.SourceContextDir),
+Credentials: secrefRef(buildCfg.SourceSecretRef),
 },
 
 Dockerfile: dockerfile(),
 
 Output: buildv1alpha.Image{
-ImageURL: outputImageURL,
-SecretRef: secrefRef(buildCfg.OutputSecretRef),
+Image: outputImageURL,
+Credentials: secrefRef(buildCfg.OutputSecretRef),
 },
 
 Timeout: func() *metav1.Duration {
