diff --git a/Gopkg.lock b/Gopkg.lock index a3e48418d..c554818ec 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -211,7 +211,7 @@ [[projects]] branch = "master" - digest = "1:db9b2658df733c976b9b71834154d7571eccb11853c7c55bb10e0481be0aeadc" + digest = "1:2da9794ff5835c3249a15deb04d8340cb51363637925db89fd4b67bb25dcd5c7" name = "github.com/kubeflow/tf-operator" packages = [ "pkg/apis/common/v1beta1", @@ -225,7 +225,20 @@ "pkg/version", ] pruneopts = "NUT" - revision = "c2849477dffdeacc2ebc11de66f826a6ce5cf690" + revision = "4689a23554f2b8da2c5db8e397e414782156a945" + +[[projects]] + digest = "1:680c0fba95a0cff934e350b1ad6774d8229378a3e37d9902e07e2861e82a5908" + name = "github.com/kubernetes-sigs/kube-batch" + packages = [ + "pkg/apis/scheduling/v1alpha1", + "pkg/client/clientset/versioned", + "pkg/client/clientset/versioned/scheme", + "pkg/client/clientset/versioned/typed/scheduling/v1alpha1", + ] + pruneopts = "NUT" + revision = "b0dbd4f2df590237cecee48f463bfb8746cfa357" + version = "v0.3" [[projects]] branch = "master" diff --git a/Gopkg.toml b/Gopkg.toml index 959377a00..127658aac 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -13,6 +13,14 @@ required = [ name = "github.com/golang/protobuf" version = "1.1.0" +[[constraint]] + name = "github.com/kubernetes-sigs/kube-batch" + version = "v0.3" + +[[constraint]] + name = "github.com/kubeflow/tf-operator" + branch = "master" + [[constraint]] name = "github.com/sirupsen/logrus" version = "v1.0.4" diff --git a/cmd/pytorch-operator.v1beta1/app/server.go b/cmd/pytorch-operator.v1beta1/app/server.go index 6075ba61e..2c6dc8d98 100644 --- a/cmd/pytorch-operator.v1beta1/app/server.go +++ b/cmd/pytorch-operator.v1beta1/app/server.go @@ -19,6 +19,7 @@ import ( "os" "time" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" crdclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" @@ -87,7 +88,7 @@ func Run(opt *options.ServerOption) error { } // Create clients. - kubeClientSet, leaderElectionClientSet, pytorchJobClientSet, err := createClientSets(kcfg) + kubeClientSet, leaderElectionClientSet, pytorchJobClientSet, kubeBatchClientSet, err := createClientSets(kcfg) if err != nil { return err } @@ -99,7 +100,7 @@ func Run(opt *options.ServerOption) error { unstructuredInformer := controller.NewUnstructuredPyTorchJobInformer(kcfg, opt.Namespace) // Create pytorch controller. - tc := controller.NewPyTorchController(unstructuredInformer, kubeClientSet, pytorchJobClientSet, kubeInformerFactory, pytorchJobInformerFactory, *opt) + tc := controller.NewPyTorchController(unstructuredInformer, kubeClientSet, kubeBatchClientSet, pytorchJobClientSet, kubeInformerFactory, pytorchJobInformerFactory, *opt) // Start informer goroutines. 
go kubeInformerFactory.Start(stopCh) @@ -154,32 +155,37 @@ func Run(opt *options.ServerOption) error { return nil } -func createClientSets(config *restclientset.Config) (kubeclientset.Interface, kubeclientset.Interface, jobclientset.Interface, error) { +func createClientSets(config *restclientset.Config) (kubeclientset.Interface, kubeclientset.Interface, jobclientset.Interface, kubebatchclient.Interface, error) { crdClient, err := crdclient.NewForConfig(config) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } checkCRDExists(crdClient, v1beta1.PytorchCRD) kubeClientSet, err := kubeclientset.NewForConfig(restclientset.AddUserAgent(config, "pytorch-operator")) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } leaderElectionClientSet, err := kubeclientset.NewForConfig(restclientset.AddUserAgent(config, "leader-election")) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } jobClientSet, err := jobclientset.NewForConfig(config) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } - return kubeClientSet, leaderElectionClientSet, jobClientSet, nil + kubeBatchClientSet, err := kubebatchclient.NewForConfig(restclientset.AddUserAgent(config, "kube-batch")) + if err != nil { + return nil, nil, nil, nil, err + } + + return kubeClientSet, leaderElectionClientSet, jobClientSet, kubeBatchClientSet, nil } func checkCRDExists(clientset crdclient.Interface, crdName string) { diff --git a/cmd/pytorch-operator.v1beta2/app/server.go b/cmd/pytorch-operator.v1beta2/app/server.go index 271f9bb71..37f759d11 100644 --- a/cmd/pytorch-operator.v1beta2/app/server.go +++ b/cmd/pytorch-operator.v1beta2/app/server.go @@ -19,6 +19,7 @@ import ( "os" "time" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -87,7 +88,7 @@ func Run(opt *options.ServerOption) error { } // Create clients. - kubeClientSet, leaderElectionClientSet, pytorchJobClientSet, err := createClientSets(kcfg) + kubeClientSet, leaderElectionClientSet, pytorchJobClientSet, kubeBatchClientSet, err := createClientSets(kcfg) if err != nil { return err } @@ -102,7 +103,7 @@ func Run(opt *options.ServerOption) error { unstructuredInformer := controller.NewUnstructuredPyTorchJobInformer(kcfg, opt.Namespace) // Create pytorch controller. - tc := controller.NewPyTorchController(unstructuredInformer, kubeClientSet, pytorchJobClientSet, kubeInformerFactory, pytorchJobInformerFactory, *opt) + tc := controller.NewPyTorchController(unstructuredInformer, kubeClientSet, kubeBatchClientSet, pytorchJobClientSet, kubeInformerFactory, pytorchJobInformerFactory, *opt) // Start informer goroutines. 
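For readers skimming the `createClientSets` changes above: the new return value is just one more clientset built from the same `rest.Config` the operator already uses. Below is a minimal standalone sketch, not part of this diff, of constructing a kube-batch clientset the same way and listing PodGroups with it; the kubeconfig path and namespace are placeholders.

```go
package main

import (
	"log"

	kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	restclientset "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config; the operator derives this from its own flags,
	// the kubeconfig path here is only a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatalf("building rest.Config: %v", err)
	}

	// Mirror createClientSets: tag requests with a "kube-batch" user agent.
	kubeBatchClientSet, err := kubebatchclient.NewForConfig(restclientset.AddUserAgent(cfg, "kube-batch"))
	if err != nil {
		log.Fatalf("building kube-batch clientset: %v", err)
	}

	// Smoke test: list PodGroups in the default namespace.
	pgs, err := kubeBatchClientSet.SchedulingV1alpha1().PodGroups("default").List(metav1.ListOptions{})
	if err != nil {
		log.Fatalf("listing PodGroups: %v", err)
	}
	log.Printf("found %d PodGroups", len(pgs.Items))
}
```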
go kubeInformerFactory.Start(stopCh) @@ -157,24 +158,29 @@ func Run(opt *options.ServerOption) error { return nil } -func createClientSets(config *restclientset.Config) (kubeclientset.Interface, kubeclientset.Interface, jobclientset.Interface, error) { +func createClientSets(config *restclientset.Config) (kubeclientset.Interface, kubeclientset.Interface, jobclientset.Interface, kubebatchclient.Interface, error) { kubeClientSet, err := kubeclientset.NewForConfig(restclientset.AddUserAgent(config, "pytorch-operator")) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } leaderElectionClientSet, err := kubeclientset.NewForConfig(restclientset.AddUserAgent(config, "leader-election")) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } jobClientSet, err := jobclientset.NewForConfig(config) if err != nil { - return nil, nil, nil, err + return nil, nil, nil, nil, err } - return kubeClientSet, leaderElectionClientSet, jobClientSet, nil + kubeBatchClientSet, err := kubebatchclient.NewForConfig(restclientset.AddUserAgent(config, "kube-batch")) + if err != nil { + return nil, nil, nil, nil, err + } + + return kubeClientSet, leaderElectionClientSet, jobClientSet, kubeBatchClientSet, nil } func checkCRDExists(clientset jobclientset.Interface, namespace string) bool { diff --git a/manifests/podgroup.yaml b/manifests/podgroup.yaml new file mode 100644 index 000000000..1432fc160 --- /dev/null +++ b/manifests/podgroup.yaml @@ -0,0 +1,39 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: podgroups.scheduling.incubator.k8s.io +spec: + group: scheduling.incubator.k8s.io + names: + kind: PodGroup + plural: podgroups + scope: Namespaced + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + minMember: + format: int32 + type: integer + type: object + status: + properties: + succeeded: + format: int32 + type: integer + failed: + format: int32 + type: integer + running: + format: int32 + type: integer + type: object + type: object + version: v1alpha1 diff --git a/pkg/controller.v1beta1/pytorch/controller.go b/pkg/controller.v1beta1/pytorch/controller.go index db863d623..10cc3d073 100644 --- a/pkg/controller.v1beta1/pytorch/controller.go +++ b/pkg/controller.v1beta1/pytorch/controller.go @@ -19,6 +19,7 @@ import ( "fmt" "time" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -98,6 +99,7 @@ func NewPyTorchController( // This variable is for unstructured informer. jobInformer jobinformersv1beta1.PyTorchJobInformer, kubeClientSet kubeclientset.Interface, + kubeBatchClientSet kubebatchclient.Interface, jobClientSet jobclientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, // This field is not used now but we keep it since it will be used @@ -116,7 +118,7 @@ func NewPyTorchController( // Create base controller log.Info("Creating Job controller") jc := jobcontroller.NewJobController(pc, metav1.Duration{Duration: 15 * time.Second}, - option.EnableGangScheduling, kubeClientSet, kubeInformerFactory, v1beta1.Plural) + option.EnableGangScheduling, kubeClientSet, kubeBatchClientSet, kubeInformerFactory, v1beta1.Plural) pc.JobController = jc // Set sync handler. 
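The `manifests/podgroup.yaml` CRD added above registers `podgroups.scheduling.incubator.k8s.io` with a `spec.minMember` field. As a rough illustration of how that manifest maps onto the vendored Go types used later in this diff (the name and namespace below are placeholders, not values from the repo):

```go
package main

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A PodGroup asking kube-batch to schedule the job's pods only when at
	// least 3 of them can run together (gang scheduling).
	pg := &v1alpha1.PodGroup{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "scheduling.incubator.k8s.io/v1alpha1",
			Kind:       "PodGroup",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "example-pytorchjob", // placeholder; the operator uses the job name
			Namespace: "default",
		},
		Spec: v1alpha1.PodGroupSpec{
			MinMember: 3, // maps to spec.minMember in the CRD schema above
		},
	}
	fmt.Printf("%s/%s minMember=%d\n", pg.Namespace, pg.Name, pg.Spec.MinMember)
}
```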
pc.syncHandler = pc.syncPyTorchJob @@ -303,9 +305,9 @@ func (pc *PyTorchController) syncPyTorchJob(key string) (bool, error) { if pc.Config.EnableGangScheduling { minAvailableReplicas := getTotalReplicas(job) - _, err := pc.SyncPdb(job, minAvailableReplicas) + _, err := pc.SyncPodGroup(job, minAvailableReplicas) if err != nil { - logger.Warnf("Sync pdb %v: %v", job.Name, err) + logger.Warnf("Sync PodGroup %v: %v", job.Name, err) } } @@ -364,12 +366,12 @@ func (pc *PyTorchController) reconcilePyTorchJobs(job *v1beta1.PyTorchJob) error } if pc.Config.EnableGangScheduling { - pc.Recorder.Event(job, v1.EventTypeNormal, "JobTerminated", "Job is terminated, deleting pdb") - if err := pc.DeletePdb(job); err != nil { - pc.Recorder.Eventf(job, v1.EventTypeWarning, "FailedDeletePdb", "Error deleting: %v", err) + pc.Recorder.Event(job, v1.EventTypeNormal, "JobTerminated", "Job is terminated, deleting PodGroup") + if err := pc.DeletePodGroup(job); err != nil { + pc.Recorder.Eventf(job, v1.EventTypeWarning, "FailedDeletePodGroup", "Error deleting: %v", err) return err } else { - pc.Recorder.Eventf(job, v1.EventTypeNormal, "SuccessfulDeletePdb", "Deleted pdb: %v", job.Name) + pc.Recorder.Eventf(job, v1.EventTypeNormal, "SuccessfulDeletePodGroup", "Deleted PodGroup: %v", job.Name) } } @@ -468,6 +470,10 @@ func (pc *PyTorchController) GetReplicaIndexLabelKey() string { return replicaIndexLabel } +func (pc *PyTorchController) GetJobRoleKey() string { + return labelPyTorchJobRole +} + func (pc *PyTorchController) ControllerName() string { return controllerName } diff --git a/pkg/controller.v1beta1/pytorch/controller_test.go b/pkg/controller.v1beta1/pytorch/controller_test.go index 8838c3da9..a6f5ef5b4 100644 --- a/pkg/controller.v1beta1/pytorch/controller_test.go +++ b/pkg/controller.v1beta1/pytorch/controller_test.go @@ -21,6 +21,7 @@ import ( "time" "github.com/golang/protobuf/proto" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" apiv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,6 +49,7 @@ var ( func newPyTorchController( config *rest.Config, kubeClientSet kubeclientset.Interface, + kubeBatchClientSet kubebatchclient.Interface, jobClientSet jobclientset.Interface, resyncPeriod controller.ResyncPeriodFunc, option options.ServerOption, @@ -60,7 +62,7 @@ func newPyTorchController( jobInformer := NewUnstructuredPyTorchJobInformer(config, metav1.NamespaceAll) - ctr := NewPyTorchController(jobInformer, kubeClientSet, jobClientSet, kubeInformerFactory, jobInformerFactory, option) + ctr := NewPyTorchController(jobInformer, kubeClientSet, kubeBatchClientSet, jobClientSet, kubeInformerFactory, jobInformerFactory, option) ctr.PodControl = &controller.FakePodControl{} ctr.ServiceControl = &control.FakeServiceControl{} return ctr, kubeInformerFactory, jobInformerFactory @@ -227,6 +229,15 @@ func TestNormalPath(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
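The gang-scheduling branch above feeds `getTotalReplicas(job)` into `SyncPodGroup` as the PodGroup's minimum member count. `getTotalReplicas` itself is not shown in this diff; the sketch below is only a hypothetical reconstruction of what such a helper computes (the `PyTorchReplicaSpecs` field name is assumed from the v1beta1 API and not confirmed here): the sum of desired replicas across all replica types.

```go
package pytorch

import (
	v1beta1 "github.com/kubeflow/pytorch-operator/pkg/apis/pytorch/v1beta1"
)

// getTotalReplicas is a hypothetical sketch of the helper referenced above:
// the PodGroup's MinMember is the total number of pods the job needs, i.e.
// the sum of Replicas over every replica type (Master, Worker, ...).
func getTotalReplicas(job *v1beta1.PyTorchJob) int32 {
	total := int32(0)
	for _, spec := range job.Spec.PyTorchReplicaSpecs {
		if spec != nil && spec.Replicas != nil {
			total += *spec.Replicas
		}
	}
	return total
}
```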
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -235,7 +246,7 @@ func TestNormalPath(t *testing.T) { } option := options.ServerOption{} jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -347,6 +358,15 @@ func TestRun(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -354,7 +374,7 @@ func TestRun(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -380,12 +400,21 @@ func TestSyncPdb(t *testing.T) { GroupVersion: &v1beta1.SchemeGroupVersion, }, } + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + jobClientSet := jobclientset.NewForConfigOrDie(config) kubeClientSet := fake.NewSimpleClientset() option := options.ServerOption{ EnableGangScheduling: true, } - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) type testCase struct { job *v1beta1.PyTorchJob diff --git a/pkg/controller.v1beta1/pytorch/job_test.go b/pkg/controller.v1beta1/pytorch/job_test.go index 7a6ad24ed..c0ed2a275 100644 --- a/pkg/controller.v1beta1/pytorch/job_test.go +++ b/pkg/controller.v1beta1/pytorch/job_test.go @@ -48,7 +48,7 @@ func TestAddPyTorchJob(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, nil, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -107,7 +107,7 @@ func TestCopyLabelsAndAnnotation(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, nil, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.jobInformerSynced = testutil.AlwaysReady @@ -248,7 +248,7 @@ func TestDeletePodsAndServices(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, nil, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl fakeServiceControl := &control.FakeServiceControl{} @@ -402,7 +402,7 @@ func TestCleanupPyTorchJob(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, nil, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl fakeServiceControl := &control.FakeServiceControl{} diff --git a/pkg/controller.v1beta1/pytorch/pod_test.go b/pkg/controller.v1beta1/pytorch/pod_test.go index 59d9a8fc2..a0788ce04 100644 --- a/pkg/controller.v1beta1/pytorch/pod_test.go +++ b/pkg/controller.v1beta1/pytorch/pod_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -39,6 +40,16 @@ func TestAddPod(t *testing.T) { }, }, ) + + // Prepare the kube-batch 
clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -46,7 +57,7 @@ func TestAddPod(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -220,6 +231,16 @@ func TestExitCode(t *testing.T) { }, }, ) + + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -227,7 +248,7 @@ func TestExitCode(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.jobInformerSynced = testutil.AlwaysReady diff --git a/pkg/controller.v1beta1/pytorch/service_test.go b/pkg/controller.v1beta1/pytorch/service_test.go index 73e0fb136..9235ba6a2 100644 --- a/pkg/controller.v1beta1/pytorch/service_test.go +++ b/pkg/controller.v1beta1/pytorch/service_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,6 +39,16 @@ func TestAddService(t *testing.T) { }, }, ) + + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -45,7 +56,7 @@ func TestAddService(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady diff --git a/pkg/controller.v1beta1/pytorch/status_test.go b/pkg/controller.v1beta1/pytorch/status_test.go index 73c2c1f78..655b45b03 100644 --- a/pkg/controller.v1beta1/pytorch/status_test.go +++ b/pkg/controller.v1beta1/pytorch/status_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -40,6 +41,16 @@ func TestFailed(t *testing.T) { }, }, ) + + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -47,7 +58,7 @@ func TestFailed(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -212,6 +223,15 @@ func TestStatus(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -219,7 +239,7 @@ func TestStatus(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.Recorder = &record.FakeRecorder{} diff --git a/pkg/controller.v1beta2/pytorch/controller.go b/pkg/controller.v1beta2/pytorch/controller.go index 9afdbac65..3febed768 100644 --- a/pkg/controller.v1beta2/pytorch/controller.go +++ b/pkg/controller.v1beta2/pytorch/controller.go @@ -19,6 +19,7 @@ import ( "fmt" "time" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -98,6 +99,7 @@ func NewPyTorchController( // This variable is for unstructured informer. jobInformer jobinformersv1beta2.PyTorchJobInformer, kubeClientSet kubeclientset.Interface, + kubeBatchClientSet kubebatchclient.Interface, jobClientSet jobclientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, // This field is not used now but we keep it since it will be used @@ -116,7 +118,7 @@ func NewPyTorchController( // Create base controller log.Info("Creating Job controller") jc := jobcontroller.NewJobController(pc, metav1.Duration{Duration: 15 * time.Second}, - option.EnableGangScheduling, kubeClientSet, kubeInformerFactory, v1beta2.Plural) + option.EnableGangScheduling, kubeClientSet, kubeBatchClientSet, kubeInformerFactory, v1beta2.Plural) pc.JobController = jc // Set sync handler. 
pc.syncHandler = pc.syncPyTorchJob @@ -303,9 +305,9 @@ func (pc *PyTorchController) syncPyTorchJob(key string) (bool, error) { if pc.Config.EnableGangScheduling { minAvailableReplicas := getTotalReplicas(job) - _, err := pc.SyncPdb(job, minAvailableReplicas) + _, err := pc.SyncPodGroup(job, minAvailableReplicas) if err != nil { - logger.Warnf("Sync pdb %v: %v", job.Name, err) + logger.Warnf("Sync PodGroup %v: %v", job.Name, err) } } @@ -364,12 +366,12 @@ func (pc *PyTorchController) reconcilePyTorchJobs(job *v1beta2.PyTorchJob) error } if pc.Config.EnableGangScheduling { - pc.Recorder.Event(job, v1.EventTypeNormal, "JobTerminated", "Job is terminated, deleting pdb") - if err := pc.DeletePdb(job); err != nil { - pc.Recorder.Eventf(job, v1.EventTypeWarning, "FailedDeletePdb", "Error deleting: %v", err) + pc.Recorder.Event(job, v1.EventTypeNormal, "JobTerminated", "Job is terminated, deleting PodGroup") + if err := pc.DeletePodGroup(job); err != nil { + pc.Recorder.Eventf(job, v1.EventTypeWarning, "FailedDeletePodGroup", "Error deleting: %v", err) return err } else { - pc.Recorder.Eventf(job, v1.EventTypeNormal, "SuccessfulDeletePdb", "Deleted pdb: %v", job.Name) + pc.Recorder.Eventf(job, v1.EventTypeNormal, "SuccessfulDeletePodGroup", "Deleted PodGroup: %v", job.Name) } } @@ -468,6 +470,10 @@ func (pc *PyTorchController) GetReplicaIndexLabelKey() string { return replicaIndexLabel } +func (pc *PyTorchController) GetJobRoleKey() string { + return labelPyTorchJobRole +} + func (pc *PyTorchController) ControllerName() string { return controllerName } diff --git a/pkg/controller.v1beta2/pytorch/controller_test.go b/pkg/controller.v1beta2/pytorch/controller_test.go index 77579afa0..92b25c36b 100644 --- a/pkg/controller.v1beta2/pytorch/controller_test.go +++ b/pkg/controller.v1beta2/pytorch/controller_test.go @@ -21,6 +21,7 @@ import ( "time" "github.com/golang/protobuf/proto" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" apiv1beta1 "k8s.io/api/policy/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,6 +49,7 @@ var ( func newPyTorchController( config *rest.Config, kubeClientSet kubeclientset.Interface, + kubeBatchClientSet kubebatchclient.Interface, jobClientSet jobclientset.Interface, resyncPeriod controller.ResyncPeriodFunc, option options.ServerOption, @@ -60,7 +62,7 @@ func newPyTorchController( jobInformer := NewUnstructuredPyTorchJobInformer(config, metav1.NamespaceAll) - ctr := NewPyTorchController(jobInformer, kubeClientSet, jobClientSet, kubeInformerFactory, jobInformerFactory, option) + ctr := NewPyTorchController(jobInformer, kubeClientSet, kubeBatchClientSet, jobClientSet, kubeInformerFactory, jobInformerFactory, option) ctr.PodControl = &controller.FakePodControl{} ctr.ServiceControl = &control.FakeServiceControl{} return ctr, kubeInformerFactory, jobInformerFactory @@ -229,6 +231,15 @@ func TestNormalPath(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -237,7 +248,7 @@ func TestNormalPath(t *testing.T) { } option := options.ServerOption{} jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -349,6 +360,16 @@ func TestRun(t *testing.T) { }, }, ) + + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -356,7 +377,7 @@ func TestRun(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -382,12 +403,21 @@ func TestSyncPdb(t *testing.T) { GroupVersion: &v1beta2.SchemeGroupVersion, }, } + + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + jobClientSet := jobclientset.NewForConfigOrDie(config) kubeClientSet := fake.NewSimpleClientset() option := options.ServerOption{ EnableGangScheduling: true, } - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, option) type testCase struct { job *v1beta2.PyTorchJob diff --git a/pkg/controller.v1beta2/pytorch/job_test.go b/pkg/controller.v1beta2/pytorch/job_test.go index 4e92735d5..19a75ebf8 100644 --- a/pkg/controller.v1beta2/pytorch/job_test.go +++ b/pkg/controller.v1beta2/pytorch/job_test.go @@ -18,6 +18,7 @@ import ( "testing" "time" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -41,6 +42,15 @@ func TestAddPyTorchJob(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -48,7 +58,7 @@ func TestAddPyTorchJob(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -100,6 +110,15 @@ func TestCopyLabelsAndAnnotation(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -107,7 +126,7 @@ func TestCopyLabelsAndAnnotation(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.jobInformerSynced = testutil.AlwaysReady @@ -241,6 +260,15 @@ func TestDeletePodsAndServices(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -248,7 +276,7 @@ func TestDeletePodsAndServices(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl fakeServiceControl := &control.FakeServiceControl{} @@ -395,6 +423,15 @@ func TestCleanupPyTorchJob(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -402,7 +439,7 @@ func TestCleanupPyTorchJob(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl fakeServiceControl := &control.FakeServiceControl{} diff --git a/pkg/controller.v1beta2/pytorch/pod_test.go b/pkg/controller.v1beta2/pytorch/pod_test.go index b024d4d64..233baf8e6 100644 --- a/pkg/controller.v1beta2/pytorch/pod_test.go +++ b/pkg/controller.v1beta2/pytorch/pod_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -39,6 +40,15 @@ func TestAddPod(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -46,7 +56,7 @@ func TestAddPod(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -220,6 +230,15 @@ func TestExitCode(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -227,7 +246,7 @@ func TestExitCode(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, kubeInformerFactory, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.jobInformerSynced = testutil.AlwaysReady diff --git a/pkg/controller.v1beta2/pytorch/service_test.go b/pkg/controller.v1beta2/pytorch/service_test.go index d41c9faca..66ca10fe1 100644 --- a/pkg/controller.v1beta2/pytorch/service_test.go +++ b/pkg/controller.v1beta2/pytorch/service_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -38,6 +39,15 @@ func TestAddService(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -45,7 +55,7 @@ func TestAddService(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady diff --git a/pkg/controller.v1beta2/pytorch/status_test.go b/pkg/controller.v1beta2/pytorch/status_test.go index 491ee0a8d..709acd547 100644 --- a/pkg/controller.v1beta2/pytorch/status_test.go +++ b/pkg/controller.v1beta2/pytorch/status_test.go @@ -18,6 +18,7 @@ package pytorch import ( "testing" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" "k8s.io/api/core/v1" kubeclientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -40,6 +41,15 @@ func TestFailed(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. 
+ kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -47,7 +57,7 @@ func TestFailed(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) ctr.jobInformerSynced = testutil.AlwaysReady ctr.PodInformerSynced = testutil.AlwaysReady ctr.ServiceInformerSynced = testutil.AlwaysReady @@ -212,6 +222,15 @@ func TestStatus(t *testing.T) { }, }, ) + // Prepare the kube-batch clientset and controller for the test. + kubeBatchClientSet := kubebatchclient.NewForConfigOrDie(&rest.Config{ + Host: "", + ContentConfig: rest.ContentConfig{ + GroupVersion: &v1.SchemeGroupVersion, + }, + }, + ) + config := &rest.Config{ Host: "", ContentConfig: rest.ContentConfig{ @@ -219,7 +238,7 @@ func TestStatus(t *testing.T) { }, } jobClientSet := jobclientset.NewForConfigOrDie(config) - ctr, _, _ := newPyTorchController(config, kubeClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) + ctr, _, _ := newPyTorchController(config, kubeClientSet, kubeBatchClientSet, jobClientSet, controller.NoResyncPeriodFunc, options.ServerOption{}) fakePodControl := &controller.FakePodControl{} ctr.PodControl = fakePodControl ctr.Recorder = &record.FakeRecorder{} diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/doc.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/doc.go new file mode 100644 index 000000000..0621a0c31 --- /dev/null +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/doc.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Kubeflow Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. 
+// +groupName=kubeflow.org +package v1beta1 diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/openapi_generated.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/openapi_generated.go index 0e51decd9..7b0a65f2a 100644 --- a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/openapi_generated.go +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/openapi_generated.go @@ -30,6 +30,176 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.JobCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JobCondition describes the state of the job at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of job condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.JobStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JobStatus represents the current observed state of the training Job.", + Properties: map[string]spec.Schema{ + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Conditions is an array of current observed job conditions.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.JobCondition"), + }, + }, + }, + }, + }, + "replicaStatuses": { + SchemaProps: spec.SchemaProps{ + Description: "ReplicaStatuses is map of ReplicaType and ReplicaStatus, specifies the status of each replica.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.ReplicaStatus"), + }, + }, + }, + }, + }, + "startTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "completionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents time when the job was completed. 
It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastReconcileTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents last time when the job was reconciled. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + Required: []string{"conditions", "replicaStatuses"}, + }, + }, + Dependencies: []string{ + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.JobCondition", "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.ReplicaStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.ReplicaSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSpec is a description of the replica", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the desired number of replicas of the given template. If unspecified, defaults to 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the object that describes the pod that will be created for this replica. RestartPolicy in PodTemplateSpec will be overide by RestartPolicy in ReplicaSpec", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "restartPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Restart policy for all replicas within the job. One of Always, OnFailure, Never and ExitCode. Default to Never.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplateSpec"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1.ReplicaStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaStatus represents the current observed state of the replica.", + Properties: map[string]spec.Schema{ + "active": { + SchemaProps: spec.SchemaProps{ + Description: "The number of actively running pods.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "succeeded": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods which reached phase Succeeded.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failed": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods which reached phase Failed.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/zz_generated.deepcopy.go index 00fef9c00..a853e79a8 100644 --- a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta1/zz_generated.deepcopy.go @@ -107,3 +107,19 @@ func (in *ReplicaSpec) DeepCopy() *ReplicaSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaStatus) DeepCopyInto(out *ReplicaStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaStatus. +func (in *ReplicaStatus) DeepCopy() *ReplicaStatus { + if in == nil { + return nil + } + out := new(ReplicaStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/doc.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/doc.go new file mode 100644 index 000000000..b52bb41c8 --- /dev/null +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/doc.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Kubeflow Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// Package v1beta2 is the v1beta2 version of the API. +// +groupName=kubeflow.org +package v1beta2 diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/openapi_generated.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/openapi_generated.go index 55ea2ad80..100e4aa08 100644 --- a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/openapi_generated.go +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/openapi_generated.go @@ -30,6 +30,176 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.JobCondition": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JobCondition describes the state of the job at a certain point.", + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type of job condition.", + Type: []string{"string"}, + Format: "", + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Description: "Status of the condition, one of True, False, Unknown.", + Type: []string{"string"}, + Format: "", + }, + }, + "reason": { + SchemaProps: spec.SchemaProps{ + Description: "The reason for the condition's last transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "A human readable message indicating details about the transition.", + Type: []string{"string"}, + Format: "", + }, + }, + "lastUpdateTime": { + SchemaProps: spec.SchemaProps{ + Description: "The last time this condition was updated.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Last time the condition transitioned from one status to another.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + Required: []string{"type", "status"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.JobStatus": { + Schema: 
spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JobStatus represents the current observed state of the training Job.", + Properties: map[string]spec.Schema{ + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "Conditions is an array of current observed job conditions.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.JobCondition"), + }, + }, + }, + }, + }, + "replicaStatuses": { + SchemaProps: spec.SchemaProps{ + Description: "ReplicaStatuses is map of ReplicaType and ReplicaStatus, specifies the status of each replica.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.ReplicaStatus"), + }, + }, + }, + }, + }, + "startTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "completionTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "lastReconcileTime": { + SchemaProps: spec.SchemaProps{ + Description: "Represents last time when the job was reconciled. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + }, + Required: []string{"conditions", "replicaStatuses"}, + }, + }, + Dependencies: []string{ + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.JobCondition", "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.ReplicaStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.ReplicaSpec": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaSpec is a description of the replica", + Properties: map[string]spec.Schema{ + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the desired number of replicas of the given template. If unspecified, defaults to 1.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "template": { + SchemaProps: spec.SchemaProps{ + Description: "Template is the object that describes the pod that will be created for this replica. RestartPolicy in PodTemplateSpec will be overide by RestartPolicy in ReplicaSpec", + Ref: ref("k8s.io/api/core/v1.PodTemplateSpec"), + }, + }, + "restartPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Restart policy for all replicas within the job. One of Always, OnFailure, Never and ExitCode. 
Default to Never.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.PodTemplateSpec"}, + }, + "github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2.ReplicaStatus": { + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ReplicaStatus represents the current observed state of the replica.", + Properties: map[string]spec.Schema{ + "active": { + SchemaProps: spec.SchemaProps{ + Description: "The number of actively running pods.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "succeeded": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods which reached phase Succeeded.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "failed": { + SchemaProps: spec.SchemaProps{ + Description: "The number of pods which reached phase Failed.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + Dependencies: []string{}, + }, "k8s.io/api/core/v1.AWSElasticBlockStoreVolumeSource": { Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/common_types.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/types.go similarity index 100% rename from vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/common_types.go rename to vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/types.go diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/zz_generated.deepcopy.go b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/zz_generated.deepcopy.go index fefeb94ce..ddc9521b1 100644 --- a/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/zz_generated.deepcopy.go +++ b/vendor/github.com/kubeflow/tf-operator/pkg/apis/common/v1beta2/zz_generated.deepcopy.go @@ -107,3 +107,19 @@ func (in *ReplicaSpec) DeepCopy() *ReplicaSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaStatus) DeepCopyInto(out *ReplicaStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaStatus. +func (in *ReplicaStatus) DeepCopy() *ReplicaStatus { + if in == nil { + return nil + } + out := new(ReplicaStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/kubeflow/tf-operator/pkg/common/jobcontroller/jobcontroller.go b/vendor/github.com/kubeflow/tf-operator/pkg/common/jobcontroller/jobcontroller.go index df76135e5..d0a375e9f 100644 --- a/vendor/github.com/kubeflow/tf-operator/pkg/common/jobcontroller/jobcontroller.go +++ b/vendor/github.com/kubeflow/tf-operator/pkg/common/jobcontroller/jobcontroller.go @@ -5,6 +5,8 @@ import ( "fmt" "strings" + "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned" log "github.com/sirupsen/logrus" "k8s.io/api/core/v1" "k8s.io/api/policy/v1beta1" @@ -52,6 +54,9 @@ type ControllerInterface interface { // Returns the Replica Index(value) in the labels of the job GetReplicaIndexLabelKey() string + // Returns the Job Role(key) in the labels of the job + GetJobRoleKey() string + // Returns the Job from Informer Cache GetJobFromInformerCache(namespace, name string) (metav1.Object, error) @@ -88,6 +93,9 @@ type JobController struct { // kubeClientSet is a standard kubernetes clientset. 
KubeClientSet kubeclientset.Interface + //KubeBatchClientSet is a standard kube-batch clientset. + KubeBatchClientSet kubebatchclient.Interface + // podLister can list/get pods from the shared informer's store. PodLister corelisters.PodLister @@ -135,6 +143,7 @@ func NewJobController( reconcilerSyncPeriod metav1.Duration, enableGangScheduling bool, kubeClientSet kubeclientset.Interface, + kubeBatchClientSet kubebatchclient.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, workQueueName string) JobController { @@ -160,14 +169,15 @@ func NewJobController( } jc := JobController{ - Controller: controllerImpl, - Config: jobControllerConfig, - PodControl: realPodControl, - ServiceControl: realServiceControl, - KubeClientSet: kubeClientSet, - Expectations: controller.NewControllerExpectations(), - WorkQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName), - Recorder: recorder, + Controller: controllerImpl, + Config: jobControllerConfig, + PodControl: realPodControl, + ServiceControl: realServiceControl, + KubeClientSet: kubeClientSet, + KubeBatchClientSet: kubeBatchClientSet, + Expectations: controller.NewControllerExpectations(), + WorkQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName), + Recorder: recorder, } return jc @@ -197,6 +207,31 @@ func (jc *JobController) GenLabels(jobName string) map[string]string { } } +func (jc *JobController) SyncPodGroup(job metav1.Object, minAvailableReplicas int32) (*v1alpha1.PodGroup, error) { + + kubeBatchClientInterface := jc.KubeBatchClientSet + // Check whether podGroup exists or not + podGroup, err := kubeBatchClientInterface.SchedulingV1alpha1().PodGroups(job.GetNamespace()).Get(job.GetName(), metav1.GetOptions{}) + if err == nil { + return podGroup, nil + } + + // create podGroup for gang scheduling by kube-batch + minAvailable := intstr.FromInt(int(minAvailableReplicas)) + createPodGroup := &v1alpha1.PodGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: job.GetName(), + OwnerReferences: []metav1.OwnerReference{ + *jc.GenOwnerReference(job), + }, + }, + Spec: v1alpha1.PodGroupSpec{ + MinMember: minAvailable.IntVal, + }, + } + return kubeBatchClientInterface.SchedulingV1alpha1().PodGroups(job.GetNamespace()).Create(createPodGroup) +} + // SyncPdb will create a PDB for gang scheduling by kube-arbitrator. 
func (jc *JobController) SyncPdb(job metav1.Object, minAvailableReplicas int32) (*v1beta1.PodDisruptionBudget, error) { labelJobName := jc.Controller.GetJobNameLabelKey() @@ -231,6 +266,25 @@ func (jc *JobController) SyncPdb(job metav1.Object, minAvailableReplicas int32) return jc.KubeClientSet.PolicyV1beta1().PodDisruptionBudgets(job.GetNamespace()).Create(createPdb) } +func (jc *JobController) DeletePodGroup(job metav1.Object) error { + kubeBatchClientInterface := jc.KubeBatchClientSet + + //check whether podGroup exists or not + _, err := kubeBatchClientInterface.SchedulingV1alpha1().PodGroups(job.GetNamespace()).Get(job.GetName(), metav1.GetOptions{}) + if err != nil && k8serrors.IsNotFound(err) { + return nil + } + + log.Infof("Deleting PodGroup %s", job.GetName()) + + //delete podGroup + err = kubeBatchClientInterface.SchedulingV1alpha1().PodGroups(job.GetNamespace()).Delete(job.GetName(), &metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("unable to delete PodGroup: %v", err) + } + return nil +} + func (jc *JobController) DeletePdb(job metav1.Object) error { // Check the pdb exist or not diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE b/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/doc.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..8e8da1129 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +package v1alpha1 diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/labels.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/labels.go new file mode 100644 index 000000000..0fe02fe0b --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/labels.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// GroupNameAnnotationKey is the annotation key of Pod to identify +// which PodGroup it belongs to. +const GroupNameAnnotationKey = "scheduling.k8s.io/group-name" diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/register.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/register.go new file mode 100644 index 000000000..0178c2065 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +const ( + // GroupName is the group name used in this package. + GroupName = "scheduling.incubator.k8s.io" + + // GroupVersion is the version of scheduling group + GroupVersion = "v1alpha1" +) + +// SchemeGroupVersion is the group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion} + +// Resource takes an unqualified resource and returns a Group-qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodGroup{}, + &PodGroupList{}, + &Queue{}, + &QueueList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/types.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/types.go new file mode 100644 index 000000000..d439edb4c --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/types.go @@ -0,0 +1,151 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodGroup is a collection of Pod; used for batch workload. +type PodGroup struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod group. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec PodGroupSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information about a pod group. + // This data may not be up to date. + // +optional + Status PodGroupStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodGroupSpec represents the template of a pod group. +type PodGroupSpec struct { + // MinMember defines the minimal number of members/tasks to run the pod group; + // if there's not enough resources to start all tasks, the scheduler + // will not start anyone. + MinMember int32 `json:"minMember,omitempty" protobuf:"bytes,1,opt,name=minMember"` + + // Queue defines the queue to allocate resource for PodGroup; if queue does not exist, + // the PodGroup will not be scheduled. + Queue string `json:"queue,omitempty" protobuf:"bytes,2,opt,name=queue"` +} + +// PodGroupStatus represents the current state of a pod group. +type PodGroupStatus struct { + // The number of actively running pods. + // +optional + Running int32 `json:"running,omitempty" protobuf:"bytes,3,opt,name=running"` + // The number of pods which reached phase Succeeded. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"bytes,3,opt,name=succeeded"` + // The number of pods which reached phase Failed. + // +optional + Failed int32 `json:"failed,omitempty" protobuf:"bytes,3,opt,name=failed"` +} + +// Action is the action that PodGroup controller will take according to the event. +type Action string + +// Event represent the phase of PodGroup, e.g. pod-failed. 
+type Event string + +const ( + UnschedulableEvent Event = "Unschedulable" + EvictEvent Event = "Evict" + PodFailedEvent Event = "PodFailed" + + RestartAction Action = "restart" +) + +// LifecyclePolicy represents the lifecycle policy of PodGroup. +type LifeCyclePolicy struct { + // The action that will be taken to the PodGroup according to Event. + // One of "Restart", "None". + // Default to None. + // +optional + Action Action + // The Event recorded by scheduler; the controller takes actions + // according to this Event. + // One of "PodFailed", "Unschedulable". + // +optional + Event Event + // Timeout is the grace period for controller to take actions. + // Default to nil (take action immediately). + // +optional + Timeout *metav1.Duration +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodGroupList is a collection of pod groups. +type PodGroupList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PodGroup + Items []PodGroup `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Queue is a queue of PodGroup. +type Queue struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod group. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec QueueSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// QueueSpec represents the template of Queue. +type QueueSpec struct { + Weight int32 `json:"weight,omitempty" protobuf:"bytes,1,opt,name=weight"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// QueueList is a collection of queues. +type QueueList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PodGroup + Items []Queue `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..fabd3eae6 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,216 @@ +// +build !ignore_autogenerated + +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifeCyclePolicy) DeepCopyInto(out *LifeCyclePolicy) { + *out = *in + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(v1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifeCyclePolicy. +func (in *LifeCyclePolicy) DeepCopy() *LifeCyclePolicy { + if in == nil { + return nil + } + out := new(LifeCyclePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroup) DeepCopyInto(out *PodGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroup. +func (in *PodGroup) DeepCopy() *PodGroup { + if in == nil { + return nil + } + out := new(PodGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupList) DeepCopyInto(out *PodGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupList. +func (in *PodGroupList) DeepCopy() *PodGroupList { + if in == nil { + return nil + } + out := new(PodGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupSpec) DeepCopyInto(out *PodGroupSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupSpec. +func (in *PodGroupSpec) DeepCopy() *PodGroupSpec { + if in == nil { + return nil + } + out := new(PodGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodGroupStatus) DeepCopyInto(out *PodGroupStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodGroupStatus. 
+func (in *PodGroupStatus) DeepCopy() *PodGroupStatus { + if in == nil { + return nil + } + out := new(PodGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. +func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Queue) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 000000000..c1ddfe07c --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package versioned + +import ( + schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client +} + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + +// Deprecated: Scheduling retrieves the default version of SchedulingClient. +// Please explicitly pick a version. +func (c *Clientset) Scheduling() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go new file mode 100644 index 000000000..41721ca52 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..7dc375616 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..e08eecc63 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,60 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + schedulingv1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + + corev1 "k8s.io/api/core/v1" + //PDB defintions + policyv1beta1 "k8s.io/api/policy/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + AddToScheme(Scheme) +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+func AddToScheme(scheme *runtime.Scheme) { + schedulingv1alpha1.AddToScheme(scheme) + corev1.AddToScheme(scheme) + policyv1beta1.AddToScheme(scheme) +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..f19581492 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type PodGroupExpansion interface{} + +type QueueExpansion interface{} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go new file mode 100644 index 000000000..ea0cb3628 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/podgroup.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodGroupsGetter has a method to return a PodGroupInterface. +// A group's client should implement this interface. +type PodGroupsGetter interface { + PodGroups(namespace string) PodGroupInterface +} + +// PodGroupInterface has methods to work with PodGroup resources. +type PodGroupInterface interface { + Create(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) + Update(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) + UpdateStatus(*v1alpha1.PodGroup) (*v1alpha1.PodGroup, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PodGroup, error) + List(opts v1.ListOptions) (*v1alpha1.PodGroupList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodGroup, err error) + PodGroupExpansion +} + +// podGroups implements PodGroupInterface +type podGroups struct { + client rest.Interface + ns string +} + +// newPodGroups returns a PodGroups +func newPodGroups(c *SchedulingV1alpha1Client, namespace string) *podGroups { + return &podGroups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podGroup, and returns the corresponding podGroup object, and an error if there is any. +func (c *podGroups) Get(name string, options v1.GetOptions) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodGroups that match those selectors. +func (c *podGroups) List(opts v1.ListOptions) (result *v1alpha1.PodGroupList, err error) { + result = &v1alpha1.PodGroupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podGroups. +func (c *podGroups) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a podGroup and creates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *podGroups) Create(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podgroups"). + Body(podGroup). + Do(). + Into(result) + return +} + +// Update takes the representation of a podGroup and updates it. Returns the server's representation of the podGroup, and an error, if there is any. +func (c *podGroups) Update(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podgroups"). + Name(podGroup.Name). + Body(podGroup). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *podGroups) UpdateStatus(podGroup *v1alpha1.PodGroup) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podgroups"). + Name(podGroup.Name). + SubResource("status"). + Body(podGroup). + Do(). + Into(result) + return +} + +// Delete takes name of the podGroup and deletes it. Returns an error if one occurs. +func (c *podGroups) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podgroups"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podGroups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podgroups"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podGroup. +func (c *podGroups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodGroup, err error) { + result = &v1alpha1.PodGroup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podgroups"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go new file mode 100644 index 000000000..77b1b40ae --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/queue.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + scheme "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// QueuesGetter has a method to return a QueueInterface. +// A group's client should implement this interface. +type QueuesGetter interface { + Queues() QueueInterface +} + +// QueueInterface has methods to work with Queue resources. 
+type QueueInterface interface { + Create(*v1alpha1.Queue) (*v1alpha1.Queue, error) + Update(*v1alpha1.Queue) (*v1alpha1.Queue, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Queue, error) + List(opts v1.ListOptions) (*v1alpha1.QueueList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Queue, err error) + QueueExpansion +} + +// queues implements QueueInterface +type queues struct { + client rest.Interface +} + +// newQueues returns a Queues +func newQueues(c *SchedulingV1alpha1Client) *queues { + return &queues{ + client: c.RESTClient(), + } +} + +// Get takes name of the queue, and returns the corresponding queue object, and an error if there is any. +func (c *queues) Get(name string, options v1.GetOptions) (result *v1alpha1.Queue, err error) { + result = &v1alpha1.Queue{} + err = c.client.Get(). + Resource("queues"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Queues that match those selectors. +func (c *queues) List(opts v1.ListOptions) (result *v1alpha1.QueueList, err error) { + result = &v1alpha1.QueueList{} + err = c.client.Get(). + Resource("queues"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested queues. +func (c *queues) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("queues"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a queue and creates it. Returns the server's representation of the queue, and an error, if there is any. +func (c *queues) Create(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) { + result = &v1alpha1.Queue{} + err = c.client.Post(). + Resource("queues"). + Body(queue). + Do(). + Into(result) + return +} + +// Update takes the representation of a queue and updates it. Returns the server's representation of the queue, and an error, if there is any. +func (c *queues) Update(queue *v1alpha1.Queue) (result *v1alpha1.Queue, err error) { + result = &v1alpha1.Queue{} + err = c.client.Put(). + Resource("queues"). + Name(queue.Name). + Body(queue). + Do(). + Into(result) + return +} + +// Delete takes name of the queue and deletes it. Returns an error if one occurs. +func (c *queues) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("queues"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *queues) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("queues"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched queue. +func (c *queues) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Queue, err error) { + result = &v1alpha1.Queue{} + err = c.client.Patch(pt). + Resource("queues"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go new file mode 100644 index 000000000..d57f01217 --- /dev/null +++ b/vendor/github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/typed/scheduling/v1alpha1/scheduling_client.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1" + "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1alpha1Interface interface { + RESTClient() rest.Interface + PodGroupsGetter + QueuesGetter +} + +// SchedulingV1alpha1Client is used to interact with features provided by the scheduling group. +type SchedulingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1alpha1Client) PodGroups(namespace string) PodGroupInterface { + return newPodGroups(c, namespace) +} + +func (c *SchedulingV1alpha1Client) Queues() QueueInterface { + return newQueues(c) +} + +// NewForConfig creates a new SchedulingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1alpha1Client { + return &SchedulingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +}
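
As a quick illustration of the vendored API surface this change pulls in, here is a minimal sketch of exercising the kube-batch clientset outside the operator. The kubeconfig path, namespace, PodGroup name, and MinMember value are illustrative assumptions; the constructor and the PodGroups API mirror what createClientSets and SyncPodGroup use in the diff above.

package main

import (
	"fmt"

	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	kubebatchclient "github.com/kubernetes-sigs/kube-batch/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed kubeconfig path; the operator itself builds its rest.Config in server.go.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}

	// Same constructor that createClientSets now calls for the kube-batch clientset.
	kubeBatchClientSet, err := kubebatchclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Create a PodGroup the way SyncPodGroup does: one per job, with MinMember
	// set to the number of replicas that must be schedulable together.
	pg := &v1alpha1.PodGroup{
		ObjectMeta: metav1.ObjectMeta{Name: "pytorch-dist-example"},
		Spec:       v1alpha1.PodGroupSpec{MinMember: 3},
	}
	created, err := kubeBatchClientSet.SchedulingV1alpha1().PodGroups("default").Create(pg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created PodGroup %s with MinMember=%d\n", created.Name, created.Spec.MinMember)
}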
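
And a sketch of how a job controller could tie the pieces together once gang scheduling is enabled: ensure the PodGroup exists via SyncPodGroup, point each pod at it through the kube-batch group-name annotation from labels.go, and remove the PodGroup with DeletePodGroup when the job is cleaned up. ensureGangScheduling and cleanupGangScheduling are hypothetical helper names for this sketch, not functions introduced by the change.

package gangsched

import (
	"github.com/kubeflow/tf-operator/pkg/common/jobcontroller"
	"github.com/kubernetes-sigs/kube-batch/pkg/apis/scheduling/v1alpha1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ensureGangScheduling creates (or fetches) the job's PodGroup and annotates the
// pod template so kube-batch treats the job's replicas as one schedulable unit.
func ensureGangScheduling(jc *jobcontroller.JobController, job metav1.Object, minAvailable int32, podTemplate *v1.PodTemplateSpec) error {
	if _, err := jc.SyncPodGroup(job, minAvailable); err != nil {
		return err
	}
	if podTemplate.Annotations == nil {
		podTemplate.Annotations = map[string]string{}
	}
	// Pods carrying the same group-name annotation are held by kube-batch until
	// MinMember of them can be placed at once.
	podTemplate.Annotations[v1alpha1.GroupNameAnnotationKey] = job.GetName()
	return nil
}

// cleanupGangScheduling removes the job's PodGroup when the job is deleted or finished.
func cleanupGangScheduling(jc *jobcontroller.JobController, job metav1.Object) error {
	return jc.DeletePodGroup(job)
}

Note that SyncPodGroup, as added in jobcontroller.go above, first does a Get and returns the existing PodGroup if one is already present, so a helper like ensureGangScheduling can be called on every reconcile without creating duplicates.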