From d7c946de297c69d09c47035a514cb16dc74f077b Mon Sep 17 00:00:00 2001 From: Maulik Soneji Date: Mon, 21 Jun 2021 14:14:30 +0530 Subject: [PATCH 1/6] feat: implement replay feature without backup and validation --- .gitignore | 1 + cmd/replay.go | 16 ++ cmd/server/server.go | 4 + config/config.go | 5 +- core/tree/tree_node.go | 7 + ext/scheduler/airflow/airflow.go | 54 +++++++ ext/scheduler/airflow/airflow_test.go | 60 ++++++++ ext/scheduler/airflow2/airflow.go | 32 ++++ ext/scheduler/airflow2/airflow_test.go | 55 +++++++ job/replay.go | 30 +++- job/replay_manager.go | 142 ++++++++++++++++++ job/replay_manager_test.go | 1 + job/replay_test.go | 13 +- job/replay_worker.go | 66 ++++++++ job/replay_worker_test.go | 55 +++++++ job/service.go | 3 + job/service_test.go | 20 +-- mock/replay.go | 20 +++ models/replay.go | 41 +++++ models/scheduler.go | 3 + .../000009_create_replay_table.down.sql | 1 + .../000009_create_replay_table.up.sql | 11 ++ store/postgres/replay_repository.go | 83 ++++++++++ store/postgres/replay_repository_test.go | 77 ++++++++++ 24 files changed, 779 insertions(+), 21 deletions(-) create mode 100644 job/replay_manager.go create mode 100644 job/replay_manager_test.go create mode 100644 job/replay_worker.go create mode 100644 job/replay_worker_test.go create mode 100644 mock/replay.go create mode 100644 models/replay.go create mode 100644 resources/pack/migrations/000009_create_replay_table.down.sql create mode 100644 resources/pack/migrations/000009_create_replay_table.up.sql create mode 100644 store/postgres/replay_repository.go create mode 100644 store/postgres/replay_repository_test.go diff --git a/.gitignore b/.gitignore index 198dcdd879..ee19fe6a64 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ jobs /resources/resource_fs.go __pycache__ .optimus.yaml +coverage.txt # git ignore generate files related to gRPC and proto /proton/ diff --git a/cmd/replay.go b/cmd/replay.go index 68add55571..fda6433a05 100644 --- a/cmd/replay.go +++ b/cmd/replay.go @@ -6,6 +6,8 @@ import ( "strings" "time" + "github.com/AlecAivazis/survey/v2" + "github.com/odpf/optimus/core/set" pb "github.com/odpf/optimus/api/proto/odpf/optimus" @@ -108,6 +110,20 @@ ReplayDryRun date ranges are inclusive. 
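The hunk below adds an interactive confirmation step to the replay command: after the dry-run output, a survey prompt must be answered before the command proceeds (the actual submission of the replay request is not wired up yet in this first patch; the command still returns after confirmation). The following is a minimal, self-contained sketch of the same prompt pattern with AlecAivazis/survey/v2. Using survey.Confirm, which fills a bool directly, is an assumption made for illustration; the hunk itself binds a survey.Select with "Yes"/"No" options.

package main

import (
	"fmt"

	"github.com/AlecAivazis/survey/v2"
)

// confirmReplay asks the operator for a yes/no confirmation before a replay
// request would be sent. Hypothetical helper, for illustration only; not part
// of this patch.
func confirmReplay(jobName string) (bool, error) {
	proceed := false
	prompt := &survey.Confirm{
		Message: fmt.Sprintf("Proceed with replay of %s?", jobName),
		Default: true,
	}
	if err := survey.AskOne(prompt, &proceed); err != nil {
		return false, err
	}
	return proceed, nil
}

func main() {
	ok, err := confirmReplay("sample_select")
	if err != nil || !ok {
		fmt.Println("aborting...")
		return
	}
	fmt.Println("proceeding with replay")
}
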
//if only dry run, exit now return nil } + + confirm := false + if err := survey.AskOne(&survey.Select{ + Message: "Proceed with replay?", + Options: []string{"Yes", "No"}, + Default: "Yes", + }, &confirm); err != nil { + return err + } + if !confirm { + l.Print("aborting...") + return nil + } + return nil } return reCmd diff --git a/cmd/server/server.go b/cmd/server/server.go index 4da68d2eb2..77837ef9e8 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -379,6 +379,9 @@ func Initialize(conf config.Provider) error { db: dbConn, projectResourceSpecRepoFac: projectResourceSpecRepoFac, } + replayRepo := postgres.NewReplayRepository(dbConn) + replayWorker := job.NewReplayWorker(replayRepo, models.Scheduler) + replayManager := job.NewManager(replayWorker, conf.GetServe().JobQueueSize) // runtime service instance over grpc pb.RegisterRuntimeServiceServer(grpcServer, v1handler.NewRuntimeServiceServer( @@ -394,6 +397,7 @@ func Initialize(conf config.Provider) error { priorityResolver, metaSvcFactory, &projectJobSpecRepoFac, + replayManager, ), datastore.NewService(&resourceSpecRepoFac, models.DatastoreRegistry), projectRepoFac, diff --git a/config/config.go b/config/config.go index bf000e5d8a..d5ecf9af31 100644 --- a/config/config.go +++ b/config/config.go @@ -96,8 +96,9 @@ type ServerConfig struct { // random 32 character hash used for encrypting secrets AppKey string `yaml:"app_key"` - DB DBConfig `yaml:"db"` - Metadata MetadataConfig `yaml:"metadata"` + DB DBConfig `yaml:"db"` + Metadata MetadataConfig `yaml:"metadata"` + JobQueueSize int `yaml:"job_queue_size"` } type DBConfig struct { diff --git a/core/tree/tree_node.go b/core/tree/tree_node.go index 5685f64b15..3fdb7e5584 100644 --- a/core/tree/tree_node.go +++ b/core/tree/tree_node.go @@ -15,6 +15,13 @@ type TreeNode struct { Runs set.Set } +func (t *TreeNode) GetAllNodes(allNodes map[string]*TreeNode) { + allNodes[t.Data.GetName()] = t + for _, dep := range t.Dependents { + dep.GetAllNodes(allNodes) + } +} + func (t *TreeNode) GetName() string { return t.Data.GetName() } diff --git a/ext/scheduler/airflow/airflow.go b/ext/scheduler/airflow/airflow.go index d9f20007d8..963a46de4d 100644 --- a/ext/scheduler/airflow/airflow.go +++ b/ext/scheduler/airflow/airflow.go @@ -29,6 +29,7 @@ var resBaseDAG []byte const ( baseLibFileName = "__lib.py" dagStatusURL = "api/experimental/dags/%s/dag_runs" + dagRunClearURL = "clear&dag_id=%s&start_date=%s&end_date=%s" ) type HTTPClient interface { @@ -170,3 +171,56 @@ func (a *scheduler) GetJobStatus(ctx context.Context, projSpec models.ProjectSpe return jobStatus, nil } + +func (a *scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobName string, startDate, endDate time.Time) error { + schdHost, ok := projSpec.Config[models.ProjectSchedulerHost] + if !ok { + return errors.Errorf("scheduler host not set for %s", projSpec.Name) + } + + schdHost = strings.Trim(schdHost, "/") + airflowDateFormat := "2006-01-02T15:04:05" + utcTimezone, _ := time.LoadLocation("UTC") + fetchURL := fmt.Sprintf( + fmt.Sprintf("%s/%s", schdHost, dagRunClearURL), + jobName, + startDate.In(utcTimezone).Format(airflowDateFormat), + endDate.In(utcTimezone).Format(airflowDateFormat)) + request, err := http.NewRequest(http.MethodGet, fetchURL, nil) + if err != nil { + return errors.Wrapf(err, "failed to build http request for %s", fetchURL) + } + + resp, err := a.httpClient.Do(request) + if err != nil { + return errors.Wrapf(err, "failed to clear airflow dag runs from %s", fetchURL) + } + if resp.StatusCode 
!= http.StatusOK { + return errors.Errorf("failed to clear airflow dag runs from %s", fetchURL) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return errors.Wrap(err, "failed to read airflow response") + } + + //{ + // "http_response_code": 200, + // "status": "status" + //} + responseJSON := map[string]interface{}{} + err = json.Unmarshal(body, &responseJSON) + if err != nil { + return errors.Wrapf(err, "json error: %s", string(body)) + } + + responseFields := []string{"http_response_code", "status"} + for _, field := range responseFields { + _, ok := responseJSON[field] + if !ok { + return errors.Errorf("failed to find required response fields %s in %s", field, responseJSON) + } + } + return nil +} diff --git a/ext/scheduler/airflow/airflow_test.go b/ext/scheduler/airflow/airflow_test.go index 087f1a9b6f..fa64d82420 100644 --- a/ext/scheduler/airflow/airflow_test.go +++ b/ext/scheduler/airflow/airflow_test.go @@ -7,6 +7,9 @@ import ( "io/ioutil" "net/http" "testing" + "time" + + "github.com/odpf/optimus/job" "github.com/odpf/optimus/store" "github.com/stretchr/testify/mock" @@ -164,4 +167,61 @@ func TestAirflow(t *testing.T) { assert.Len(t, status, 0) }) }) + t.Run("Clear", func(t *testing.T) { + host := "http://airflow.example.io" + startDate := "2021-05-20" + startDateTime, _ := time.Parse(job.ReplayDateFormat, startDate) + endDate := "2021-05-25" + endDateTime, _ := time.Parse(job.ReplayDateFormat, endDate) + + t.Run("should return job status with valid args", func(t *testing.T) { + respString := ` +{ + "http_response_code": 200, + "status": "success" +}` + // create a new reader with JSON + r := ioutil.NopCloser(bytes.NewReader([]byte(respString))) + client := &MockHttpClient{ + DoFunc: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: r, + }, nil + }, + } + + air := airflow.NewScheduler(nil, client) + err := air.Clear(ctx, models.ProjectSpec{ + Name: "test-proj", + Config: map[string]string{ + models.ProjectSchedulerHost: host, + }, + }, "sample_select", startDateTime, endDateTime) + + assert.Nil(t, err) + }) + t.Run("should fail if host fails to return OK", func(t *testing.T) { + respString := `INTERNAL ERROR` + r := ioutil.NopCloser(bytes.NewReader([]byte(respString))) + client := &MockHttpClient{ + DoFunc: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: r, + }, nil + }, + } + + air := airflow.NewScheduler(nil, client) + err := air.Clear(ctx, models.ProjectSpec{ + Name: "test-proj", + Config: map[string]string{ + models.ProjectSchedulerHost: host, + }, + }, "sample_select", startDateTime, endDateTime) + + assert.NotNil(t, err) + }) + }) } diff --git a/ext/scheduler/airflow2/airflow.go b/ext/scheduler/airflow2/airflow.go index 778a3f9745..de158e2014 100644 --- a/ext/scheduler/airflow2/airflow.go +++ b/ext/scheduler/airflow2/airflow.go @@ -29,6 +29,7 @@ var resBaseDAG []byte const ( baseLibFileName = "__lib.py" dagStatusUrl = "api/v1/dags/%s/dagRuns" + dagRunClearURL = "api/v1/dags/%s/clearTaskInstances" ) type HttpClient interface { @@ -177,3 +178,34 @@ func (a *scheduler) GetJobStatus(ctx context.Context, projSpec models.ProjectSpe return jobStatus, nil } + +func (a *scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobName string, startDate, endDate time.Time) error { + schdHost, ok := 
projSpec.Config[models.ProjectSchedulerHost] + if !ok { + return errors.Errorf("scheduler host not set for %s", projSpec.Name) + } + + schdHost = strings.Trim(schdHost, "/") + airflowDateFormat := "2006-01-02T15:04:05" + var jsonStr = []byte(fmt.Sprintf(`{"start_date":"%s", "end_date": "%s"}`, + startDate.UTC().Format(airflowDateFormat), + endDate.UTC().Format(airflowDateFormat))) + postURL := fmt.Sprintf( + fmt.Sprintf("%s/%s", schdHost, dagRunClearURL), + jobName) + request, err := http.NewRequest(http.MethodPost, postURL, bytes.NewBuffer(jsonStr)) + if err != nil { + return errors.Wrapf(err, "failed to build http request for %s", postURL) + } + + resp, err := a.httpClient.Do(request) + if err != nil { + return errors.Wrapf(err, "failed to clear airflow dag runs from %s", postURL) + } + if resp.StatusCode != http.StatusOK { + return errors.Errorf("failed to clear airflow dag runs from %s", postURL) + } + defer resp.Body.Close() + + return nil +} diff --git a/ext/scheduler/airflow2/airflow_test.go b/ext/scheduler/airflow2/airflow_test.go index 1adcfa1834..03d220c16e 100644 --- a/ext/scheduler/airflow2/airflow_test.go +++ b/ext/scheduler/airflow2/airflow_test.go @@ -7,6 +7,9 @@ import ( "io/ioutil" "net/http" "testing" + "time" + + "github.com/odpf/optimus/job" "github.com/odpf/optimus/store" "github.com/stretchr/testify/mock" @@ -167,4 +170,56 @@ func TestAirflow2(t *testing.T) { assert.Len(t, status, 0) }) }) + t.Run("Clear", func(t *testing.T) { + host := "http://airflow.example.io" + startDate := "2021-05-20" + startDateTime, _ := time.Parse(job.ReplayDateFormat, startDate) + endDate := "2021-05-25" + endDateTime, _ := time.Parse(job.ReplayDateFormat, endDate) + + t.Run("should clear dagrun state successfully", func(t *testing.T) { + // create a new reader with JSON + r := ioutil.NopCloser(bytes.NewReader([]byte(""))) + client := &MockHttpClient{ + DoFunc: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: r, + }, nil + }, + } + + air := airflow2.NewScheduler(nil, client) + err := air.Clear(ctx, models.ProjectSpec{ + Name: "test-proj", + Config: map[string]string{ + models.ProjectSchedulerHost: host, + }, + }, "sample_select", startDateTime, endDateTime) + + assert.Nil(t, err) + }) + t.Run("should fail if host fails to return OK", func(t *testing.T) { + respString := `INTERNAL ERROR` + r := ioutil.NopCloser(bytes.NewReader([]byte(respString))) + client := &MockHttpClient{ + DoFunc: func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusInternalServerError, + Body: r, + }, nil + }, + } + + air := airflow2.NewScheduler(nil, client) + err := air.Clear(ctx, models.ProjectSpec{ + Name: "test-proj", + Config: map[string]string{ + models.ProjectSchedulerHost: host, + }, + }, "sample_select", startDateTime, endDateTime) + + assert.NotNil(t, err) + }) + }) } diff --git a/job/replay.go b/job/replay.go index 42a219b853..780ea1e92d 100644 --- a/job/replay.go +++ b/job/replay.go @@ -26,7 +26,7 @@ func (srv *Service) ReplayDryRun(namespace models.NamespaceSpec, replayJobSpec m dagSpecMap[currSpec.Name] = currSpec } - rootInstance, err := prepareTree(dagSpecMap, replayJobSpec.Name, start, end) + rootInstance, err := PrepareTree(dagSpecMap, replayJobSpec.Name, start, end) if err != nil { return nil, err } @@ -34,8 +34,32 @@ func (srv *Service) ReplayDryRun(namespace models.NamespaceSpec, replayJobSpec m return rootInstance, nil } -// prepareTree 
creates a execution tree for replay operation -func prepareTree(dagSpecMap map[string]models.JobSpec, replayJobName string, start, end time.Time) (*tree.TreeNode, error) { +func (srv *Service) Replay(namespace models.NamespaceSpec, replayJobSpec models.JobSpec, start, end time.Time) (string, error) { + projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(namespace.ProjectSpec) + jobSpecs, err := srv.getDependencyResolvedSpecs(namespace.ProjectSpec, projectJobSpecRepo, nil) + if err != nil { + return "", err + } + dagSpecMap := make(map[string]models.JobSpec) + for _, currSpec := range jobSpecs { + dagSpecMap[currSpec.Name] = currSpec + } + replayRequest := models.ReplayRequestInput{ + Job: replayJobSpec, + Start: start, + End: end, + Project: namespace.ProjectSpec, + DagSpecMap: dagSpecMap, + } + replayUUID, err := srv.replayManager.Replay(replayRequest) + if err != nil { + return "", err + } + return replayUUID, nil +} + +// PrepareTree creates a execution tree for replay operation +func PrepareTree(dagSpecMap map[string]models.JobSpec, replayJobName string, start, end time.Time) (*tree.TreeNode, error) { replayJobSpec, found := dagSpecMap[replayJobName] if !found { return nil, fmt.Errorf("couldn't find any job with name %s", replayJobName) diff --git a/job/replay_manager.go b/job/replay_manager.go new file mode 100644 index 0000000000..71f1a6a8ee --- /dev/null +++ b/job/replay_manager.go @@ -0,0 +1,142 @@ +package job + +import ( + "context" + "sync" + + "github.com/google/uuid" + "github.com/odpf/optimus/core/bus" + "github.com/odpf/optimus/core/logger" + "github.com/odpf/optimus/models" + "github.com/pkg/errors" +) + +var ( + // ErrRequestQueueFull signifies that the deployment manager's + // request queue is full + ErrRequestQueueFull = errors.New("request queue is full") +) + +type ReplayManager interface { + Init() + Replay(models.ReplayRequestInput) (string, error) +} + +// Manager for replaying operation(s). +// Offers an asynchronous interface to pipeline, with a fixed size request queue +// Only one replay happens at one time, any other request is queued, and executed +// when any in-progress operation is complete. +// The zero value of a Manager is an invalid Manager. Use `NewManager` constructor for +// creating a manager. 
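// A Manager is assembled in cmd/server/server.go as
//
//	replayRepo := postgres.NewReplayRepository(dbConn)
//	replayWorker := job.NewReplayWorker(replayRepo, models.Scheduler)
//	replayManager := job.NewManager(replayWorker, conf.GetServe().JobQueueSize)
//
// and handed to job.NewService; Service.Replay forwards a
// models.ReplayRequestInput to Manager.Replay, which enqueues it for the
// worker and records the accepted ID in requestMap until the worker publishes
// EvtRecordInsertedInDB or EvtFailedToPrepareForReplay on the event bus.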
+type Manager struct { + // wait group to synchronise on workers + wg sync.WaitGroup + mu sync.Mutex + + // request queue, used by workers + requestQ chan models.ReplayRequestInput + // request map, used for verifying if a request is + // in queue without actually consuming it + requestMap map[uuid.UUID]bool + + //listen for replay requests inserted in db + clearRequestMapListener chan interface{} + + //request worker + replayWorker ReplayWorker +} + +// Replay a request asynchronously, returns a replay id that can +// can be used to query its status +func (m *Manager) Replay(reqInput models.ReplayRequestInput) (string, error) { + uuidOb, err := uuid.NewRandom() + if err != nil { + return "", err + } + reqInput.ID = uuidOb + + // try sending the job request down the request queue + // if full return error indicating that we don't have capacity + // to process this request at the moment + select { + case m.requestQ <- reqInput: + m.mu.Lock() + //request pushed to worker + m.requestMap[reqInput.ID] = true + m.mu.Unlock() + + return reqInput.ID.String(), nil + default: + return "", ErrRequestQueueFull + } +} + +// start a worker goroutine that runs the deployment pipeline in background +func (m *Manager) spawnServiceWorker() { + m.wg.Add(1) + go func() { + defer m.wg.Done() + for reqInput := range m.requestQ { + logger.I("worker picked up the request for ", reqInput.Project.Name) + ctx := context.Background() + + if err := m.replayWorker.Process(ctx, reqInput); err != nil { + //do something about this error + logger.E(errors.Wrap(err, "worker failed to process")) + } + } + }() +} + +//Close stops consuming any new request +func (m *Manager) Close() error { + if m.requestQ != nil { + //stop accepting any more requests + close(m.requestQ) + } + + //wait for request worker to finish + m.wg.Wait() + + _ = bus.Stop(EvtRecordInsertedInDB, m.clearRequestMapListener) + _ = bus.Stop(EvtFailedToPrepareForReplay, m.clearRequestMapListener) + if m.clearRequestMapListener != nil { + close(m.clearRequestMapListener) + } + return nil +} + +func (m *Manager) Init() { + logger.I("starting replay workers") + m.spawnServiceWorker() + + // listen for replay request being inserted in db + bus.Listen(EvtRecordInsertedInDB, m.clearRequestMapListener) + // listen when replay failed to even prepare to start + bus.Listen(EvtFailedToPrepareForReplay, m.clearRequestMapListener) + go func() { + for { + raw, ok := <-m.clearRequestMapListener + if !ok { + return + } + + ID := raw.(uuid.UUID) + m.mu.Lock() + delete(m.requestMap, ID) + m.mu.Unlock() + } + }() +} + +// NewManager constructs a new instance of Manager +func NewManager(worker ReplayWorker, size int) *Manager { + mgr := &Manager{ + replayWorker: worker, + requestMap: make(map[uuid.UUID]bool), + requestQ: make(chan models.ReplayRequestInput, size), + clearRequestMapListener: make(chan interface{}), + } + mgr.Init() + return mgr +} diff --git a/job/replay_manager_test.go b/job/replay_manager_test.go new file mode 100644 index 0000000000..8d7765aa64 --- /dev/null +++ b/job/replay_manager_test.go @@ -0,0 +1 @@ +package job diff --git a/job/replay_test.go b/job/replay_test.go index bd4bb06dcb..e78054c324 100644 --- a/job/replay_test.go +++ b/job/replay_test.go @@ -5,13 +5,14 @@ import ( "testing" "time" + "github.com/odpf/optimus/job" + "github.com/odpf/optimus/core/tree" "github.com/hashicorp/go-multierror" "github.com/pkg/errors" "github.com/google/uuid" - 
"github.com/odpf/optimus/job" "github.com/odpf/optimus/mock" "github.com/odpf/optimus/models" "github.com/stretchr/testify/assert" @@ -109,7 +110,7 @@ func TestReplay(t *testing.T) { replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac) + jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) _, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec1], replayStart, replayEnd) assert.NotNil(t, err) @@ -137,7 +138,7 @@ func TestReplay(t *testing.T) { replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac) + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) _, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec1], replayStart, replayEnd) assert.NotNil(t, err) @@ -174,7 +175,7 @@ func TestReplay(t *testing.T) { replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac) + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) _, err := jobSvc.ReplayDryRun(namespaceSpec, cyclicDagSpec[0], replayStart, replayEnd) assert.NotNil(t, err) @@ -203,7 +204,7 @@ func TestReplay(t *testing.T) { compiler := new(mock.Compiler) defer compiler.AssertExpectations(t) - jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac) + jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") @@ -249,7 +250,7 @@ func TestReplay(t *testing.T) { compiler := new(mock.Compiler) defer compiler.AssertExpectations(t) - jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac) + jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") diff --git a/job/replay_worker.go b/job/replay_worker.go new file mode 100644 index 0000000000..d547e1298d --- /dev/null +++ b/job/replay_worker.go @@ -0,0 +1,66 @@ +package job + +import ( + "context" + + "github.com/odpf/optimus/core/bus" + "github.com/odpf/optimus/core/tree" + "github.com/odpf/optimus/models" + "github.com/pkg/errors" +) + +const ( + // EvtRecordInsertedInDB is emitted to event bus when a replay record is inserted in db + // it passes replay ID as string in bus + EvtRecordInsertedInDB = "replay_record_inserted_in_db" + + // EvtFailedToPrepareForReplay is emitted to event bus when a replay is failed to even prepare + // to execute, it passes replay ID as string in bus + EvtFailedToPrepareForReplay = "replay_request_failed_to_prepare" +) + +type ReplayWorker interface { + Process(context.Context, models.ReplayRequestInput) error +} + +type replayWorker struct { + replayRepo 
models.ReplayRepository + scheduler models.SchedulerUnit +} + +func (w *replayWorker) Process(ctx context.Context, input models.ReplayRequestInput) (err error) { + // save replay request + replay := models.ReplaySpec{ + ID: input.ID, + Job: input.Job, + StartDate: input.Start, + EndDate: input.End, + Status: models.ReplayStatusAccepted, + Project: input.Project, + } + if err = w.replayRepo.Insert(&replay); err != nil { + bus.Post(EvtFailedToPrepareForReplay, input.ID) + return + } + + replayTree, err := PrepareTree(input.DagSpecMap, input.Job.Name, input.Start, input.End) + if err != nil { + return err + } + + replayDagsMap := make(map[string]*tree.TreeNode) + replayTree.GetAllNodes(replayDagsMap) + + for jobName := range replayDagsMap { + if err = w.scheduler.Clear(ctx, input.Project, jobName, input.Start, input.End); err != nil { + return errors.Wrapf(err, "error while clearing dag runs for job %s", jobName) + } + } + + bus.Post(EvtRecordInsertedInDB, replay.ID) + return nil +} + +func NewReplayWorker(replayRepo models.ReplayRepository, scheduler models.SchedulerUnit) *replayWorker { + return &replayWorker{replayRepo: replayRepo, scheduler: scheduler} +} diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go new file mode 100644 index 0000000000..1cf63dec9d --- /dev/null +++ b/job/replay_worker_test.go @@ -0,0 +1,55 @@ +package job_test + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/odpf/optimus/job" + "github.com/odpf/optimus/mock" + "github.com/odpf/optimus/models" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestReplayWorker(t *testing.T) { + startDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-22") + endDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-26") + currUUID := uuid.Must(uuid.NewRandom()) + replayRequest := models.ReplayRequestInput{ + ID: currUUID, + Job: models.JobSpec{ + Name: "job-name", + }, + Start: startDate, + End: endDate, + Project: models.ProjectSpec{ + Name: "project-name", + }, + DagSpecMap: make(map[string]models.JobSpec), + } + replaySpecToInsert := &models.ReplaySpec{ + ID: currUUID, + StartDate: startDate, + EndDate: endDate, + Status: models.ReplayStatusAccepted, + Project: replayRequest.Project, + Job: replayRequest.Job, + } + t.Run("Process", func(t *testing.T) { + t.Run("should throw an error when replayRepo throws an error", func(t *testing.T) { + ctx := context.Background() + replayRepository := new(mock.ReplayRepository) + defer replayRepository.AssertExpectations(t) + errMessage := "replay repo error" + + replayRepository.On("Insert", replaySpecToInsert).Return(errors.New(errMessage)) + + worker := job.NewReplayWorker(replayRepository, nil) + err := worker.Process(ctx, replayRequest) + assert.NotNil(t, err) + assert.Equal(t, errMessage, err.Error()) + }) + }) +} diff --git a/job/service.go b/job/service.go index b4d020545d..4515b0ac58 100644 --- a/job/service.go +++ b/job/service.go @@ -63,6 +63,7 @@ type Service struct { priorityResolver PriorityResolver metaSvcFactory meta.MetaSvcFactory projectJobSpecRepoFactory ProjectJobSpecRepoFactory + replayManager ReplayManager Now func() time.Time assetCompiler AssetCompiler @@ -471,6 +472,7 @@ func NewService(jobSpecRepoFactory SpecRepoFactory, jobRepoFact JobRepoFactory, compiler models.JobCompiler, assetCompiler AssetCompiler, dependencyResolver DependencyResolver, priorityResolver PriorityResolver, 
metaSvcFactory meta.MetaSvcFactory, projectJobSpecRepoFactory ProjectJobSpecRepoFactory, + replayManager ReplayManager, ) *Service { return &Service{ jobSpecRepoFactory: jobSpecRepoFactory, @@ -480,6 +482,7 @@ func NewService(jobSpecRepoFactory SpecRepoFactory, jobRepoFact JobRepoFactory, priorityResolver: priorityResolver, metaSvcFactory: metaSvcFactory, projectJobSpecRepoFactory: projectJobSpecRepoFactory, + replayManager: replayManager, assetCompiler: assetCompiler, Now: time.Now, diff --git a/job/service_test.go b/job/service_test.go index 5445f26a9f..67e02dc0f3 100644 --- a/job/service_test.go +++ b/job/service_test.go @@ -54,7 +54,7 @@ func TestService(t *testing.T) { projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) defer projJobSpecRepoFac.AssertExpectations(t) - svc := job.NewService(repoFac, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac) + svc := job.NewService(repoFac, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) err := svc.Create(namespaceSpec, jobSpec) assert.Nil(t, err) }) @@ -86,7 +86,7 @@ func TestService(t *testing.T) { repoFac.On("New", namespaceSpec).Return(repo) defer repoFac.AssertExpectations(t) - svc := job.NewService(repoFac, nil, nil, dumpAssets, nil, nil, nil, nil) + svc := job.NewService(repoFac, nil, nil, dumpAssets, nil, nil, nil, nil, nil) err := svc.Create(namespaceSpec, jobSpec) assert.NotNil(t, err) }) @@ -195,7 +195,7 @@ func TestService(t *testing.T) { jobRepo.On("Save", ctx, compiledJob).Return(nil) } - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac, nil) err := svc.Sync(ctx, namespaceSpec, nil) assert.Nil(t, err) }) @@ -311,7 +311,7 @@ func TestService(t *testing.T) { // delete unwanted jobRepo.On("Delete", ctx, namespaceSpec, jobs[1].Name).Return(nil) - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac, nil) err := svc.Sync(ctx, namespaceSpec, nil) assert.Nil(t, err) }) @@ -359,7 +359,7 @@ func TestService(t *testing.T) { errors.New("error test-2")) defer depenResolver.AssertExpectations(t) - svc := job.NewService(jobSpecRepoFac, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) err := svc.Sync(ctx, namespaceSpec, nil) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "2 errors occurred")) @@ -467,7 +467,7 @@ func TestService(t *testing.T) { jobRepo.On("Save", ctx, compiledJob).Return(nil) } - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, metaSvcFact, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, metaSvcFact, projJobSpecRepoFac, nil) err := svc.Sync(ctx, namespaceSpec, nil) assert.Nil(t, err) }) @@ -541,7 +541,7 @@ func TestService(t *testing.T) { // delete unwanted jobSpecRepo.On("Delete", jobSpecsBase[0].Name).Return(nil) - svc := job.NewService(jobSpecRepoFac, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) 
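// The trailing nil argument added to job.NewService throughout these tests is
// the new replay manager dependency; none of these flows exercise replay, so a
// nil manager is sufficient.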
err := svc.KeepOnly(namespaceSpec, toKeep, nil) assert.Nil(t, err) }) @@ -646,7 +646,7 @@ func TestService(t *testing.T) { compiler.On("Compile", namespaceSpec, jobSpecsAfterPriorityResolve[idx]).Return(compiledJob, nil) } - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac, nil) compiledJob, err := svc.Dump(namespaceSpec, jobSpecsBase[0]) assert.Nil(t, err) assert.Equal(t, "come string", string(compiledJob.Contents)) @@ -758,7 +758,7 @@ func TestService(t *testing.T) { jobRepo.On("Save", ctx, compiledJob).Return(nil) } - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac, nil) err := svc.Delete(ctx, namespaceSpec, jobSpecsBase[0]) assert.Nil(t, err) }) @@ -847,7 +847,7 @@ func TestService(t *testing.T) { compiler := new(mock.Compiler) defer compiler.AssertExpectations(t) - svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac) + svc := job.NewService(jobSpecRepoFac, jobRepoFac, compiler, dumpAssets, depenResolver, priorityResolver, nil, projJobSpecRepoFac, nil) err := svc.Delete(ctx, namespaceSpec, jobSpecsBase[0]) assert.NotNil(t, err) assert.Equal(t, "cannot delete job test since it's dependency of job downstream-test", err.Error()) diff --git a/mock/replay.go b/mock/replay.go new file mode 100644 index 0000000000..5fe4fdf17c --- /dev/null +++ b/mock/replay.go @@ -0,0 +1,20 @@ +package mock + +import ( + "github.com/google/uuid" + "github.com/odpf/optimus/models" + "github.com/stretchr/testify/mock" +) + +type ReplayRepository struct { + mock.Mock +} + +func (repo *ReplayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { + called := repo.Called(id) + return called.Get(0).(models.ReplaySpec), called.Error(1) +} + +func (repo *ReplayRepository) Insert(replay *models.ReplaySpec) error { + return repo.Called(replay).Error(0) +} diff --git a/models/replay.go b/models/replay.go new file mode 100644 index 0000000000..6c1c912dd9 --- /dev/null +++ b/models/replay.go @@ -0,0 +1,41 @@ +package models + +import ( + "time" + + "github.com/google/uuid" +) + +const ( + // ReplayStatusAccepted worker picked up the request + ReplayStatusAccepted = "Accepted" +) + +type ReplayRequestInput struct { + ID uuid.UUID + Job JobSpec + Start time.Time + End time.Time + Project ProjectSpec + DagSpecMap map[string]JobSpec +} + +type ReplaySpec struct { + ID uuid.UUID + Job JobSpec + StartDate time.Time + EndDate time.Time + Status string + Message string + CommitID string + Project ProjectSpec +} + +type Syncer interface { + SyncReplayStatusWithAirflow(ReplaySpec) error +} + +type ReplayRepository interface { + Insert(replay *ReplaySpec) error + GetByID(id uuid.UUID) (ReplaySpec, error) +} diff --git a/models/scheduler.go b/models/scheduler.go index 2546730398..99f0be6575 100644 --- a/models/scheduler.go +++ b/models/scheduler.go @@ -37,6 +37,9 @@ type SchedulerUnit interface { // GetJobStatus should return the current and previous status of job GetJobStatus(ctx context.Context, projSpec ProjectSpec, jobName string) ([]JobStatus, error) + + // Clear 
clears state of job between provided start and end dates + Clear(ctx context.Context, projSpec ProjectSpec, jobName string, startDate, endDate time.Time) error } type JobStatusState string diff --git a/resources/pack/migrations/000009_create_replay_table.down.sql b/resources/pack/migrations/000009_create_replay_table.down.sql new file mode 100644 index 0000000000..ee9125596c --- /dev/null +++ b/resources/pack/migrations/000009_create_replay_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS replay; diff --git a/resources/pack/migrations/000009_create_replay_table.up.sql b/resources/pack/migrations/000009_create_replay_table.up.sql new file mode 100644 index 0000000000..bcb1d9a804 --- /dev/null +++ b/resources/pack/migrations/000009_create_replay_table.up.sql @@ -0,0 +1,11 @@ +CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; +CREATE TABLE IF NOT EXISTS replay ( + id UUID PRIMARY KEY NOT NULL, + dag_id TEXT NOT NULL, + start_date TIMESTAMP WITH TIME ZONE NOT NULL, + end_date TIMESTAMP WITH TIME ZONE NOT NULL, + status TEXT NOT NULL, + message TEXT, + created_at TIMESTAMP WITH TIME ZONE NOT NULL, + updated_at TIMESTAMP WITH TIME ZONE NOT NULL +); diff --git a/store/postgres/replay_repository.go b/store/postgres/replay_repository.go new file mode 100644 index 0000000000..db8c2326ab --- /dev/null +++ b/store/postgres/replay_repository.go @@ -0,0 +1,83 @@ +package postgres + +import ( + "errors" + "time" + + "github.com/google/uuid" + "github.com/jinzhu/gorm" + "github.com/odpf/optimus/models" + "github.com/odpf/optimus/store" +) + +type Replay struct { + ID uuid.UUID `gorm:"primary_key;type:uuid"` + + JobID uuid.UUID `gorm:"not null"` + Job Job `gorm:"foreignKey:JobID"` + + ProjectID uuid.UUID `gorm:"not null"` + Project Project `gorm:"foreignKey:ProjectID"` + + StartDate time.Time + EndDate time.Time + Status string + Message string + CommitID string + + CreatedAt time.Time `gorm:"not null" json:"created_at"` + UpdatedAt time.Time `gorm:"not null" json:"updated_at"` +} + +func (p Replay) FromSpec(spec *models.ReplaySpec) (Replay, error) { + return Replay{ + ID: spec.ID, + JobID: spec.Job.ID, + ProjectID: spec.Project.ID, + StartDate: spec.StartDate, + EndDate: spec.EndDate, + Status: spec.Status, + CommitID: spec.CommitID, + Message: spec.Message, + }, nil +} + +func (p Replay) ToSpec() (models.ReplaySpec, error) { + return models.ReplaySpec{ + ID: p.ID, + Status: p.Status, + StartDate: p.StartDate, + EndDate: p.EndDate, + Message: p.Message, + CommitID: p.CommitID, + }, nil +} + +type replayRepository struct { + DB *gorm.DB +} + +func NewReplayRepository(db *gorm.DB) *replayRepository { + return &replayRepository{ + DB: db, + } +} + +func (repo *replayRepository) Insert(replay *models.ReplaySpec) error { + r, err := Replay{}.FromSpec(replay) + if err != nil { + return err + } + return repo.DB.Create(&r).Error +} + +func (repo *replayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { + var r Replay + if err := repo.DB.Where("id = ?", id).Find(&r).Error; err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + return models.ReplaySpec{}, store.ErrResourceNotFound + } + return models.ReplaySpec{}, err + } + return r.ToSpec() +} diff --git a/store/postgres/replay_repository_test.go b/store/postgres/replay_repository_test.go new file mode 100644 index 0000000000..5be85f62ac --- /dev/null +++ b/store/postgres/replay_repository_test.go @@ -0,0 +1,77 @@ +// +build !unit_test + +package postgres + +import ( + "os" + 
"testing" + "time" + + "github.com/google/uuid" + "github.com/jinzhu/gorm" + "github.com/odpf/optimus/job" + "github.com/odpf/optimus/models" + "github.com/stretchr/testify/assert" +) + +func TestReplayRepository(t *testing.T) { + DBSetup := func() *gorm.DB { + dbURL, ok := os.LookupEnv("TEST_OPTIMUS_DB_URL") + if !ok { + panic("unable to find TEST_OPTIMUS_DB_URL env var") + } + dbConn, err := Connect(dbURL, 1, 1) + if err != nil { + panic(err) + } + m, err := NewHTTPFSMigrator(dbURL) + if err != nil { + panic(err) + } + if err := m.Drop(); err != nil { + panic(err) + } + if err := Migrate(dbURL); err != nil { + panic(err) + } + + return dbConn + } + + jobSpec := models.JobSpec{ + Name: "job-name", + } + projectSpec := models.ProjectSpec{ + Name: "project-name", + } + + startTime, _ := time.Parse(job.ReplayDateFormat, "2020-01-15") + endTime, _ := time.Parse(job.ReplayDateFormat, "2020-01-20") + uuid := uuid.Must(uuid.NewRandom()) + testConfigs := []*models.ReplaySpec{ + { + ID: uuid, + Job: jobSpec, + StartDate: startTime, + EndDate: endTime, + Status: models.ReplayStatusAccepted, + Project: projectSpec, + }, + } + + t.Run("Insert", func(t *testing.T) { + db := DBSetup() + defer db.Close() + testModels := []*models.ReplaySpec{} + testModels = append(testModels, testConfigs...) + + repo := NewReplayRepository(db) + + err := repo.Insert(testModels[0]) + assert.Nil(t, err) + + checkModel, err := repo.GetByID(testModels[0].ID) + assert.Nil(t, err) + assert.Equal(t, uuid, checkModel.ID) + }) +} From 311337c21119eadc1044abe7a74abe384d95507c Mon Sep 17 00:00:00 2001 From: Maulik Soneji Date: Tue, 22 Jun 2021 12:32:46 +0530 Subject: [PATCH 2/6] fix: add replay endpoint --- Makefile | 2 +- api/handler/v1/runtime.go | 58 +- api/handler/v1/runtime_test.go | 10 +- api/proto/odpf/optimus/runtime_service.pb.go | 754 ++++++++++-------- .../odpf/optimus/runtime_service.pb.gw.go | 141 +++- .../odpf/optimus/runtime_service_grpc.pb.go | 48 +- cmd/replay.go | 77 +- ext/scheduler/airflow2/airflow.go | 6 +- job/replay.go | 38 +- job/replay_manager.go | 8 +- job/replay_test.go | 47 +- job/replay_worker.go | 33 +- job/replay_worker_test.go | 3 +- mock/job.go | 10 +- mock/replay.go | 4 + models/job.go | 6 +- models/replay.go | 5 +- .../000010_create_replay_table.down.sql | 0 .../000010_create_replay_table.up.sql | 5 +- store/postgres/replay_repository.go | 24 +- store/postgres/replay_repository_test.go | 4 - .../odpf/optimus/runtime_service.swagger.json | 62 ++ 22 files changed, 899 insertions(+), 446 deletions(-) rename resources/pack/migrations/000009_create_replay_table.down.sql => store/postgres/migrations/000010_create_replay_table.down.sql (100%) rename resources/pack/migrations/000009_create_replay_table.up.sql => store/postgres/migrations/000010_create_replay_table.up.sql (80%) diff --git a/Makefile b/Makefile index 6e13fe0c24..83dc2eb74b 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ pack-files: generate-proto: ## regenerate protos @echo " > cloning protobuf from odpf/proton" @rm -rf proton/ - @git -c advice.detachedHead=false clone https://github.com/odpf/proton --depth 1 --quiet --branch main + @git -c advice.detachedHead=false clone https://github.com/odpf/proton --depth 1 --quiet --branch DBTCH-1024 @echo " > generating protobuf" @echo " > info: make sure correct version of dependencies are installed using 'install'" @buf generate diff --git a/api/handler/v1/runtime.go 
b/api/handler/v1/runtime.go index d56e1fe50c..2b64a93b0e 100644 --- a/api/handler/v1/runtime.go +++ b/api/handler/v1/runtime.go @@ -727,7 +727,44 @@ func (sv *RuntimeServiceServer) ListResourceSpecification(ctx context.Context, r }, nil } -func (sv *RuntimeServiceServer) ReplayDryRun(ctx context.Context, req *pb.ReplayDryRunRequest) (*pb.ReplayDryRunResponse, error) { +func (sv *RuntimeServiceServer) ReplayDryRun(ctx context.Context, req *pb.ReplayRequest) (*pb.ReplayDryRunResponse, error) { + replayRequestInput, err := sv.parseReplayRequest(req) + if err != nil { + return nil, err + } + + rootNode, err := sv.jobSvc.ReplayDryRun(replayRequestInput) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + } + + node, err := sv.adapter.ToReplayExecutionTreeNode(rootNode) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + } + return &pb.ReplayDryRunResponse{ + Success: true, + Response: node, + }, nil +} + +func (sv *RuntimeServiceServer) Replay(ctx context.Context, req *pb.ReplayRequest) (*pb.ReplayResponse, error) { + replayRequestInput, err := sv.parseReplayRequest(req) + if err != nil { + return nil, err + } + + replayUUID, err := sv.jobSvc.Replay(replayRequestInput) + if err != nil { + return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + } + + return &pb.ReplayResponse{ + Id: replayUUID, + }, nil +} + +func (sv *RuntimeServiceServer) parseReplayRequest(req *pb.ReplayRequest) (*models.ReplayRequestInput, error) { projectRepo := sv.projectRepoFactory.New() projSpec, err := projectRepo.GetByName(req.GetProjectName()) if err != nil { @@ -760,20 +797,13 @@ func (sv *RuntimeServiceServer) ReplayDryRun(ctx context.Context, req *pb.Replay if endDate.Before(startDate) { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("replay end date cannot be before start date")) } - - rootNode, err := sv.jobSvc.ReplayDryRun(namespaceSpec, jobSpec, startDate, endDate) - if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + replayRequest := models.ReplayRequestInput{ + Job: jobSpec, + Start: startDate, + End: endDate, + Project: projSpec, } - - node, err := sv.adapter.ToReplayExecutionTreeNode(rootNode) - if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) - } - return &pb.ReplayDryRunResponse{ - Success: true, - Response: node, - }, nil + return &replayRequest, nil } func NewRuntimeServiceServer( diff --git a/api/handler/v1/runtime_test.go b/api/handler/v1/runtime_test.go index 5e20259192..cf7fd7ec98 100644 --- a/api/handler/v1/runtime_test.go +++ b/api/handler/v1/runtime_test.go @@ -1479,11 +1479,17 @@ func TestRuntimeServiceServer(t *testing.T) { }, }), } + replayRequestInput := &models.ReplayRequestInput{ + Job: jobSpec, + Start: startDate, + End: endDate, + Project: projectSpec, + } dagNode := tree.NewTreeNode(jobSpec) jobService := new(mock.JobService) jobService.On("GetByName", jobName, namespaceSpec).Return(jobSpec, nil) - jobService.On("ReplayDryRun", namespaceSpec, jobSpec, startDate, endDate).Return(dagNode, nil) + jobService.On("ReplayDryRun", replayRequestInput).Return(dagNode, nil) defer jobService.AssertExpectations(t) projectRepository := new(mock.ProjectRepository) @@ -1514,7 +1520,7 @@ func TestRuntimeServiceServer(t *testing.T) { nil, nil, ) - replayRequest := pb.ReplayDryRunRequest{ + 
replayRequest := pb.ReplayRequest{ ProjectName: projectName, Namespace: namespaceSpec.Name, JobName: jobName, diff --git a/api/proto/odpf/optimus/runtime_service.pb.go b/api/proto/odpf/optimus/runtime_service.pb.go index 4c7ff7d5d1..041376a69c 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.go +++ b/api/proto/odpf/optimus/runtime_service.pb.go @@ -3529,7 +3529,7 @@ func (x *UpdateResourceResponse) GetMessage() string { return "" } -type ReplayDryRunRequest struct { +type ReplayRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -3541,8 +3541,8 @@ type ReplayDryRunRequest struct { EndDate string `protobuf:"bytes,5,opt,name=end_date,json=endDate,proto3" json:"end_date,omitempty"` } -func (x *ReplayDryRunRequest) Reset() { - *x = ReplayDryRunRequest{} +func (x *ReplayRequest) Reset() { + *x = ReplayRequest{} if protoimpl.UnsafeEnabled { mi := &file_odpf_optimus_runtime_service_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3550,13 +3550,13 @@ func (x *ReplayDryRunRequest) Reset() { } } -func (x *ReplayDryRunRequest) String() string { +func (x *ReplayRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplayDryRunRequest) ProtoMessage() {} +func (*ReplayRequest) ProtoMessage() {} -func (x *ReplayDryRunRequest) ProtoReflect() protoreflect.Message { +func (x *ReplayRequest) ProtoReflect() protoreflect.Message { mi := &file_odpf_optimus_runtime_service_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -3568,40 +3568,40 @@ func (x *ReplayDryRunRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplayDryRunRequest.ProtoReflect.Descriptor instead. -func (*ReplayDryRunRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ReplayRequest.ProtoReflect.Descriptor instead. 
+func (*ReplayRequest) Descriptor() ([]byte, []int) { return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{55} } -func (x *ReplayDryRunRequest) GetProjectName() string { +func (x *ReplayRequest) GetProjectName() string { if x != nil { return x.ProjectName } return "" } -func (x *ReplayDryRunRequest) GetJobName() string { +func (x *ReplayRequest) GetJobName() string { if x != nil { return x.JobName } return "" } -func (x *ReplayDryRunRequest) GetNamespace() string { +func (x *ReplayRequest) GetNamespace() string { if x != nil { return x.Namespace } return "" } -func (x *ReplayDryRunRequest) GetStartDate() string { +func (x *ReplayRequest) GetStartDate() string { if x != nil { return x.StartDate } return "" } -func (x *ReplayDryRunRequest) GetEndDate() string { +func (x *ReplayRequest) GetEndDate() string { if x != nil { return x.EndDate } @@ -3726,6 +3726,53 @@ func (x *ReplayExecutionTreeNode) GetRuns() []*timestamp.Timestamp { return nil } +type ReplayResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ReplayResponse) Reset() { + *x = ReplayResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReplayResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplayResponse) ProtoMessage() {} + +func (x *ReplayResponse) ProtoReflect() protoreflect.Message { + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplayResponse.ProtoReflect.Descriptor instead. 
+func (*ReplayResponse) Descriptor() ([]byte, []int) { + return file_odpf_optimus_runtime_service_proto_rawDescGZIP(), []int{58} +} + +func (x *ReplayResponse) GetId() string { + if x != nil { + return x.Id + } + return "" +} + type ProjectSpecification_ProjectSecret struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3738,7 +3785,7 @@ type ProjectSpecification_ProjectSecret struct { func (x *ProjectSpecification_ProjectSecret) Reset() { *x = ProjectSpecification_ProjectSecret{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[59] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3751,7 +3798,7 @@ func (x *ProjectSpecification_ProjectSecret) String() string { func (*ProjectSpecification_ProjectSecret) ProtoMessage() {} func (x *ProjectSpecification_ProjectSecret) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[59] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3792,7 +3839,7 @@ type JobSpecification_Behavior struct { func (x *JobSpecification_Behavior) Reset() { *x = JobSpecification_Behavior{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[63] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3805,7 +3852,7 @@ func (x *JobSpecification_Behavior) String() string { func (*JobSpecification_Behavior) ProtoMessage() {} func (x *JobSpecification_Behavior) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[63] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3842,7 +3889,7 @@ type JobSpecification_Behavior_Retry struct { func (x *JobSpecification_Behavior_Retry) Reset() { *x = JobSpecification_Behavior_Retry{} if protoimpl.UnsafeEnabled { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[64] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3855,7 +3902,7 @@ func (x *JobSpecification_Behavior_Retry) String() string { func (*JobSpecification_Behavior_Retry) ProtoMessage() {} func (x *JobSpecification_Behavior_Retry) ProtoReflect() protoreflect.Message { - mi := &file_odpf_optimus_runtime_service_proto_msgTypes[64] + mi := &file_odpf_optimus_runtime_service_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4432,274 +4479,284 @@ var file_odpf_optimus_runtime_service_proto_rawDesc = []byte{ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xab, 0x01, 0x0a, 0x13, 0x52, 0x65, - 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 
0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, - 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x65, 0x6e, 0x64, 0x44, 0x61, 0x74, 0x65, 0x22, 0x73, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x61, - 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x64, - 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x0d, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, + 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x5f, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x44, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x64, 0x61, + 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x44, 0x61, 0x74, + 0x65, 0x22, 0x73, 0x0a, 0x14, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, + 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xab, 0x01, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, - 0x64, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xab, 0x01, 0x0a, - 0x17, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 
0x6f, 0x6e, - 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, - 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x75, - 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, 0x72, 0x75, 0x6e, 0x73, 0x32, 0xe0, 0x1c, 0x0a, 0x0e, 0x52, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x62, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x22, 0x0f, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, - 0x2a, 0x12, 0x77, 0x0a, 0x16, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, - 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, + 0x64, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6a, 0x6f, 0x62, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x45, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x54, 0x72, 0x65, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, + 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x72, 0x75, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x04, + 0x72, 0x75, 0x6e, 0x73, 0x22, 0x20, 0x0a, 0x0e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0xde, 0x1d, 0x0a, 0x0e, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x62, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 
0x6f, 0x6e, 0x12, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x22, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x77, 0x0a, + 0x16, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, + 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, + 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x3a, 0x01, + 0x2a, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xb8, 0x01, 0x0a, 0x16, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, - 0x6f, 0x62, 0x3a, 0x01, 0x2a, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, - 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, - 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, - 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x12, 0x43, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, - 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, - 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, - 0x2a, 0x43, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, - 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0x99, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, - 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, - 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3, 0xe4, 
0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, - 0x62, 0x12, 0xa9, 0x01, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x12, 0x43, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, + 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xc0, + 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, + 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4a, 0x6f, 0x62, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x2a, 0x43, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x7d, 0x12, 0x99, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, - 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x3a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x65, 0x22, 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 
- 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x64, 0x75, 0x6d, 0x70, 0x12, 0xa2, 0x01, - 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x12, 0xa9, 0x01, + 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, + 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x64, 0x75, 0x6d, 0x70, 0x12, 0xa2, 0x01, 0x0a, 0x15, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x12, 0x77, 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, - 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x7a, 0x0a, 0x0f, 0x52, - 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, - 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 
0x1a, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x14, 0x22, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0xae, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, - 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x2d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x33, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x9b, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, - 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x6f, 0x64, - 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, - 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, - 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, - 0x65, 0x63, 0x72, 0x65, 0x74, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, - 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, - 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, + 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x77, + 0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, + 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x7a, 0x0a, 0x0f, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x6f, 0x64, 0x70, + 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x22, + 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x3a, 0x01, 0x2a, 0x12, 0xae, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x2a, 0x12, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x10, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x12, 0x25, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x41, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3b, 0x22, 0x36, 0x2f, 0x61, 0x70, 
0x69, 0x2f, 0x76, 0x31, + 0x12, 0x2d, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, + 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x33, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, - 0x01, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x1e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, - 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x64, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x1e, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, - 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x77, - 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x86, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, - 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xde, - 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x6f, - 
0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6f, + 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x9b, 0x01, 0x0a, 0x0e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, + 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x2f, 0x7b, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x3a, + 0x01, 0x2a, 0x12, 0x6e, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, + 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x11, 0x12, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x2a, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x60, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x5a, 0x12, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x25, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 
0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x3b, 0x22, 0x36, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x8a, + 0x01, 0x0a, 0x09, 0x4a, 0x6f, 0x62, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4a, 0x6f, 0x62, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4a, 0x6f, 0x62, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0xc0, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, - 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, - 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x5d, 0x22, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x12, 0x1e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x10, 0x12, 0x0e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x69, 0x6e, 0x64, 0x6f, + 0x77, 0x12, 0x86, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 
0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x30, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, + 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0xde, 0x01, 0x0a, 0x19, 0x4c, + 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x60, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x5a, 0x12, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x23, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x5d, 0x22, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xc7, + 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, + 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 
0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x6a, 0x12, 0x68, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xc0, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x23, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x5d, 0x1a, 0x58, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x0c, + 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x12, 0x1b, 0x2e, 0x6f, + 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, + 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, + 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, + 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x12, 0x3c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, - 0x01, 0x2a, 0x12, 0xc7, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, - 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, - 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x70, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x6a, 0x12, 0x68, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x7b, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x12, 0xc0, 0x01, 0x0a, - 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x23, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x75, 0x73, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x63, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x5d, 0x1a, 0x58, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x7d, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x2f, 0x7b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3a, 0x01, 0x2a, 0x12, - 0x9b, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, - 0x12, 0x21, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, - 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, - 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x44, 0x72, 0x79, 0x52, 0x75, 0x6e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x44, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x12, - 0x3c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, - 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, - 0x65, 0x70, 0x6c, 0x61, 0x79, 0x2d, 0x64, 0x72, 0x79, 0x2d, 0x72, 0x75, 0x6e, 0x42, 0x70, 0x0a, - 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, - 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x42, 0x15, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x01, - 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x64, 0x70, - 0x66, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, - 0x92, 0x41, 0x1c, 0x12, 0x05, 0x32, 0x03, 0x30, 0x2e, 0x31, 0x2a, 0x01, 0x01, 0x72, 0x10, 0x0a, - 0x0e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x2d, 0x64, 0x72, 0x79, 0x2d, + 0x72, 0x75, 0x6e, 0x12, 0x81, 0x01, 0x0a, 0x06, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x12, 0x1b, + 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, + 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x36, 0x12, 0x34, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, + 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x42, 0x70, 0x0a, 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, + 0x70, 0x66, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x6e, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, + 0x73, 0x42, 0x15, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x50, 0x01, 0x5a, 0x1e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6f, 0x64, 0x70, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x6e, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x92, 0x41, 0x1c, 0x12, 0x05, 0x32, + 0x03, 0x30, 0x2e, 0x31, 0x2a, 0x01, 0x01, 0x72, 0x10, 0x0a, 0x0e, 0x4f, 0x70, 0x74, 0x69, 0x6d, + 0x75, 0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -4715,7 +4772,7 @@ func file_odpf_optimus_runtime_service_proto_rawDescGZIP() []byte { } var file_odpf_optimus_runtime_service_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_odpf_optimus_runtime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 69) +var file_odpf_optimus_runtime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 70) var file_odpf_optimus_runtime_service_proto_goTypes = []interface{}{ (InstanceSpec_Type)(0), // 0: odpf.optimus.InstanceSpec.Type (InstanceSpecData_Type)(0), // 1: odpf.optimus.InstanceSpecData.Type @@ -4774,44 +4831,45 @@ var file_odpf_optimus_runtime_service_proto_goTypes = []interface{}{ (*ReadResourceResponse)(nil), // 54: odpf.optimus.ReadResourceResponse (*UpdateResourceRequest)(nil), // 55: odpf.optimus.UpdateResourceRequest (*UpdateResourceResponse)(nil), // 56: odpf.optimus.UpdateResourceResponse - (*ReplayDryRunRequest)(nil), // 57: odpf.optimus.ReplayDryRunRequest + (*ReplayRequest)(nil), // 57: odpf.optimus.ReplayRequest (*ReplayDryRunResponse)(nil), // 58: odpf.optimus.ReplayDryRunResponse (*ReplayExecutionTreeNode)(nil), // 59: odpf.optimus.ReplayExecutionTreeNode - nil, // 60: odpf.optimus.ProjectSpecification.ConfigEntry - (*ProjectSpecification_ProjectSecret)(nil), // 61: odpf.optimus.ProjectSpecification.ProjectSecret - nil, // 62: odpf.optimus.NamespaceSpecification.ConfigEntry - nil, // 63: odpf.optimus.JobSpecification.AssetsEntry - nil, // 64: 
odpf.optimus.JobSpecification.LabelsEntry - (*JobSpecification_Behavior)(nil), // 65: odpf.optimus.JobSpecification.Behavior - (*JobSpecification_Behavior_Retry)(nil), // 66: odpf.optimus.JobSpecification.Behavior.Retry - nil, // 67: odpf.optimus.InstanceContext.EnvsEntry - nil, // 68: odpf.optimus.InstanceContext.FilesEntry - nil, // 69: odpf.optimus.ResourceSpecification.AssetsEntry - nil, // 70: odpf.optimus.ResourceSpecification.LabelsEntry - (*timestamp.Timestamp)(nil), // 71: google.protobuf.Timestamp - (*_struct.Struct)(nil), // 72: google.protobuf.Struct - (*duration.Duration)(nil), // 73: google.protobuf.Duration + (*ReplayResponse)(nil), // 60: odpf.optimus.ReplayResponse + nil, // 61: odpf.optimus.ProjectSpecification.ConfigEntry + (*ProjectSpecification_ProjectSecret)(nil), // 62: odpf.optimus.ProjectSpecification.ProjectSecret + nil, // 63: odpf.optimus.NamespaceSpecification.ConfigEntry + nil, // 64: odpf.optimus.JobSpecification.AssetsEntry + nil, // 65: odpf.optimus.JobSpecification.LabelsEntry + (*JobSpecification_Behavior)(nil), // 66: odpf.optimus.JobSpecification.Behavior + (*JobSpecification_Behavior_Retry)(nil), // 67: odpf.optimus.JobSpecification.Behavior.Retry + nil, // 68: odpf.optimus.InstanceContext.EnvsEntry + nil, // 69: odpf.optimus.InstanceContext.FilesEntry + nil, // 70: odpf.optimus.ResourceSpecification.AssetsEntry + nil, // 71: odpf.optimus.ResourceSpecification.LabelsEntry + (*timestamp.Timestamp)(nil), // 72: google.protobuf.Timestamp + (*_struct.Struct)(nil), // 73: google.protobuf.Struct + (*duration.Duration)(nil), // 74: google.protobuf.Duration } var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ - 60, // 0: odpf.optimus.ProjectSpecification.config:type_name -> odpf.optimus.ProjectSpecification.ConfigEntry - 61, // 1: odpf.optimus.ProjectSpecification.secrets:type_name -> odpf.optimus.ProjectSpecification.ProjectSecret - 62, // 2: odpf.optimus.NamespaceSpecification.config:type_name -> odpf.optimus.NamespaceSpecification.ConfigEntry + 61, // 0: odpf.optimus.ProjectSpecification.config:type_name -> odpf.optimus.ProjectSpecification.ConfigEntry + 62, // 1: odpf.optimus.ProjectSpecification.secrets:type_name -> odpf.optimus.ProjectSpecification.ProjectSecret + 63, // 2: odpf.optimus.NamespaceSpecification.config:type_name -> odpf.optimus.NamespaceSpecification.ConfigEntry 6, // 3: odpf.optimus.JobSpecHook.config:type_name -> odpf.optimus.JobConfigItem 6, // 4: odpf.optimus.JobSpecification.config:type_name -> odpf.optimus.JobConfigItem 7, // 5: odpf.optimus.JobSpecification.dependencies:type_name -> odpf.optimus.JobDependency - 63, // 6: odpf.optimus.JobSpecification.assets:type_name -> odpf.optimus.JobSpecification.AssetsEntry + 64, // 6: odpf.optimus.JobSpecification.assets:type_name -> odpf.optimus.JobSpecification.AssetsEntry 4, // 7: odpf.optimus.JobSpecification.hooks:type_name -> odpf.optimus.JobSpecHook - 64, // 8: odpf.optimus.JobSpecification.labels:type_name -> odpf.optimus.JobSpecification.LabelsEntry - 65, // 9: odpf.optimus.JobSpecification.behavior:type_name -> odpf.optimus.JobSpecification.Behavior - 71, // 10: odpf.optimus.InstanceSpec.scheduled_at:type_name -> google.protobuf.Timestamp + 65, // 8: odpf.optimus.JobSpecification.labels:type_name -> odpf.optimus.JobSpecification.LabelsEntry + 66, // 9: odpf.optimus.JobSpecification.behavior:type_name -> odpf.optimus.JobSpecification.Behavior + 72, // 10: odpf.optimus.InstanceSpec.scheduled_at:type_name -> google.protobuf.Timestamp 9, // 11: 
odpf.optimus.InstanceSpec.data:type_name -> odpf.optimus.InstanceSpecData 1, // 12: odpf.optimus.InstanceSpecData.type:type_name -> odpf.optimus.InstanceSpecData.Type - 67, // 13: odpf.optimus.InstanceContext.envs:type_name -> odpf.optimus.InstanceContext.EnvsEntry - 68, // 14: odpf.optimus.InstanceContext.files:type_name -> odpf.optimus.InstanceContext.FilesEntry - 71, // 15: odpf.optimus.JobStatus.scheduled_at:type_name -> google.protobuf.Timestamp - 72, // 16: odpf.optimus.ResourceSpecification.spec:type_name -> google.protobuf.Struct - 69, // 17: odpf.optimus.ResourceSpecification.assets:type_name -> odpf.optimus.ResourceSpecification.AssetsEntry - 70, // 18: odpf.optimus.ResourceSpecification.labels:type_name -> odpf.optimus.ResourceSpecification.LabelsEntry + 68, // 13: odpf.optimus.InstanceContext.envs:type_name -> odpf.optimus.InstanceContext.EnvsEntry + 69, // 14: odpf.optimus.InstanceContext.files:type_name -> odpf.optimus.InstanceContext.FilesEntry + 72, // 15: odpf.optimus.JobStatus.scheduled_at:type_name -> google.protobuf.Timestamp + 73, // 16: odpf.optimus.ResourceSpecification.spec:type_name -> google.protobuf.Struct + 70, // 17: odpf.optimus.ResourceSpecification.assets:type_name -> odpf.optimus.ResourceSpecification.AssetsEntry + 71, // 18: odpf.optimus.ResourceSpecification.labels:type_name -> odpf.optimus.ResourceSpecification.LabelsEntry 5, // 19: odpf.optimus.DeployJobSpecificationRequest.jobs:type_name -> odpf.optimus.JobSpecification 5, // 20: odpf.optimus.ListJobSpecificationResponse.jobs:type_name -> odpf.optimus.JobSpecification 5, // 21: odpf.optimus.CheckJobSpecificationRequest.job:type_name -> odpf.optimus.JobSpecification @@ -4823,7 +4881,7 @@ var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ 5, // 27: odpf.optimus.ReadJobSpecificationResponse.spec:type_name -> odpf.optimus.JobSpecification 2, // 28: odpf.optimus.ListProjectsResponse.projects:type_name -> odpf.optimus.ProjectSpecification 3, // 29: odpf.optimus.ListProjectNamespacesResponse.namespaces:type_name -> odpf.optimus.NamespaceSpecification - 71, // 30: odpf.optimus.RegisterInstanceRequest.scheduled_at:type_name -> google.protobuf.Timestamp + 72, // 30: odpf.optimus.RegisterInstanceRequest.scheduled_at:type_name -> google.protobuf.Timestamp 0, // 31: odpf.optimus.RegisterInstanceRequest.instance_type:type_name -> odpf.optimus.InstanceSpec.Type 2, // 32: odpf.optimus.RegisterInstanceResponse.project:type_name -> odpf.optimus.ProjectSpecification 5, // 33: odpf.optimus.RegisterInstanceResponse.job:type_name -> odpf.optimus.JobSpecification @@ -4831,9 +4889,9 @@ var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ 3, // 35: odpf.optimus.RegisterInstanceResponse.namespace:type_name -> odpf.optimus.NamespaceSpecification 10, // 36: odpf.optimus.RegisterInstanceResponse.context:type_name -> odpf.optimus.InstanceContext 11, // 37: odpf.optimus.JobStatusResponse.statuses:type_name -> odpf.optimus.JobStatus - 71, // 38: odpf.optimus.GetWindowRequest.scheduled_at:type_name -> google.protobuf.Timestamp - 71, // 39: odpf.optimus.GetWindowResponse.start:type_name -> google.protobuf.Timestamp - 71, // 40: odpf.optimus.GetWindowResponse.end:type_name -> google.protobuf.Timestamp + 72, // 38: odpf.optimus.GetWindowRequest.scheduled_at:type_name -> google.protobuf.Timestamp + 72, // 39: odpf.optimus.GetWindowResponse.start:type_name -> google.protobuf.Timestamp + 72, // 40: odpf.optimus.GetWindowResponse.end:type_name -> google.protobuf.Timestamp 12, // 41: 
odpf.optimus.DeployResourceSpecificationRequest.resources:type_name -> odpf.optimus.ResourceSpecification 12, // 42: odpf.optimus.ListResourceSpecificationResponse.resources:type_name -> odpf.optimus.ResourceSpecification 12, // 43: odpf.optimus.CreateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification @@ -4841,9 +4899,9 @@ var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ 12, // 45: odpf.optimus.UpdateResourceRequest.resource:type_name -> odpf.optimus.ResourceSpecification 59, // 46: odpf.optimus.ReplayDryRunResponse.response:type_name -> odpf.optimus.ReplayExecutionTreeNode 59, // 47: odpf.optimus.ReplayExecutionTreeNode.dependents:type_name -> odpf.optimus.ReplayExecutionTreeNode - 71, // 48: odpf.optimus.ReplayExecutionTreeNode.runs:type_name -> google.protobuf.Timestamp - 66, // 49: odpf.optimus.JobSpecification.Behavior.retry:type_name -> odpf.optimus.JobSpecification.Behavior.Retry - 73, // 50: odpf.optimus.JobSpecification.Behavior.Retry.delay:type_name -> google.protobuf.Duration + 72, // 48: odpf.optimus.ReplayExecutionTreeNode.runs:type_name -> google.protobuf.Timestamp + 67, // 49: odpf.optimus.JobSpecification.Behavior.retry:type_name -> odpf.optimus.JobSpecification.Behavior.Retry + 74, // 50: odpf.optimus.JobSpecification.Behavior.Retry.delay:type_name -> google.protobuf.Duration 13, // 51: odpf.optimus.RuntimeService.Version:input_type -> odpf.optimus.VersionRequest 15, // 52: odpf.optimus.RuntimeService.DeployJobSpecification:input_type -> odpf.optimus.DeployJobSpecificationRequest 29, // 53: odpf.optimus.RuntimeService.CreateJobSpecification:input_type -> odpf.optimus.CreateJobSpecificationRequest @@ -4866,32 +4924,34 @@ var file_odpf_optimus_runtime_service_proto_depIdxs = []int32{ 51, // 70: odpf.optimus.RuntimeService.CreateResource:input_type -> odpf.optimus.CreateResourceRequest 53, // 71: odpf.optimus.RuntimeService.ReadResource:input_type -> odpf.optimus.ReadResourceRequest 55, // 72: odpf.optimus.RuntimeService.UpdateResource:input_type -> odpf.optimus.UpdateResourceRequest - 57, // 73: odpf.optimus.RuntimeService.ReplayDryRun:input_type -> odpf.optimus.ReplayDryRunRequest - 14, // 74: odpf.optimus.RuntimeService.Version:output_type -> odpf.optimus.VersionResponse - 16, // 75: odpf.optimus.RuntimeService.DeployJobSpecification:output_type -> odpf.optimus.DeployJobSpecificationResponse - 30, // 76: odpf.optimus.RuntimeService.CreateJobSpecification:output_type -> odpf.optimus.CreateJobSpecificationResponse - 32, // 77: odpf.optimus.RuntimeService.ReadJobSpecification:output_type -> odpf.optimus.ReadJobSpecificationResponse - 34, // 78: odpf.optimus.RuntimeService.DeleteJobSpecification:output_type -> odpf.optimus.DeleteJobSpecificationResponse - 18, // 79: odpf.optimus.RuntimeService.ListJobSpecification:output_type -> odpf.optimus.ListJobSpecificationResponse - 20, // 80: odpf.optimus.RuntimeService.DumpJobSpecification:output_type -> odpf.optimus.DumpJobSpecificationResponse - 22, // 81: odpf.optimus.RuntimeService.CheckJobSpecification:output_type -> odpf.optimus.CheckJobSpecificationResponse - 24, // 82: odpf.optimus.RuntimeService.CheckJobSpecifications:output_type -> odpf.optimus.CheckJobSpecificationsResponse - 26, // 83: odpf.optimus.RuntimeService.RegisterProject:output_type -> odpf.optimus.RegisterProjectResponse - 28, // 84: odpf.optimus.RuntimeService.RegisterProjectNamespace:output_type -> odpf.optimus.RegisterProjectNamespaceResponse - 36, // 85: odpf.optimus.RuntimeService.RegisterSecret:output_type -> 
odpf.optimus.RegisterSecretResponse - 38, // 86: odpf.optimus.RuntimeService.ListProjects:output_type -> odpf.optimus.ListProjectsResponse - 40, // 87: odpf.optimus.RuntimeService.ListProjectNamespaces:output_type -> odpf.optimus.ListProjectNamespacesResponse - 42, // 88: odpf.optimus.RuntimeService.RegisterInstance:output_type -> odpf.optimus.RegisterInstanceResponse - 44, // 89: odpf.optimus.RuntimeService.JobStatus:output_type -> odpf.optimus.JobStatusResponse - 46, // 90: odpf.optimus.RuntimeService.GetWindow:output_type -> odpf.optimus.GetWindowResponse - 48, // 91: odpf.optimus.RuntimeService.DeployResourceSpecification:output_type -> odpf.optimus.DeployResourceSpecificationResponse - 50, // 92: odpf.optimus.RuntimeService.ListResourceSpecification:output_type -> odpf.optimus.ListResourceSpecificationResponse - 52, // 93: odpf.optimus.RuntimeService.CreateResource:output_type -> odpf.optimus.CreateResourceResponse - 54, // 94: odpf.optimus.RuntimeService.ReadResource:output_type -> odpf.optimus.ReadResourceResponse - 56, // 95: odpf.optimus.RuntimeService.UpdateResource:output_type -> odpf.optimus.UpdateResourceResponse - 58, // 96: odpf.optimus.RuntimeService.ReplayDryRun:output_type -> odpf.optimus.ReplayDryRunResponse - 74, // [74:97] is the sub-list for method output_type - 51, // [51:74] is the sub-list for method input_type + 57, // 73: odpf.optimus.RuntimeService.ReplayDryRun:input_type -> odpf.optimus.ReplayRequest + 57, // 74: odpf.optimus.RuntimeService.Replay:input_type -> odpf.optimus.ReplayRequest + 14, // 75: odpf.optimus.RuntimeService.Version:output_type -> odpf.optimus.VersionResponse + 16, // 76: odpf.optimus.RuntimeService.DeployJobSpecification:output_type -> odpf.optimus.DeployJobSpecificationResponse + 30, // 77: odpf.optimus.RuntimeService.CreateJobSpecification:output_type -> odpf.optimus.CreateJobSpecificationResponse + 32, // 78: odpf.optimus.RuntimeService.ReadJobSpecification:output_type -> odpf.optimus.ReadJobSpecificationResponse + 34, // 79: odpf.optimus.RuntimeService.DeleteJobSpecification:output_type -> odpf.optimus.DeleteJobSpecificationResponse + 18, // 80: odpf.optimus.RuntimeService.ListJobSpecification:output_type -> odpf.optimus.ListJobSpecificationResponse + 20, // 81: odpf.optimus.RuntimeService.DumpJobSpecification:output_type -> odpf.optimus.DumpJobSpecificationResponse + 22, // 82: odpf.optimus.RuntimeService.CheckJobSpecification:output_type -> odpf.optimus.CheckJobSpecificationResponse + 24, // 83: odpf.optimus.RuntimeService.CheckJobSpecifications:output_type -> odpf.optimus.CheckJobSpecificationsResponse + 26, // 84: odpf.optimus.RuntimeService.RegisterProject:output_type -> odpf.optimus.RegisterProjectResponse + 28, // 85: odpf.optimus.RuntimeService.RegisterProjectNamespace:output_type -> odpf.optimus.RegisterProjectNamespaceResponse + 36, // 86: odpf.optimus.RuntimeService.RegisterSecret:output_type -> odpf.optimus.RegisterSecretResponse + 38, // 87: odpf.optimus.RuntimeService.ListProjects:output_type -> odpf.optimus.ListProjectsResponse + 40, // 88: odpf.optimus.RuntimeService.ListProjectNamespaces:output_type -> odpf.optimus.ListProjectNamespacesResponse + 42, // 89: odpf.optimus.RuntimeService.RegisterInstance:output_type -> odpf.optimus.RegisterInstanceResponse + 44, // 90: odpf.optimus.RuntimeService.JobStatus:output_type -> odpf.optimus.JobStatusResponse + 46, // 91: odpf.optimus.RuntimeService.GetWindow:output_type -> odpf.optimus.GetWindowResponse + 48, // 92: 
odpf.optimus.RuntimeService.DeployResourceSpecification:output_type -> odpf.optimus.DeployResourceSpecificationResponse + 50, // 93: odpf.optimus.RuntimeService.ListResourceSpecification:output_type -> odpf.optimus.ListResourceSpecificationResponse + 52, // 94: odpf.optimus.RuntimeService.CreateResource:output_type -> odpf.optimus.CreateResourceResponse + 54, // 95: odpf.optimus.RuntimeService.ReadResource:output_type -> odpf.optimus.ReadResourceResponse + 56, // 96: odpf.optimus.RuntimeService.UpdateResource:output_type -> odpf.optimus.UpdateResourceResponse + 58, // 97: odpf.optimus.RuntimeService.ReplayDryRun:output_type -> odpf.optimus.ReplayDryRunResponse + 60, // 98: odpf.optimus.RuntimeService.Replay:output_type -> odpf.optimus.ReplayResponse + 75, // [75:99] is the sub-list for method output_type + 51, // [51:75] is the sub-list for method input_type 51, // [51:51] is the sub-list for extension type_name 51, // [51:51] is the sub-list for extension extendee 0, // [0:51] is the sub-list for field type_name @@ -5564,7 +5624,7 @@ func file_odpf_optimus_runtime_service_proto_init() { } } file_odpf_optimus_runtime_service_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplayDryRunRequest); i { + switch v := v.(*ReplayRequest); i { case 0: return &v.state case 1: @@ -5599,7 +5659,19 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReplayResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_odpf_optimus_runtime_service_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProjectSpecification_ProjectSecret); i { case 0: return &v.state @@ -5611,7 +5683,7 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JobSpecification_Behavior); i { case 0: return &v.state @@ -5623,7 +5695,7 @@ func file_odpf_optimus_runtime_service_proto_init() { return nil } } - file_odpf_optimus_runtime_service_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + file_odpf_optimus_runtime_service_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*JobSpecification_Behavior_Retry); i { case 0: return &v.state @@ -5642,7 +5714,7 @@ func file_odpf_optimus_runtime_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_odpf_optimus_runtime_service_proto_rawDesc, NumEnums: 2, - NumMessages: 69, + NumMessages: 70, NumExtensions: 0, NumServices: 1, }, diff --git a/api/proto/odpf/optimus/runtime_service.pb.gw.go b/api/proto/odpf/optimus/runtime_service.pb.gw.go index cdd10c8cc7..43391d7374 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.gw.go +++ b/api/proto/odpf/optimus/runtime_service.pb.gw.go @@ -1466,7 +1466,7 @@ var ( ) func request_RuntimeService_ReplayDryRun_0(ctx context.Context, marshaler runtime.Marshaler, client RuntimeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq 
ReplayDryRunRequest + var protoReq ReplayRequest var metadata runtime.ServerMetadata var ( @@ -1509,7 +1509,7 @@ func request_RuntimeService_ReplayDryRun_0(ctx context.Context, marshaler runtim } func local_request_RuntimeService_ReplayDryRun_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ReplayDryRunRequest + var protoReq ReplayRequest var metadata runtime.ServerMetadata var ( @@ -1551,6 +1551,96 @@ func local_request_RuntimeService_ReplayDryRun_0(ctx context.Context, marshaler } +var ( + filter_RuntimeService_Replay_0 = &utilities.DoubleArray{Encoding: map[string]int{"project_name": 0, "job_name": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}} +) + +func request_RuntimeService_Replay_0(ctx context.Context, marshaler runtime.Marshaler, client RuntimeServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ReplayRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") + } + + protoReq.ProjectName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) + } + + val, ok = pathParams["job_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_name") + } + + protoReq.JobName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_RuntimeService_Replay_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Replay(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_RuntimeService_Replay_0(ctx context.Context, marshaler runtime.Marshaler, server RuntimeServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ReplayRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["project_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "project_name") + } + + protoReq.ProjectName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "project_name", err) + } + + val, ok = pathParams["job_name"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "job_name") + } + + protoReq.JobName, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "job_name", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_RuntimeService_Replay_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Replay(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterRuntimeServiceHandlerServer registers the http handlers for service RuntimeService to "mux". // UnaryRPC :call RuntimeServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -2017,6 +2107,29 @@ func RegisterRuntimeServiceHandlerServer(ctx context.Context, mux *runtime.Serve }) + mux.Handle("GET", pattern_RuntimeService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/odpf.optimus.RuntimeService/Replay") + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_RuntimeService_Replay_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_Replay_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -2458,6 +2571,26 @@ func RegisterRuntimeServiceHandlerClient(ctx context.Context, mux *runtime.Serve }) + mux.Handle("GET", pattern_RuntimeService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/odpf.optimus.RuntimeService/Replay") + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_RuntimeService_Replay_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_RuntimeService_Replay_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -2501,6 +2634,8 @@ var ( pattern_RuntimeService_UpdateResource_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 4, 2, 5, 1, 0, 4, 1, 5, 6, 2, 7}, []string{"api", "v1", "project", "project_name", "namespace", "datastore", "datastore_name", "resource"}, "")) pattern_RuntimeService_ReplayDryRun_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "project", "project_name", "job", "job_name", "replay-dry-run"}, "")) + + pattern_RuntimeService_Replay_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4, 1, 0, 4, 1, 5, 5, 2, 6}, []string{"api", "v1", "project", "project_name", "job", "job_name", "replay"}, "")) ) var ( @@ -2543,4 +2678,6 @@ var ( forward_RuntimeService_UpdateResource_0 = runtime.ForwardResponseMessage forward_RuntimeService_ReplayDryRun_0 = runtime.ForwardResponseMessage + + forward_RuntimeService_Replay_0 = runtime.ForwardResponseMessage ) diff --git a/api/proto/odpf/optimus/runtime_service_grpc.pb.go b/api/proto/odpf/optimus/runtime_service_grpc.pb.go index 917d8ff00d..0691ca0a9c 100644 --- a/api/proto/odpf/optimus/runtime_service_grpc.pb.go +++ b/api/proto/odpf/optimus/runtime_service_grpc.pb.go @@ -67,7 +67,8 @@ type RuntimeServiceClient interface { CreateResource(ctx context.Context, in *CreateResourceRequest, opts ...grpc.CallOption) (*CreateResourceResponse, error) ReadResource(ctx context.Context, in *ReadResourceRequest, opts ...grpc.CallOption) (*ReadResourceResponse, error) UpdateResource(ctx context.Context, in *UpdateResourceRequest, opts ...grpc.CallOption) (*UpdateResourceResponse, error) - ReplayDryRun(ctx context.Context, in *ReplayDryRunRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) + ReplayDryRun(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) + Replay(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayResponse, error) } type runtimeServiceClient struct { @@ -345,7 +346,7 @@ func (c *runtimeServiceClient) UpdateResource(ctx context.Context, in *UpdateRes return out, nil } -func (c *runtimeServiceClient) ReplayDryRun(ctx context.Context, in *ReplayDryRunRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) { +func (c *runtimeServiceClient) ReplayDryRun(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayDryRunResponse, error) { out := new(ReplayDryRunResponse) err := c.cc.Invoke(ctx, "/odpf.optimus.RuntimeService/ReplayDryRun", in, out, opts...) if err != nil { @@ -354,6 +355,15 @@ func (c *runtimeServiceClient) ReplayDryRun(ctx context.Context, in *ReplayDryRu return out, nil } +func (c *runtimeServiceClient) Replay(ctx context.Context, in *ReplayRequest, opts ...grpc.CallOption) (*ReplayResponse, error) { + out := new(ReplayResponse) + err := c.cc.Invoke(ctx, "/odpf.optimus.RuntimeService/Replay", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // RuntimeServiceServer is the server API for RuntimeService service. 
// All implementations must embed UnimplementedRuntimeServiceServer // for forward compatibility @@ -407,7 +417,8 @@ type RuntimeServiceServer interface { CreateResource(context.Context, *CreateResourceRequest) (*CreateResourceResponse, error) ReadResource(context.Context, *ReadResourceRequest) (*ReadResourceResponse, error) UpdateResource(context.Context, *UpdateResourceRequest) (*UpdateResourceResponse, error) - ReplayDryRun(context.Context, *ReplayDryRunRequest) (*ReplayDryRunResponse, error) + ReplayDryRun(context.Context, *ReplayRequest) (*ReplayDryRunResponse, error) + Replay(context.Context, *ReplayRequest) (*ReplayResponse, error) mustEmbedUnimplementedRuntimeServiceServer() } @@ -481,9 +492,12 @@ func (UnimplementedRuntimeServiceServer) ReadResource(context.Context, *ReadReso func (UnimplementedRuntimeServiceServer) UpdateResource(context.Context, *UpdateResourceRequest) (*UpdateResourceResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateResource not implemented") } -func (UnimplementedRuntimeServiceServer) ReplayDryRun(context.Context, *ReplayDryRunRequest) (*ReplayDryRunResponse, error) { +func (UnimplementedRuntimeServiceServer) ReplayDryRun(context.Context, *ReplayRequest) (*ReplayDryRunResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReplayDryRun not implemented") } +func (UnimplementedRuntimeServiceServer) Replay(context.Context, *ReplayRequest) (*ReplayResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Replay not implemented") +} func (UnimplementedRuntimeServiceServer) mustEmbedUnimplementedRuntimeServiceServer() {} // UnsafeRuntimeServiceServer may be embedded to opt out of forward compatibility for this service. @@ -903,7 +917,7 @@ func _RuntimeService_UpdateResource_Handler(srv interface{}, ctx context.Context } func _RuntimeService_ReplayDryRun_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReplayDryRunRequest) + in := new(ReplayRequest) if err := dec(in); err != nil { return nil, err } @@ -915,7 +929,25 @@ func _RuntimeService_ReplayDryRun_Handler(srv interface{}, ctx context.Context, FullMethod: "/odpf.optimus.RuntimeService/ReplayDryRun", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RuntimeServiceServer).ReplayDryRun(ctx, req.(*ReplayDryRunRequest)) + return srv.(RuntimeServiceServer).ReplayDryRun(ctx, req.(*ReplayRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RuntimeService_Replay_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReplayRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServiceServer).Replay(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/odpf.optimus.RuntimeService/Replay", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServiceServer).Replay(ctx, req.(*ReplayRequest)) } return interceptor(ctx, in, info, handler) } @@ -1007,6 +1039,10 @@ var RuntimeService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReplayDryRun", Handler: _RuntimeService_ReplayDryRun_Handler, }, + { + MethodName: "Replay", + Handler: _RuntimeService_Replay_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/cmd/replay.go b/cmd/replay.go index fda6433a05..b54d0e2895 100644 --- 
a/cmd/replay.go +++ b/cmd/replay.go @@ -111,19 +111,32 @@ ReplayDryRun date ranges are inclusive. return nil } - confirm := false - if err := survey.AskOne(&survey.Select{ - Message: "Proceed with replay?", - Options: []string{"Yes", "No"}, - Default: "Yes", - }, &confirm); err != nil { + answers := map[string]interface{}{} + questions := []*survey.Question{ + { + Name: "ProceedReplay", + Prompt: &survey.Select{ + + Message: "Proceed with replay?", + Options: []string{"Yes", "No"}, + Default: "Yes", + }, + }, + } + + if err := survey.Ask(questions, &answers); err != nil { return err } - if !confirm { - l.Print("aborting...") + if option, ok := answers["ProceedReplay"]; ok && option.(survey.OptionAnswer).Value == "No" { + l.Println("aborting...") return nil } + replayId, err := runReplayRequest(l, replayProject, namespace, args[0], args[1], endDate, conf) + if err != nil { + return err + } + l.Printf("🚀 replay request created with id %s\n", replayId) return nil } return reCmd @@ -148,14 +161,14 @@ func printReplayExecutionTree(l logger, projectName, namespace, jobName, startDa l.Println("please wait...") runtime := pb.NewRuntimeServiceClient(conn) // fetch compiled JobSpec by calling the optimus API - replayDryRunRequest := &pb.ReplayDryRunRequest{ + replayRequest := &pb.ReplayRequest{ ProjectName: projectName, JobName: jobName, Namespace: namespace, StartDate: startDate, EndDate: endDate, } - replayDryRunResponse, err := runtime.ReplayDryRun(dumpTimeoutCtx, replayDryRunRequest) + replayDryRunResponse, err := runtime.ReplayDryRun(dumpTimeoutCtx, replayRequest) if err != nil { if errors.Is(err, context.DeadlineExceeded) { l.Println("render process took too long, timing out") @@ -163,12 +176,12 @@ func printReplayExecutionTree(l logger, projectName, namespace, jobName, startDa return errors.Wrapf(err, "request failed for job %s", jobName) } - printReplayDryRunResponse(l, replayDryRunRequest, replayDryRunResponse) + printReplayDryRunResponse(l, replayRequest, replayDryRunResponse) return nil } -func printReplayDryRunResponse(l logger, replayDryRunRequest *pb.ReplayDryRunRequest, replayDryRunResponse *pb.ReplayDryRunResponse) { - l.Printf("For %s project and %s namespace\n\n", coloredNotice(replayDryRunRequest.ProjectName), coloredNotice(replayDryRunRequest.Namespace)) +func printReplayDryRunResponse(l logger, replayRequest *pb.ReplayRequest, replayDryRunResponse *pb.ReplayDryRunResponse) { + l.Printf("For %s project and %s namespace\n\n", coloredNotice(replayRequest.ProjectName), coloredNotice(replayRequest.Namespace)) l.Println(coloredNotice("REPLAY RUNS")) table := tablewriter.NewWriter(l.Writer()) table.SetBorder(false) @@ -202,7 +215,7 @@ func printReplayDryRunResponse(l logger, replayDryRunRequest *pb.ReplayDryRunReq l.Println(fmt.Sprintf("%s", printExecutionTree(replayDryRunResponse.Response, treeprint.New()))) } -// PrintExecutionTree creates a ascii tree to visually inspect +// printExecutionTree creates a ascii tree to visually inspect // instance dependencies that will be recomputed after replay operation func printExecutionTree(instance *pb.ReplayExecutionTreeNode, tree treeprint.Tree) treeprint.Tree { subtree := tree.AddBranch(instance.JobName) @@ -218,3 +231,39 @@ func printExecutionTree(instance *pb.ReplayExecutionTreeNode, tree treeprint.Tre } return tree } + +func runReplayRequest(l logger, projectName, namespace, jobName, startDate, endDate string, conf config.Provider) (string, error) { + dialTimeoutCtx, dialCancel := context.WithTimeout(context.Background(), 
OptimusDialTimeout) + defer dialCancel() + + conn, err := createConnection(dialTimeoutCtx, conf.GetHost()) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Println("can't reach optimus service") + } + return "", err + } + defer conn.Close() + + dumpTimeoutCtx, dumpCancel := context.WithTimeout(context.Background(), renderTimeout) + defer dumpCancel() + + l.Println("firing the replay request...") + runtime := pb.NewRuntimeServiceClient(conn) + // fetch compiled JobSpec by calling the optimus API + replayRequest := &pb.ReplayRequest{ + ProjectName: projectName, + JobName: jobName, + Namespace: namespace, + StartDate: startDate, + EndDate: endDate, + } + replayResponse, err := runtime.Replay(dumpTimeoutCtx, replayRequest) + if err != nil { + if errors.Is(err, context.DeadlineExceeded) { + l.Println("render process took too long, timing out") + } + return "", errors.Wrapf(err, "request failed for job %s", jobName) + } + return replayResponse.Id, nil +} diff --git a/ext/scheduler/airflow2/airflow.go b/ext/scheduler/airflow2/airflow.go index de158e2014..c2301b562e 100644 --- a/ext/scheduler/airflow2/airflow.go +++ b/ext/scheduler/airflow2/airflow.go @@ -186,17 +186,19 @@ func (a *scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobN } schdHost = strings.Trim(schdHost, "/") - airflowDateFormat := "2006-01-02T15:04:05" - var jsonStr = []byte(fmt.Sprintf(`{"start_date":"%s", "end_date": "%s"}`, + airflowDateFormat := "2006-01-02T15:04:05+00:00" + var jsonStr = []byte(fmt.Sprintf(`{"start_date":"%s", "end_date": "%s", "dry_run": false}`, startDate.UTC().Format(airflowDateFormat), endDate.UTC().Format(airflowDateFormat))) postURL := fmt.Sprintf( fmt.Sprintf("%s/%s", schdHost, dagRunClearURL), jobName) + request, err := http.NewRequest(http.MethodPost, postURL, bytes.NewBuffer(jsonStr)) if err != nil { return errors.Wrapf(err, "failed to build http request for %s", postURL) } + request.Header.Set("Content-Type", "application/json") resp, err := a.httpClient.Do(request) if err != nil { diff --git a/job/replay.go b/job/replay.go index 780ea1e92d..e6e127b247 100644 --- a/job/replay.go +++ b/job/replay.go @@ -15,18 +15,20 @@ const ( ReplayDateFormat = "2006-01-02" ) -func (srv *Service) ReplayDryRun(namespace models.NamespaceSpec, replayJobSpec models.JobSpec, start, end time.Time) (*tree.TreeNode, error) { - projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(namespace.ProjectSpec) - jobSpecs, err := srv.getDependencyResolvedSpecs(namespace.ProjectSpec, projectJobSpecRepo, nil) +func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { + projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(replayRequest.Project) + jobSpecs, err := srv.getDependencyResolvedSpecs(replayRequest.Project, projectJobSpecRepo, nil) if err != nil { return nil, err } + dagSpecMap := make(map[string]models.JobSpec) for _, currSpec := range jobSpecs { dagSpecMap[currSpec.Name] = currSpec } + replayRequest.DagSpecMap = dagSpecMap - rootInstance, err := PrepareTree(dagSpecMap, replayJobSpec.Name, start, end) + rootInstance, err := prepareTree(replayRequest) if err != nil { return nil, err } @@ -34,23 +36,19 @@ func (srv *Service) ReplayDryRun(namespace models.NamespaceSpec, replayJobSpec m return rootInstance, nil } -func (srv *Service) Replay(namespace models.NamespaceSpec, replayJobSpec models.JobSpec, start, end time.Time) (string, error) { - projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(namespace.ProjectSpec) - jobSpecs, err := 
srv.getDependencyResolvedSpecs(namespace.ProjectSpec, projectJobSpecRepo, nil) +func (srv *Service) Replay(replayRequest *models.ReplayRequestInput) (string, error) { + projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(replayRequest.Project) + jobSpecs, err := srv.getDependencyResolvedSpecs(replayRequest.Project, projectJobSpecRepo, nil) if err != nil { return "", err } + dagSpecMap := make(map[string]models.JobSpec) for _, currSpec := range jobSpecs { dagSpecMap[currSpec.Name] = currSpec } - replayRequest := models.ReplayRequestInput{ - Job: replayJobSpec, - Start: start, - End: end, - Project: namespace.ProjectSpec, - DagSpecMap: dagSpecMap, - } + replayRequest.DagSpecMap = dagSpecMap + replayUUID, err := srv.replayManager.Replay(replayRequest) if err != nil { return "", err @@ -58,17 +56,17 @@ func (srv *Service) Replay(namespace models.NamespaceSpec, replayJobSpec models. return replayUUID, nil } -// PrepareTree creates a execution tree for replay operation -func PrepareTree(dagSpecMap map[string]models.JobSpec, replayJobName string, start, end time.Time) (*tree.TreeNode, error) { - replayJobSpec, found := dagSpecMap[replayJobName] +// prepareTree creates a execution tree for replay operation +func prepareTree(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { + replayJobSpec, found := replayRequest.DagSpecMap[replayRequest.Job.Name] if !found { - return nil, fmt.Errorf("couldn't find any job with name %s", replayJobName) + return nil, fmt.Errorf("couldn't find any job with name %s", replayRequest.Job.Name) } // compute runs that require replay dagTree := tree.NewMultiRootTree() parentNode := tree.NewTreeNode(replayJobSpec) - if runs, err := getRunsBetweenDates(start, end, replayJobSpec.Schedule.Interval); err == nil { + if runs, err := getRunsBetweenDates(replayRequest.Start, replayRequest.End, replayJobSpec.Schedule.Interval); err == nil { for _, run := range runs { parentNode.Runs.Add(run) } @@ -77,7 +75,7 @@ func PrepareTree(dagSpecMap map[string]models.JobSpec, replayJobName string, sta } dagTree.AddNode(parentNode) - rootInstance, err := populateDownstreamDAGs(dagTree, replayJobSpec, dagSpecMap) + rootInstance, err := populateDownstreamDAGs(dagTree, replayJobSpec, replayRequest.DagSpecMap) if err != nil { return nil, err } diff --git a/job/replay_manager.go b/job/replay_manager.go index 71f1a6a8ee..36f2bbfa25 100644 --- a/job/replay_manager.go +++ b/job/replay_manager.go @@ -19,7 +19,7 @@ var ( type ReplayManager interface { Init() - Replay(models.ReplayRequestInput) (string, error) + Replay(*models.ReplayRequestInput) (string, error) } // Manager for replaying operation(s). 
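The interface change above means callers now hand the manager a *models.ReplayRequestInput and receive the generated replay ID back as a string. A minimal sketch of that contract, assuming only the job and models packages touched by this patch (the submitReplay helper and its package name are illustrative, not part of the change):

package replayexample // hypothetical package, for illustration only

import (
	"github.com/odpf/optimus/job"
	"github.com/odpf/optimus/models"
)

// submitReplay shows the expected call pattern against job.ReplayManager:
// pass the request by pointer and keep the returned replay ID for later
// status lookups; a full request queue surfaces as an error from Replay.
func submitReplay(mgr job.ReplayManager, req *models.ReplayRequestInput) (string, error) {
	return mgr.Replay(req)
}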
@@ -34,7 +34,7 @@ type Manager struct { mu sync.Mutex // request queue, used by workers - requestQ chan models.ReplayRequestInput + requestQ chan *models.ReplayRequestInput // request map, used for verifying if a request is // in queue without actually consuming it requestMap map[uuid.UUID]bool @@ -48,7 +48,7 @@ type Manager struct { // Replay a request asynchronously, returns a replay id that can // can be used to query its status -func (m *Manager) Replay(reqInput models.ReplayRequestInput) (string, error) { +func (m *Manager) Replay(reqInput *models.ReplayRequestInput) (string, error) { uuidOb, err := uuid.NewRandom() if err != nil { return "", err @@ -134,7 +134,7 @@ func NewManager(worker ReplayWorker, size int) *Manager { mgr := &Manager{ replayWorker: worker, requestMap: make(map[uuid.UUID]bool), - requestQ: make(chan models.ReplayRequestInput, size), + requestQ: make(chan *models.ReplayRequestInput, size), clearRequestMapListener: make(chan interface{}), } mgr.Init() diff --git a/job/replay_test.go b/job/replay_test.go index e78054c324..ea192b2426 100644 --- a/job/replay_test.go +++ b/job/replay_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/go-multierror" "github.com/pkg/errors" - "github.com/google/uuid" "github.com/odpf/optimus/mock" "github.com/odpf/optimus/models" "github.com/stretchr/testify/assert" @@ -92,12 +91,6 @@ func TestReplay(t *testing.T) { Name: "proj", } - namespaceSpec := models.NamespaceSpec{ - ID: uuid.Must(uuid.NewRandom()), - Name: "dev-team-1", - ProjectSpec: projSpec, - } - t.Run("should fail if unable to fetch jobSpecs from project jobSpecRepo", func(t *testing.T) { projectJobSpecRepo := new(mock.ProjectJobSpecRepository) projectJobSpecRepo.On("GetAll").Return(nil, errors.New("error while getting all dags")) @@ -111,7 +104,13 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) - _, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec1], replayStart, replayEnd) + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) assert.NotNil(t, err) }) @@ -139,7 +138,13 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - _, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec1], replayStart, replayEnd) + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) assert.NotNil(t, err) merr := err.(*multierror.Error) @@ -176,7 +181,13 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - _, err := jobSvc.ReplayDryRun(namespaceSpec, cyclicDagSpec[0], replayStart, replayEnd) + replayRequest := &models.ReplayRequestInput{ + Job: cyclicDagSpec[0], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "a cycle dependency encountered in the tree")) @@ -207,8 +218,14 @@ func 
TestReplay(t *testing.T) { jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } - tree, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec1], replayStart, replayEnd) + tree, err := jobSvc.ReplayDryRun(replayRequest) assert.Nil(t, err) countMap := make(map[string][]time.Time) @@ -253,8 +270,14 @@ func TestReplay(t *testing.T) { jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec4], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } - tree, err := jobSvc.ReplayDryRun(namespaceSpec, specs[spec4], replayStart, replayEnd) + tree, err := jobSvc.ReplayDryRun(replayRequest) assert.Nil(t, err) countMap := make(map[string][]time.Time) diff --git a/job/replay_worker.go b/job/replay_worker.go index d547e1298d..55ad86b7f9 100644 --- a/job/replay_worker.go +++ b/job/replay_worker.go @@ -2,6 +2,10 @@ package job import ( "context" + "fmt" + "time" + + "github.com/odpf/optimus/core/logger" "github.com/odpf/optimus/core/bus" "github.com/odpf/optimus/core/tree" @@ -17,10 +21,12 @@ const ( // EvtFailedToPrepareForReplay is emitted to event bus when a replay is failed to even prepare // to execute, it passes replay ID as string in bus EvtFailedToPrepareForReplay = "replay_request_failed_to_prepare" + + MsgReplaySuccessfullyCompleted = "Completed successfully" ) type ReplayWorker interface { - Process(context.Context, models.ReplayRequestInput) error + Process(context.Context, *models.ReplayRequestInput) error } type replayWorker struct { @@ -28,7 +34,7 @@ type replayWorker struct { scheduler models.SchedulerUnit } -func (w *replayWorker) Process(ctx context.Context, input models.ReplayRequestInput) (err error) { +func (w *replayWorker) Process(ctx context.Context, input *models.ReplayRequestInput) (err error) { // save replay request replay := models.ReplaySpec{ ID: input.ID, @@ -36,14 +42,13 @@ func (w *replayWorker) Process(ctx context.Context, input models.ReplayRequestIn StartDate: input.Start, EndDate: input.End, Status: models.ReplayStatusAccepted, - Project: input.Project, } if err = w.replayRepo.Insert(&replay); err != nil { bus.Post(EvtFailedToPrepareForReplay, input.ID) return } - replayTree, err := PrepareTree(input.DagSpecMap, input.Job.Name, input.Start, input.End) + replayTree, err := prepareTree(input) if err != nil { return err } @@ -51,12 +56,26 @@ func (w *replayWorker) Process(ctx context.Context, input models.ReplayRequestIn replayDagsMap := make(map[string]*tree.TreeNode) replayTree.GetAllNodes(replayDagsMap) - for jobName := range replayDagsMap { - if err = w.scheduler.Clear(ctx, input.Project, jobName, input.Start, input.End); err != nil { - return errors.Wrapf(err, "error while clearing dag runs for job %s", jobName) + for jobName, treeNode := range replayDagsMap { + runTimes := treeNode.Runs.Values() + startTime := runTimes[0].(time.Time) + endTime := runTimes[treeNode.Runs.Size()-1].(time.Time) + if err = w.scheduler.Clear(ctx, input.Project, jobName, startTime, 
endTime); err != nil { + err = errors.Wrapf(err, "error while clearing dag runs for job %s", jobName) + logger.W(fmt.Sprintf("error while running replay %s: %s", replay.ID.String(), err.Error())) + err = w.replayRepo.UpdateStatus(replay.ID, models.ReplayStatusFailed, err.Error()) + if err != nil { + return err + } + return err } } + err = w.replayRepo.UpdateStatus(replay.ID, models.ReplayStatusSuccess, MsgReplaySuccessfullyCompleted) + if err != nil { + return err + } + logger.I(fmt.Sprintf("successfully completed replay id: %s", replay.ID.String())) bus.Post(EvtRecordInsertedInDB, replay.ID) return nil } diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go index 1cf63dec9d..5f8aaf5c61 100644 --- a/job/replay_worker_test.go +++ b/job/replay_worker_test.go @@ -17,7 +17,7 @@ func TestReplayWorker(t *testing.T) { startDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-22") endDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-26") currUUID := uuid.Must(uuid.NewRandom()) - replayRequest := models.ReplayRequestInput{ + replayRequest := &models.ReplayRequestInput{ ID: currUUID, Job: models.JobSpec{ Name: "job-name", @@ -34,7 +34,6 @@ func TestReplayWorker(t *testing.T) { StartDate: startDate, EndDate: endDate, Status: models.ReplayStatusAccepted, - Project: replayRequest.Project, Job: replayRequest.Job, } t.Run("Process", func(t *testing.T) { diff --git a/mock/job.go b/mock/job.go index f659228f7a..98d7c23725 100644 --- a/mock/job.go +++ b/mock/job.go @@ -2,7 +2,6 @@ package mock import ( "context" - "time" "github.com/odpf/optimus/job" @@ -197,11 +196,16 @@ func (j *JobService) Delete(ctx context.Context, c models.NamespaceSpec, job mod return args.Error(0) } -func (j *JobService) ReplayDryRun(namespace models.NamespaceSpec, jobSpec models.JobSpec, start time.Time, end time.Time) (*tree.TreeNode, error) { - args := j.Called(namespace, jobSpec, start, end) +func (j *JobService) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { + args := j.Called(replayRequest) return args.Get(0).(*tree.TreeNode), args.Error(1) } +func (j *JobService) Replay(replayRequest *models.ReplayRequestInput) (string, error) { + args := j.Called(replayRequest) + return args.Get(0).(string), args.Error(1) +} + type Compiler struct { mock.Mock } diff --git a/mock/replay.go b/mock/replay.go index 5fe4fdf17c..a5e367235f 100644 --- a/mock/replay.go +++ b/mock/replay.go @@ -18,3 +18,7 @@ func (repo *ReplayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { func (repo *ReplayRepository) Insert(replay *models.ReplaySpec) error { return repo.Called(replay).Error(0) } + +func (repo *ReplayRepository) UpdateStatus(replayID uuid.UUID, status, message string) error { + return repo.Called(replayID, status, message).Error(0) +} diff --git a/models/job.go b/models/job.go index 025316c69f..9725f93d2f 100644 --- a/models/job.go +++ b/models/job.go @@ -292,8 +292,10 @@ type JobService interface { GetByName(string, NamespaceSpec) (JobSpec, error) // Dump returns the compiled Job Dump(NamespaceSpec, JobSpec) (Job, error) - // ReplayDryRun replays the jobSpec and its dependencies between start and endDate - ReplayDryRun(NamespaceSpec, JobSpec, time.Time, time.Time) (*tree.TreeNode, error) + // ReplayDryRun returns the execution tree of jobSpec and its dependencies between start and endDate + ReplayDryRun(*ReplayRequestInput) (*tree.TreeNode, error) + // Replay replays the jobSpec and its dependencies between start and endDate + Replay(*ReplayRequestInput) (string, 
error) // KeepOnly deletes all jobs except the ones provided for a namespace KeepOnly(NamespaceSpec, []JobSpec, progress.Observer) error // GetAll reads all job specifications of the given namespace diff --git a/models/replay.go b/models/replay.go index 6c1c912dd9..de4f94f605 100644 --- a/models/replay.go +++ b/models/replay.go @@ -9,6 +9,9 @@ import ( const ( // ReplayStatusAccepted worker picked up the request ReplayStatusAccepted = "Accepted" + // ReplayStatusFailed worker fail while processing the replay request + ReplayStatusFailed = "Failed" // end state + ReplayStatusSuccess = "Success" // end state ) type ReplayRequestInput struct { @@ -28,7 +31,6 @@ type ReplaySpec struct { Status string Message string CommitID string - Project ProjectSpec } type Syncer interface { @@ -38,4 +40,5 @@ type Syncer interface { type ReplayRepository interface { Insert(replay *ReplaySpec) error GetByID(id uuid.UUID) (ReplaySpec, error) + UpdateStatus(replayID uuid.UUID, status, message string) error } diff --git a/resources/pack/migrations/000009_create_replay_table.down.sql b/store/postgres/migrations/000010_create_replay_table.down.sql similarity index 100% rename from resources/pack/migrations/000009_create_replay_table.down.sql rename to store/postgres/migrations/000010_create_replay_table.down.sql diff --git a/resources/pack/migrations/000009_create_replay_table.up.sql b/store/postgres/migrations/000010_create_replay_table.up.sql similarity index 80% rename from resources/pack/migrations/000009_create_replay_table.up.sql rename to store/postgres/migrations/000010_create_replay_table.up.sql index bcb1d9a804..ee5ec8b3e1 100644 --- a/resources/pack/migrations/000009_create_replay_table.up.sql +++ b/store/postgres/migrations/000010_create_replay_table.up.sql @@ -1,10 +1,11 @@ CREATE EXTENSION IF NOT EXISTS "uuid-ossp"; CREATE TABLE IF NOT EXISTS replay ( id UUID PRIMARY KEY NOT NULL, - dag_id TEXT NOT NULL, + job_id UUID NOT NULL, start_date TIMESTAMP WITH TIME ZONE NOT NULL, end_date TIMESTAMP WITH TIME ZONE NOT NULL, - status TEXT NOT NULL, + status varchar(20) NOT NULL, + commit_id varchar(20), message TEXT, created_at TIMESTAMP WITH TIME ZONE NOT NULL, updated_at TIMESTAMP WITH TIME ZONE NOT NULL diff --git a/store/postgres/replay_repository.go b/store/postgres/replay_repository.go index db8c2326ab..dbc35340fa 100644 --- a/store/postgres/replay_repository.go +++ b/store/postgres/replay_repository.go @@ -16,12 +16,9 @@ type Replay struct { JobID uuid.UUID `gorm:"not null"` Job Job `gorm:"foreignKey:JobID"` - ProjectID uuid.UUID `gorm:"not null"` - Project Project `gorm:"foreignKey:ProjectID"` - - StartDate time.Time - EndDate time.Time - Status string + StartDate time.Time `gorm:"not null"` + EndDate time.Time `gorm:"not null"` + Status string `gorm:"not null"` Message string CommitID string @@ -33,7 +30,6 @@ func (p Replay) FromSpec(spec *models.ReplaySpec) (Replay, error) { return Replay{ ID: spec.ID, JobID: spec.Job.ID, - ProjectID: spec.Project.ID, StartDate: spec.StartDate, EndDate: spec.EndDate, Status: spec.Status, @@ -81,3 +77,17 @@ func (repo *replayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { } return r.ToSpec() } + +func (repo *replayRepository) UpdateStatus(replayID uuid.UUID, status, message string) error { + var r Replay + if err := repo.DB.Where("id = ?", replayID).Find(&r).Error; err != nil { + return errors.New("could not update non-existing replay") + } + r.Status = status + r.UpdatedAt = time.Now() + r.Message = message + if err := repo.DB.Save(&r).Error; err != 
nil { + return err + } + return nil +} diff --git a/store/postgres/replay_repository_test.go b/store/postgres/replay_repository_test.go index 5be85f62ac..32cf307d6c 100644 --- a/store/postgres/replay_repository_test.go +++ b/store/postgres/replay_repository_test.go @@ -41,9 +41,6 @@ func TestReplayRepository(t *testing.T) { jobSpec := models.JobSpec{ Name: "job-name", } - projectSpec := models.ProjectSpec{ - Name: "project-name", - } startTime, _ := time.Parse(job.ReplayDateFormat, "2020-01-15") endTime, _ := time.Parse(job.ReplayDateFormat, "2020-01-20") @@ -55,7 +52,6 @@ func TestReplayRepository(t *testing.T) { StartDate: startTime, EndDate: endTime, Status: models.ReplayStatusAccepted, - Project: projectSpec, }, } diff --git a/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json b/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json index 91b25ac19d..581c8a6329 100644 --- a/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json +++ b/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json @@ -229,6 +229,60 @@ ] } }, + "/api/v1/project/{projectName}/job/{jobName}/replay": { + "get": { + "operationId": "RuntimeService_Replay", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/optimusReplayResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "projectName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "jobName", + "in": "path", + "required": true, + "type": "string" + }, + { + "name": "namespace", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "startDate", + "in": "query", + "required": false, + "type": "string" + }, + { + "name": "endDate", + "in": "query", + "required": false, + "type": "string" + } + ], + "tags": [ + "RuntimeService" + ] + } + }, "/api/v1/project/{projectName}/job/{jobName}/replay-dry-run": { "get": { "operationId": "RuntimeService_ReplayDryRun", @@ -1476,6 +1530,14 @@ } } }, + "optimusReplayResponse": { + "type": "object", + "properties": { + "id": { + "type": "string" + } + } + }, "optimusResourceSpecification": { "type": "object", "properties": { From c2bca106488379fffb3b52d5d8af8b49c99f5bcd Mon Sep 17 00:00:00 2001 From: Maulik Soneji Date: Tue, 22 Jun 2021 17:10:39 +0530 Subject: [PATCH 3/6] fix: pass replaySpecRepo to replay worker --- cmd/server/server.go | 25 +- core/tree/multi_root_tree_test.go | 45 +- core/tree/tree_node_test.go | 21 + job/replay.go | 22 +- job/replay_manager.go | 28 +- job/replay_manager_test.go | 89 ++- job/replay_test.go | 518 +++++++++++------- job/replay_worker.go | 48 +- job/replay_worker_test.go | 162 +++++- job/service.go | 5 + mock/replay.go | 40 +- mock/scheduler.go | 43 ++ mock/uuid.go | 15 + models/replay.go | 25 +- .../000010_create_replay_table.up.sql | 3 +- store/postgres/replay_repository.go | 41 +- store/postgres/replay_repository_test.go | 29 +- store/store.go | 9 + utils/uuid.go | 18 + 19 files changed, 891 insertions(+), 295 deletions(-) create mode 100644 mock/scheduler.go create mode 100644 mock/uuid.go create mode 100644 utils/uuid.go diff --git a/cmd/server/server.go b/cmd/server/server.go index 77837ef9e8..7915a3cd72 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -12,6 +12,8 @@ import ( "syscall" "time" + "github.com/odpf/optimus/utils" + "github.com/odpf/optimus/ext/scheduler/airflow" 
"github.com/odpf/optimus/config" @@ -69,6 +71,15 @@ func (fac *projectJobSpecRepoFactory) New(project models.ProjectSpec) store.Proj return postgres.NewProjectJobSpecRepository(fac.db, project, postgres.NewAdapter(models.TaskRegistry, models.HookRegistry)) } +type replaySpecRepoRepository struct { + db *gorm.DB + jobSpecRepoFac jobSpecRepoFactory +} + +func (fac *replaySpecRepoRepository) New(job models.JobSpec) store.ReplaySpecRepository { + return postgres.NewReplayRepository(fac.db, job) +} + // jobSpecRepoFactory stores raw specifications type jobSpecRepoFactory struct { db *gorm.DB @@ -379,9 +390,13 @@ func Initialize(conf config.Provider) error { db: dbConn, projectResourceSpecRepoFac: projectResourceSpecRepoFac, } - replayRepo := postgres.NewReplayRepository(dbConn) - replayWorker := job.NewReplayWorker(replayRepo, models.Scheduler) - replayManager := job.NewManager(replayWorker, conf.GetServe().JobQueueSize) + + replaySpecRepoFac := &replaySpecRepoRepository{ + db: dbConn, + jobSpecRepoFac: jobSpecRepoFac, + } + replayWorker := job.NewReplayWorker(replaySpecRepoFac, models.Scheduler) + replayManager := job.NewManager(replayWorker, replaySpecRepoFac, utils.NewUUIDProvider(), conf.GetServe().JobQueueSize) // runtime service instance over grpc pb.RegisterRuntimeServiceServer(grpcServer, v1handler.NewRuntimeServiceServer( @@ -470,6 +485,10 @@ func Initialize(conf config.Provider) error { // Block until we receive our signal. <-termChan mainLog.Info("termination request received") + err = replayManager.Close() + if err != nil { + return err + } // Create a deadline to wait for server ctxProxy, cancelProxy := context.WithTimeout(context.Background(), shutdownWait) diff --git a/core/tree/multi_root_tree_test.go b/core/tree/multi_root_tree_test.go index f1d725b2e1..fb43fd7bda 100644 --- a/core/tree/multi_root_tree_test.go +++ b/core/tree/multi_root_tree_test.go @@ -11,13 +11,13 @@ import ( func TestMultiRootDagTree(t *testing.T) { t.Run("GetNameAndDependents", func(t *testing.T) { - multiRootTree := tree.NewMultiRootTree() treeNode1 := tree.NewTreeNode(models.JobSpec{ Name: "job1", }) treeNode2 := tree.NewTreeNode(models.JobSpec{ Name: "job2", }) + multiRootTree := tree.NewMultiRootTree() treeNode1.AddDependent(treeNode2) treeNode2.AddDependent(treeNode1) multiRootTree.AddNodeIfNotExist(treeNode1) @@ -27,4 +27,47 @@ func TestMultiRootDagTree(t *testing.T) { assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), tree.ErrCyclicDependencyEncountered.Error())) }) + t.Run("MarkRoot", func(t *testing.T) { + treeNode1 := tree.NewTreeNode(models.JobSpec{ + Name: "job1", + }) + multiRootTree := tree.NewMultiRootTree() + multiRootTree.AddNode(treeNode1) + multiRootTree.MarkRoot(treeNode1) + rootNodes := multiRootTree.GetRootNodes() + assert.Equal(t, 1, len(rootNodes)) + assert.Equal(t, "job1", rootNodes[0].Data.GetName()) + }) + t.Run("IsCyclic", func(t *testing.T) { + t.Run("should throw an error if cyclic", func(t *testing.T) { + treeNode1 := tree.NewTreeNode(models.JobSpec{ + Name: "job1", + }) + treeNode2 := tree.NewTreeNode(models.JobSpec{ + Name: "job2", + }) + multiRootTree := tree.NewMultiRootTree() + multiRootTree.AddNode(treeNode1) + multiRootTree.AddNode(treeNode2) + treeNode1.AddDependent(treeNode2) + treeNode2.AddDependent(treeNode1) + err := multiRootTree.IsCyclic() + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), "cycle dependency")) + }) + t.Run("should not return error if not cyclic", func(t *testing.T) { + treeNode1 := 
tree.NewTreeNode(models.JobSpec{ + Name: "job1", + }) + treeNode2 := tree.NewTreeNode(models.JobSpec{ + Name: "job2", + }) + multiRootTree := tree.NewMultiRootTree() + multiRootTree.AddNode(treeNode1) + multiRootTree.AddNode(treeNode2) + treeNode1.AddDependent(treeNode2) + err := multiRootTree.IsCyclic() + assert.Nil(t, err) + }) + }) } diff --git a/core/tree/tree_node_test.go b/core/tree/tree_node_test.go index 6b5547c081..ffadec884e 100644 --- a/core/tree/tree_node_test.go +++ b/core/tree/tree_node_test.go @@ -24,4 +24,25 @@ func TestDagNode(t *testing.T) { dagNode.AddDependent(dependentDagNode) assert.Equal(t, jobName, dagNode.GetName()) }) + t.Run("GetAllNodes", func(t *testing.T) { + treeNode := tree.TreeNode{ + Data: models.JobSpec{ + Name: "parent-job", + }, + Dependents: []*tree.TreeNode{ + { + Data: models.JobSpec{ + Name: "child-job", + }, + }, + }, + } + nodesMap := make(map[string]*tree.TreeNode) + treeNode.GetAllNodes(nodesMap) + assert.Equal(t, 2, len(nodesMap)) + _, parentNodeFound := nodesMap["parent-job"] + assert.True(t, parentNodeFound) + _, childNodeFound := nodesMap["child-job"] + assert.True(t, childNodeFound) + }) } diff --git a/job/replay.go b/job/replay.go index e6e127b247..0c3287f1e8 100644 --- a/job/replay.go +++ b/job/replay.go @@ -15,18 +15,25 @@ const ( ReplayDateFormat = "2006-01-02" ) -func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { +func (srv *Service) populateRequestWithJobSpecs(replayRequest *models.ReplayRequestInput) error { projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(replayRequest.Project) jobSpecs, err := srv.getDependencyResolvedSpecs(replayRequest.Project, projectJobSpecRepo, nil) if err != nil { - return nil, err + return err } - dagSpecMap := make(map[string]models.JobSpec) for _, currSpec := range jobSpecs { dagSpecMap[currSpec.Name] = currSpec } replayRequest.DagSpecMap = dagSpecMap + return nil +} + +func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { + err := srv.populateRequestWithJobSpecs(replayRequest) + if err != nil { + return nil, err + } rootInstance, err := prepareTree(replayRequest) if err != nil { @@ -37,18 +44,11 @@ func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tre } func (srv *Service) Replay(replayRequest *models.ReplayRequestInput) (string, error) { - projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(replayRequest.Project) - jobSpecs, err := srv.getDependencyResolvedSpecs(replayRequest.Project, projectJobSpecRepo, nil) + err := srv.populateRequestWithJobSpecs(replayRequest) if err != nil { return "", err } - dagSpecMap := make(map[string]models.JobSpec) - for _, currSpec := range jobSpecs { - dagSpecMap[currSpec.Name] = currSpec - } - replayRequest.DagSpecMap = dagSpecMap - replayUUID, err := srv.replayManager.Replay(replayRequest) if err != nil { return "", err diff --git a/job/replay_manager.go b/job/replay_manager.go index 36f2bbfa25..0f5e18bf84 100644 --- a/job/replay_manager.go +++ b/job/replay_manager.go @@ -4,6 +4,8 @@ import ( "context" "sync" + "github.com/odpf/optimus/utils" + "github.com/google/uuid" "github.com/odpf/optimus/core/bus" "github.com/odpf/optimus/core/logger" @@ -33,6 +35,8 @@ type Manager struct { wg sync.WaitGroup mu sync.Mutex + uuidProvider utils.UUIDProvider + // request queue, used by workers requestQ chan *models.ReplayRequestInput // request map, used for verifying if a request is @@ 
-43,18 +47,32 @@ type Manager struct { clearRequestMapListener chan interface{} //request worker - replayWorker ReplayWorker + replayWorker ReplayWorker + replaySpecRepoFac ReplaySpecRepoFactory } // Replay a request asynchronously, returns a replay id that can // can be used to query its status func (m *Manager) Replay(reqInput *models.ReplayRequestInput) (string, error) { - uuidOb, err := uuid.NewRandom() + uuidOb, err := m.uuidProvider.NewUUID() if err != nil { return "", err } reqInput.ID = uuidOb + // save replay request and mark status as accepted + replay := models.ReplaySpec{ + ID: uuidOb, + Job: reqInput.Job, + StartDate: reqInput.Start, + EndDate: reqInput.End, + Status: models.ReplayStatusAccepted, + } + replaySpecRepo := m.replaySpecRepoFac.New(reqInput.Job) + if err = replaySpecRepo.Insert(&replay); err != nil { + return "", err + } + // try sending the job request down the request queue // if full return error indicating that we don't have capacity // to process this request at the moment @@ -77,7 +95,7 @@ func (m *Manager) spawnServiceWorker() { go func() { defer m.wg.Done() for reqInput := range m.requestQ { - logger.I("worker picked up the request for ", reqInput.Project.Name) + logger.I("worker picked up the request for ", reqInput.Job.Name) ctx := context.Background() if err := m.replayWorker.Process(ctx, reqInput); err != nil { @@ -130,12 +148,14 @@ func (m *Manager) Init() { } // NewManager constructs a new instance of Manager -func NewManager(worker ReplayWorker, size int) *Manager { +func NewManager(worker ReplayWorker, replaySpecRepoFac ReplaySpecRepoFactory, uuidProvider utils.UUIDProvider, size int) *Manager { mgr := &Manager{ replayWorker: worker, requestMap: make(map[uuid.UUID]bool), requestQ: make(chan *models.ReplayRequestInput, size), + replaySpecRepoFac: replaySpecRepoFac, clearRequestMapListener: make(chan interface{}), + uuidProvider: uuidProvider, } mgr.Init() return mgr diff --git a/job/replay_manager_test.go b/job/replay_manager_test.go index 8d7765aa64..87f20218ec 100644 --- a/job/replay_manager_test.go +++ b/job/replay_manager_test.go @@ -1 +1,88 @@ -package job +package job_test + +import ( + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/odpf/optimus/core/logger" + "github.com/odpf/optimus/job" + "github.com/odpf/optimus/mock" + "github.com/odpf/optimus/models" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" +) + +func TestReplayManager(t *testing.T) { + t.Run("Close", func(t *testing.T) { + logger.Init(logger.ERROR) + manager := job.NewManager(nil, nil, nil, 5) + err := manager.Close() + assert.Nil(t, err) + }) + t.Run("Replay", func(t *testing.T) { + dagStartTime, _ := time.Parse(job.ReplayDateFormat, "2020-04-05") + startDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-22") + endDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-26") + jobSpec := models.JobSpec{ + Name: "job-name", + Schedule: models.JobSpecSchedule{ + StartDate: dagStartTime, + Interval: "0 2 * * *", + }, + } + replayRequest := &models.ReplayRequestInput{ + Job: jobSpec, + Start: startDate, + End: endDate, + Project: models.ProjectSpec{ + Name: "project-name", + }, + DagSpecMap: map[string]models.JobSpec{ + "job-name": jobSpec, + }, + } + t.Run("should throw error if uuid provider returns failure", func(t *testing.T) { + logger.Init(logger.ERROR) + uuidProvider := new(mock.UUIDProvider) + defer 
uuidProvider.AssertExpectations(t) + objUUID := uuid.Must(uuid.NewRandom()) + errMessage := "error while generating uuid" + uuidProvider.On("NewUUID").Return(objUUID, errors.New(errMessage)) + + replayManager := job.NewManager(nil, nil, uuidProvider, 5) + _, err := replayManager.Replay(replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), errMessage)) + }) + t.Run("should throw an error if replay repo throws error", func(t *testing.T) { + logger.Init(logger.ERROR) + replayRepository := new(mock.ReplayRepository) + defer replayRepository.AssertExpectations(t) + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) + + uuidProvider := new(mock.UUIDProvider) + defer uuidProvider.AssertExpectations(t) + objUUID := uuid.Must(uuid.NewRandom()) + uuidProvider.On("NewUUID").Return(objUUID, nil) + + errMessage := "error with replay repo" + toInsertReplaySpec := &models.ReplaySpec{ + ID: objUUID, + Job: jobSpec, + StartDate: startDate, + EndDate: endDate, + Status: models.ReplayStatusAccepted, + } + replayRepository.On("Insert", toInsertReplaySpec).Return(errors.New(errMessage)) + + replayManager := job.NewManager(nil, replaySpecRepoFac, uuidProvider, 5) + _, err := replayManager.Replay(replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), errMessage)) + }) + }) +} diff --git a/job/replay_test.go b/job/replay_test.go index ea192b2426..de52de0959 100644 --- a/job/replay_test.go +++ b/job/replay_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/google/uuid" + "github.com/odpf/optimus/job" "github.com/odpf/optimus/core/tree" @@ -91,211 +93,319 @@ func TestReplay(t *testing.T) { Name: "proj", } - t.Run("should fail if unable to fetch jobSpecs from project jobSpecRepo", func(t *testing.T) { - projectJobSpecRepo := new(mock.ProjectJobSpecRepository) - projectJobSpecRepo.On("GetAll").Return(nil, errors.New("error while getting all dags")) - defer projectJobSpecRepo.AssertExpectations(t) - - projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) - projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) - defer projJobSpecRepoFac.AssertExpectations(t) - - replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - - jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ - Job: specs[spec1], - Start: replayStart, - End: replayEnd, - Project: projSpec, - } - _, err := jobSvc.ReplayDryRun(replayRequest) - - assert.NotNil(t, err) - }) - - t.Run("should fail if unable to resolve jobs using dependency resolver", func(t *testing.T) { - projectJobSpecRepo := new(mock.ProjectJobSpecRepository) - projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) - defer projectJobSpecRepo.AssertExpectations(t) - - projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) - projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) - defer projJobSpecRepoFac.AssertExpectations(t) - - // resolve dependencies - depenResolver := new(mock.DependencyResolver) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(models.JobSpec{}, errors.New("error while fetching dag1")) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) - 
depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(models.JobSpec{}, errors.New("error while fetching dag3")) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(models.JobSpec{}, errors.New("error while fetching dag4")) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) - defer depenResolver.AssertExpectations(t) - - replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - - jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ - Job: specs[spec1], - Start: replayStart, - End: replayEnd, - Project: projSpec, - } - _, err := jobSvc.ReplayDryRun(replayRequest) - - assert.NotNil(t, err) - merr := err.(*multierror.Error) - assert.Equal(t, 3, merr.Len()) - }) - - t.Run("should fail if tree is cyclic", func(t *testing.T) { - cyclicDagSpec := make([]models.JobSpec, 0) - cyclicDag1 := models.JobSpec{Name: "dag1-deps-on-dag2", Schedule: twoAMSchedule, Task: oneDayTaskWindow} - cyclicDag2 := models.JobSpec{Name: "dag2-deps-on-dag1", Schedule: twoAMSchedule, Task: oneDayTaskWindow} - cyclicDag1Deps := make(map[string]models.JobSpecDependency) - cyclicDag1Deps[cyclicDag1.Name] = models.JobSpecDependency{Job: &cyclicDag2} - cyclicDag2Deps := make(map[string]models.JobSpecDependency) - cyclicDag2Deps[cyclicDag2.Name] = models.JobSpecDependency{Job: &cyclicDag1} - cyclicDag1.Dependencies = cyclicDag1Deps - cyclicDag2.Dependencies = cyclicDag2Deps - cyclicDagSpec = append(cyclicDagSpec, cyclicDag1, cyclicDag2) - - projectJobSpecRepo := new(mock.ProjectJobSpecRepository) - projectJobSpecRepo.On("GetAll").Return(cyclicDagSpec, nil) - defer projectJobSpecRepo.AssertExpectations(t) - - projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) - projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) - defer projJobSpecRepoFac.AssertExpectations(t) - - // resolve dependencies - depenResolver := new(mock.DependencyResolver) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, cyclicDagSpec[0], nil).Return(cyclicDagSpec[0], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, cyclicDagSpec[1], nil).Return(cyclicDagSpec[1], nil) - defer depenResolver.AssertExpectations(t) - - replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - - jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ - Job: cyclicDagSpec[0], - Start: replayStart, - End: replayEnd, - Project: projSpec, - } - _, err := jobSvc.ReplayDryRun(replayRequest) - - assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "a cycle dependency encountered in the tree")) + t.Run("ReplayDryRun", func(t *testing.T) { + t.Run("should fail if unable to fetch jobSpecs from project jobSpecRepo", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(nil, errors.New("error while getting all dags")) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer 
projJobSpecRepoFac.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) + + assert.NotNil(t, err) + }) + + t.Run("should fail if unable to resolve jobs using dependency resolver", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + // resolve dependencies + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(models.JobSpec{}, errors.New("error while fetching dag1")) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(models.JobSpec{}, errors.New("error while fetching dag3")) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(models.JobSpec{}, errors.New("error while fetching dag4")) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) + defer depenResolver.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) + + assert.NotNil(t, err) + merr := err.(*multierror.Error) + assert.Equal(t, 3, merr.Len()) + }) + + t.Run("should fail if tree is cyclic", func(t *testing.T) { + cyclicDagSpec := make([]models.JobSpec, 0) + cyclicDag1 := models.JobSpec{Name: "dag1-deps-on-dag2", Schedule: twoAMSchedule, Task: oneDayTaskWindow} + cyclicDag2 := models.JobSpec{Name: "dag2-deps-on-dag1", Schedule: twoAMSchedule, Task: oneDayTaskWindow} + cyclicDag1Deps := make(map[string]models.JobSpecDependency) + cyclicDag1Deps[cyclicDag1.Name] = models.JobSpecDependency{Job: &cyclicDag2} + cyclicDag2Deps := make(map[string]models.JobSpecDependency) + cyclicDag2Deps[cyclicDag2.Name] = models.JobSpecDependency{Job: &cyclicDag1} + cyclicDag1.Dependencies = cyclicDag1Deps + cyclicDag2.Dependencies = cyclicDag2Deps + cyclicDagSpec = append(cyclicDagSpec, cyclicDag1, cyclicDag2) + + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(cyclicDagSpec, nil) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + // resolve dependencies + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, cyclicDagSpec[0], 
nil).Return(cyclicDagSpec[0], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, cyclicDagSpec[1], nil).Return(cyclicDagSpec[1], nil) + defer depenResolver.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) + replayRequest := &models.ReplayRequestInput{ + Job: cyclicDagSpec[0], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.ReplayDryRun(replayRequest) + + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), "a cycle dependency encountered in the tree")) + }) + + t.Run("should create replay tree for a dag with a three day task window and its dependents", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + // resolve dependencies + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) + defer depenResolver.AssertExpectations(t) + + compiler := new(mock.Compiler) + defer compiler.AssertExpectations(t) + + jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + + tree, err := jobSvc.ReplayDryRun(replayRequest) + + assert.Nil(t, err) + countMap := make(map[string][]time.Time) + getRuns(tree, countMap) + expectedRunMap := map[string][]time.Time{} + expectedRunMap[spec1] = []time.Time{ + time.Date(2020, time.Month(8), 5, 2, 0, 0, 0, time.UTC), + time.Date(2020, time.Month(8), 6, 2, 0, 0, 0, time.UTC), + time.Date(2020, time.Month(8), 7, 2, 0, 0, 0, time.UTC), + } + expectedRunMap[spec2] = expectedRunMap[spec1] + expectedRunMap[spec2] = append(expectedRunMap[spec2], time.Date(2020, time.Month(8), 8, 2, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 9, 2, 0, 0, 0, time.UTC)) + expectedRunMap[spec3] = expectedRunMap[spec2] + expectedRunMap[spec3] = append(expectedRunMap[spec3], time.Date(2020, time.Month(8), 10, 2, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 11, 2, 0, 0, 0, time.UTC)) + for k, v := range countMap { + assert.Equal(t, expectedRunMap[k], v) + } + }) + + t.Run("should create replay tree for an hourly dag and its daily dependents", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) + 
defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + // resolve dependencies + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) + defer depenResolver.AssertExpectations(t) + + compiler := new(mock.Compiler) + defer compiler.AssertExpectations(t) + + jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec4], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + + tree, err := jobSvc.ReplayDryRun(replayRequest) + + assert.Nil(t, err) + countMap := make(map[string][]time.Time) + getRuns(tree, countMap) + expectedRunMap := map[string][]time.Time{} + expectedRunMap[spec4] = []time.Time{} + for i := 0; i <= 23; i++ { + expectedRunMap[spec4] = append(expectedRunMap[spec4], time.Date(2020, time.Month(8), 5, i, 0, 0, 0, time.UTC)) + } + expectedRunMap[spec5] = []time.Time{ + time.Date(2020, time.Month(8), 5, 0, 0, 0, 0, time.UTC), + time.Date(2020, time.Month(8), 6, 0, 0, 0, 0, time.UTC), + time.Date(2020, time.Month(8), 7, 0, 0, 0, 0, time.UTC), + time.Date(2020, time.Month(8), 8, 0, 0, 0, 0, time.UTC), + } + expectedRunMap[spec6] = append(expectedRunMap[spec5], time.Date(2020, time.Month(8), 9, 0, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 10, 0, 0, 0, 0, time.UTC)) + for k, v := range countMap { + assert.Equal(t, expectedRunMap[k], v) + } + }) }) - t.Run("resolve create replay tree for a dag with three day task window and mentioned dependencies", func(t *testing.T) { - projectJobSpecRepo := new(mock.ProjectJobSpecRepository) - projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) - defer projectJobSpecRepo.AssertExpectations(t) - - projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) - projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) - defer projJobSpecRepoFac.AssertExpectations(t) - - // resolve dependencies - depenResolver := new(mock.DependencyResolver) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) - defer depenResolver.AssertExpectations(t) - - compiler := new(mock.Compiler) - defer 
compiler.AssertExpectations(t) - - jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - replayRequest := &models.ReplayRequestInput{ - Job: specs[spec1], - Start: replayStart, - End: replayEnd, - Project: projSpec, - } - - tree, err := jobSvc.ReplayDryRun(replayRequest) - - assert.Nil(t, err) - countMap := make(map[string][]time.Time) - getRuns(tree, countMap) - expectedRunMap := map[string][]time.Time{} - expectedRunMap[spec1] = []time.Time{ - time.Date(2020, time.Month(8), 5, 2, 0, 0, 0, time.UTC), - time.Date(2020, time.Month(8), 6, 2, 0, 0, 0, time.UTC), - time.Date(2020, time.Month(8), 7, 2, 0, 0, 0, time.UTC), - } - expectedRunMap[spec2] = expectedRunMap[spec1] - expectedRunMap[spec2] = append(expectedRunMap[spec2], time.Date(2020, time.Month(8), 8, 2, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 9, 2, 0, 0, 0, time.UTC)) - expectedRunMap[spec3] = expectedRunMap[spec2] - expectedRunMap[spec3] = append(expectedRunMap[spec3], time.Date(2020, time.Month(8), 10, 2, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 11, 2, 0, 0, 0, time.UTC)) - for k, v := range countMap { - assert.Equal(t, expectedRunMap[k], v) - } - }) - - t.Run("resolve create replay tree for a dag with three day task window and mentioned dependencies", func(t *testing.T) { - projectJobSpecRepo := new(mock.ProjectJobSpecRepository) - projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) - defer projectJobSpecRepo.AssertExpectations(t) - - projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) - projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) - defer projJobSpecRepoFac.AssertExpectations(t) - - // resolve dependencies - depenResolver := new(mock.DependencyResolver) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) - depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) - defer depenResolver.AssertExpectations(t) - - compiler := new(mock.Compiler) - defer compiler.AssertExpectations(t) - - jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayRequest := &models.ReplayRequestInput{ - Job: specs[spec4], - Start: replayStart, - End: replayEnd, - Project: projSpec, - } - - tree, err := jobSvc.ReplayDryRun(replayRequest) - - assert.Nil(t, err) - countMap := make(map[string][]time.Time) - getRuns(tree, countMap) - expectedRunMap := map[string][]time.Time{} - expectedRunMap[spec4] = []time.Time{} - for i := 0; i <= 23; i++ { - expectedRunMap[spec4] = append(expectedRunMap[spec4], time.Date(2020, time.Month(8), 5, i, 0, 0, 0, time.UTC)) - } - expectedRunMap[spec5] = []time.Time{ - time.Date(2020, time.Month(8), 5, 0, 0, 0, 0, time.UTC), - time.Date(2020, time.Month(8), 6, 0, 0, 0, 0, time.UTC), - time.Date(2020, time.Month(8), 7, 0, 0, 0, 
0, time.UTC), - time.Date(2020, time.Month(8), 8, 0, 0, 0, 0, time.UTC), - } - expectedRunMap[spec6] = append(expectedRunMap[spec5], time.Date(2020, time.Month(8), 9, 0, 0, 0, 0, time.UTC), time.Date(2020, time.Month(8), 10, 0, 0, 0, 0, time.UTC)) - for k, v := range countMap { - assert.Equal(t, expectedRunMap[k], v) - } + t.Run("Replay", func(t *testing.T) { + t.Run("should fail if unable to fetch jobSpecs from project jobSpecRepo", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(nil, errors.New("error while getting all dags")) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + } + _, err := jobSvc.Replay(replayRequest) + + assert.NotNil(t, err) + }) + + t.Run("should fail if replay manager throws an error", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer projJobSpecRepoFac.AssertExpectations(t) + + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) + defer depenResolver.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + DagSpecMap: specs, + } + + errMessage := "error with replay manager" + replayManager := new(mock.ReplayManager) + replayManager.On("Replay", replayRequest).Return("", errors.New(errMessage)) + defer replayManager.AssertExpectations(t) + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, replayManager) + + _, err := jobSvc.Replay(replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), errMessage)) + }) + + t.Run("should succeed if replay manager successfully processes request", func(t *testing.T) { + projectJobSpecRepo := new(mock.ProjectJobSpecRepository) + projectJobSpecRepo.On("GetAll").Return(dagSpec, nil) + defer projectJobSpecRepo.AssertExpectations(t) + + projJobSpecRepoFac := new(mock.ProjectJobSpecRepoFactory) + projJobSpecRepoFac.On("New", projSpec).Return(projectJobSpecRepo) + defer 
projJobSpecRepoFac.AssertExpectations(t) + + depenResolver := new(mock.DependencyResolver) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[0], nil).Return(dagSpec[0], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[1], nil).Return(dagSpec[1], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[2], nil).Return(dagSpec[2], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[3], nil).Return(dagSpec[3], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[4], nil).Return(dagSpec[4], nil) + depenResolver.On("Resolve", projSpec, projectJobSpecRepo, dagSpec[5], nil).Return(dagSpec[5], nil) + defer depenResolver.AssertExpectations(t) + + replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") + replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") + replayRequest := &models.ReplayRequestInput{ + Job: specs[spec1], + Start: replayStart, + End: replayEnd, + Project: projSpec, + DagSpecMap: specs, + } + + replayManager := new(mock.ReplayManager) + objUUID := uuid.Must(uuid.NewRandom()) + replayManager.On("Replay", replayRequest).Return(objUUID.String(), nil) + defer replayManager.AssertExpectations(t) + + jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, replayManager) + + replayUUID, err := jobSvc.Replay(replayRequest) + assert.Nil(t, err) + assert.Equal(t, objUUID.String(), replayUUID) + }) }) } diff --git a/job/replay_worker.go b/job/replay_worker.go index 55ad86b7f9..9a56aea5ae 100644 --- a/job/replay_worker.go +++ b/job/replay_worker.go @@ -23,6 +23,7 @@ const ( EvtFailedToPrepareForReplay = "replay_request_failed_to_prepare" MsgReplaySuccessfullyCompleted = "Completed successfully" + MsgReplayInProgress = "Replay Request Picked up by replay worker" ) type ReplayWorker interface { @@ -30,22 +31,19 @@ type ReplayWorker interface { } type replayWorker struct { - replayRepo models.ReplayRepository - scheduler models.SchedulerUnit + replaySpecRepoFac ReplaySpecRepoFactory + scheduler models.SchedulerUnit } func (w *replayWorker) Process(ctx context.Context, input *models.ReplayRequestInput) (err error) { - // save replay request - replay := models.ReplaySpec{ - ID: input.ID, - Job: input.Job, - StartDate: input.Start, - EndDate: input.End, - Status: models.ReplayStatusAccepted, - } - if err = w.replayRepo.Insert(&replay); err != nil { - bus.Post(EvtFailedToPrepareForReplay, input.ID) - return + replaySpecRepo := w.replaySpecRepoFac.New(input.Job) + // mark replay request in progress + inProgressErr := replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusInProgress, models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: MsgReplayInProgress, + }) + if inProgressErr != nil { + return inProgressErr } replayTree, err := prepareTree(input) @@ -62,24 +60,30 @@ func (w *replayWorker) Process(ctx context.Context, input *models.ReplayRequestI endTime := runTimes[treeNode.Runs.Size()-1].(time.Time) if err = w.scheduler.Clear(ctx, input.Project, jobName, startTime, endTime); err != nil { err = errors.Wrapf(err, "error while clearing dag runs for job %s", jobName) - logger.W(fmt.Sprintf("error while running replay %s: %s", replay.ID.String(), err.Error())) - err = w.replayRepo.UpdateStatus(replay.ID, models.ReplayStatusFailed, err.Error()) - if err != nil { - return err + logger.W(fmt.Sprintf("error while running replay %s: %s", input.ID.String(), err.Error())) + updateStatusErr := replaySpecRepo.UpdateStatus(input.ID, 
models.ReplayStatusFailed, models.ReplayMessage{ + Status: models.ReplayStatusFailed, + Message: err.Error(), + }) + if updateStatusErr != nil { + return updateStatusErr } return err } } - err = w.replayRepo.UpdateStatus(replay.ID, models.ReplayStatusSuccess, MsgReplaySuccessfullyCompleted) + err = replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusSuccess, models.ReplayMessage{ + Status: models.ReplayStatusSuccess, + Message: MsgReplaySuccessfullyCompleted, + }) if err != nil { return err } - logger.I(fmt.Sprintf("successfully completed replay id: %s", replay.ID.String())) - bus.Post(EvtRecordInsertedInDB, replay.ID) + logger.I(fmt.Sprintf("successfully completed replay id: %s", input.ID.String())) + bus.Post(EvtRecordInsertedInDB, input.ID) return nil } -func NewReplayWorker(replayRepo models.ReplayRepository, scheduler models.SchedulerUnit) *replayWorker { - return &replayWorker{replayRepo: replayRepo, scheduler: scheduler} +func NewReplayWorker(replaySpecRepoFac ReplaySpecRepoFactory, scheduler models.SchedulerUnit) *replayWorker { + return &replayWorker{replaySpecRepoFac: replaySpecRepoFac, scheduler: scheduler} } diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go index 5f8aaf5c61..3528f50d77 100644 --- a/job/replay_worker_test.go +++ b/job/replay_worker_test.go @@ -2,9 +2,12 @@ package job_test import ( "context" + "strings" "testing" "time" + "github.com/odpf/optimus/core/logger" + "github.com/google/uuid" "github.com/odpf/optimus/job" "github.com/odpf/optimus/mock" @@ -14,27 +17,30 @@ import ( ) func TestReplayWorker(t *testing.T) { + dagStartTime, _ := time.Parse(job.ReplayDateFormat, "2020-04-05") startDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-22") endDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-26") currUUID := uuid.Must(uuid.NewRandom()) - replayRequest := &models.ReplayRequestInput{ - ID: currUUID, - Job: models.JobSpec{ - Name: "job-name", + dagRunStartTime := time.Date(2020, time.Month(8), 22, 2, 0, 0, 0, time.UTC) + dagRunEndTime := time.Date(2020, time.Month(8), 26, 2, 0, 0, 0, time.UTC) + jobSpec := models.JobSpec{ + Name: "job-name", + Schedule: models.JobSpecSchedule{ + StartDate: dagStartTime, + Interval: "0 2 * * *", }, + } + replayRequest := &models.ReplayRequestInput{ + ID: currUUID, + Job: jobSpec, Start: startDate, End: endDate, Project: models.ProjectSpec{ Name: "project-name", }, - DagSpecMap: make(map[string]models.JobSpec), - } - replaySpecToInsert := &models.ReplaySpec{ - ID: currUUID, - StartDate: startDate, - EndDate: endDate, - Status: models.ReplayStatusAccepted, - Job: replayRequest.Job, + DagSpecMap: map[string]models.JobSpec{ + "job-name": jobSpec, + }, } t.Run("Process", func(t *testing.T) { t.Run("should throw an error when replayRepo throws an error", func(t *testing.T) { @@ -42,13 +48,141 @@ func TestReplayWorker(t *testing.T) { replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) errMessage := "replay repo error" + message := models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: job.MsgReplayInProgress, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(errors.New(errMessage)) - replayRepository.On("Insert", replaySpecToInsert).Return(errors.New(errMessage)) + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) - 
worker := job.NewReplayWorker(replayRepository, nil) + worker := job.NewReplayWorker(replaySpecRepoFac, nil) err := worker.Process(ctx, replayRequest) assert.NotNil(t, err) assert.Equal(t, errMessage, err.Error()) }) + t.Run("should throw an error when scheduler throws an error", func(t *testing.T) { + logger.Init(logger.ERROR) + ctx := context.Background() + replayRepository := new(mock.ReplayRepository) + defer replayRepository.AssertExpectations(t) + inProgressReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: job.MsgReplayInProgress, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, inProgressReplayMessage).Return(nil) + errMessage := "error while clearing dag runs for job job-name: scheduler clear error" + failedReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusFailed, + Message: errMessage, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusFailed, failedReplayMessage).Return(nil) + + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) + + scheduler := new(mock.MockScheduler) + defer scheduler.AssertExpectations(t) + errorMessage := "scheduler clear error" + scheduler.On("Clear", ctx, replayRequest.Project, "job-name", dagRunStartTime, dagRunEndTime).Return(errors.New(errorMessage)) + + worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) + err := worker.Process(ctx, replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), errorMessage)) + }) + t.Run("should throw an error when updatestatus throws an error for failed request", func(t *testing.T) { + logger.Init(logger.ERROR) + ctx := context.Background() + replayRepository := new(mock.ReplayRepository) + defer replayRepository.AssertExpectations(t) + inProgressReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: job.MsgReplayInProgress, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, inProgressReplayMessage).Return(nil) + errMessage := "error while clearing dag runs for job job-name: scheduler clear error" + failedReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusFailed, + Message: errMessage, + } + updateStatusErr := errors.New("error while updating status to failed") + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusFailed, failedReplayMessage).Return(updateStatusErr) + + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) + + scheduler := new(mock.MockScheduler) + defer scheduler.AssertExpectations(t) + errorMessage := "scheduler clear error" + scheduler.On("Clear", ctx, replayRequest.Project, "job-name", dagRunStartTime, dagRunEndTime).Return(errors.New(errorMessage)) + + worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) + err := worker.Process(ctx, replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), updateStatusErr.Error())) + }) + + t.Run("should throw an error when updatestatus throws an error for successful request", func(t *testing.T) { + logger.Init(logger.ERROR) + ctx := context.Background() + replayRepository := new(mock.ReplayRepository) + defer replayRepository.AssertExpectations(t) + message := models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: 
job.MsgReplayInProgress, + } + successReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusSuccess, + Message: job.MsgReplaySuccessfullyCompleted, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(nil) + updateSuccessStatusErr := errors.New("error while updating replay request") + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, successReplayMessage).Return(updateSuccessStatusErr) + + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) + + scheduler := new(mock.MockScheduler) + defer scheduler.AssertExpectations(t) + scheduler.On("Clear", ctx, replayRequest.Project, "job-name", dagRunStartTime, dagRunEndTime).Return(nil) + + worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) + err := worker.Process(ctx, replayRequest) + assert.NotNil(t, err) + assert.True(t, strings.Contains(err.Error(), updateSuccessStatusErr.Error())) + }) + t.Run("should update replay status if successful", func(t *testing.T) { + logger.Init(logger.ERROR) + ctx := context.Background() + replayRepository := new(mock.ReplayRepository) + message := models.ReplayMessage{ + Status: models.ReplayStatusInProgress, + Message: job.MsgReplayInProgress, + } + successReplayMessage := models.ReplayMessage{ + Status: models.ReplayStatusSuccess, + Message: job.MsgReplaySuccessfullyCompleted, + } + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, successReplayMessage).Return(nil) + + replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) + defer replaySpecRepoFac.AssertExpectations(t) + replaySpecRepoFac.On("New", replayRequest.Job).Return(replayRepository) + + scheduler := new(mock.MockScheduler) + defer scheduler.AssertExpectations(t) + scheduler.On("Clear", ctx, replayRequest.Project, "job-name", dagRunStartTime, dagRunEndTime).Return(nil) + + worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) + err := worker.Process(ctx, replayRequest) + assert.Nil(t, err) + }) }) } diff --git a/job/service.go b/job/service.go index 4515b0ac58..20528eff68 100644 --- a/job/service.go +++ b/job/service.go @@ -52,6 +52,11 @@ type JobRepoFactory interface { New(context.Context, models.ProjectSpec) (store.JobRepository, error) } +// ReplaySpecRepoFactory is used to manage replay spec objects from store +type ReplaySpecRepoFactory interface { + New(jobSpec models.JobSpec) store.ReplaySpecRepository +} + // Service compiles all jobs with its dependencies, priority and // and other properties. 
Finally, it syncs the jobs with corresponding // store diff --git a/mock/replay.go b/mock/replay.go index a5e367235f..4cb8372715 100644 --- a/mock/replay.go +++ b/mock/replay.go @@ -1,8 +1,11 @@ package mock import ( + "context" + "github.com/google/uuid" "github.com/odpf/optimus/models" + "github.com/odpf/optimus/store" "github.com/stretchr/testify/mock" ) @@ -11,14 +14,45 @@ type ReplayRepository struct { } func (repo *ReplayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { - called := repo.Called(id) - return called.Get(0).(models.ReplaySpec), called.Error(1) + args := repo.Called(id) + return args.Get(0).(models.ReplaySpec), args.Error(1) } func (repo *ReplayRepository) Insert(replay *models.ReplaySpec) error { return repo.Called(replay).Error(0) } -func (repo *ReplayRepository) UpdateStatus(replayID uuid.UUID, status, message string) error { +func (repo *ReplayRepository) UpdateStatus(replayID uuid.UUID, status string, message models.ReplayMessage) error { return repo.Called(replayID, status, message).Error(0) } + +type ReplaySpecRepoFactory struct { + mock.Mock +} + +func (fac *ReplaySpecRepoFactory) New(jobSpec models.JobSpec) store.ReplaySpecRepository { + return fac.Called(jobSpec).Get(0).(store.ReplaySpecRepository) +} + +type ReplayManager struct { + mock.Mock +} + +func (rm *ReplayManager) Replay(reqInput *models.ReplayRequestInput) (string, error) { + args := rm.Called(reqInput) + return args.Get(0).(string), args.Error(1) +} + +func (rm *ReplayManager) Init() { + rm.Called() + return +} + +type ReplayWorker struct { + mock.Mock +} + +func (rm *ReplayWorker) Process(ctx context.Context, replayRequest *models.ReplayRequestInput) error { + args := rm.Called(ctx, replayRequest) + return args.Error(0) +} diff --git a/mock/scheduler.go b/mock/scheduler.go new file mode 100644 index 0000000000..641e6c70c1 --- /dev/null +++ b/mock/scheduler.go @@ -0,0 +1,43 @@ +package mock + +import ( + "context" + "time" + + "github.com/odpf/optimus/models" + "github.com/stretchr/testify/mock" +) + +type MockScheduler struct { + mock.Mock +} + +func (ms *MockScheduler) GetName() string { + return "" +} + +func (ms *MockScheduler) GetTemplate() []byte { + return []byte{} +} + +func (ms *MockScheduler) GetJobsDir() string { + return "" +} + +func (ms *MockScheduler) GetJobsExtension() string { + return "" +} + +func (ms *MockScheduler) Bootstrap(ctx context.Context, projectSpec models.ProjectSpec) error { + return ms.Called(ctx, projectSpec).Error(0) +} + +func (ms *MockScheduler) GetJobStatus(ctx context.Context, projSpec models.ProjectSpec, jobName string) ([]models.JobStatus, error) { + args := ms.Called(ctx, projSpec, jobName) + return args.Get(0).([]models.JobStatus), args.Error(1) +} + +func (ms *MockScheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobName string, startDate, endDate time.Time) error { + args := ms.Called(ctx, projSpec, jobName, startDate, endDate) + return args.Error(0) +} diff --git a/mock/uuid.go b/mock/uuid.go new file mode 100644 index 0000000000..62b6648cb5 --- /dev/null +++ b/mock/uuid.go @@ -0,0 +1,15 @@ +package mock + +import ( + "github.com/google/uuid" + "github.com/stretchr/testify/mock" +) + +type UUIDProvider struct { + mock.Mock +} + +func (up *UUIDProvider) NewUUID() (uuid.UUID, error) { + args := up.Called() + return args.Get(0).(uuid.UUID), args.Error(1) +} diff --git a/models/replay.go 
b/models/replay.go index de4f94f605..39dcb474b9 100644 --- a/models/replay.go +++ b/models/replay.go @@ -8,12 +8,18 @@ import ( const ( // ReplayStatusAccepted worker picked up the request - ReplayStatusAccepted = "Accepted" + ReplayStatusAccepted = "accepted" + ReplayStatusInProgress = "inprogress" // ReplayStatusFailed worker fail while processing the replay request - ReplayStatusFailed = "Failed" // end state - ReplayStatusSuccess = "Success" // end state + ReplayStatusFailed = "failed" // end state + ReplayStatusSuccess = "success" // end state ) +type ReplayMessage struct { + Status string + Message string +} + type ReplayRequestInput struct { ID uuid.UUID Job JobSpec @@ -29,16 +35,5 @@ type ReplaySpec struct { StartDate time.Time EndDate time.Time Status string - Message string - CommitID string -} - -type Syncer interface { - SyncReplayStatusWithAirflow(ReplaySpec) error -} - -type ReplayRepository interface { - Insert(replay *ReplaySpec) error - GetByID(id uuid.UUID) (ReplaySpec, error) - UpdateStatus(replayID uuid.UUID, status, message string) error + Message ReplayMessage } diff --git a/store/postgres/migrations/000010_create_replay_table.up.sql b/store/postgres/migrations/000010_create_replay_table.up.sql index ee5ec8b3e1..332d5728e1 100644 --- a/store/postgres/migrations/000010_create_replay_table.up.sql +++ b/store/postgres/migrations/000010_create_replay_table.up.sql @@ -5,8 +5,7 @@ CREATE TABLE IF NOT EXISTS replay ( start_date TIMESTAMP WITH TIME ZONE NOT NULL, end_date TIMESTAMP WITH TIME ZONE NOT NULL, status varchar(20) NOT NULL, - commit_id varchar(20), - message TEXT, + message JSONB, created_at TIMESTAMP WITH TIME ZONE NOT NULL, updated_at TIMESTAMP WITH TIME ZONE NOT NULL ); diff --git a/store/postgres/replay_repository.go b/store/postgres/replay_repository.go index dbc35340fa..418ce6b545 100644 --- a/store/postgres/replay_repository.go +++ b/store/postgres/replay_repository.go @@ -1,9 +1,12 @@ package postgres import ( + "encoding/json" "errors" "time" + "gorm.io/datatypes" + "github.com/google/uuid" "github.com/jinzhu/gorm" "github.com/odpf/optimus/models" @@ -19,43 +22,51 @@ type Replay struct { StartDate time.Time `gorm:"not null"` EndDate time.Time `gorm:"not null"` Status string `gorm:"not null"` - Message string - CommitID string + Message datatypes.JSON CreatedAt time.Time `gorm:"not null" json:"created_at"` UpdatedAt time.Time `gorm:"not null" json:"updated_at"` } func (p Replay) FromSpec(spec *models.ReplaySpec) (Replay, error) { + jsonBytes, err := json.Marshal(spec.Message) + if err != nil { + return Replay{}, err + } return Replay{ ID: spec.ID, JobID: spec.Job.ID, StartDate: spec.StartDate, EndDate: spec.EndDate, Status: spec.Status, - CommitID: spec.CommitID, - Message: spec.Message, + Message: jsonBytes, }, nil } -func (p Replay) ToSpec() (models.ReplaySpec, error) { +func (p Replay) ToSpec(jobSpec models.JobSpec) (models.ReplaySpec, error) { + message := models.ReplayMessage{} + if err := json.Unmarshal(p.Message, &message); err != nil { + return models.ReplaySpec{}, err + } return models.ReplaySpec{ ID: p.ID, + Job: jobSpec, Status: p.Status, StartDate: p.StartDate, EndDate: p.EndDate, - Message: p.Message, - CommitID: p.CommitID, + Message: message, }, nil } type replayRepository struct { - DB *gorm.DB + DB *gorm.DB + jobSpec models.JobSpec } -func NewReplayRepository(db *gorm.DB) *replayRepository { +func NewReplayRepository(db *gorm.DB, jobSpec models.JobSpec) *replayRepository { return 
&replayRepository{ - DB: db, + DB: db, + jobSpec: jobSpec, } } @@ -75,17 +86,21 @@ func (repo *replayRepository) GetByID(id uuid.UUID) (models.ReplaySpec, error) { } return models.ReplaySpec{}, err } - return r.ToSpec() + return r.ToSpec(repo.jobSpec) } -func (repo *replayRepository) UpdateStatus(replayID uuid.UUID, status, message string) error { +func (repo *replayRepository) UpdateStatus(replayID uuid.UUID, status string, message models.ReplayMessage) error { var r Replay if err := repo.DB.Where("id = ?", replayID).Find(&r).Error; err != nil { return errors.New("could not update non-existing replay") } + jsonBytes, err := json.Marshal(message) + if err != nil { + return err + } r.Status = status r.UpdatedAt = time.Now() - r.Message = message + r.Message = jsonBytes if err := repo.DB.Save(&r).Error; err != nil { return err } diff --git a/store/postgres/replay_repository_test.go b/store/postgres/replay_repository_test.go index 32cf307d6c..f4729dff4c 100644 --- a/store/postgres/replay_repository_test.go +++ b/store/postgres/replay_repository_test.go @@ -55,13 +55,13 @@ func TestReplayRepository(t *testing.T) { }, } - t.Run("Insert", func(t *testing.T) { + t.Run("Insert and GetByID", func(t *testing.T) { db := DBSetup() defer db.Close() testModels := []*models.ReplaySpec{} testModels = append(testModels, testConfigs...) - repo := NewReplayRepository(db) + repo := NewReplayRepository(db, jobSpec) err := repo.Insert(testModels[0]) assert.Nil(t, err) @@ -70,4 +70,29 @@ func TestReplayRepository(t *testing.T) { assert.Nil(t, err) assert.Equal(t, uuid, checkModel.ID) }) + + t.Run("UpdateStatus", func(t *testing.T) { + db := DBSetup() + defer db.Close() + testModels := []*models.ReplaySpec{} + testModels = append(testModels, testConfigs...) + + repo := NewReplayRepository(db, jobSpec) + + err := repo.Insert(testModels[0]) + assert.Nil(t, err) + + errMessage := "failed to execute" + replayMessage := models.ReplayMessage{ + Status: models.ReplayStatusFailed, + Message: errMessage, + } + err = repo.UpdateStatus(uuid, models.ReplayStatusFailed, replayMessage) + assert.Nil(t, err) + + checkModel, err := repo.GetByID(testModels[0].ID) + assert.Nil(t, err) + assert.Equal(t, models.ReplayStatusFailed, checkModel.Status) + assert.Equal(t, errMessage, checkModel.Message.Message) + }) } diff --git a/store/store.go b/store/store.go index 3294c29a16..de0183001f 100644 --- a/store/store.go +++ b/store/store.go @@ -6,6 +6,8 @@ import ( "io" "time" + "github.com/google/uuid" + "github.com/odpf/optimus/models" ) @@ -85,3 +87,10 @@ type ObjectWriter interface { type ObjectReader interface { NewReader(bucket, path string) (io.ReadCloser, error) } + +// ReplaySpecRepository represents a storage interface for replay objects +type ReplaySpecRepository interface { + Insert(replay *models.ReplaySpec) error + GetByID(id uuid.UUID) (models.ReplaySpec, error) + UpdateStatus(replayID uuid.UUID, status string, message models.ReplayMessage) error +} diff --git a/utils/uuid.go b/utils/uuid.go new file mode 100644 index 0000000000..63cd76a009 --- /dev/null +++ b/utils/uuid.go @@ -0,0 +1,18 @@ +package utils + +import "github.com/google/uuid" + +type UUIDProvider interface { + NewUUID() (uuid.UUID, error) +} + +type uuidProvider struct { +} + +func (*uuidProvider) NewUUID() (uuid.UUID, error) { + return uuid.NewRandom() +} + +func NewUUIDProvider() *uuidProvider { + return &uuidProvider{} +} From 20c45165628c0c2df2f0301dcfe3d71501cbae79 Mon Sep 17 00:00:00 2001 From: Maulik 
Soneji Date: Tue, 29 Jun 2021 17:24:25 +0530 Subject: [PATCH 4/6] fix: incorporate feedback changes --- Makefile | 2 +- api/handler/v1/runtime.go | 20 ++--- api/handler/v1/runtime_test.go | 4 +- api/proto/odpf/optimus/runtime_service.pb.go | 2 +- .../odpf/optimus/runtime_service.pb.gw.go | 4 +- cmd/replay.go | 56 ++++++------- cmd/server/server.go | 8 +- config/config.go | 11 ++- core/bus/bus.go | 75 ----------------- core/tree/tree_node.go | 15 +++- core/tree/tree_node_test.go | 23 +++--- ext/scheduler/airflow/airflow.go | 14 ++-- job/replay.go | 14 ++-- job/replay_manager.go | 81 ++++++++----------- job/replay_manager_test.go | 12 ++- job/replay_test.go | 16 ++-- job/replay_worker.go | 47 +++-------- job/replay_worker_test.go | 48 +++-------- mock/job.go | 4 +- mock/replay.go | 4 +- models/job.go | 4 +- models/replay.go | 4 +- .../000010_create_replay_table.up.sql | 2 +- store/postgres/replay_repository.go | 6 +- store/postgres/replay_repository_test.go | 2 +- .../odpf/optimus/runtime_service.swagger.json | 20 +---- 26 files changed, 173 insertions(+), 325 deletions(-) delete mode 100644 core/bus/bus.go diff --git a/Makefile b/Makefile index 83dc2eb74b..6e13fe0c24 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ pack-files: generate-proto: ## regenerate protos @echo " > cloning protobuf from odpf/proton" @rm -rf proton/ - @git -c advice.detachedHead=false clone https://github.com/odpf/proton --depth 1 --quiet --branch DBTCH-1024 + @git -c advice.detachedHead=false clone https://github.com/odpf/proton --depth 1 --quiet --branch main @echo " > generating protobuf" @echo " > info: make sure correct version of dependencies are installed using 'install'" @buf generate diff --git a/api/handler/v1/runtime.go b/api/handler/v1/runtime.go index 2b64a93b0e..611afcee2a 100644 --- a/api/handler/v1/runtime.go +++ b/api/handler/v1/runtime.go @@ -728,19 +728,19 @@ func (sv *RuntimeServiceServer) ListResourceSpecification(ctx context.Context, r } func (sv *RuntimeServiceServer) ReplayDryRun(ctx context.Context, req *pb.ReplayRequest) (*pb.ReplayDryRunResponse, error) { - replayRequestInput, err := sv.parseReplayRequest(req) + replayWorkerRequest, err := sv.parseReplayRequest(req) if err != nil { - return nil, err + return nil, status.Error(codes.Internal, fmt.Sprintf("error while parsing replay dry run request: %v", err)) } - rootNode, err := sv.jobSvc.ReplayDryRun(replayRequestInput) + rootNode, err := sv.jobSvc.ReplayDryRun(replayWorkerRequest) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay dry run: %v", err)) } node, err := sv.adapter.ToReplayExecutionTreeNode(rootNode) if err != nil { - return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) + return nil, status.Error(codes.Internal, fmt.Sprintf("error while preparing replay dry run response: %v", err)) } return &pb.ReplayDryRunResponse{ Success: true, @@ -749,12 +749,12 @@ func (sv *RuntimeServiceServer) ReplayDryRun(ctx context.Context, req *pb.Replay } func (sv *RuntimeServiceServer) Replay(ctx context.Context, req *pb.ReplayRequest) (*pb.ReplayResponse, error) { - replayRequestInput, err := sv.parseReplayRequest(req) + replayWorkerRequest, err := sv.parseReplayRequest(req) if err != nil { - return nil, err + return nil, status.Error(codes.Internal, fmt.Sprintf("error while parsing replay request: %v", err)) } - 
replayUUID, err := sv.jobSvc.Replay(replayRequestInput) + replayUUID, err := sv.jobSvc.Replay(replayWorkerRequest) if err != nil { return nil, status.Error(codes.Internal, fmt.Sprintf("error while processing replay: %v", err)) } @@ -764,7 +764,7 @@ func (sv *RuntimeServiceServer) Replay(ctx context.Context, req *pb.ReplayReques }, nil } -func (sv *RuntimeServiceServer) parseReplayRequest(req *pb.ReplayRequest) (*models.ReplayRequestInput, error) { +func (sv *RuntimeServiceServer) parseReplayRequest(req *pb.ReplayRequest) (*models.ReplayWorkerRequest, error) { projectRepo := sv.projectRepoFactory.New() projSpec, err := projectRepo.GetByName(req.GetProjectName()) if err != nil { @@ -797,7 +797,7 @@ func (sv *RuntimeServiceServer) parseReplayRequest(req *pb.ReplayRequest) (*mode if endDate.Before(startDate) { return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("replay end date cannot be before start date")) } - replayRequest := models.ReplayRequestInput{ + replayRequest := models.ReplayWorkerRequest{ Job: jobSpec, Start: startDate, End: endDate, diff --git a/api/handler/v1/runtime_test.go b/api/handler/v1/runtime_test.go index cf7fd7ec98..859cf3cf18 100644 --- a/api/handler/v1/runtime_test.go +++ b/api/handler/v1/runtime_test.go @@ -1479,7 +1479,7 @@ func TestRuntimeServiceServer(t *testing.T) { }, }), } - replayRequestInput := &models.ReplayRequestInput{ + replayWorkerRequest := &models.ReplayWorkerRequest{ Job: jobSpec, Start: startDate, End: endDate, @@ -1489,7 +1489,7 @@ func TestRuntimeServiceServer(t *testing.T) { jobService := new(mock.JobService) jobService.On("GetByName", jobName, namespaceSpec).Return(jobSpec, nil) - jobService.On("ReplayDryRun", replayRequestInput).Return(dagNode, nil) + jobService.On("ReplayDryRun", replayWorkerRequest).Return(dagNode, nil) defer jobService.AssertExpectations(t) projectRepository := new(mock.ProjectRepository) diff --git a/api/proto/odpf/optimus/runtime_service.pb.go b/api/proto/odpf/optimus/runtime_service.pb.go index 041376a69c..a355c92c5e 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.go +++ b/api/proto/odpf/optimus/runtime_service.pb.go @@ -4745,7 +4745,7 @@ var file_odpf_optimus_runtime_service_proto_rawDesc = []byte{ 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x6f, 0x64, 0x70, 0x66, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x36, 0x12, 0x34, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x36, 0x22, 0x34, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x6a, 0x6f, 0x62, 0x2f, 0x7b, 0x6a, 0x6f, 0x62, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x7d, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x79, 0x42, 0x70, 0x0a, 0x16, 0x69, 0x6f, 0x2e, 0x6f, 0x64, diff --git a/api/proto/odpf/optimus/runtime_service.pb.gw.go b/api/proto/odpf/optimus/runtime_service.pb.gw.go index 43391d7374..c4c8401a8d 100644 --- a/api/proto/odpf/optimus/runtime_service.pb.gw.go +++ b/api/proto/odpf/optimus/runtime_service.pb.gw.go @@ -2107,7 +2107,7 @@ func RegisterRuntimeServiceHandlerServer(ctx context.Context, mux *runtime.Serve }) - mux.Handle("GET", pattern_RuntimeService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_RuntimeService_Replay_0, func(w 
http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream @@ -2571,7 +2571,7 @@ func RegisterRuntimeServiceHandlerClient(ctx context.Context, mux *runtime.Serve }) - mux.Handle("GET", pattern_RuntimeService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_RuntimeService_Replay_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) diff --git a/cmd/replay.go b/cmd/replay.go index b54d0e2895..a1f175dea6 100644 --- a/cmd/replay.go +++ b/cmd/replay.go @@ -19,6 +19,10 @@ import ( "google.golang.org/grpc" ) +var ( + replayTimeout = time.Minute * 1 +) + type taskRunBlock struct { name string height int @@ -37,8 +41,8 @@ func taskRunBlockComperator(a, b interface{}) int { return strings.Compare(aAsserted.name, bAsserted.name) } -//formatRunsPerDAGInstance returns a hashmap with DAG -> Runs[] mapping -func formatRunsPerDAGInstance(instance *pb.ReplayExecutionTreeNode, taskReruns map[string]taskRunBlock, height int) { +//formatRunsPerJobInstance returns a hashmap with Job -> Runs[] mapping +func formatRunsPerJobInstance(instance *pb.ReplayExecutionTreeNode, taskReruns map[string]taskRunBlock, height int) { if _, ok := taskReruns[instance.JobName]; !ok { taskReruns[instance.JobName] = taskRunBlock{ name: instance.JobName, @@ -51,7 +55,7 @@ func formatRunsPerDAGInstance(instance *pb.ReplayExecutionTreeNode, taskReruns m taskReruns[instance.JobName].runs.Add(taskRun.AsTime()) } for _, child := range instance.Dependents { - formatRunsPerDAGInstance(child, taskReruns, height+1) + formatRunsPerJobInstance(child, taskReruns, height+1) } } @@ -111,23 +115,16 @@ ReplayDryRun date ranges are inclusive. 
return nil } - answers := map[string]interface{}{} - questions := []*survey.Question{ - { - Name: "ProceedReplay", - Prompt: &survey.Select{ - - Message: "Proceed with replay?", - Options: []string{"Yes", "No"}, - Default: "Yes", - }, - }, - } - - if err := survey.Ask(questions, &answers); err != nil { + proceedWithReplay := "Yes" + if err := survey.AskOne(&survey.Select{ + Message: "Proceed with replay?", + Options: []string{"Yes", "No"}, + Default: "Yes", + }, &proceedWithReplay); err != nil { return err } - if option, ok := answers["ProceedReplay"]; ok && option.(survey.OptionAnswer).Value == "No" { + + if proceedWithReplay == "No" { l.Println("aborting...") return nil } @@ -155,12 +152,11 @@ func printReplayExecutionTree(l logger, projectName, namespace, jobName, startDa } defer conn.Close() - dumpTimeoutCtx, dumpCancel := context.WithTimeout(context.Background(), renderTimeout) - defer dumpCancel() + replayRequestTimeout, replayRequestCancel := context.WithTimeout(context.Background(), replayTimeout) + defer replayRequestCancel() l.Println("please wait...") runtime := pb.NewRuntimeServiceClient(conn) - // fetch compiled JobSpec by calling the optimus API replayRequest := &pb.ReplayRequest{ ProjectName: projectName, JobName: jobName, @@ -168,10 +164,10 @@ func printReplayExecutionTree(l logger, projectName, namespace, jobName, startDa StartDate: startDate, EndDate: endDate, } - replayDryRunResponse, err := runtime.ReplayDryRun(dumpTimeoutCtx, replayRequest) + replayDryRunResponse, err := runtime.ReplayDryRun(replayRequestTimeout, replayRequest) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - l.Println("render process took too long, timing out") + l.Println("replay dry run took too long, timing out") } return errors.Wrapf(err, "request failed for job %s", jobName) } @@ -187,12 +183,11 @@ func printReplayDryRunResponse(l logger, replayRequest *pb.ReplayRequest, replay table.SetBorder(false) table.SetHeader([]string{ "Index", - "DAG", + "Job", "Run", }) - // generate basic details taskRerunsMap := make(map[string]taskRunBlock) - formatRunsPerDAGInstance(replayDryRunResponse.Response, taskRerunsMap, 0) + formatRunsPerJobInstance(replayDryRunResponse.Response, taskRerunsMap, 0) //sort run block taskRerunsSorted := set.NewTreeSetWith(taskRunBlockComperator) @@ -245,12 +240,11 @@ func runReplayRequest(l logger, projectName, namespace, jobName, startDate, endD } defer conn.Close() - dumpTimeoutCtx, dumpCancel := context.WithTimeout(context.Background(), renderTimeout) - defer dumpCancel() + replayRequestTimeout, replayRequestCancel := context.WithTimeout(context.Background(), replayTimeout) + defer replayRequestCancel() l.Println("firing the replay request...") runtime := pb.NewRuntimeServiceClient(conn) - // fetch compiled JobSpec by calling the optimus API replayRequest := &pb.ReplayRequest{ ProjectName: projectName, JobName: jobName, @@ -258,10 +252,10 @@ func runReplayRequest(l logger, projectName, namespace, jobName, startDate, endD StartDate: startDate, EndDate: endDate, } - replayResponse, err := runtime.Replay(dumpTimeoutCtx, replayRequest) + replayResponse, err := runtime.Replay(replayRequestTimeout, replayRequest) if err != nil { if errors.Is(err, context.DeadlineExceeded) { - l.Println("render process took too long, timing out") + l.Println("replay request took too long, timing out") } return "", errors.Wrapf(err, "request failed for job %s", jobName) } diff --git a/cmd/server/server.go b/cmd/server/server.go index 7915a3cd72..295322bd6b 100644 --- 
a/cmd/server/server.go +++ b/cmd/server/server.go @@ -396,7 +396,10 @@ func Initialize(conf config.Provider) error { jobSpecRepoFac: jobSpecRepoFac, } replayWorker := job.NewReplayWorker(replaySpecRepoFac, models.Scheduler) - replayManager := job.NewManager(replayWorker, replaySpecRepoFac, utils.NewUUIDProvider(), conf.GetServe().JobQueueSize) + replayManager := job.NewManager(replayWorker, replaySpecRepoFac, utils.NewUUIDProvider(), job.ReplayManagerConfig{ + QueueSize: conf.GetServe().ReplayJobQueueSize, + WorkerTimeout: conf.GetServe().ReplayWorkerTimeout, + }) // runtime service instance over grpc pb.RegisterRuntimeServiceServer(grpcServer, v1handler.NewRuntimeServiceServer( @@ -485,8 +488,7 @@ func Initialize(conf config.Provider) error { // Block until we receive our signal. <-termChan mainLog.Info("termination request received") - err = replayManager.Close() - if err != nil { + if err = replayManager.Close(); err != nil { return err } diff --git a/config/config.go b/config/config.go index d5ecf9af31..aabc0dd9dd 100644 --- a/config/config.go +++ b/config/config.go @@ -32,6 +32,8 @@ var ( KeyServeMetadataKafkaBrokers = "serve.metadata.kafka_brokers" KeyServeMetadataKafkaJobTopic = "serve.metadata.kafka_job_topic" KeyServeMetadataKafkaBatchSize = "serve.metadata.kafka_batch_size" + KeyServeReplayJobQueueSize = "serve.replay_job_queue_size" + KeyServeReplayWorkerTimeout = "serve.replay_worker_timeout" KeySchedulerName = "scheduler.name" @@ -96,9 +98,10 @@ type ServerConfig struct { // random 32 character hash used for encrypting secrets AppKey string `yaml:"app_key"` - DB DBConfig `yaml:"db"` - Metadata MetadataConfig `yaml:"metadata"` - JobQueueSize int `yaml:"job_queue_size"` + DB DBConfig `yaml:"db"` + Metadata MetadataConfig `yaml:"metadata"` + ReplayJobQueueSize int `yaml:"replay_job_queue_size"` + ReplayWorkerTimeout int `yaml:"replay_worker_timeout"` } type DBConfig struct { @@ -186,6 +189,8 @@ func (o Optimus) GetServe() ServerConfig { KafkaBrokers: o.eKs(KeyServeMetadataKafkaBrokers), KafkaBatchSize: o.eKi(KeyServeMetadataKafkaBatchSize), }, + ReplayJobQueueSize: o.k.Int(KeyServeReplayJobQueueSize), + ReplayWorkerTimeout: o.k.Int(KeyServeReplayWorkerTimeout), } } diff --git a/core/bus/bus.go b/core/bus/bus.go deleted file mode 100644 index 6519950968..0000000000 --- a/core/bus/bus.go +++ /dev/null @@ -1,75 +0,0 @@ -package bus - -// allows independent components of an application to -// observe events produced by decoupled producers -// -// producer of "someevent" -// bus.Post("someevent", "data") -// -// observer of "someevent" -// myChan := make(chan string) -// bus.Listen("someevent", myChan) -// for { -// data := <-myChan -// fmt.Printf("someevent: %s", data) -// } -// -// make sure these events are unique - -import ( - "errors" - "sync" -) - -var ( - ErrNotFound = errors.New("not found") -) - -var ( - // mapping of event to listening channels - eventBus = make(map[string][]chan<- interface{}) - rwMutex sync.RWMutex -) - -// Listen observing the specified event via provided channel -func Listen(event string, out chan interface{}) { - rwMutex.Lock() - defer rwMutex.Unlock() - eventBus[event] = append(eventBus[event], out) -} - -// Stop observing the specified event on the channel -func Stop(event string, out chan interface{}) error { - rwMutex.Lock() - defer rwMutex.Unlock() - - newEventBus := make([]chan<- interface{}, 0) - outChans, ok := eventBus[event] - if !ok { - return ErrNotFound - } - for _, ch := range outChans { - if ch != out { - newEventBus = append(newEventBus, 
ch) - } - } - eventBus[event] = newEventBus - - return nil -} - -// Post a notification to the specified event -func Post(event string, data interface{}) error { - rwMutex.RLock() - defer rwMutex.RUnlock() - - if listeners, ok := eventBus[event]; ok { - //push this to all listeners - for _, out := range listeners { - out <- data - } - } else { - return ErrNotFound - } - return nil -} diff --git a/core/tree/tree_node.go b/core/tree/tree_node.go index 3fdb7e5584..1d18141e33 100644 --- a/core/tree/tree_node.go +++ b/core/tree/tree_node.go @@ -15,11 +15,18 @@ type TreeNode struct { Runs set.Set } -func (t *TreeNode) GetAllNodes(allNodes map[string]*TreeNode) { - allNodes[t.Data.GetName()] = t - for _, dep := range t.Dependents { - dep.GetAllNodes(allNodes) +// GetAllNodes returns level order traversal of tree starting from current node +func (t *TreeNode) GetAllNodes() []*TreeNode { + allNodes := make([]*TreeNode, 0) + nodesQueue := make([]*TreeNode, 0) + nodesQueue = append(nodesQueue, t) + for len(nodesQueue) != 0 { + topNode := nodesQueue[0] + nodesQueue = nodesQueue[1:] + allNodes = append(allNodes, topNode) + nodesQueue = append(nodesQueue, topNode.Dependents...) } + return allNodes } func (t *TreeNode) GetName() string { diff --git a/core/tree/tree_node_test.go b/core/tree/tree_node_test.go index ffadec884e..c411dc9100 100644 --- a/core/tree/tree_node_test.go +++ b/core/tree/tree_node_test.go @@ -27,22 +27,27 @@ func TestDagNode(t *testing.T) { t.Run("GetAllNodes", func(t *testing.T) { treeNode := tree.TreeNode{ Data: models.JobSpec{ - Name: "parent-job", + Name: "job-level-0", }, Dependents: []*tree.TreeNode{ { Data: models.JobSpec{ - Name: "child-job", + Name: "job-level-1", + }, + Dependents: []*tree.TreeNode{ + { + Data: models.JobSpec{ + Name: "job-level-2", + }, + }, }, }, }, } - nodesMap := make(map[string]*tree.TreeNode) - treeNode.GetAllNodes(nodesMap) - assert.Equal(t, 2, len(nodesMap)) - _, parentNodeFound := nodesMap["parent-job"] - assert.True(t, parentNodeFound) - _, childNodeFound := nodesMap["child-job"] - assert.True(t, childNodeFound) + allNodes := treeNode.GetAllNodes() + assert.Equal(t, 3, len(allNodes)) + assert.Equal(t, "job-level-0", allNodes[0].Data.GetName()) + assert.Equal(t, "job-level-1", allNodes[1].Data.GetName()) + assert.Equal(t, "job-level-2", allNodes[2].Data.GetName()) }) } diff --git a/ext/scheduler/airflow/airflow.go b/ext/scheduler/airflow/airflow.go index 963a46de4d..7b8f552147 100644 --- a/ext/scheduler/airflow/airflow.go +++ b/ext/scheduler/airflow/airflow.go @@ -181,24 +181,23 @@ func (a *scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobN schdHost = strings.Trim(schdHost, "/") airflowDateFormat := "2006-01-02T15:04:05" utcTimezone, _ := time.LoadLocation("UTC") - fetchURL := fmt.Sprintf( + clearDagRunURL := fmt.Sprintf( fmt.Sprintf("%s/%s", schdHost, dagRunClearURL), jobName, startDate.In(utcTimezone).Format(airflowDateFormat), endDate.In(utcTimezone).Format(airflowDateFormat)) - request, err := http.NewRequest(http.MethodGet, fetchURL, nil) + request, err := http.NewRequest(http.MethodGet, clearDagRunURL, nil) if err != nil { - return errors.Wrapf(err, "failed to build http request for %s", fetchURL) + return errors.Wrapf(err, "failed to build http request for %s", clearDagRunURL) } resp, err := a.httpClient.Do(request) if err != nil { - return errors.Wrapf(err, "failed to clear airflow dag runs from %s", fetchURL) + return errors.Wrapf(err, "failed to clear airflow dag runs from %s", clearDagRunURL) } if resp.StatusCode != 
http.StatusOK { - return errors.Errorf("failed to clear airflow dag runs from %s", fetchURL) + return errors.Errorf("failed to clear airflow dag runs from %s", clearDagRunURL) } - defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { @@ -217,8 +216,7 @@ func (a *scheduler) Clear(ctx context.Context, projSpec models.ProjectSpec, jobN responseFields := []string{"http_response_code", "status"} for _, field := range responseFields { - _, ok := responseJSON[field] - if !ok { + if _, ok := responseJSON[field]; !ok { return errors.Errorf("failed to find required response fields %s in %s", field, responseJSON) } } diff --git a/job/replay.go b/job/replay.go index 0c3287f1e8..3e36d723dc 100644 --- a/job/replay.go +++ b/job/replay.go @@ -15,7 +15,7 @@ const ( ReplayDateFormat = "2006-01-02" ) -func (srv *Service) populateRequestWithJobSpecs(replayRequest *models.ReplayRequestInput) error { +func (srv *Service) populateRequestWithJobSpecs(replayRequest *models.ReplayWorkerRequest) error { projectJobSpecRepo := srv.projectJobSpecRepoFactory.New(replayRequest.Project) jobSpecs, err := srv.getDependencyResolvedSpecs(replayRequest.Project, projectJobSpecRepo, nil) if err != nil { @@ -29,9 +29,8 @@ func (srv *Service) populateRequestWithJobSpecs(replayRequest *models.ReplayRequ return nil } -func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { - err := srv.populateRequestWithJobSpecs(replayRequest) - if err != nil { +func (srv *Service) ReplayDryRun(replayRequest *models.ReplayWorkerRequest) (*tree.TreeNode, error) { + if err := srv.populateRequestWithJobSpecs(replayRequest); err != nil { return nil, err } @@ -43,9 +42,8 @@ func (srv *Service) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tre return rootInstance, nil } -func (srv *Service) Replay(replayRequest *models.ReplayRequestInput) (string, error) { - err := srv.populateRequestWithJobSpecs(replayRequest) - if err != nil { +func (srv *Service) Replay(replayRequest *models.ReplayWorkerRequest) (string, error) { + if err := srv.populateRequestWithJobSpecs(replayRequest); err != nil { return "", err } @@ -57,7 +55,7 @@ func (srv *Service) Replay(replayRequest *models.ReplayRequestInput) (string, er } // prepareTree creates an execution tree for replay operation -func prepareTree(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { +func prepareTree(replayRequest *models.ReplayWorkerRequest) (*tree.TreeNode, error) { replayJobSpec, found := replayRequest.DagSpecMap[replayRequest.Job.Name] if !found { return nil, fmt.Errorf("couldn't find any job with name %s", replayRequest.Job.Name) diff --git a/job/replay_manager.go b/job/replay_manager.go index 0f5e18bf84..5867197367 100644 --- a/job/replay_manager.go +++ b/job/replay_manager.go @@ -3,11 +3,11 @@ package job import ( "context" "sync" + "time" "github.com/odpf/optimus/utils" "github.com/google/uuid" - "github.com/odpf/optimus/core/bus" "github.com/odpf/optimus/core/logger" "github.com/odpf/optimus/models" "github.com/pkg/errors" ) @@ -19,9 +19,14 @@ var ( ErrRequestQueueFull = errors.New("request queue is full") ) +type ReplayManagerConfig struct { + QueueSize int + WorkerTimeout int +} + type ReplayManager interface { Init() - Replay(*models.ReplayRequestInput) (string, error) + Replay(*models.ReplayWorkerRequest) (string, error) } // Manager for replaying operation(s). 
@@ -36,16 +41,14 @@ type Manager struct { mu sync.Mutex uuidProvider utils.UUIDProvider + config ReplayManagerConfig // request queue, used by workers - requestQ chan *models.ReplayRequestInput + requestQ chan *models.ReplayWorkerRequest // request map, used for verifying if a request is // in queue without actually consuming it requestMap map[uuid.UUID]bool - //listen for replay requests inserted in db - clearRequestMapListener chan interface{} - //request worker replayWorker ReplayWorker replaySpecRepoFac ReplaySpecRepoFactory @@ -53,7 +56,7 @@ type Manager struct { // Replay a request asynchronously, returns a replay id that can // can be used to query its status -func (m *Manager) Replay(reqInput *models.ReplayRequestInput) (string, error) { +func (m *Manager) Replay(reqInput *models.ReplayWorkerRequest) (string, error) { uuidOb, err := m.uuidProvider.NewUUID() if err != nil { return "", err @@ -91,19 +94,20 @@ func (m *Manager) Replay(reqInput *models.ReplayRequestInput) (string, error) { // start a worker goroutine that runs the deployment pipeline in background func (m *Manager) spawnServiceWorker() { + defer m.wg.Done() m.wg.Add(1) - go func() { - defer m.wg.Done() - for reqInput := range m.requestQ { - logger.I("worker picked up the request for ", reqInput.Job.Name) - ctx := context.Background() - - if err := m.replayWorker.Process(ctx, reqInput); err != nil { - //do something about this error - logger.E(errors.Wrap(err, "worker failed to process")) - } + + for reqInput := range m.requestQ { + logger.I("worker picked up the request for ", reqInput.Job.Name) + ctx, cancelCtx := context.WithTimeout(context.Background(), time.Millisecond*time.Duration(m.config.WorkerTimeout)) + + if err := m.replayWorker.Process(ctx, reqInput); err != nil { + //do something about this error + logger.E(errors.Wrap(err, "worker failed to process")) + cancelCtx() } - }() + cancelCtx() + } } //Close stops consuming any new request @@ -116,46 +120,25 @@ func (m *Manager) Close() error { //wait for request worker to finish m.wg.Wait() - _ = bus.Stop(EvtRecordInsertedInDB, m.clearRequestMapListener) - _ = bus.Stop(EvtFailedToPrepareForReplay, m.clearRequestMapListener) - if m.clearRequestMapListener != nil { - close(m.clearRequestMapListener) - } return nil } func (m *Manager) Init() { logger.I("starting replay workers") - m.spawnServiceWorker() - - // listen for replay request being inserted in db - bus.Listen(EvtRecordInsertedInDB, m.clearRequestMapListener) - // listen when replay failed to even prepare to start - bus.Listen(EvtFailedToPrepareForReplay, m.clearRequestMapListener) - go func() { - for { - raw, ok := <-m.clearRequestMapListener - if !ok { - return - } - - ID := raw.(uuid.UUID) - m.mu.Lock() - delete(m.requestMap, ID) - m.mu.Unlock() - } - }() + for i := 0; i < m.config.QueueSize; i++ { + go m.spawnServiceWorker() + } } // NewManager constructs a new instance of Manager -func NewManager(worker ReplayWorker, replaySpecRepoFac ReplaySpecRepoFactory, uuidProvider utils.UUIDProvider, size int) *Manager { +func NewManager(worker ReplayWorker, replaySpecRepoFac ReplaySpecRepoFactory, uuidProvider utils.UUIDProvider, config ReplayManagerConfig) *Manager { mgr := &Manager{ - replayWorker: worker, - requestMap: make(map[uuid.UUID]bool), - requestQ: make(chan *models.ReplayRequestInput, size), - replaySpecRepoFac: replaySpecRepoFac, - clearRequestMapListener: make(chan interface{}), - uuidProvider: uuidProvider, + replayWorker: worker, + requestMap: make(map[uuid.UUID]bool), + config: config, + 
requestQ: make(chan *models.ReplayWorkerRequest, config.QueueSize), + replaySpecRepoFac: replaySpecRepoFac, + uuidProvider: uuidProvider, } mgr.Init() return mgr diff --git a/job/replay_manager_test.go b/job/replay_manager_test.go index 87f20218ec..c590f891be 100644 --- a/job/replay_manager_test.go +++ b/job/replay_manager_test.go @@ -15,9 +15,13 @@ import ( ) func TestReplayManager(t *testing.T) { + replayManagerConfig := job.ReplayManagerConfig{ + QueueSize: 5, + WorkerTimeout: 1000, + } t.Run("Close", func(t *testing.T) { logger.Init(logger.ERROR) - manager := job.NewManager(nil, nil, nil, 5) + manager := job.NewManager(nil, nil, nil, replayManagerConfig) err := manager.Close() assert.Nil(t, err) }) @@ -32,7 +36,7 @@ func TestReplayManager(t *testing.T) { Interval: "0 2 * * *", }, } - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: jobSpec, Start: startDate, End: endDate, @@ -51,7 +55,7 @@ func TestReplayManager(t *testing.T) { errMessage := "error while generating uuid" uuidProvider.On("NewUUID").Return(objUUID, errors.New(errMessage)) - replayManager := job.NewManager(nil, nil, uuidProvider, 5) + replayManager := job.NewManager(nil, nil, uuidProvider, replayManagerConfig) _, err := replayManager.Replay(replayRequest) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), errMessage)) @@ -79,7 +83,7 @@ func TestReplayManager(t *testing.T) { } replayRepository.On("Insert", toInsertReplaySpec).Return(errors.New(errMessage)) - replayManager := job.NewManager(nil, replaySpecRepoFac, uuidProvider, 5) + replayManager := job.NewManager(nil, replaySpecRepoFac, uuidProvider, replayManagerConfig) _, err := replayManager.Replay(replayRequest) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), errMessage)) diff --git a/job/replay_test.go b/job/replay_test.go index de52de0959..f954d2fdc2 100644 --- a/job/replay_test.go +++ b/job/replay_test.go @@ -107,7 +107,7 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, @@ -141,7 +141,7 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, @@ -184,7 +184,7 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: cyclicDagSpec[0], Start: replayStart, End: replayEnd, @@ -221,7 +221,7 @@ func TestReplay(t *testing.T) { jobSvc := job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, @@ -273,7 +273,7 @@ func TestReplay(t *testing.T) { jobSvc := 
job.NewService(nil, nil, compiler, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec4], Start: replayStart, End: replayEnd, @@ -317,7 +317,7 @@ func TestReplay(t *testing.T) { replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") jobSvc := job.NewService(nil, nil, nil, dumpAssets, nil, nil, nil, projJobSpecRepoFac, nil) - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, @@ -348,7 +348,7 @@ func TestReplay(t *testing.T) { replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, @@ -388,7 +388,7 @@ func TestReplay(t *testing.T) { replayStart, _ := time.Parse(job.ReplayDateFormat, "2020-08-05") replayEnd, _ := time.Parse(job.ReplayDateFormat, "2020-08-07") - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ Job: specs[spec1], Start: replayStart, End: replayEnd, diff --git a/job/replay_worker.go b/job/replay_worker.go index 9a56aea5ae..d08e73be2e 100644 --- a/job/replay_worker.go +++ b/job/replay_worker.go @@ -7,27 +7,16 @@ import ( "github.com/odpf/optimus/core/logger" - "github.com/odpf/optimus/core/bus" - "github.com/odpf/optimus/core/tree" "github.com/odpf/optimus/models" "github.com/pkg/errors" ) const ( - // EvtRecordInsertedInDB is emitted to event bus when a replay record is inserted in db - // it passes replay ID as string in bus - EvtRecordInsertedInDB = "replay_record_inserted_in_db" - - // EvtFailedToPrepareForReplay is emitted to event bus when a replay is failed to even prepare - // to execute, it passes replay ID as string in bus - EvtFailedToPrepareForReplay = "replay_request_failed_to_prepare" - - MsgReplaySuccessfullyCompleted = "Completed successfully" - MsgReplayInProgress = "Replay Request Picked up by replay worker" + AirflowClearDagRunFailed = "failed to clear airflow dag run" ) type ReplayWorker interface { - Process(context.Context, *models.ReplayRequestInput) error + Process(context.Context, *models.ReplayWorkerRequest) error } type replayWorker struct { @@ -35,14 +24,10 @@ type replayWorker struct { scheduler models.SchedulerUnit } -func (w *replayWorker) Process(ctx context.Context, input *models.ReplayRequestInput) (err error) { +func (w *replayWorker) Process(ctx context.Context, input *models.ReplayWorkerRequest) (err error) { replaySpecRepo := w.replaySpecRepoFac.New(input.Job) // mark replay request in progress - inProgressErr := replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusInProgress, models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: MsgReplayInProgress, - }) - if inProgressErr != nil { + if inProgressErr := replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusInProgress, models.ReplayMessage{}); inProgressErr != nil { return inProgressErr } @@ -51,36 +36,28 @@ func (w *replayWorker) Process(ctx context.Context, input *models.ReplayRequestI return err } - replayDagsMap := make(map[string]*tree.TreeNode) - 
replayTree.GetAllNodes(replayDagsMap) - - for jobName, treeNode := range replayDagsMap { + replayDagsMap := replayTree.GetAllNodes() + for _, treeNode := range replayDagsMap { runTimes := treeNode.Runs.Values() startTime := runTimes[0].(time.Time) endTime := runTimes[treeNode.Runs.Size()-1].(time.Time) - if err = w.scheduler.Clear(ctx, input.Project, jobName, startTime, endTime); err != nil { - err = errors.Wrapf(err, "error while clearing dag runs for job %s", jobName) + if err = w.scheduler.Clear(ctx, input.Project, treeNode.GetName(), startTime, endTime); err != nil { + err = errors.Wrapf(err, "error while clearing dag runs for job %s", treeNode.GetName()) logger.W(fmt.Sprintf("error while running replay %s: %s", input.ID.String(), err.Error())) - updateStatusErr := replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusFailed, models.ReplayMessage{ - Status: models.ReplayStatusFailed, + if updateStatusErr := replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusFailed, models.ReplayMessage{ + Type: AirflowClearDagRunFailed, Message: err.Error(), - }) - if updateStatusErr != nil { + }); updateStatusErr != nil { return updateStatusErr } return err } } - err = replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusSuccess, models.ReplayMessage{ - Status: models.ReplayStatusSuccess, - Message: MsgReplaySuccessfullyCompleted, - }) - if err != nil { + if err = replaySpecRepo.UpdateStatus(input.ID, models.ReplayStatusSuccess, models.ReplayMessage{}); err != nil { return err } logger.I(fmt.Sprintf("successfully completed replay id: %s", input.ID.String())) - bus.Post(EvtRecordInsertedInDB, input.ID) return nil } diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go index 3528f50d77..e9b0f33da1 100644 --- a/job/replay_worker_test.go +++ b/job/replay_worker_test.go @@ -30,7 +30,7 @@ func TestReplayWorker(t *testing.T) { Interval: "0 2 * * *", }, } - replayRequest := &models.ReplayRequestInput{ + replayRequest := &models.ReplayWorkerRequest{ ID: currUUID, Job: jobSpec, Start: startDate, @@ -48,11 +48,7 @@ func TestReplayWorker(t *testing.T) { replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) errMessage := "replay repo error" - message := models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: job.MsgReplayInProgress, - } - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(errors.New(errMessage)) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(errors.New(errMessage)) replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) defer replaySpecRepoFac.AssertExpectations(t) @@ -68,14 +64,10 @@ func TestReplayWorker(t *testing.T) { ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) - inProgressReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: job.MsgReplayInProgress, - } - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, inProgressReplayMessage).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(nil) errMessage := "error while clearing dag runs for job job-name: scheduler clear error" failedReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusFailed, + Type: job.AirflowClearDagRunFailed, Message: errMessage, } replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusFailed, 
failedReplayMessage).Return(nil) @@ -99,14 +91,10 @@ func TestReplayWorker(t *testing.T) { ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) - inProgressReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: job.MsgReplayInProgress, - } - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, inProgressReplayMessage).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(nil) errMessage := "error while clearing dag runs for job job-name: scheduler clear error" failedReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusFailed, + Type: job.AirflowClearDagRunFailed, Message: errMessage, } updateStatusErr := errors.New("error while updating status to failed") @@ -132,17 +120,9 @@ func TestReplayWorker(t *testing.T) { ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) - message := models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: job.MsgReplayInProgress, - } - successReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusSuccess, - Message: job.MsgReplaySuccessfullyCompleted, - } - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(nil) updateSuccessStatusErr := errors.New("error while updating replay request") - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, successReplayMessage).Return(updateSuccessStatusErr) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, models.ReplayMessage{}).Return(updateSuccessStatusErr) replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) defer replaySpecRepoFac.AssertExpectations(t) @@ -161,16 +141,8 @@ func TestReplayWorker(t *testing.T) { logger.Init(logger.ERROR) ctx := context.Background() replayRepository := new(mock.ReplayRepository) - message := models.ReplayMessage{ - Status: models.ReplayStatusInProgress, - Message: job.MsgReplayInProgress, - } - successReplayMessage := models.ReplayMessage{ - Status: models.ReplayStatusSuccess, - Message: job.MsgReplaySuccessfullyCompleted, - } - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, message).Return(nil) - replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, successReplayMessage).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(nil) + replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusSuccess, models.ReplayMessage{}).Return(nil) replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) defer replaySpecRepoFac.AssertExpectations(t) diff --git a/mock/job.go b/mock/job.go index 98d7c23725..f4ec37a790 100644 --- a/mock/job.go +++ b/mock/job.go @@ -196,12 +196,12 @@ func (j *JobService) Delete(ctx context.Context, c models.NamespaceSpec, job mod return args.Error(0) } -func (j *JobService) ReplayDryRun(replayRequest *models.ReplayRequestInput) (*tree.TreeNode, error) { +func (j *JobService) ReplayDryRun(replayRequest *models.ReplayWorkerRequest) (*tree.TreeNode, error) { args := j.Called(replayRequest) return args.Get(0).(*tree.TreeNode), args.Error(1) } -func (j *JobService) Replay(replayRequest *models.ReplayRequestInput) (string, error) { +func (j *JobService) 
Replay(replayRequest *models.ReplayWorkerRequest) (string, error) { args := j.Called(replayRequest) return args.Get(0).(string), args.Error(1) } diff --git a/mock/replay.go b/mock/replay.go index 4cb8372715..a9342f071c 100644 --- a/mock/replay.go +++ b/mock/replay.go @@ -38,7 +38,7 @@ type ReplayManager struct { mock.Mock } -func (rm *ReplayManager) Replay(reqInput *models.ReplayRequestInput) (string, error) { +func (rm *ReplayManager) Replay(reqInput *models.ReplayWorkerRequest) (string, error) { args := rm.Called(reqInput) return args.Get(0).(string), args.Error(1) } @@ -52,7 +52,7 @@ type ReplayWorker struct { mock.Mock } -func (rm *ReplayWorker) Process(ctx context.Context, replayRequest *models.ReplayRequestInput) error { +func (rm *ReplayWorker) Process(ctx context.Context, replayRequest *models.ReplayWorkerRequest) error { args := rm.Called(ctx, replayRequest) return args.Error(0) } diff --git a/models/job.go b/models/job.go index 9725f93d2f..e9220947c3 100644 --- a/models/job.go +++ b/models/job.go @@ -293,9 +293,9 @@ type JobService interface { // Dump returns the compiled Job Dump(NamespaceSpec, JobSpec) (Job, error) // ReplayDryRun returns the execution tree of jobSpec and its dependencies between start and endDate - ReplayDryRun(*ReplayRequestInput) (*tree.TreeNode, error) + ReplayDryRun(*ReplayWorkerRequest) (*tree.TreeNode, error) // Replay replays the jobSpec and its dependencies between start and endDate - Replay(*ReplayRequestInput) (string, error) + Replay(*ReplayWorkerRequest) (string, error) // KeepOnly deletes all jobs except the ones provided for a namespace KeepOnly(NamespaceSpec, []JobSpec, progress.Observer) error // GetAll reads all job specifications of the given namespace diff --git a/models/replay.go b/models/replay.go index 39dcb474b9..d229a1ec40 100644 --- a/models/replay.go +++ b/models/replay.go @@ -16,11 +16,11 @@ const ( ) type ReplayMessage struct { - Status string + Type string Message string } -type ReplayRequestInput struct { +type ReplayWorkerRequest struct { ID uuid.UUID Job JobSpec Start time.Time diff --git a/store/postgres/migrations/000010_create_replay_table.up.sql b/store/postgres/migrations/000010_create_replay_table.up.sql index 332d5728e1..220fe6f5a5 100644 --- a/store/postgres/migrations/000010_create_replay_table.up.sql +++ b/store/postgres/migrations/000010_create_replay_table.up.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS replay ( job_id UUID NOT NULL, start_date TIMESTAMP WITH TIME ZONE NOT NULL, end_date TIMESTAMP WITH TIME ZONE NOT NULL, - status varchar(20) NOT NULL, + status varchar(30) NOT NULL, message JSONB, created_at TIMESTAMP WITH TIME ZONE NOT NULL, updated_at TIMESTAMP WITH TIME ZONE NOT NULL diff --git a/store/postgres/replay_repository.go b/store/postgres/replay_repository.go index 418ce6b545..0d7669bb07 100644 --- a/store/postgres/replay_repository.go +++ b/store/postgres/replay_repository.go @@ -99,10 +99,6 @@ func (repo *replayRepository) UpdateStatus(replayID uuid.UUID, status string, me return err } r.Status = status - r.UpdatedAt = time.Now() r.Message = jsonBytes - if err := repo.DB.Save(&r).Error; err != nil { - return err - } - return nil + return repo.DB.Save(&r).Error } diff --git a/store/postgres/replay_repository_test.go b/store/postgres/replay_repository_test.go index f4729dff4c..84ef2dd474 100644 --- a/store/postgres/replay_repository_test.go +++ b/store/postgres/replay_repository_test.go @@ -84,7 +84,7 @@ func TestReplayRepository(t *testing.T) { errMessage := "failed to execute" replayMessage := 
models.ReplayMessage{ - Status: models.ReplayStatusFailed, + Type: "test failure", Message: errMessage, } err = repo.UpdateStatus(uuid, models.ReplayStatusFailed, replayMessage) diff --git a/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json b/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json index 581c8a6329..20a6b82e45 100644 --- a/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json +++ b/third_party/OpenAPI/odpf/optimus/runtime_service.swagger.json @@ -230,7 +230,7 @@ } }, "/api/v1/project/{projectName}/job/{jobName}/replay": { - "get": { + "post": { "operationId": "RuntimeService_Replay", "responses": { "200": { @@ -258,24 +258,6 @@ "in": "path", "required": true, "type": "string" - }, - { - "name": "namespace", - "in": "query", - "required": false, - "type": "string" - }, - { - "name": "startDate", - "in": "query", - "required": false, - "type": "string" - }, - { - "name": "endDate", - "in": "query", - "required": false, - "type": "string" } ], "tags": [ From 1fdcf6a5fbac312bedb46ad4062d13e85d3d2332 Mon Sep 17 00:00:00 2001 From: Maulik Soneji Date: Wed, 30 Jun 2021 13:32:09 +0530 Subject: [PATCH 5/6] fix: incorporate feedback changes --- cmd/server/server.go | 8 ++++--- config/config.go | 39 ++++++++++++++++--------------- config/loader.go | 20 +++++++++------- core/tree/multi_root_tree_test.go | 5 ++-- job/priority_resolver.go | 8 +++---- job/priority_resolver_test.go | 7 +++--- job/replay.go | 16 ++++++------- job/replay_manager.go | 19 +++++++-------- job/replay_manager_test.go | 9 ++++--- job/replay_test.go | 9 ++++--- job/replay_worker_test.go | 9 ++++--- job/service_test.go | 7 +++--- models/replay.go | 2 +- store/gcs/job_repository_test.go | 8 +++---- utils/convert_test.go | 3 +-- 15 files changed, 82 insertions(+), 87 deletions(-) diff --git a/cmd/server/server.go b/cmd/server/server.go index 295322bd6b..3930a997f4 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -237,7 +237,9 @@ func checkRequiredConfigs(conf config.Provider) error { if conf.GetServe().IngressHost == "" { return errors.Wrap(errRequiredMissing, "serve.ingress_host") } - + if conf.GetServe().ReplayNumWorkers < 1 { + return errors.New(fmt.Sprintf("%s should be greater than 0", config.KeyServeReplayNumWorkers)) + } if conf.GetServe().DB.DSN == "" { return errors.Wrap(errRequiredMissing, "serve.db.dsn") } @@ -397,8 +399,8 @@ func Initialize(conf config.Provider) error { } replayWorker := job.NewReplayWorker(replaySpecRepoFac, models.Scheduler) replayManager := job.NewManager(replayWorker, replaySpecRepoFac, utils.NewUUIDProvider(), job.ReplayManagerConfig{ - QueueSize: conf.GetServe().ReplayJobQueueSize, - WorkerTimeout: conf.GetServe().ReplayWorkerTimeout, + NumWorkers: conf.GetServe().ReplayNumWorkers, + WorkerTimeout: conf.GetServe().ReplayWorkerTimeoutMillis, }) // runtime service instance over grpc diff --git a/config/config.go b/config/config.go index aabc0dd9dd..b8f9315c71 100644 --- a/config/config.go +++ b/config/config.go @@ -2,6 +2,7 @@ package config import ( "strings" + "time" "github.com/knadh/koanf" ) @@ -21,19 +22,19 @@ var ( KeyLogLevel = "log.level" KeyLogFormat = "log.format" - KeyServeHost = "serve.host" - KeyServePort = "serve.port" - KeyServeAppKey = "serve.app_key" - KeyServeIngressHost = "serve.ingress_host" - KeyServeDBDSN = "serve.db.dsn" - KeyServeDBMaxIdleConnection = "serve.db.max_idle_connection" - KeyServeDBMaxOpenConnection = "serve.db.max_open_connection" - KeyServeMetadataWriterBatchSize = 
"serve.metadata.writer_batch_size" - KeyServeMetadataKafkaBrokers = "serve.metadata.kafka_brokers" - KeyServeMetadataKafkaJobTopic = "serve.metadata.kafka_job_topic" - KeyServeMetadataKafkaBatchSize = "serve.metadata.kafka_batch_size" - KeyServeReplayJobQueueSize = "serve.replay_job_queue_size" - KeyServeReplayWorkerTimeout = "serve.replay_worker_timeout" + KeyServeHost = "serve.host" + KeyServePort = "serve.port" + KeyServeAppKey = "serve.app_key" + KeyServeIngressHost = "serve.ingress_host" + KeyServeDBDSN = "serve.db.dsn" + KeyServeDBMaxIdleConnection = "serve.db.max_idle_connection" + KeyServeDBMaxOpenConnection = "serve.db.max_open_connection" + KeyServeMetadataWriterBatchSize = "serve.metadata.writer_batch_size" + KeyServeMetadataKafkaBrokers = "serve.metadata.kafka_brokers" + KeyServeMetadataKafkaJobTopic = "serve.metadata.kafka_job_topic" + KeyServeMetadataKafkaBatchSize = "serve.metadata.kafka_batch_size" + KeyServeReplayNumWorkers = "serve.replay_num_workers" + KeyServeReplayWorkerTimeoutMillis = "serve.replay_worker_timeout_millis" KeySchedulerName = "scheduler.name" @@ -98,10 +99,10 @@ type ServerConfig struct { // random 32 character hash used for encrypting secrets AppKey string `yaml:"app_key"` - DB DBConfig `yaml:"db"` - Metadata MetadataConfig `yaml:"metadata"` - ReplayJobQueueSize int `yaml:"replay_job_queue_size"` - ReplayWorkerTimeout int `yaml:"replay_worker_timeout"` + DB DBConfig `yaml:"db"` + Metadata MetadataConfig `yaml:"metadata"` + ReplayNumWorkers int `yaml:"replay_num_workers"` + ReplayWorkerTimeoutMillis time.Duration `yaml:"replay_worker_timeout_millis"` } type DBConfig struct { @@ -189,8 +190,8 @@ func (o Optimus) GetServe() ServerConfig { KafkaBrokers: o.eKs(KeyServeMetadataKafkaBrokers), KafkaBatchSize: o.eKi(KeyServeMetadataKafkaBatchSize), }, - ReplayJobQueueSize: o.k.Int(KeyServeReplayJobQueueSize), - ReplayWorkerTimeout: o.k.Int(KeyServeReplayWorkerTimeout), + ReplayNumWorkers: o.k.Int(KeyServeReplayNumWorkers), + ReplayWorkerTimeoutMillis: time.Millisecond * time.Duration(o.k.Int(KeyServeReplayWorkerTimeoutMillis)), } } diff --git a/config/loader.go b/config/loader.go index 1c59cf1e3d..42c5c0f3dd 100644 --- a/config/loader.go +++ b/config/loader.go @@ -50,15 +50,17 @@ func InitOptimus() (*Optimus, error) { // load defaults if err := configuration.k.Load(confmap.Provider(map[string]interface{}{ - KeyLogLevel: "info", - KeyServePort: 9100, - KeyServeHost: "0.0.0.0", - KeyServeDBMaxOpenConnection: 10, - KeyServeDBMaxIdleConnection: 5, - KeyServeMetadataKafkaJobTopic: "resource_optimus_job_log", - KeyServeMetadataKafkaBatchSize: 50, - KeyServeMetadataWriterBatchSize: 50, - KeySchedulerName: "airflow2", + KeyLogLevel: "info", + KeyServePort: 9100, + KeyServeHost: "0.0.0.0", + KeyServeDBMaxOpenConnection: 10, + KeyServeDBMaxIdleConnection: 5, + KeyServeMetadataKafkaJobTopic: "resource_optimus_job_log", + KeyServeMetadataKafkaBatchSize: 50, + KeyServeMetadataWriterBatchSize: 50, + KeySchedulerName: "airflow2", + KeyServeReplayNumWorkers: 1, + KeyServeReplayWorkerTimeoutMillis: 1000, }, "."), nil); err != nil { return nil, errors.Wrap(err, "k.Load: error loading config defaults") } diff --git a/core/tree/multi_root_tree_test.go b/core/tree/multi_root_tree_test.go index fb43fd7bda..f56a1f7e94 100644 --- a/core/tree/multi_root_tree_test.go +++ b/core/tree/multi_root_tree_test.go @@ -1,7 +1,6 @@ package tree_test import ( - "strings" "testing" "github.com/odpf/optimus/core/tree" @@ -25,7 +24,7 @@ func TestMultiRootDagTree(t *testing.T) { err := 
multiRootTree.IsCyclic() assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), tree.ErrCyclicDependencyEncountered.Error())) + assert.Contains(t, err.Error(), tree.ErrCyclicDependencyEncountered.Error()) }) t.Run("MarkRoot", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ @@ -53,7 +52,7 @@ func TestMultiRootDagTree(t *testing.T) { treeNode2.AddDependent(treeNode1) err := multiRootTree.IsCyclic() assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "cycle dependency")) + assert.Contains(t, err.Error(), "cycle dependency") }) t.Run("should not return error if not cyclic", func(t *testing.T) { treeNode1 := tree.NewTreeNode(models.JobSpec{ diff --git a/job/priority_resolver.go b/job/priority_resolver.go index 6803616f16..30ac99b1c7 100644 --- a/job/priority_resolver.go +++ b/job/priority_resolver.go @@ -100,19 +100,19 @@ func (a *priorityResolver) assignWeight(rootNodes []*tree.TreeNode, weight int, // based on the dependencies of each DAG. func (a *priorityResolver) buildMultiRootDependencyTree(jobSpecs []models.JobSpec) (*tree.MultiRootTree, error) { // creates map[jobName]jobSpec for faster retrieval - dagSpecMap := make(map[string]models.JobSpec) + jobSpecMap := make(map[string]models.JobSpec) for _, dagSpec := range jobSpecs { - dagSpecMap[dagSpec.Name] = dagSpec + jobSpecMap[dagSpec.Name] = dagSpec } // build a multi root tree and assign dependencies // ignore any other dependency apart from intra-tenant tree := tree.NewMultiRootTree() - for _, childSpec := range dagSpecMap { + for _, childSpec := range jobSpecMap { childNode := a.findOrCreateDAGNode(tree, childSpec) for _, depDAG := range childSpec.Dependencies { var isExternal = false - parentSpec, ok := dagSpecMap[depDAG.Job.Name] + parentSpec, ok := jobSpecMap[depDAG.Job.Name] if !ok { if depDAG.Type == models.JobSpecDependencyTypeIntra { return nil, errors.Wrap(ErrJobSpecNotFound, depDAG.Job.Name) diff --git a/job/priority_resolver_test.go b/job/priority_resolver_test.go index 8222e15248..799c2ac48d 100644 --- a/job/priority_resolver_test.go +++ b/job/priority_resolver_test.go @@ -1,7 +1,6 @@ package job_test import ( - "strings" "testing" "github.com/odpf/optimus/core/tree" @@ -336,7 +335,7 @@ func TestPriorityWeightResolver(t *testing.T) { assginer := job.NewPriorityResolver() _, err := assginer.Resolve(dagSpec) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), tree.ErrCyclicDependencyEncountered.Error())) + assert.Contains(t, err.Error(), tree.ErrCyclicDependencyEncountered.Error()) }) t.Run("Resolve should assign correct weights (maxWeight) with no dependencies", func(t *testing.T) { @@ -524,7 +523,7 @@ func TestMultiRootDAGTree(t *testing.T) { err := dagTree.IsCyclic() assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), tree.ErrCyclicDependencyEncountered.Error())) + assert.Contains(t, err.Error(), tree.ErrCyclicDependencyEncountered.Error()) }) t.Run("should create tree with multi level dependencies", func(t *testing.T) { @@ -653,6 +652,6 @@ func TestMultiRootDAGTree(t *testing.T) { err := dagTree.IsCyclic() assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), tree.ErrCyclicDependencyEncountered.Error())) + assert.Contains(t, err.Error(), tree.ErrCyclicDependencyEncountered.Error()) }) } diff --git a/job/replay.go b/job/replay.go index 3e36d723dc..b1ecee303d 100644 --- a/job/replay.go +++ b/job/replay.go @@ -21,11 +21,11 @@ func (srv *Service) populateRequestWithJobSpecs(replayRequest *models.ReplayWork if err 
!= nil { return err } - dagSpecMap := make(map[string]models.JobSpec) + jobSpecMap := make(map[string]models.JobSpec) for _, currSpec := range jobSpecs { - dagSpecMap[currSpec.Name] = currSpec + jobSpecMap[currSpec.Name] = currSpec } - replayRequest.DagSpecMap = dagSpecMap + replayRequest.JobSpecMap = jobSpecMap return nil } @@ -56,7 +56,7 @@ func (srv *Service) Replay(replayRequest *models.ReplayWorkerRequest) (string, e // prepareTree creates an execution tree for replay operation func prepareTree(replayRequest *models.ReplayWorkerRequest) (*tree.TreeNode, error) { - replayJobSpec, found := replayRequest.DagSpecMap[replayRequest.Job.Name] + replayJobSpec, found := replayRequest.JobSpecMap[replayRequest.Job.Name] if !found { return nil, fmt.Errorf("couldn't find any job with name %s", replayRequest.Job.Name) } @@ -73,7 +73,7 @@ func prepareTree(replayRequest *models.ReplayWorkerRequest) (*tree.TreeNode, err } dagTree.AddNode(parentNode) - rootInstance, err := populateDownstreamDAGs(dagTree, replayJobSpec, replayRequest.DagSpecMap) + rootInstance, err := populateDownstreamDAGs(dagTree, replayJobSpec, replayRequest.JobSpecMap) if err != nil { return nil, err } @@ -95,12 +95,12 @@ func findOrCreateDAGNode(dagTree *tree.MultiRootTree, dagSpec models.JobSpec) *t return node } -func populateDownstreamDAGs(dagTree *tree.MultiRootTree, jobSpec models.JobSpec, dagSpecMap map[string]models.JobSpec) (*tree.TreeNode, error) { - for _, childSpec := range dagSpecMap { +func populateDownstreamDAGs(dagTree *tree.MultiRootTree, jobSpec models.JobSpec, jobSpecMap map[string]models.JobSpec) (*tree.TreeNode, error) { + for _, childSpec := range jobSpecMap { childNode := findOrCreateDAGNode(dagTree, childSpec) for _, depDAG := range childSpec.Dependencies { var isExternal = false - parentSpec, ok := dagSpecMap[depDAG.Job.Name] + parentSpec, ok := jobSpecMap[depDAG.Job.Name] if !ok { if depDAG.Type == models.JobSpecDependencyTypeIntra { return nil, errors.Wrap(ErrJobSpecNotFound, depDAG.Job.Name) diff --git a/job/replay_manager.go b/job/replay_manager.go index 5867197367..f03bbb93ec 100644 --- a/job/replay_manager.go +++ b/job/replay_manager.go @@ -20,8 +20,8 @@ var ( ) type ReplayManagerConfig struct { - QueueSize int - WorkerTimeout int + NumWorkers int + WorkerTimeout time.Duration } type ReplayManager interface { @@ -31,10 +31,8 @@ type ReplayManager interface { // Manager for replaying operation(s). // Offers an asynchronous interface to pipeline, with a fixed size request queue -// Only one replay happens at one time, any other request is queued, and executed -// when any in-progress operation is complete. -// The zero value of a Manager is an invalid Manager. Use `NewManager` constructor for -// creating a manager. +// Each replay request is handled by a replay worker and the number of parallel replay workers +// can be provided through configuration. 
type Manager struct { // wait group to synchronise on workers wg sync.WaitGroup @@ -95,12 +93,10 @@ func (m *Manager) Replay(reqInput *models.ReplayWorkerRequest) (string, error) { // start a worker goroutine that runs the deployment pipeline in background func (m *Manager) spawnServiceWorker() { defer m.wg.Done() - m.wg.Add(1) for reqInput := range m.requestQ { logger.I("worker picked up the request for ", reqInput.Job.Name) - ctx, cancelCtx := context.WithTimeout(context.Background(), time.Millisecond*time.Duration(m.config.WorkerTimeout)) - + ctx, cancelCtx := context.WithTimeout(context.Background(), m.config.WorkerTimeout) if err := m.replayWorker.Process(ctx, reqInput); err != nil { //do something about this error logger.E(errors.Wrap(err, "worker failed to process")) @@ -125,7 +121,8 @@ func (m *Manager) Close() error { func (m *Manager) Init() { logger.I("starting replay workers") - for i := 0; i < m.config.QueueSize; i++ { + for i := 0; i < m.config.NumWorkers; i++ { + m.wg.Add(1) go m.spawnServiceWorker() } } @@ -136,7 +133,7 @@ func NewManager(worker ReplayWorker, replaySpecRepoFac ReplaySpecRepoFactory, uu replayWorker: worker, requestMap: make(map[uuid.UUID]bool), config: config, - requestQ: make(chan *models.ReplayWorkerRequest, config.QueueSize), + requestQ: make(chan *models.ReplayWorkerRequest, 0), replaySpecRepoFac: replaySpecRepoFac, uuidProvider: uuidProvider, } diff --git a/job/replay_manager_test.go b/job/replay_manager_test.go index c590f891be..483d05cdb2 100644 --- a/job/replay_manager_test.go +++ b/job/replay_manager_test.go @@ -1,7 +1,6 @@ package job_test import ( - "strings" "testing" "time" @@ -16,7 +15,7 @@ import ( func TestReplayManager(t *testing.T) { replayManagerConfig := job.ReplayManagerConfig{ - QueueSize: 5, + NumWorkers: 5, WorkerTimeout: 1000, } t.Run("Close", func(t *testing.T) { @@ -43,7 +42,7 @@ func TestReplayManager(t *testing.T) { Project: models.ProjectSpec{ Name: "project-name", }, - DagSpecMap: map[string]models.JobSpec{ + JobSpecMap: map[string]models.JobSpec{ "job-name": jobSpec, }, } @@ -58,7 +57,7 @@ func TestReplayManager(t *testing.T) { replayManager := job.NewManager(nil, nil, uuidProvider, replayManagerConfig) _, err := replayManager.Replay(replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), errMessage)) + assert.Contains(t, err.Error(), errMessage) }) t.Run("should throw an error if replay repo throws error", func(t *testing.T) { logger.Init(logger.ERROR) @@ -86,7 +85,7 @@ func TestReplayManager(t *testing.T) { replayManager := job.NewManager(nil, replaySpecRepoFac, uuidProvider, replayManagerConfig) _, err := replayManager.Replay(replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), errMessage)) + assert.Contains(t, err.Error(), errMessage) }) }) } diff --git a/job/replay_test.go b/job/replay_test.go index f954d2fdc2..0eb031486c 100644 --- a/job/replay_test.go +++ b/job/replay_test.go @@ -1,7 +1,6 @@ package job_test import ( - "strings" "testing" "time" @@ -193,7 +192,7 @@ func TestReplay(t *testing.T) { _, err := jobSvc.ReplayDryRun(replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "a cycle dependency encountered in the tree")) + assert.Contains(t, err.Error(), "a cycle dependency encountered in the tree") }) t.Run("resolve create replay tree for a dag with three day task window and mentioned dependencies", func(t *testing.T) { @@ -353,7 +352,7 @@ func TestReplay(t *testing.T) { Start: replayStart, End: replayEnd, Project: projSpec, - 
DagSpecMap: specs, + JobSpecMap: specs, } errMessage := "error with replay manager" @@ -365,7 +364,7 @@ func TestReplay(t *testing.T) { _, err := jobSvc.Replay(replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), errMessage)) + assert.Contains(t, err.Error(), errMessage) }) t.Run("should succeed if replay manager successfully processes request", func(t *testing.T) { @@ -393,7 +392,7 @@ func TestReplay(t *testing.T) { Start: replayStart, End: replayEnd, Project: projSpec, - DagSpecMap: specs, + JobSpecMap: specs, } replayManager := new(mock.ReplayManager) diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go index e9b0f33da1..f8b252c37a 100644 --- a/job/replay_worker_test.go +++ b/job/replay_worker_test.go @@ -2,7 +2,6 @@ package job_test import ( "context" - "strings" "testing" "time" @@ -38,7 +37,7 @@ func TestReplayWorker(t *testing.T) { Project: models.ProjectSpec{ Name: "project-name", }, - DagSpecMap: map[string]models.JobSpec{ + JobSpecMap: map[string]models.JobSpec{ "job-name": jobSpec, }, } @@ -84,7 +83,7 @@ func TestReplayWorker(t *testing.T) { worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) err := worker.Process(ctx, replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), errorMessage)) + assert.Contains(t, err.Error(), errorMessage) }) t.Run("should throw an error when updatestatus throws an error for failed request", func(t *testing.T) { logger.Init(logger.ERROR) @@ -112,7 +111,7 @@ func TestReplayWorker(t *testing.T) { worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) err := worker.Process(ctx, replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), updateStatusErr.Error())) + assert.Contains(t, err.Error(), updateStatusErr.Error()) }) t.Run("should throw an error when updatestatus throws an error for successful request", func(t *testing.T) { @@ -135,7 +134,7 @@ func TestReplayWorker(t *testing.T) { worker := job.NewReplayWorker(replaySpecRepoFac, scheduler) err := worker.Process(ctx, replayRequest) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), updateSuccessStatusErr.Error())) + assert.Contains(t, err.Error(), updateSuccessStatusErr.Error()) }) t.Run("should update replay status if successful", func(t *testing.T) { logger.Init(logger.ERROR) diff --git a/job/service_test.go b/job/service_test.go index 67e02dc0f3..90c76a244d 100644 --- a/job/service_test.go +++ b/job/service_test.go @@ -2,7 +2,6 @@ package job_test import ( "context" - "strings" "testing" "time" @@ -362,9 +361,9 @@ func TestService(t *testing.T) { svc := job.NewService(jobSpecRepoFac, nil, nil, dumpAssets, depenResolver, nil, nil, projJobSpecRepoFac, nil) err := svc.Sync(ctx, namespaceSpec, nil) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "2 errors occurred")) - assert.True(t, strings.Contains(err.Error(), "error test")) - assert.True(t, strings.Contains(err.Error(), "error test-2")) + assert.Contains(t, err.Error(), "2 errors occurred") + assert.Contains(t, err.Error(), "error test") + assert.Contains(t, err.Error(), "error test-2") }) t.Run("should successfully publish metadata for all job specs", func(t *testing.T) { diff --git a/models/replay.go b/models/replay.go index d229a1ec40..6f68f2ad4f 100644 --- a/models/replay.go +++ b/models/replay.go @@ -26,7 +26,7 @@ type ReplayWorkerRequest struct { Start time.Time End time.Time Project ProjectSpec - DagSpecMap map[string]JobSpec + JobSpecMap map[string]JobSpec } type ReplaySpec struct { diff 
--git a/store/gcs/job_repository_test.go b/store/gcs/job_repository_test.go index cdf7b8e409..0ba5045486 100644 --- a/store/gcs/job_repository_test.go +++ b/store/gcs/job_repository_test.go @@ -197,8 +197,8 @@ func TestJobRepository(t *testing.T) { Prefix: prefix, } err := repo.Delete(ctx, namespaceSpec, jobName) - - assert.Error(t, models.ErrNoSuchJob, err) + assert.Error(t, err) + assert.Contains(t, err.Error(), models.ErrNoSuchJob.Error()) }) t.Run("should return err when unable to get the object info", func(t *testing.T) { namespaceSpec := models.NamespaceSpec{ @@ -366,8 +366,8 @@ func TestJobRepository(t *testing.T) { Suffix: suffix, } _, err := repo.GetByName(ctx, nonExistentDAGName) - - assert.Error(t, models.ErrNoSuchJob, err) + assert.Error(t, err) + assert.Contains(t, err.Error(), models.ErrNoSuchJob.Error()) }) t.Run("should return error when failed to get the bucket", func(t *testing.T) { expected := errors.New("failed to get bucket attrs") diff --git a/utils/convert_test.go b/utils/convert_test.go index 5fff17538c..b9be4ec99d 100644 --- a/utils/convert_test.go +++ b/utils/convert_test.go @@ -1,7 +1,6 @@ package utils_test import ( - "strings" "testing" "github.com/AlecAivazis/survey/v2" @@ -31,6 +30,6 @@ func TestConvert(t *testing.T) { } _, err := utils.ConvertToStringMap(inputs) assert.NotNil(t, err) - assert.True(t, strings.Contains(err.Error(), "unknown type found while parsing user inputs")) + assert.Contains(t, err.Error(), "unknown type found while parsing user inputs") }) } From bc9adc2c716b0de3739613c7617e572623de2053 Mon Sep 17 00:00:00 2001 From: Maulik Soneji Date: Wed, 30 Jun 2021 15:28:43 +0530 Subject: [PATCH 6/6] fix: incorporate feedback changes --- cmd/server/server.go | 2 +- config/config.go | 38 +++++++++++++++++++------------------- config/loader.go | 22 +++++++++++----------- job/replay_manager_test.go | 8 ++++---- job/replay_worker_test.go | 6 ++---- 5 files changed, 37 insertions(+), 39 deletions(-) diff --git a/cmd/server/server.go b/cmd/server/server.go index 3930a997f4..0f4553097b 100644 --- a/cmd/server/server.go +++ b/cmd/server/server.go @@ -400,7 +400,7 @@ func Initialize(conf config.Provider) error { replayWorker := job.NewReplayWorker(replaySpecRepoFac, models.Scheduler) replayManager := job.NewManager(replayWorker, replaySpecRepoFac, utils.NewUUIDProvider(), job.ReplayManagerConfig{ NumWorkers: conf.GetServe().ReplayNumWorkers, - WorkerTimeout: conf.GetServe().ReplayWorkerTimeoutMillis, + WorkerTimeout: conf.GetServe().ReplayWorkerTimeoutSecs, }) // runtime service instance over grpc diff --git a/config/config.go b/config/config.go index b8f9315c71..b364a35e4e 100644 --- a/config/config.go +++ b/config/config.go @@ -22,19 +22,19 @@ var ( KeyLogLevel = "log.level" KeyLogFormat = "log.format" - KeyServeHost = "serve.host" - KeyServePort = "serve.port" - KeyServeAppKey = "serve.app_key" - KeyServeIngressHost = "serve.ingress_host" - KeyServeDBDSN = "serve.db.dsn" - KeyServeDBMaxIdleConnection = "serve.db.max_idle_connection" - KeyServeDBMaxOpenConnection = "serve.db.max_open_connection" - KeyServeMetadataWriterBatchSize = "serve.metadata.writer_batch_size" - KeyServeMetadataKafkaBrokers = "serve.metadata.kafka_brokers" - KeyServeMetadataKafkaJobTopic = "serve.metadata.kafka_job_topic" - KeyServeMetadataKafkaBatchSize = "serve.metadata.kafka_batch_size" - KeyServeReplayNumWorkers = "serve.replay_num_workers" - KeyServeReplayWorkerTimeoutMillis = "serve.replay_worker_timeout_millis" + KeyServeHost = "serve.host" + KeyServePort = 
"serve.port" + KeyServeAppKey = "serve.app_key" + KeyServeIngressHost = "serve.ingress_host" + KeyServeDBDSN = "serve.db.dsn" + KeyServeDBMaxIdleConnection = "serve.db.max_idle_connection" + KeyServeDBMaxOpenConnection = "serve.db.max_open_connection" + KeyServeMetadataWriterBatchSize = "serve.metadata.writer_batch_size" + KeyServeMetadataKafkaBrokers = "serve.metadata.kafka_brokers" + KeyServeMetadataKafkaJobTopic = "serve.metadata.kafka_job_topic" + KeyServeMetadataKafkaBatchSize = "serve.metadata.kafka_batch_size" + KeyServeReplayNumWorkers = "serve.replay_num_workers" + KeyServeReplayWorkerTimeoutSecs = "serve.replay_worker_timeout_secs" KeySchedulerName = "scheduler.name" @@ -99,10 +99,10 @@ type ServerConfig struct { // random 32 character hash used for encrypting secrets AppKey string `yaml:"app_key"` - DB DBConfig `yaml:"db"` - Metadata MetadataConfig `yaml:"metadata"` - ReplayNumWorkers int `yaml:"replay_num_workers"` - ReplayWorkerTimeoutMillis time.Duration `yaml:"replay_worker_timeout_millis"` + DB DBConfig `yaml:"db"` + Metadata MetadataConfig `yaml:"metadata"` + ReplayNumWorkers int `yaml:"replay_num_workers"` + ReplayWorkerTimeoutSecs time.Duration `yaml:"replay_worker_timeout_secs"` } type DBConfig struct { @@ -190,8 +190,8 @@ func (o Optimus) GetServe() ServerConfig { KafkaBrokers: o.eKs(KeyServeMetadataKafkaBrokers), KafkaBatchSize: o.eKi(KeyServeMetadataKafkaBatchSize), }, - ReplayNumWorkers: o.k.Int(KeyServeReplayNumWorkers), - ReplayWorkerTimeoutMillis: time.Millisecond * time.Duration(o.k.Int(KeyServeReplayWorkerTimeoutMillis)), + ReplayNumWorkers: o.k.Int(KeyServeReplayNumWorkers), + ReplayWorkerTimeoutSecs: time.Second * time.Duration(o.k.Int(KeyServeReplayWorkerTimeoutSecs)), } } diff --git a/config/loader.go b/config/loader.go index 42c5c0f3dd..349515c8ce 100644 --- a/config/loader.go +++ b/config/loader.go @@ -50,17 +50,17 @@ func InitOptimus() (*Optimus, error) { // load defaults if err := configuration.k.Load(confmap.Provider(map[string]interface{}{ - KeyLogLevel: "info", - KeyServePort: 9100, - KeyServeHost: "0.0.0.0", - KeyServeDBMaxOpenConnection: 10, - KeyServeDBMaxIdleConnection: 5, - KeyServeMetadataKafkaJobTopic: "resource_optimus_job_log", - KeyServeMetadataKafkaBatchSize: 50, - KeyServeMetadataWriterBatchSize: 50, - KeySchedulerName: "airflow2", - KeyServeReplayNumWorkers: 1, - KeyServeReplayWorkerTimeoutMillis: 1000, + KeyLogLevel: "info", + KeyServePort: 9100, + KeyServeHost: "0.0.0.0", + KeyServeDBMaxOpenConnection: 10, + KeyServeDBMaxIdleConnection: 5, + KeyServeMetadataKafkaJobTopic: "resource_optimus_job_log", + KeyServeMetadataKafkaBatchSize: 50, + KeyServeMetadataWriterBatchSize: 50, + KeySchedulerName: "airflow2", + KeyServeReplayNumWorkers: 1, + KeyServeReplayWorkerTimeoutSecs: 120, }, "."), nil); err != nil { return nil, errors.Wrap(err, "k.Load: error loading config defaults") } diff --git a/job/replay_manager_test.go b/job/replay_manager_test.go index 483d05cdb2..1eadd412d2 100644 --- a/job/replay_manager_test.go +++ b/job/replay_manager_test.go @@ -1,11 +1,13 @@ package job_test import ( + "io/ioutil" "testing" "time" - "github.com/google/uuid" "github.com/odpf/optimus/core/logger" + + "github.com/google/uuid" "github.com/odpf/optimus/job" "github.com/odpf/optimus/mock" "github.com/odpf/optimus/models" @@ -14,12 +16,12 @@ import ( ) func TestReplayManager(t *testing.T) { + logger.InitWithWriter(logger.DEBUG, ioutil.Discard) 
replayManagerConfig := job.ReplayManagerConfig{ NumWorkers: 5, WorkerTimeout: 1000, } t.Run("Close", func(t *testing.T) { - logger.Init(logger.ERROR) manager := job.NewManager(nil, nil, nil, replayManagerConfig) err := manager.Close() assert.Nil(t, err) @@ -47,7 +49,6 @@ func TestReplayManager(t *testing.T) { }, } t.Run("should throw error if uuid provider returns failure", func(t *testing.T) { - logger.Init(logger.ERROR) uuidProvider := new(mock.UUIDProvider) defer uuidProvider.AssertExpectations(t) objUUID := uuid.Must(uuid.NewRandom()) @@ -60,7 +61,6 @@ func TestReplayManager(t *testing.T) { assert.Contains(t, err.Error(), errMessage) }) t.Run("should throw an error if replay repo throws error", func(t *testing.T) { - logger.Init(logger.ERROR) replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) replaySpecRepoFac := new(mock.ReplaySpecRepoFactory) diff --git a/job/replay_worker_test.go b/job/replay_worker_test.go index f8b252c37a..c466dbe90c 100644 --- a/job/replay_worker_test.go +++ b/job/replay_worker_test.go @@ -2,6 +2,7 @@ package job_test import ( "context" + "io/ioutil" "testing" "time" @@ -16,6 +17,7 @@ import ( ) func TestReplayWorker(t *testing.T) { + logger.InitWithWriter(logger.DEBUG, ioutil.Discard) dagStartTime, _ := time.Parse(job.ReplayDateFormat, "2020-04-05") startDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-22") endDate, _ := time.Parse(job.ReplayDateFormat, "2020-08-26") @@ -59,7 +61,6 @@ func TestReplayWorker(t *testing.T) { assert.Equal(t, errMessage, err.Error()) }) t.Run("should throw an error when scheduler throws an error", func(t *testing.T) { - logger.Init(logger.ERROR) ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) @@ -86,7 +87,6 @@ func TestReplayWorker(t *testing.T) { assert.Contains(t, err.Error(), errorMessage) }) t.Run("should throw an error when updatestatus throws an error for failed request", func(t *testing.T) { - logger.Init(logger.ERROR) ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) @@ -115,7 +115,6 @@ func TestReplayWorker(t *testing.T) { }) t.Run("should throw an error when updatestatus throws an error for successful request", func(t *testing.T) { - logger.Init(logger.ERROR) ctx := context.Background() replayRepository := new(mock.ReplayRepository) defer replayRepository.AssertExpectations(t) @@ -137,7 +136,6 @@ func TestReplayWorker(t *testing.T) { assert.Contains(t, err.Error(), updateSuccessStatusErr.Error()) }) t.Run("should update replay status if successful", func(t *testing.T) { - logger.Init(logger.ERROR) ctx := context.Background() replayRepository := new(mock.ReplayRepository) replayRepository.On("UpdateStatus", currUUID, models.ReplayStatusInProgress, models.ReplayMessage{}).Return(nil)